Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c  5
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.c  1
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx.c  46
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_40xx.c  9
-rw-r--r--  drivers/accel/ivpu/ivpu_job.c  4
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu.c  36
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.c  39
-rw-r--r--  drivers/acpi/apei/ghes.c  89
-rw-r--r--  drivers/acpi/ec.c  112
-rw-r--r--  drivers/ata/Kconfig  5
-rw-r--r--  drivers/ata/ahci.c  448
-rw-r--r--  drivers/ata/ahci.h  10
-rw-r--r--  drivers/ata/ahci_ceva.c  125
-rw-r--r--  drivers/ata/libahci.c  21
-rw-r--r--  drivers/ata/libata-core.c  59
-rw-r--r--  drivers/ata/pata_parport/pata_parport.c  2
-rw-r--r--  drivers/atm/fore200e.c  6
-rw-r--r--  drivers/atm/idt77252.c  2
-rw-r--r--  drivers/base/arch_topology.c  13
-rw-r--r--  drivers/base/base.h  2
-rw-r--r--  drivers/base/core.c  26
-rw-r--r--  drivers/base/cpu.c  3
-rw-r--r--  drivers/base/platform-msi.c  119
-rw-r--r--  drivers/base/regmap/internal.h  1
-rw-r--r--  drivers/base/regmap/regcache-flat.c  2
-rw-r--r--  drivers/base/regmap/regcache.c  4
-rw-r--r--  drivers/base/regmap/regmap-kunit.c  123
-rw-r--r--  drivers/base/regmap/regmap.c  10
-rw-r--r--  drivers/bcma/main.c  2
-rw-r--r--  drivers/block/amiflop.c  2
-rw-r--r--  drivers/block/aoe/aoeblk.c  15
-rw-r--r--  drivers/block/aoe/aoecmd.c  12
-rw-r--r--  drivers/block/aoe/aoenet.c  1
-rw-r--r--  drivers/block/ataflop.c  2
-rw-r--r--  drivers/block/brd.c  26
-rw-r--r--  drivers/block/drbd/drbd_int.h  4
-rw-r--r--  drivers/block/drbd/drbd_main.c  17
-rw-r--r--  drivers/block/drbd/drbd_nl.c  268
-rw-r--r--  drivers/block/drbd/drbd_state.c  24
-rw-r--r--  drivers/block/drbd/drbd_state_change.h  8
-rw-r--r--  drivers/block/floppy.c  17
-rw-r--r--  drivers/block/loop.c  75
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  13
-rw-r--r--  drivers/block/n64cart.c  12
-rw-r--r--  drivers/block/nbd.c  49
-rw-r--r--  drivers/block/null_blk/main.c  535
-rw-r--r--  drivers/block/null_blk/null_blk.h  24
-rw-r--r--  drivers/block/null_blk/trace.h  5
-rw-r--r--  drivers/block/null_blk/zoned.c  25
-rw-r--r--  drivers/block/pktcdvd.c  109
-rw-r--r--  drivers/block/ps3disk.c  17
-rw-r--r--  drivers/block/ps3vram.c  6
-rw-r--r--  drivers/block/rbd.c  31
-rw-r--r--  drivers/block/rnbd/rnbd-clt.c  64
-rw-r--r--  drivers/block/rnbd/rnbd-srv.c  28
-rw-r--r--  drivers/block/rnbd/rnbd-srv.h  2
-rw-r--r--  drivers/block/sunvdc.c  18
-rw-r--r--  drivers/block/swim.c  8
-rw-r--r--  drivers/block/swim3.c  2
-rw-r--r--  drivers/block/ublk_drv.c  111
-rw-r--r--  drivers/block/virtio_blk.c  310
-rw-r--r--  drivers/block/xen-blkback/blkback.c  4
-rw-r--r--  drivers/block/xen-blkback/common.h  4
-rw-r--r--  drivers/block/xen-blkback/xenbus.c  37
-rw-r--r--  drivers/block/xen-blkfront.c  53
-rw-r--r--  drivers/block/z2ram.c  2
-rw-r--r--  drivers/block/zram/zram_drv.c  77
-rw-r--r--  drivers/block/zram/zram_drv.h  2
-rw-r--r--  drivers/bluetooth/btbcm.c  12
-rw-r--r--  drivers/bluetooth/btintel.c  116
-rw-r--r--  drivers/bluetooth/btmtk.c  5
-rw-r--r--  drivers/bluetooth/btmtk.h  1
-rw-r--r--  drivers/bluetooth/btnxpuart.c  27
-rw-r--r--  drivers/bluetooth/btqca.c  2
-rw-r--r--  drivers/bluetooth/btrtl.c  14
-rw-r--r--  drivers/bluetooth/btusb.c  30
-rw-r--r--  drivers/bluetooth/hci_bcm4377.c  3
-rw-r--r--  drivers/bluetooth/hci_h5.c  5
-rw-r--r--  drivers/bluetooth/hci_qca.c  28
-rw-r--r--  drivers/bluetooth/hci_serdev.c  9
-rw-r--r--  drivers/bluetooth/hci_uart.h  12
-rw-r--r--  drivers/bus/Kconfig  5
-rw-r--r--  drivers/bus/imx-weim.c  2
-rw-r--r--  drivers/bus/sunxi-rsb.c  4
-rw-r--r--  drivers/bus/ti-sysc.c  2
-rw-r--r--  drivers/cache/ax45mp_cache.c  4
-rw-r--r--  drivers/cdrom/gdrom.c  20
-rw-r--r--  drivers/clk/rockchip/clk-rk3588.c  5
-rw-r--r--  drivers/clk/rockchip/clk.c  17
-rw-r--r--  drivers/clk/rockchip/clk.h  2
-rw-r--r--  drivers/clk/samsung/clk-gs101.c  2
-rw-r--r--  drivers/clocksource/arm_arch_timer.c  6
-rw-r--r--  drivers/comedi/drivers/comedi_8255.c  1
-rw-r--r--  drivers/comedi/drivers/comedi_test.c  30
-rw-r--r--  drivers/connector/cn_proc.c  5
-rw-r--r--  drivers/counter/counter-core.c  7
-rw-r--r--  drivers/cpufreq/intel_pstate.c  3
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c  34
-rw-r--r--  drivers/crypto/ccp/Kconfig  2
-rw-r--r--  drivers/crypto/ccp/sev-dev.c  1160
-rw-r--r--  drivers/crypto/ccp/sev-dev.h  5
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ahash.c  4
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_akcipher_algs.c  5
-rw-r--r--  drivers/cxl/acpi.c  46
-rw-r--r--  drivers/cxl/core/cdat.c  86
-rw-r--r--  drivers/cxl/core/mbox.c  4
-rw-r--r--  drivers/cxl/core/memdev.c  63
-rw-r--r--  drivers/cxl/core/pci.c  49
-rw-r--r--  drivers/cxl/core/region.c  62
-rw-r--r--  drivers/cxl/core/trace.h  6
-rw-r--r--  drivers/cxl/cxl.h  2
-rw-r--r--  drivers/cxl/cxlmem.h  10
-rw-r--r--  drivers/cxl/mem.c  56
-rw-r--r--  drivers/cxl/pci.c  57
-rw-r--r--  drivers/dax/super.c  3
-rw-r--r--  drivers/dma/dw-edma/dw-edma-v0-core.c  17
-rw-r--r--  drivers/dma/dw-edma/dw-hdma-v0-core.c  39
-rw-r--r--  drivers/dma/dw-edma/dw-hdma-v0-regs.h  2
-rw-r--r--  drivers/dma/fsl-edma-common.c  2
-rw-r--r--  drivers/dma/fsl-edma-common.h  5
-rw-r--r--  drivers/dma/fsl-edma-main.c  4
-rw-r--r--  drivers/dma/fsl-qdma.c  38
-rw-r--r--  drivers/dma/idxd/cdev.c  2
-rw-r--r--  drivers/dma/idxd/debugfs.c  2
-rw-r--r--  drivers/dma/idxd/idxd.h  1
-rw-r--r--  drivers/dma/idxd/init.c  15
-rw-r--r--  drivers/dma/idxd/irq.c  3
-rw-r--r--  drivers/dma/mv_xor_v2.c  8
-rw-r--r--  drivers/dma/ptdma/ptdma-dmaengine.c  2
-rw-r--r--  drivers/dma/qcom/hidma.c  6
-rw-r--r--  drivers/dpll/dpll_core.c  35
-rw-r--r--  drivers/dpll/dpll_core.h  2
-rw-r--r--  drivers/dpll/dpll_netlink.c  67
-rw-r--r--  drivers/dpll/dpll_nl.c  4
-rw-r--r--  drivers/dpll/dpll_nl.h  2
-rw-r--r--  drivers/edac/Kconfig  1
-rw-r--r--  drivers/edac/amd64_edac.c  290
-rw-r--r--  drivers/edac/i10nm_base.c  1
-rw-r--r--  drivers/edac/igen6_edac.c  2
-rw-r--r--  drivers/edac/mce_amd.c  4
-rw-r--r--  drivers/edac/synopsys_edac.c  4
-rw-r--r--  drivers/edac/versal_edac.c  199
-rw-r--r--  drivers/firewire/core-card.c  32
-rw-r--r--  drivers/firewire/ohci.c  2
-rw-r--r--  drivers/firmware/arm_ffa/bus.c  2
-rw-r--r--  drivers/firmware/arm_scmi/bus.c  26
-rw-r--r--  drivers/firmware/arm_scmi/clock.c  194
-rw-r--r--  drivers/firmware/arm_scmi/common.h  2
-rw-r--r--  drivers/firmware/arm_scmi/driver.c  99
-rw-r--r--  drivers/firmware/arm_scmi/notify.c  17
-rw-r--r--  drivers/firmware/arm_scmi/notify.h  4
-rw-r--r--  drivers/firmware/arm_scmi/optee.c  6
-rw-r--r--  drivers/firmware/arm_scmi/perf.c  163
-rw-r--r--  drivers/firmware/arm_scmi/power.c  30
-rw-r--r--  drivers/firmware/arm_scmi/powercap.c  45
-rw-r--r--  drivers/firmware/arm_scmi/protocols.h  5
-rw-r--r--  drivers/firmware/arm_scmi/reset.c  37
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c  37
-rw-r--r--  drivers/firmware/arm_scmi/smc.c  7
-rw-r--r--  drivers/firmware/arm_scmi/system.c  16
-rw-r--r--  drivers/firmware/efi/arm-runtime.c  2
-rw-r--r--  drivers/firmware/efi/capsule-loader.c  2
-rw-r--r--  drivers/firmware/efi/cper.c  19
-rw-r--r--  drivers/firmware/efi/efi-init.c  19
-rw-r--r--  drivers/firmware/efi/efi-pstore.c  43
-rw-r--r--  drivers/firmware/efi/libstub/Makefile  4
-rw-r--r--  drivers/firmware/efi/libstub/alignedmem.c  1
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c  8
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h  5
-rw-r--r--  drivers/firmware/efi/libstub/kaslr.c  2
-rw-r--r--  drivers/firmware/efi/libstub/randomalloc.c  12
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c  28
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.h  4
-rw-r--r--  drivers/firmware/efi/libstub/zboot.c  2
-rw-r--r--  drivers/firmware/efi/riscv-runtime.c  2
-rw-r--r--  drivers/firmware/google/cbmem.c  8
-rw-r--r--  drivers/firmware/google/coreboot_table.c  22
-rw-r--r--  drivers/firmware/google/coreboot_table.h  3
-rw-r--r--  drivers/firmware/google/framebuffer-coreboot.c  8
-rw-r--r--  drivers/firmware/google/memconsole-coreboot.c  8
-rw-r--r--  drivers/firmware/google/vpd.c  8
-rw-r--r--  drivers/firmware/microchip/mpfs-auto-update.c  5
-rw-r--r--  drivers/firmware/tegra/bpmp-debugfs.c  2
-rw-r--r--  drivers/gpio/gpio-74x164.c  4
-rw-r--r--  drivers/gpio/gpio-mvebu.c  18
-rw-r--r--  drivers/gpio/gpiolib.c  23
-rw-r--r--  drivers/gpu/drm/Kconfig  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c  18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c  31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c  10
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  99
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c  15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c  18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile  6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c  17
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c  9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h  4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c  63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c  76
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c  3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h  1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h  7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h  20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_factory.c  26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_validation.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c  5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c  16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c  5
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c  12
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c  29
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c  9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c  9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c  9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c  9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c  9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c  5
-rw-r--r--  drivers/gpu/drm/bridge/aux-hpd-bridge.c  70
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c  32
-rw-r--r--  drivers/gpu/drm/drm_buddy.c  22
-rw-r--r--  drivers/gpu/drm/drm_crtc.c  1
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c  44
-rw-r--r--  drivers/gpu/drm/drm_prime.c  2
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c  8
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c  19
-rw-r--r--  drivers/gpu/drm/i915/Kconfig  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c  17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h  7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c  19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h  2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.c  47
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c  1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_setup.c  13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c  3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c  10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c  10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc_regs.h  4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c  3
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c  2
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c  6
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_cvbs.c  1
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_dsi.c  1
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_hdmi.c  1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c  2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c  8
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c  3
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c  5
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c  20
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_link.c  22
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_reg.h  3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c  4
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c  11
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c  32
-rw-r--r--  drivers/gpu/drm/msm/msm_mdss.c  1
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c  7
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig  8
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/client.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c  29
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c  7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_exec.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sched.c  38
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sched.h  6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_uvmm.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/client.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/object.c  26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c  5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c  8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c  83
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c  8
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop2.c  4
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  15
-rw-r--r--  drivers/gpu/drm/tegra/drm.c  23
-rw-r--r--  drivers/gpu/drm/tests/drm_buddy_test.c  306
-rw-r--r--  drivers/gpu/drm/tests/drm_mm_test.c  6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c  2
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h  2
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_migrate.c  8
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_mocs_test.c  1
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c  11
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.h  1
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c  39
-rw-r--r--  drivers/gpu/drm/xe/xe_device.h  4
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h  8
-rw-r--r--  drivers/gpu/drm/xe/xe_display.c  6
-rw-r--r--  drivers/gpu/drm/xe/xe_drm_client.c  12
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.c  129
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue_types.h  20
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist.c  4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c  5
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_idle.c  4
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.c  2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c  12
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c  2
-rw-r--r--  drivers/gpu/drm/xe/xe_lrc.c  10
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c  28
-rw-r--r--  drivers/gpu/drm/xe/xe_mmio.c  2
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c  50
-rw-r--r--  drivers/gpu/drm/xe/xe_pt_walk.c  2
-rw-r--r--  drivers/gpu/drm/xe/xe_pt_walk.h  19
-rw-r--r--  drivers/gpu/drm/xe/xe_range_fence.c  7
-rw-r--r--  drivers/gpu/drm/xe/xe_sched_job.c  1
-rw-r--r--  drivers/gpu/drm/xe/xe_sync.c  60
-rw-r--r--  drivers/gpu/drm/xe/xe_sync.h  4
-rw-r--r--  drivers/gpu/drm/xe/xe_sync_types.h  2
-rw-r--r--  drivers/gpu/drm/xe/xe_tile.c  5
-rw-r--r--  drivers/gpu/drm/xe/xe_trace.h  59
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c  167
-rw-r--r--  drivers/gpu/drm/xe/xe_vm_types.h  21
-rw-r--r--  drivers/gpu/host1x/dev.c  15
-rw-r--r--  drivers/gpu/host1x/dev.h  6
-rw-r--r--  drivers/hid/bpf/hid_bpf_dispatch.c  8
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c  13
-rw-r--r--  drivers/hid/hid-multitouch.c  4
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/bus.c  2
-rw-r--r--  drivers/hid/intel-ish-hid/ishtp/client.c  4
-rw-r--r--  drivers/hid/wacom_sys.c  63
-rw-r--r--  drivers/hid/wacom_wac.c  9
-rw-r--r--  drivers/hv/channel.c  176
-rw-r--r--  drivers/hv/hv_util.c  31
-rw-r--r--  drivers/hv/vmbus_drv.c  2
-rw-r--r--  drivers/hwmon/aspeed-pwm-tacho.c  7
-rw-r--r--  drivers/hwmon/coretemp.c  44
-rw-r--r--  drivers/hwmon/fam15h_power.c  2
-rw-r--r--  drivers/hwmon/nct6775-core.c  14
-rw-r--r--  drivers/i2c/busses/Makefile  6
-rw-r--r--  drivers/i2c/busses/i2c-aspeed.c  1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c  10
-rw-r--r--  drivers/i2c/busses/i2c-imx.c  5
-rw-r--r--  drivers/i2c/busses/i2c-pasemi-core.c  6
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c  14
-rw-r--r--  drivers/i2c/busses/i2c-wmt.c  6
-rw-r--r--  drivers/iio/accel/Kconfig  2
-rw-r--r--  drivers/iio/accel/adxl367.c  8
-rw-r--r--  drivers/iio/accel/adxl367_i2c.c  2
-rw-r--r--  drivers/iio/adc/ad4130.c  12
-rw-r--r--  drivers/iio/adc/ad7091r8.c  2
-rw-r--r--  drivers/iio/humidity/Kconfig  12
-rw-r--r--  drivers/iio/humidity/Makefile  1
-rw-r--r--  drivers/iio/humidity/hdc3020.c  2
-rw-r--r--  drivers/iio/imu/bno055/Kconfig  1
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c  2
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c  5
-rw-r--r--  drivers/iio/industrialio-core.c  5
-rw-r--r--  drivers/iio/light/hid-sensor-als.c  1
-rw-r--r--  drivers/iio/magnetometer/rm3100-core.c  10
-rw-r--r--  drivers/iio/pressure/bmp280-spi.c  51
-rw-r--r--  drivers/iio/pressure/dlhl60d.c  7
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c  43
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c  3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c  3
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c  6
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c  2
-rw-r--r--  drivers/infiniband/hw/irdma/defs.h  1
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c  8
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c  9
-rw-r--r--  drivers/infiniband/hw/mlx5/cong.c  6
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c  2
-rw-r--r--  drivers/infiniband/hw/mlx5/wr.c  2
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c  11
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c  4
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c  17
-rw-r--r--  drivers/input/joystick/xpad.c  6
-rw-r--r--  drivers/input/keyboard/gpio_keys_polled.c  10
-rw-r--r--  drivers/input/mouse/bcm5974.c  20
-rw-r--r--  drivers/input/rmi4/rmi_driver.c  6
-rw-r--r--  drivers/interconnect/qcom/sc8180x.c  1
-rw-r--r--  drivers/interconnect/qcom/sm8550.c  1
-rw-r--r--  drivers/interconnect/qcom/sm8650.c  2
-rw-r--r--  drivers/interconnect/qcom/x1e80100.c  1
-rw-r--r--  drivers/iommu/Kconfig  8
-rw-r--r--  drivers/iommu/Makefile  3
-rw-r--r--  drivers/iommu/amd/amd_iommu.h  42
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h  34
-rw-r--r--  drivers/iommu/amd/init.c  137
-rw-r--r--  drivers/iommu/amd/io_pgtable_v2.c  21
-rw-r--r--  drivers/iommu/amd/iommu.c  634
-rw-r--r--  drivers/iommu/apple-dart.c  3
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c  60
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c  819
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h  4
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c  1
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c  20
-rw-r--r--  drivers/iommu/arm/arm-smmu/qcom_iommu.c  3
-rw-r--r--  drivers/iommu/dma-iommu.c  5
-rw-r--r--  drivers/iommu/exynos-iommu.c  2
-rw-r--r--  drivers/iommu/intel/Kconfig  12
-rw-r--r--  drivers/iommu/intel/Makefile  2
-rw-r--r--  drivers/iommu/intel/dmar.c  26
-rw-r--r--  drivers/iommu/intel/iommu.c  551
-rw-r--r--  drivers/iommu/intel/iommu.h  21
-rw-r--r--  drivers/iommu/intel/nested.c  16
-rw-r--r--  drivers/iommu/intel/pasid.c  210
-rw-r--r--  drivers/iommu/intel/pasid.h  3
-rw-r--r--  drivers/iommu/intel/perf.c  2
-rw-r--r--  drivers/iommu/intel/svm.c  76
-rw-r--r--  drivers/iommu/io-pgfault.c  463
-rw-r--r--  drivers/iommu/iommu-priv.h  5
-rw-r--r--  drivers/iommu/iommu-sva.c  88
-rw-r--r--  drivers/iommu/iommu-sva.h  71
-rw-r--r--  drivers/iommu/iommu.c  280
-rw-r--r--  drivers/iommu/iommufd/hw_pagetable.c  3
-rw-r--r--  drivers/iommu/iommufd/io_pagetable.c  9
-rw-r--r--  drivers/iommu/iommufd/iommufd_test.h  1
-rw-r--r--  drivers/iommu/iommufd/iova_bitmap.c  68
-rw-r--r--  drivers/iommu/iommufd/selftest.c  148
-rw-r--r--  drivers/iommu/iova.c  143
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c  19
-rw-r--r--  drivers/iommu/irq_remapping.c  3
-rw-r--r--  drivers/iommu/msm_iommu.c  4
-rw-r--r--  drivers/iommu/mtk_iommu.c  5
-rw-r--r--  drivers/iommu/mtk_iommu_v1.c  7
-rw-r--r--  drivers/iommu/of_iommu.c  2
-rw-r--r--  drivers/iommu/rockchip-iommu.c  2
-rw-r--r--  drivers/iommu/sprd-iommu.c  3
-rw-r--r--  drivers/iommu/sun50i-iommu.c  2
-rw-r--r--  drivers/iommu/tegra-smmu.c  4
-rw-r--r--  drivers/iommu/virtio-iommu.c  3
-rw-r--r--  drivers/irqchip/Kconfig  11
-rw-r--r--  drivers/irqchip/Makefile  1
-rw-r--r--  drivers/irqchip/irq-bcm6345-l1.c  2
-rw-r--r--  drivers/irqchip/irq-bcm7038-l1.c  2
-rw-r--r--  drivers/irqchip/irq-brcmstb-l2.c  5
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c  68
-rw-r--r--  drivers/irqchip/irq-gic-v3.c  57
-rw-r--r--  drivers/irqchip/irq-gic.c  27
-rw-r--r--  drivers/irqchip/irq-imgpdc.c  7
-rw-r--r--  drivers/irqchip/irq-imx-intmux.c  18
-rw-r--r--  drivers/irqchip/irq-imx-irqsteer.c  14
-rw-r--r--  drivers/irqchip/irq-keystone.c  5
-rw-r--r--  drivers/irqchip/irq-loongson-eiointc.c  24
-rw-r--r--  drivers/irqchip/irq-ls-scfg-msi.c  12
-rw-r--r--  drivers/irqchip/irq-madera.c  8
-rw-r--r--  drivers/irqchip/irq-mbigen.c  8
-rw-r--r--  drivers/irqchip/irq-meson-gpio.c  5
-rw-r--r--  drivers/irqchip/irq-mvebu-pic.c  12
-rw-r--r--  drivers/irqchip/irq-pruss-intc.c  14
-rw-r--r--  drivers/irqchip/irq-qcom-mpm.c  4
-rw-r--r--  drivers/irqchip/irq-renesas-intc-irqpin.c  11
-rw-r--r--  drivers/irqchip/irq-renesas-irqc.c  9
-rw-r--r--  drivers/irqchip/irq-renesas-rza1.c  7
-rw-r--r--  drivers/irqchip/irq-riscv-intc.c  104
-rw-r--r--  drivers/irqchip/irq-sifive-plic.c  283
-rw-r--r--  drivers/irqchip/irq-starfive-jh8100-intc.c  207
-rw-r--r--  drivers/irqchip/irq-stm32-exti.c  9
-rw-r--r--  drivers/irqchip/irq-ts4800.c  12
-rw-r--r--  drivers/irqchip/irq-vic.c  3
-rw-r--r--  drivers/isdn/capi/capi.c  21
-rw-r--r--  drivers/isdn/mISDN/dsp_pipeline.c  16
-rw-r--r--  drivers/leds/rgb/leds-qcom-lpg.c  16
-rw-r--r--  drivers/mailbox/bcm-flexrm-mailbox.c  8
-rw-r--r--  drivers/md/Kconfig  2
-rw-r--r--  drivers/md/Makefile  1
-rw-r--r--  drivers/md/bcache/bcache.h  4
-rw-r--r--  drivers/md/bcache/super.c  133
-rw-r--r--  drivers/md/dm-bio-prison-v1.c  2
-rw-r--r--  drivers/md/dm-bufio.c  74
-rw-r--r--  drivers/md/dm-cache-policy-smq.c  2
-rw-r--r--  drivers/md/dm-crypt.c  109
-rw-r--r--  drivers/md/dm-dust.c  2
-rw-r--r--  drivers/md/dm-ebs-target.c  2
-rw-r--r--  drivers/md/dm-flakey.c  2
-rw-r--r--  drivers/md/dm-integrity.c  105
-rw-r--r--  drivers/md/dm-io.c  23
-rw-r--r--  drivers/md/dm-ioctl.c  2
-rw-r--r--  drivers/md/dm-kcopyd.c  4
-rw-r--r--  drivers/md/dm-log-userspace-base.c  2
-rw-r--r--  drivers/md/dm-log.c  6
-rw-r--r--  drivers/md/dm-mpath.c  2
-rw-r--r--  drivers/md/dm-ps-round-robin.c  2
-rw-r--r--  drivers/md/dm-raid.c  101
-rw-r--r--  drivers/md/dm-raid1.c  6
-rw-r--r--  drivers/md/dm-region-hash.c  2
-rw-r--r--  drivers/md/dm-snap-persistent.c  4
-rw-r--r--  drivers/md/dm-thin.c  22
-rw-r--r--  drivers/md/dm-vdo/Kconfig  17
-rw-r--r--  drivers/md/dm-vdo/Makefile  57
-rw-r--r--  drivers/md/dm-vdo/action-manager.c  388
-rw-r--r--  drivers/md/dm-vdo/action-manager.h  110
-rw-r--r--  drivers/md/dm-vdo/admin-state.c  506
-rw-r--r--  drivers/md/dm-vdo/admin-state.h  178
-rw-r--r--  drivers/md/dm-vdo/block-map.c  3318
-rw-r--r--  drivers/md/dm-vdo/block-map.h  394
-rw-r--r--  drivers/md/dm-vdo/completion.c  140
-rw-r--r--  drivers/md/dm-vdo/completion.h  152
-rw-r--r--  drivers/md/dm-vdo/constants.h  96
-rw-r--r--  drivers/md/dm-vdo/cpu.h  59
-rw-r--r--  drivers/md/dm-vdo/data-vio.c  2063
-rw-r--r--  drivers/md/dm-vdo/data-vio.h  670
-rw-r--r--  drivers/md/dm-vdo/dedupe.c  3003
-rw-r--r--  drivers/md/dm-vdo/dedupe.h  120
-rw-r--r--  drivers/md/dm-vdo/dm-vdo-target.c  2910
-rw-r--r--  drivers/md/dm-vdo/dump.c  275
-rw-r--r--  drivers/md/dm-vdo/dump.h  17
-rw-r--r--  drivers/md/dm-vdo/encodings.c  1483
-rw-r--r--  drivers/md/dm-vdo/encodings.h  1298
-rw-r--r--  drivers/md/dm-vdo/errors.c  307
-rw-r--r--  drivers/md/dm-vdo/errors.h  73
-rw-r--r--  drivers/md/dm-vdo/flush.c  560
-rw-r--r--  drivers/md/dm-vdo/flush.h  44
-rw-r--r--  drivers/md/dm-vdo/funnel-queue.c  170
-rw-r--r--  drivers/md/dm-vdo/funnel-queue.h  110
-rw-r--r--  drivers/md/dm-vdo/funnel-workqueue.c  638
-rw-r--r--  drivers/md/dm-vdo/funnel-workqueue.h  51
-rw-r--r--  drivers/md/dm-vdo/indexer/chapter-index.c  293
-rw-r--r--  drivers/md/dm-vdo/indexer/chapter-index.h  61
-rw-r--r--  drivers/md/dm-vdo/indexer/config.c  376
-rw-r--r--  drivers/md/dm-vdo/indexer/config.h  124
-rw-r--r--  drivers/md/dm-vdo/indexer/delta-index.c  1970
-rw-r--r--  drivers/md/dm-vdo/indexer/delta-index.h  279
-rw-r--r--  drivers/md/dm-vdo/indexer/funnel-requestqueue.c  279
-rw-r--r--  drivers/md/dm-vdo/indexer/funnel-requestqueue.h  31
-rw-r--r--  drivers/md/dm-vdo/indexer/geometry.c  201
-rw-r--r--  drivers/md/dm-vdo/indexer/geometry.h  140
-rw-r--r--  drivers/md/dm-vdo/indexer/hash-utils.h  66
-rw-r--r--  drivers/md/dm-vdo/indexer/index-layout.c  1765
-rw-r--r--  drivers/md/dm-vdo/indexer/index-layout.h  43
-rw-r--r--  drivers/md/dm-vdo/indexer/index-page-map.c  173
-rw-r--r--  drivers/md/dm-vdo/indexer/index-page-map.h  50
-rw-r--r--  drivers/md/dm-vdo/indexer/index-session.c  739
-rw-r--r--  drivers/md/dm-vdo/indexer/index-session.h  85
-rw-r--r--  drivers/md/dm-vdo/indexer/index.c  1388
-rw-r--r--  drivers/md/dm-vdo/indexer/index.h  83
-rw-r--r--  drivers/md/dm-vdo/indexer/indexer.h  353
-rw-r--r--  drivers/md/dm-vdo/indexer/io-factory.c  415
-rw-r--r--  drivers/md/dm-vdo/indexer/io-factory.h  64
-rw-r--r--  drivers/md/dm-vdo/indexer/open-chapter.c  426
-rw-r--r--  drivers/md/dm-vdo/indexer/open-chapter.h  79
-rw-r--r--  drivers/md/dm-vdo/indexer/radix-sort.c  330
-rw-r--r--  drivers/md/dm-vdo/indexer/radix-sort.h  26
-rw-r--r--  drivers/md/dm-vdo/indexer/sparse-cache.c  624
-rw-r--r--  drivers/md/dm-vdo/indexer/sparse-cache.h  46
-rw-r--r--  drivers/md/dm-vdo/indexer/volume-index.c  1283
-rw-r--r--  drivers/md/dm-vdo/indexer/volume-index.h  193
-rw-r--r--  drivers/md/dm-vdo/indexer/volume.c  1693
-rw-r--r--  drivers/md/dm-vdo/indexer/volume.h  172
-rw-r--r--  drivers/md/dm-vdo/int-map.c  707
-rw-r--r--  drivers/md/dm-vdo/int-map.h  39
-rw-r--r--  drivers/md/dm-vdo/io-submitter.c  477
-rw-r--r--  drivers/md/dm-vdo/io-submitter.h  47
-rw-r--r--  drivers/md/dm-vdo/logger.c  239
-rw-r--r--  drivers/md/dm-vdo/logger.h  100
-rw-r--r--  drivers/md/dm-vdo/logical-zone.c  373
-rw-r--r--  drivers/md/dm-vdo/logical-zone.h  89
-rw-r--r--  drivers/md/dm-vdo/memory-alloc.c  438
-rw-r--r--  drivers/md/dm-vdo/memory-alloc.h  162
-rw-r--r--  drivers/md/dm-vdo/message-stats.c  432
-rw-r--r--  drivers/md/dm-vdo/message-stats.h  13
-rw-r--r--  drivers/md/dm-vdo/murmurhash3.c  175
-rw-r--r--  drivers/md/dm-vdo/murmurhash3.h  15
-rw-r--r--  drivers/md/dm-vdo/numeric.h  78
-rw-r--r--  drivers/md/dm-vdo/packer.c  780
-rw-r--r--  drivers/md/dm-vdo/packer.h  122
-rw-r--r--  drivers/md/dm-vdo/permassert.c  26
-rw-r--r--  drivers/md/dm-vdo/permassert.h  45
-rw-r--r--  drivers/md/dm-vdo/physical-zone.c  644
-rw-r--r--  drivers/md/dm-vdo/physical-zone.h  115
-rw-r--r--  drivers/md/dm-vdo/priority-table.c  224
-rw-r--r--  drivers/md/dm-vdo/priority-table.h  47
-rw-r--r--  drivers/md/dm-vdo/recovery-journal.c  1762
-rw-r--r--  drivers/md/dm-vdo/recovery-journal.h  316
-rw-r--r--  drivers/md/dm-vdo/repair.c  1756
-rw-r--r--  drivers/md/dm-vdo/repair.h  14
-rw-r--r--  drivers/md/dm-vdo/slab-depot.c  5101
-rw-r--r--  drivers/md/dm-vdo/slab-depot.h  601
-rw-r--r--  drivers/md/dm-vdo/statistics.h  278
-rw-r--r--  drivers/md/dm-vdo/status-codes.c  94
-rw-r--r--  drivers/md/dm-vdo/status-codes.h  86
-rw-r--r--  drivers/md/dm-vdo/string-utils.c  22
-rw-r--r--  drivers/md/dm-vdo/string-utils.h  23
-rw-r--r--  drivers/md/dm-vdo/thread-device.c  34
-rw-r--r--  drivers/md/dm-vdo/thread-device.h  20
-rw-r--r--  drivers/md/dm-vdo/thread-registry.c  93
-rw-r--r--  drivers/md/dm-vdo/thread-registry.h  32
-rw-r--r--  drivers/md/dm-vdo/thread-utils.c  108
-rw-r--r--  drivers/md/dm-vdo/thread-utils.h  20
-rw-r--r--  drivers/md/dm-vdo/time-utils.h  28
-rw-r--r--  drivers/md/dm-vdo/types.h  393
-rw-r--r--  drivers/md/dm-vdo/vdo.c  1730
-rw-r--r--  drivers/md/dm-vdo/vdo.h  362
-rw-r--r--  drivers/md/dm-vdo/vio.c  500
-rw-r--r--  drivers/md/dm-vdo/vio.h  199
-rw-r--r--  drivers/md/dm-vdo/wait-queue.c  205
-rw-r--r--  drivers/md/dm-vdo/wait-queue.h  138
-rw-r--r--  drivers/md/dm-verity-fec.c  21
-rw-r--r--  drivers/md/dm-verity-target.c  173
-rw-r--r--  drivers/md/dm-verity.h  15
-rw-r--r--  drivers/md/dm-writecache.c  10
-rw-r--r--  drivers/md/dm-zoned-metadata.c  5
-rw-r--r--  drivers/md/dm.c  55
-rw-r--r--  drivers/md/md-bitmap.c  18
-rw-r--r--  drivers/md/md-linear.h  17
-rw-r--r--  drivers/md/md-multipath.h  32
-rw-r--r--  drivers/md/md.c  482
-rw-r--r--  drivers/md/md.h  79
-rw-r--r--  drivers/md/persistent-data/dm-block-manager.c  2
-rw-r--r--  drivers/md/raid0.c  42
-rw-r--r--  drivers/md/raid1-10.c  69
-rw-r--r--  drivers/md/raid1.c  601
-rw-r--r--  drivers/md/raid1.h  1
-rw-r--r--  drivers/md/raid10.c  159
-rw-r--r--  drivers/md/raid5-ppl.c  3
-rw-r--r--  drivers/md/raid5.c  302
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c  3
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-common.h  2
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c  3
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c  24
-rw-r--r--  drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c  3
-rw-r--r--  drivers/media/rc/Kconfig  1
-rw-r--r--  drivers/media/rc/bpf-lirc.c  8
-rw-r--r--  drivers/media/rc/ir_toy.c  2
-rw-r--r--  drivers/media/rc/lirc_dev.c  5
-rw-r--r--  drivers/media/rc/rc-core-priv.h  2
-rw-r--r--  drivers/memory/emif.c  65
-rw-r--r--  drivers/memory/stm32-fmc2-ebi.c  729
-rw-r--r--  drivers/memory/tegra/tegra234.c  48
-rw-r--r--  drivers/memstick/core/memstick.c  2
-rw-r--r--  drivers/memstick/core/ms_block.c  14
-rw-r--r--  drivers/memstick/core/mspro_block.c  15
-rw-r--r--  drivers/misc/fastrpc.c  10
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d_i2c.c  21
-rw-r--r--  drivers/misc/lkdtm/bugs.c  3
-rw-r--r--  drivers/misc/lkdtm/core.c  22
-rw-r--r--  drivers/misc/lkdtm/heap.c  2
-rw-r--r--  drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c  8
-rw-r--r--  drivers/misc/mei/hw-me-regs.h  2
-rw-r--r--  drivers/misc/mei/pci-me.c  2
-rw-r--r--  drivers/misc/mei/vsc-tp.c  1
-rw-r--r--  drivers/misc/vmw_vmci/vmci_datagram.c  10
-rw-r--r--  drivers/mmc/core/block.c  24
-rw-r--r--  drivers/mmc/core/bus.c  4
-rw-r--r--  drivers/mmc/core/bus.h  2
-rw-r--r--  drivers/mmc/core/host.c  11
-rw-r--r--  drivers/mmc/core/mmc.c  4
-rw-r--r--  drivers/mmc/core/queue.c  98
-rw-r--r--  drivers/mmc/core/sd.c  2
-rw-r--r--  drivers/mmc/core/sd.h  2
-rw-r--r--  drivers/mmc/core/sdio.c  2
-rw-r--r--  drivers/mmc/core/sdio_bus.c  2
-rw-r--r--  drivers/mmc/core/slot-gpio.c  6
-rw-r--r--  drivers/mmc/host/Kconfig  9
-rw-r--r--  drivers/mmc/host/Makefile  1
-rw-r--r--  drivers/mmc/host/davinci_mmc.c  59
-rw-r--r--  drivers/mmc/host/dw_mmc-exynos.c  1
-rw-r--r--  drivers/mmc/host/dw_mmc-hi3798cv200.c  1
-rw-r--r--  drivers/mmc/host/dw_mmc-hi3798mv200.c  251
-rw-r--r--  drivers/mmc/host/dw_mmc.c  1
-rw-r--r--  drivers/mmc/host/meson-mx-sdhc-clkc.c  43
-rw-r--r--  drivers/mmc/host/meson-mx-sdhc-mmc.c  13
-rw-r--r--  drivers/mmc/host/mmc_spi.c  30
-rw-r--r--  drivers/mmc/host/mmci_stm32_sdmmc.c  24
-rw-r--r--  drivers/mmc/host/moxart-mmc.c  90
-rw-r--r--  drivers/mmc/host/mvsdio.c  71
-rw-r--r--  drivers/mmc/host/mxcmmc.c  53
-rw-r--r--  drivers/mmc/host/omap.c  53
-rw-r--r--  drivers/mmc/host/renesas_sdhi.h  3
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-mcf.c  12
-rw-r--r--  drivers/mmc/host/sdhci-of-aspeed.c  2
-rw-r--r--  drivers/mmc/host/sdhci-of-dwcmshc.c  66
-rw-r--r--  drivers/mmc/host/sdhci-pci-o2micro.c  30
-rw-r--r--  drivers/mmc/host/sdhci-xenon-phy.c  48
-rw-r--r--  drivers/mmc/host/sh_mmcif.c  114
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c  6
-rw-r--r--  drivers/mmc/host/wbsd.c  2
-rw-r--r--  drivers/mmc/host/wmt-sdmmc.c  4
-rw-r--r--  drivers/mtd/devices/block2mtd.c  46
-rw-r--r--  drivers/mtd/mtd_blkdevs.c  12
-rw-r--r--  drivers/mtd/mtdcore.c  1
-rw-r--r--  drivers/mtd/nand/raw/marvell_nand.c  13
-rw-r--r--  drivers/mtd/nand/spi/gigadevice.c  6
-rw-r--r--  drivers/mtd/ubi/block.c  6
-rw-r--r--  drivers/net/amt.c  10
-rw-r--r--  drivers/net/arcnet/arc-rawmode.c  1
-rw-r--r--  drivers/net/arcnet/arc-rimi.c  1
-rw-r--r--  drivers/net/arcnet/arcnet.c  1
-rw-r--r--  drivers/net/arcnet/capmode.c  1
-rw-r--r--  drivers/net/arcnet/com20020-pci.c  1
-rw-r--r--  drivers/net/arcnet/com20020.c  1
-rw-r--r--  drivers/net/arcnet/com20020_cs.c  1
-rw-r--r--  drivers/net/arcnet/com90io.c  1
-rw-r--r--  drivers/net/arcnet/com90xx.c  1
-rw-r--r--  drivers/net/arcnet/rfc1051.c  1
-rw-r--r--  drivers/net/arcnet/rfc1201.c  1
-rw-r--r--  drivers/net/bareudp.c  25
-rw-r--r--  drivers/net/bonding/bond_3ad.c  165
-rw-r--r--  drivers/net/bonding/bond_main.c  63
-rw-r--r--  drivers/net/bonding/bond_netlink.c  16
-rw-r--r--  drivers/net/bonding/bond_options.c  28
-rw-r--r--  drivers/net/can/Kconfig  3
-rw-r--r--  drivers/net/can/Makefile  1
-rw-r--r--  drivers/net/can/dev/netlink.c  2
-rw-r--r--  drivers/net/can/esd/Kconfig  12
-rw-r--r--  drivers/net/can/esd/Makefile  7
-rw-r--r--  drivers/net/can/esd/esd_402_pci-core.c  514
-rw-r--r--  drivers/net/can/esd/esdacc.c  764
-rw-r--r--  drivers/net/can/esd/esdacc.h  356
-rw-r--r--  drivers/net/can/kvaser_pciefd.c  62
-rw-r--r--  drivers/net/can/m_can/m_can.c  579
-rw-r--r--  drivers/net/can/m_can/m_can.h  35
-rw-r--r--  drivers/net/can/m_can/m_can_pci.c  1
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c  5
-rw-r--r--  drivers/net/can/m_can/tcan4x5x-core.c  33
-rw-r--r--  drivers/net/can/softing/softing_fw.c  2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c  2
-rw-r--r--  drivers/net/can/usb/Kconfig  1
-rw-r--r--  drivers/net/can/usb/gs_usb.c  2
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c  3
-rw-r--r--  drivers/net/can/vxcan.c  2
-rw-r--r--  drivers/net/can/xilinx_can.c  169
-rw-r--r--  drivers/net/dsa/Kconfig  2
-rw-r--r--  drivers/net/dsa/b53/b53_common.c  42
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h  7
-rw-r--r--  drivers/net/dsa/bcm_sf2.c  2
-rw-r--r--  drivers/net/dsa/dsa_loop_bdinfo.c  1
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c  404
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_reg.h  1
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c  4
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c  112
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h  2
-rw-r--r--  drivers/net/dsa/microchip/ksz_spi.c  5
-rw-r--r--  drivers/net/dsa/mt7530-mdio.c  7
-rw-r--r--  drivers/net/dsa/mt7530.c  570
-rw-r--r--  drivers/net/dsa/mt7530.h  38
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c  11
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.h  4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2_scratch.c  35
-rw-r--r--  drivers/net/dsa/mv88e6xxx/pcs-6185.c  3
-rw-r--r--  drivers/net/dsa/qca/qca8k-8xxx.c  19
-rw-r--r--  drivers/net/dsa/qca/qca8k-common.c  4
-rw-r--r--  drivers/net/dsa/qca/qca8k.h  4
-rw-r--r--  drivers/net/dsa/realtek/Kconfig  20
-rw-r--r--  drivers/net/dsa/realtek/Makefile  13
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.c  205
-rw-r--r--  drivers/net/dsa/realtek/realtek-mdio.h  48
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.c  279
-rw-r--r--  drivers/net/dsa/realtek/realtek-smi.h  48
-rw-r--r--  drivers/net/dsa/realtek/realtek.h  14
-rw-r--r--  drivers/net/dsa/realtek/rtl8365mb.c  132
-rw-r--r--  drivers/net/dsa/realtek/rtl8366-core.c  22
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb.c  119
-rw-r--r--  drivers/net/dsa/realtek/rtl83xx.c  335
-rw-r--r--  drivers/net/dsa/realtek/rtl83xx.h  24
-rw-r--r--  drivers/net/dummy.c  11
-rw-r--r--  drivers/net/ethernet/Kconfig  3
-rw-r--r--  drivers/net/ethernet/adi/Kconfig  1
-rw-r--r--  drivers/net/ethernet/adi/adin1110.c  10
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c  323
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h  7
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c  49
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h  39
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c  181
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_regs_defs.h  1
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_xdp.c  1
-rw-r--r--  drivers/net/ethernet/amd/pds_core/adminq.c  10
-rw-r--r--  drivers/net/ethernet/amd/pds_core/auxbus.c  30
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.c  95
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.h  4
-rw-r--r--  drivers/net/ethernet/amd/pds_core/debugfs.c  8
-rw-r--r--  drivers/net/ethernet/amd/pds_core/dev.c  22
-rw-r--r--  drivers/net/ethernet/amd/pds_core/main.c  53
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c  25
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ptp.c  4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c  13
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h  1
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.c  96
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.h  25
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c  12
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c  212
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  23
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c  50
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c  14
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  921
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h  74
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  464
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h  6
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c  16
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h  2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c  11
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c  54
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h  2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c  12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c  14
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c  2
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_vic.c  3
-rw-r--r--  drivers/net/ethernet/ec_bhf.c  1
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c  52
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c  4
-rw-r--r--  drivers/net/ethernet/freescale/fec.h  2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  148
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c  18
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c  4
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h  171
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c  50
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h  20
-rw-r--r--  drivers/net/ethernet/google/gve/gve_dqo.h  18
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c  62
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c  928
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c  135
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c  159
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c  128
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx_dqo.c  108
-rw-r--r--  drivers/net/ethernet/google/gve/gve_utils.c  48
-rw-r--r--  drivers/net/ethernet/google/gve/gve_utils.h  8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c  13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c  4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h  3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c  44
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h  11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c  7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c  16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h  1
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.c  2
-rw-r--r--  drivers/net/ethernet/intel/Kconfig  9
-rw-r--r--  drivers/net/ethernet/intel/e100.c  4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c  23
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c  2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c  8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h  93
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c  4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c  2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.h  2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c  10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c  97
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c  13
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c  591
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h  3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c  42
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c  13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h  5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.c  1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c  144
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.h  10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c  183
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h  12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c  2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.c  4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_debugfs.c  13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devids.h  8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c  68
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.c  99
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c  95
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h  31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fwlog.c  2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c  25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.h  3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c  309
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h  22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c  276
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_osdep.h  2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c  229
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h  34
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c  12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c  8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h  3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c  10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.h  5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c  40
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.h  9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c  2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c  3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c  31
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h  146
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq.c  7
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq_api.h  5
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_dev.c  1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c  39
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_main.c  6
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c  1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_vf_dev.c  3
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c  2280
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.h  70
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h  2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c  43
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  62
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c  5
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c  2
-rw-r--r--  drivers/net/ethernet/intel/igc/Makefile  1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h  10
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c  25
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_leds.c  280
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c  54
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_phy.c  6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h  1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h  16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c  70
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c  155
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c  262
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h  112
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c  12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h  10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c  26
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h  30
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c  12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h  35
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c  70
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  118
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c  46
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h  10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c  242
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h  54
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c  8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h  7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h  189
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c  66
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h  18
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c  294
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c  17
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  2
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig  1
-rw-r--r--  drivers/net/ethernet/marvell/Makefile  1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c  4
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig  19
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/Makefile  10
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c  489
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c  500
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h  160
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c  273
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c  1231
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h  334
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c  430
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h  166
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h  154
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h  162
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c  510
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h  224
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c  330
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h  276
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h  4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h  16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h  617
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c  16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h  32
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c  4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c  20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c  186
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c  75
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c  8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h  3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c  14
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c  14
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c  11
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_wo.c  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw_qos.h  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dpll.c  38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/channels.c  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/channels.h  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c  48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c  33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c  29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c  3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c  123
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.c  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rss.h  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c  62
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/trap.c  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c  26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c  25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c  82
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  183
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c  39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c  22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c  47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h  12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c  524
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h  38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c  734
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c  36
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h  30
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/minimal.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c  168
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h  19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c  13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c  22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c  327
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c  8
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600-regmap.c  5
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ethtool.c  4
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c  2
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ptp.c  4
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_lag.c  9
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c  2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c  4
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_main.c  1
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_main.h  1
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_packet.c  2
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c  88
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c  1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c  2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/lag_conf.c  5
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c  2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c  1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c  6
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c  4
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic.h  2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c  19
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_debugfs.c  2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c  115
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h  90
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c  12
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_fw.c  5
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c  379
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h  23
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c  120
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.c  18
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c  950
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.h  4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c  2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c  2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c  64
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c  2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c  3
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c  1
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_7k.c  17
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_7k.h  16
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_7k_common.c  17
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_7k_common.h  29
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_debug.c  21
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_debug.h  15
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c  71
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.h  22
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_uart.c  17
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c  1
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c  2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.h  4
-rw-r--r--  drivers/net/ethernet/realtek/r8169_leds.c  145
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c  267
-rw-r--r--  drivers/net/ethernet/realtek/r8169_phy_config.c  7
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig  1
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h  60
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c  1207
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c  2
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h  1
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c  6
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c  1
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.c  2
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c  2
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.c  1
-rw-r--r--  drivers/net/ethernet/sfc/siena/efx_common.c  2
-rw-r--r--  drivers/net/ethernet/sfc/siena/rx_common.c  1
-rw-r--r--  drivers/net/ethernet/sfc/siena/tx_common.c  5
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.c  5
-rw-r--r--  drivers/net/ethernet/sfc/tx_tso.c  4
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c  1
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c  1
-rw-r--r--  drivers/net/ethernet/smsc/smsc9420.c  1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig  6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h  60
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c  35
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c  1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c  32
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c  15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c  15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c  15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h  3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c  58
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c  15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.c  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h  4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c  3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h  3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_est.c  6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c  137
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  229
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c  8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c  87
-rw-r--r--  drivers/net/ethernet/sun/sunvnet_common.c  4
-rw-r--r--  drivers/net/ethernet/ti/Kconfig  1
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-ethtool.c  4
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c  2
-rw-r--r--  drivers/net/ethernet/ti/cpsw-common.c  1
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c  2
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ethtool.c  4
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c  3
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.h  4
-rw-r--r--  drivers/net/ethernet/ti/cpts.c  17
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_ethtool.c  4
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth.c  4
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c  20
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.c  2
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c  22
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_type.h  1
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/Makefile  1
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c  269
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h  7
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_main.c  141
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c  82
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h  3
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_type.h  17
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c  3
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c  3
-rw-r--r--  drivers/net/ethernet/xircom/xirc2ps_cs.c  4
-rw-r--r--  drivers/net/fddi/skfp/skfddi.c  1
-rw-r--r--  drivers/net/geneve.c  58
-rw-r--r--  drivers/net/gtp.c  53
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c  82
-rw-r--r--  drivers/net/ieee802154/at86rf230.c  5
-rw-r--r--  drivers/net/ieee802154/ca8210.c  10
-rw-r--r--  drivers/net/ieee802154/fakelb.c  1
-rw-r--r--  drivers/net/ieee802154/mcr20a.c  5
-rw-r--r--  drivers/net/ieee802154/mrf24j40.c  4
-rw-r--r--  drivers/net/ipa/ipa.h  5
-rw-r--r--  drivers/net/ipa/ipa_cmd.c  6
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c  29
-rw-r--r--  drivers/net/ipa/ipa_interrupt.c  119
-rw-r--r--  drivers/net/ipa/ipa_interrupt.h  30
-rw-r--r--  drivers/net/ipa/ipa_main.c  60
-rw-r--r--  drivers/net/ipa/ipa_mem.c  37
-rw-r--r--  drivers/net/ipa/ipa_mem.h  5
-rw-r--r--  drivers/net/ipa/ipa_modem.c  110
-rw-r--r--  drivers/net/ipa/ipa_power.c  108
-rw-r--r--  drivers/net/ipa/ipa_power.h  29
-rw-r--r--  drivers/net/ipa/ipa_qmi.c  10
-rw-r--r--  drivers/net/ipa/ipa_reg.c  8
-rw-r--r--  drivers/net/ipa/ipa_reg.h  4
-rw-r--r--  drivers/net/ipa/ipa_smp2p.c  33
-rw-r--r--  drivers/net/ipa/ipa_smp2p.h  7
-rw-r--r--  drivers/net/ipa/ipa_table.c  18
-rw-r--r--  drivers/net/ipa/ipa_uc.c  9
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c  2
-rw-r--r--  drivers/net/ipvlan/ipvtap.c  1
-rw-r--r--  drivers/net/loopback.c  1
-rw-r--r--  drivers/net/macsec.c  12
-rw-r--r--  drivers/net/macvlan.c  2
-rw-r--r--  drivers/net/mdio/mdio-bcm-unimac.c  94
-rw-r--r--  drivers/net/mdio/mdio-ipq4019.c  109
-rw-r--r--  drivers/net/mdio/of_mdio.c  79
-rw-r--r--  drivers/net/netconsole.c  365
-rw-r--r--  drivers/net/netdevsim/bus.c  149
-rw-r--r--  drivers/net/netdevsim/dev.c  8
-rw-r--r--  drivers/net/netdevsim/netdev.c  53
-rw-r--r--  drivers/net/netdevsim/netdevsim.h  3
-rw-r--r--  drivers/net/netkit.c  2
-rw-r--r--  drivers/net/nlmon.c  24
-rw-r--r--drivers/net/pcs/pcs-lynx.c1
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c1
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c5
-rw-r--r--drivers/net/pcs/pcs-xpcs.c18
-rw-r--r--drivers/net/phy/Kconfig8
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/adin1100.c55
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c103
-rw-r--r--drivers/net/phy/at803x.c2432
-rw-r--r--drivers/net/phy/broadcom.c3
-rw-r--r--drivers/net/phy/dp83822.c211
-rw-r--r--drivers/net/phy/dp83867.c22
-rw-r--r--drivers/net/phy/marvell-88q2xxx.c640
-rw-r--r--drivers/net/phy/marvell-88x2222.c2
-rw-r--r--drivers/net/phy/marvell.c7
-rw-r--r--drivers/net/phy/mdio_bus.c48
-rw-r--r--drivers/net/phy/mdio_devres.c1
-rw-r--r--drivers/net/phy/micrel.c109
-rw-r--r--drivers/net/phy/mxl-gpy.c20
-rw-r--r--drivers/net/phy/phy-c45.c137
-rw-r--r--drivers/net/phy/phy.c61
-rw-r--r--drivers/net/phy/phy_device.c208
-rw-r--r--drivers/net/phy/phylink.c8
-rw-r--r--drivers/net/phy/qcom/Kconfig30
-rw-r--r--drivers/net/phy/qcom/Makefile6
-rw-r--r--drivers/net/phy/qcom/at803x.c1106
-rw-r--r--drivers/net/phy/qcom/qca807x.c849
-rw-r--r--drivers/net/phy/qcom/qca808x.c663
-rw-r--r--drivers/net/phy/qcom/qca83xx.c275
-rw-r--r--drivers/net/phy/qcom/qcom-phy-lib.c676
-rw-r--r--drivers/net/phy/qcom/qcom.h243
-rw-r--r--drivers/net/phy/realtek.c48
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c2
-rw-r--r--drivers/net/plip/plip.c1
-rw-r--r--drivers/net/ppp/bsd_comp.c1
-rw-r--r--drivers/net/ppp/ppp_async.c5
-rw-r--r--drivers/net/ppp/ppp_deflate.c1
-rw-r--r--drivers/net/ppp/ppp_generic.c21
-rw-r--r--drivers/net/ppp/ppp_synctty.c1
-rw-r--r--drivers/net/ppp/pppoe.c23
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/tun.c35
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/ax88179_178a.c20
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/lan78xx.c12
-rw-r--r--drivers/net/usb/r8152.c49
-rw-r--r--drivers/net/usb/smsc95xx.c5
-rw-r--r--drivers/net/usb/sr9800.c4
-rw-r--r--drivers/net/usb/usbnet.c13
-rw-r--r--drivers/net/veth.c117
-rw-r--r--drivers/net/vsockmon.c19
-rw-r--r--drivers/net/vxlan/vxlan_core.c68
-rw-r--r--drivers/net/wan/Kconfig12
-rw-r--r--drivers/net/wan/Makefile1
-rw-r--r--drivers/net/wan/framer/framer-core.c30
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256.c6
-rw-r--r--drivers/net/wan/fsl_qmc_hdlc.c797
-rw-r--r--drivers/net/wireguard/receive.c2
-rw-r--r--drivers/net/wireless/admtek/adm8211.c4
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h12
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c26
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h62
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c108
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h42
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c20
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c19
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c1202
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c73
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c62
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c267
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/testmode.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c5
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c303
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h151
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile4
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c270
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h84
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c25
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c9
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c166
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c30
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.c171
-rw-r--r--drivers/net/wireless/ath/ath12k/fw.h33
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c415
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h20
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c15
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c33
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h55
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c1309
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c52
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.c142
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.h23
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c94
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c429
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h35
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c13
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h116
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.h29
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c330
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h202
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg_aic.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c5
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/b43.h16
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c20
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_ht.c6
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/pio.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c82
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c46
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c152
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h60
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c116
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h127
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c632
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h216
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c500
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h199
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c427
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h210
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c80
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c132
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c159
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c340
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c243
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c171
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c151
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/module.c10
-rw-r--r--drivers/net/wireless/intersil/p54/main.c4
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c1
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c13
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c2
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c106
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c107
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c212
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h94
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_regs.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_usb.c75
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c75
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/wed.c213
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c16
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c110
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c94
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h6
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c82
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c40
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h11
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c8
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c4
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c4
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h28
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c33
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c588
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.h4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c109
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c195
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h43
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c44
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c40
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c40
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c61
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h109
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c646
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c393
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h21
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c382
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h362
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse_be.c142
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c2496
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h1532
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c341
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h93
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c19
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c363
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c215
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c121
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c1105
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h113
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c331
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h572
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c162
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c72
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c78
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c82
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_table.c142
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c81
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c1773
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c378
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c50
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c12
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c19
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_sdio.c42
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c75
-rw-r--r--drivers/net/wireless/st/cw1200/main.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c1
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c1
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c10
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c1
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c147
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h5
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_def.h2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c4
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c5
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c47
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h18
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c14
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.h1
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c103
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h14
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port.h4
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c110
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h10
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c115
-rw-r--r--drivers/net/wwan/t7xx/t7xx_reg.h24
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c132
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.h1
-rw-r--r--drivers/net/wwan/wwan_core.c36
-rw-r--r--drivers/net/wwan/wwan_hwsim.c16
-rw-r--r--drivers/net/xen-netback/netback.c1
-rw-r--r--drivers/nvdimm/btt.c14
-rw-r--r--drivers/nvdimm/pmem.c14
-rw-r--r--drivers/nvme/host/apple.c2
-rw-r--r--drivers/nvme/host/core.c476
-rw-r--r--drivers/nvme/host/fabrics.c23
-rw-r--r--drivers/nvme/host/ioctl.c2
-rw-r--r--drivers/nvme/host/multipath.c17
-rw-r--r--drivers/nvme/host/nvme.h14
-rw-r--r--drivers/nvme/host/rdma.c14
-rw-r--r--drivers/nvme/host/sysfs.c37
-rw-r--r--drivers/nvme/host/tcp.c7
-rw-r--r--drivers/nvme/host/zns.c24
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/configfs.c28
-rw-r--r--drivers/nvme/target/core.c18
-rw-r--r--drivers/nvme/target/discovery.c2
-rw-r--r--drivers/nvme/target/fabrics-cmd.c9
-rw-r--r--drivers/nvme/target/fcloop.c17
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c16
-rw-r--r--drivers/nvme/target/nvmet.h8
-rw-r--r--drivers/nvme/target/passthru.c2
-rw-r--r--drivers/nvme/target/rdma.c10
-rw-r--r--drivers/nvme/target/tcp.c4
-rw-r--r--drivers/nvme/target/zns.c5
-rw-r--r--drivers/nvmem/core.c5
-rw-r--r--drivers/of/property.c65
-rw-r--r--drivers/of/unittest.c12
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c10
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c65
-rw-r--r--drivers/pci/msi/irqdomain.c2
-rw-r--r--drivers/pci/pci.c41
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/perf/arm-cmn.c11
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c4
-rw-r--r--drivers/perf/cxl_pmu.c12
-rw-r--r--drivers/perf/riscv_pmu.c18
-rw-r--r--drivers/perf/riscv_pmu_legacy.c10
-rw-r--r--drivers/perf/riscv_pmu_sbi.c8
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c166
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c16
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c10
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c6
-rw-r--r--drivers/pinctrl/pinctrl-amd.c2
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp257.c2
-rw-r--r--drivers/platform/x86/amd/pmf/core.c17
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h5
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c78
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c2
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c2
-rw-r--r--drivers/platform/x86/intel/vbtn.c3
-rw-r--r--drivers/platform/x86/p2sb.c25
-rw-r--r--drivers/platform/x86/serdev_helpers.h80
-rw-r--r--drivers/platform/x86/think-lmi.c20
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c5
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c29
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c38
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c1
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c4
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h1
-rw-r--r--drivers/pmdomain/arm/scmi_perf_domain.c3
-rw-r--r--drivers/pmdomain/core.c2
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c15
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c8
-rw-r--r--drivers/pmdomain/renesas/r8a77980-sysc.c3
-rw-r--r--drivers/power/supply/Kconfig1
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c4
-rw-r--r--drivers/powercap/intel_rapl_common.c2
-rw-r--r--drivers/ptp/Kconfig12
-rw-r--r--drivers/ptp/Makefile1
-rw-r--r--drivers/ptp/ptp_clock.c66
-rw-r--r--drivers/ptp/ptp_fc3.c1014
-rw-r--r--drivers/ptp/ptp_fc3.h45
-rw-r--r--drivers/ptp/ptp_kvm_common.c10
-rw-r--r--drivers/ptp/ptp_kvm_x86.c4
-rw-r--r--drivers/ptp/ptp_ocp.c311
-rw-r--r--drivers/ptp/ptp_private.h2
-rw-r--r--drivers/ptp/ptp_sysfs.c13
-rw-r--r--drivers/ptp/ptp_vclock.c2
-rw-r--r--drivers/pwm/core.c724
-rw-r--r--drivers/pwm/pwm-ab8500.c36
-rw-r--r--drivers/pwm/pwm-apple.c18
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c42
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c32
-rw-r--r--drivers/pwm/pwm-atmel.c47
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c19
-rw-r--r--drivers/pwm/pwm-bcm-kona.c23
-rw-r--r--drivers/pwm/pwm-bcm2835.c22
-rw-r--r--drivers/pwm/pwm-berlin.c29
-rw-r--r--drivers/pwm/pwm-brcmstb.c17
-rw-r--r--drivers/pwm/pwm-clk.c27
-rw-r--r--drivers/pwm/pwm-clps711x.c28
-rw-r--r--drivers/pwm/pwm-crc.c22
-rw-r--r--drivers/pwm/pwm-cros-ec.c57
-rw-r--r--drivers/pwm/pwm-dwc-core.c26
-rw-r--r--drivers/pwm/pwm-dwc.c77
-rw-r--r--drivers/pwm/pwm-dwc.h14
-rw-r--r--drivers/pwm/pwm-ep93xx.c21
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c49
-rw-r--r--drivers/pwm/pwm-hibvt.c70
-rw-r--r--drivers/pwm/pwm-img.c60
-rw-r--r--drivers/pwm/pwm-imx-tpm.c44
-rw-r--r--drivers/pwm/pwm-imx1.c20
-rw-r--r--drivers/pwm/pwm-imx27.c35
-rw-r--r--drivers/pwm/pwm-intel-lgm.c17
-rw-r--r--drivers/pwm/pwm-iqs620a.c30
-rw-r--r--drivers/pwm/pwm-jz4740.c36
-rw-r--r--drivers/pwm/pwm-keembay.c17
-rw-r--r--drivers/pwm/pwm-lp3943.c17
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c34
-rw-r--r--drivers/pwm/pwm-lpc32xx.c21
-rw-r--r--drivers/pwm/pwm-lpss-pci.c10
-rw-r--r--drivers/pwm/pwm-lpss-platform.c10
-rw-r--r--drivers/pwm/pwm-lpss.c38
-rw-r--r--drivers/pwm/pwm-lpss.h1
-rw-r--r--drivers/pwm/pwm-mediatek.c38
-rw-r--r--drivers/pwm/pwm-meson.c110
-rw-r--r--drivers/pwm/pwm-microchip-core.c17
-rw-r--r--drivers/pwm/pwm-mtk-disp.c25
-rw-r--r--drivers/pwm/pwm-mxs.c32
-rw-r--r--drivers/pwm/pwm-ntxec.c14
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c47
-rw-r--r--drivers/pwm/pwm-pca9685.c161
-rw-r--r--drivers/pwm/pwm-pxa.c25
-rw-r--r--drivers/pwm/pwm-raspberrypi-poe.c20
-rw-r--r--drivers/pwm/pwm-rcar.c27
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c20
-rw-r--r--drivers/pwm/pwm-rockchip.c24
-rw-r--r--drivers/pwm/pwm-rz-mtu3.c60
-rw-r--r--drivers/pwm/pwm-samsung.c94
-rw-r--r--drivers/pwm/pwm-sifive.c30
-rw-r--r--drivers/pwm/pwm-sl28cpld.c13
-rw-r--r--drivers/pwm/pwm-spear.c18
-rw-r--r--drivers/pwm/pwm-sprd.c58
-rw-r--r--drivers/pwm/pwm-sti.c70
-rw-r--r--drivers/pwm/pwm-stm32-lp.c31
-rw-r--r--drivers/pwm/pwm-stm32.c56
-rw-r--r--drivers/pwm/pwm-stmpe.c58
-rw-r--r--drivers/pwm/pwm-sun4i.c100
-rw-r--r--drivers/pwm/pwm-sunplus.c17
-rw-r--r--drivers/pwm/pwm-tegra.c50
-rw-r--r--drivers/pwm/pwm-tiecap.c55
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c72
-rw-r--r--drivers/pwm/pwm-twl-led.c55
-rw-r--r--drivers/pwm/pwm-twl.c50
-rw-r--r--drivers/pwm/pwm-visconti.c17
-rw-r--r--drivers/pwm/pwm-vt8500.c43
-rw-r--r--drivers/pwm/pwm-xilinx.c34
-rw-r--r--drivers/pwm/sysfs.c4
-rw-r--r--drivers/ras/Kconfig13
-rw-r--r--drivers/ras/Makefile3
-rw-r--r--drivers/ras/amd/atl/Kconfig21
-rw-r--r--drivers/ras/amd/atl/Makefile18
-rw-r--r--drivers/ras/amd/atl/access.c133
-rw-r--r--drivers/ras/amd/atl/core.c225
-rw-r--r--drivers/ras/amd/atl/dehash.c500
-rw-r--r--drivers/ras/amd/atl/denormalize.c718
-rw-r--r--drivers/ras/amd/atl/internal.h306
-rw-r--r--drivers/ras/amd/atl/map.c682
-rw-r--r--drivers/ras/amd/atl/reg_fields.h606
-rw-r--r--drivers/ras/amd/atl/system.c288
-rw-r--r--drivers/ras/amd/atl/umc.c341
-rw-r--r--drivers/ras/amd/fmpm.c1013
-rw-r--r--drivers/ras/cec.c10
-rw-r--r--drivers/ras/debugfs.c8
-rw-r--r--drivers/ras/debugfs.h2
-rw-r--r--drivers/ras/ras.c31
-rw-r--r--drivers/regulator/core.c3
-rw-r--r--drivers/regulator/da9055-regulator.c48
-rw-r--r--drivers/regulator/da9121-regulator.c1
-rw-r--r--drivers/regulator/fixed-helper.c4
-rw-r--r--drivers/regulator/internal.h2
-rw-r--r--drivers/regulator/lp873x-regulator.c3
-rw-r--r--drivers/regulator/lp87565-regulator.c3
-rw-r--r--drivers/regulator/lp8788-buck.c64
-rw-r--r--drivers/regulator/max5970-regulator.c8
-rw-r--r--drivers/regulator/max8973-regulator.c36
-rw-r--r--drivers/regulator/max8997-regulator.c85
-rw-r--r--drivers/regulator/max8998.c150
-rw-r--r--drivers/regulator/mp8859.c252
-rw-r--r--drivers/regulator/pwm-regulator.c40
-rw-r--r--drivers/regulator/qcom_smd-regulator.c19
-rw-r--r--drivers/regulator/rk808-regulator.c10
-rw-r--r--drivers/regulator/userspace-consumer.c1
-rw-r--r--drivers/rtc/lib_test.c2
-rw-r--r--drivers/s390/block/dasd.c190
-rw-r--r--drivers/s390/block/dasd_3990_erp.c80
-rw-r--r--drivers/s390/block/dasd_alias.c8
-rw-r--r--drivers/s390/block/dasd_devmap.c34
-rw-r--r--drivers/s390/block/dasd_diag.c26
-rw-r--r--drivers/s390/block/dasd_eckd.c186
-rw-r--r--drivers/s390/block/dasd_eer.c7
-rw-r--r--drivers/s390/block/dasd_erp.c9
-rw-r--r--drivers/s390/block/dasd_fba.c88
-rw-r--r--drivers/s390/block/dasd_genhd.c54
-rw-r--r--drivers/s390/block/dasd_int.h37
-rw-r--r--drivers/s390/block/dasd_ioctl.c8
-rw-r--r--drivers/s390/block/dasd_proc.c5
-rw-r--r--drivers/s390/block/dcssblk.c10
-rw-r--r--drivers/s390/block/scm_blk.c17
-rw-r--r--drivers/s390/char/vmur.c4
-rw-r--r--drivers/s390/char/zcore.c1
-rw-r--r--drivers/s390/cio/ccwgroup.c4
-rw-r--r--drivers/s390/cio/chsc.c4
-rw-r--r--drivers/s390/cio/chsc_sch.c20
-rw-r--r--drivers/s390/cio/cmf.c6
-rw-r--r--drivers/s390/cio/css.c4
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/device_ops.c6
-rw-r--r--drivers/s390/cio/scm.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c257
-rw-r--r--drivers/s390/crypto/ap_bus.h8
-rw-r--r--drivers/s390/crypto/ap_debug.h4
-rw-r--r--drivers/s390/crypto/ap_queue.c31
-rw-r--r--drivers/s390/crypto/pkey_api.c226
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c2
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c35
-rw-r--r--drivers/s390/crypto/zcrypt_api.c228
-rw-r--r--drivers/s390/crypto/zcrypt_api.h9
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c214
-rw-r--r--drivers/s390/crypto/zcrypt_debug.h4
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c127
-rw-r--r--drivers/s390/crypto/zcrypt_error.h5
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c14
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c45
-rw-r--r--drivers/s390/net/qeth_l3_main.c9
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c20
-rw-r--r--drivers/scsi/fnic/fnic.h3
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c5
-rw-r--r--drivers/scsi/fnic/fnic_main.c1
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c12
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4
-rw-r--r--drivers/scsi/scsi.c22
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_lib.c4
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/sd.c26
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c5
-rw-r--r--drivers/soc/mediatek/Kconfig9
-rw-r--r--drivers/soc/mediatek/Makefile1
-rw-r--r--drivers/soc/mediatek/mtk-socinfo.c191
-rw-r--r--drivers/soc/microchip/Kconfig2
-rw-r--r--drivers/soc/qcom/Kconfig9
-rw-r--r--drivers/soc/qcom/Makefile2
-rw-r--r--drivers/soc/qcom/apr.c2
-rw-r--r--drivers/soc/qcom/llcc-qcom.c2
-rw-r--r--drivers/soc/qcom/pmic_glink.c21
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c16
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c1
-rw-r--r--drivers/soc/qcom/qcom-pbs.c236
-rw-r--r--drivers/soc/qcom/qcom_aoss.c105
-rw-r--r--drivers/soc/qcom/smem.c11
-rw-r--r--drivers/soc/qcom/smp2p.c6
-rw-r--r--drivers/soc/qcom/socinfo.c7
-rw-r--r--drivers/soc/qcom/spm.c248
-rw-r--r--drivers/soc/qcom/trace-aoss.h48
-rw-r--r--drivers/soc/renesas/Kconfig17
-rw-r--r--drivers/soc/renesas/rcar-rst.c1
-rw-r--r--drivers/soc/renesas/renesas-soc.c8
-rw-r--r--drivers/soc/samsung/Kconfig1
-rw-r--r--drivers/soc/samsung/exynos-pmu.c235
-rw-r--r--drivers/soc/samsung/exynos-pmu.h1
-rw-r--r--drivers/soc/tegra/Kconfig5
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c118
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c23
-rw-r--r--drivers/soc/tegra/fuse/fuse.h8
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c108
-rw-r--r--drivers/soc/tegra/pmc.c87
-rw-r--r--drivers/spi/spi-cadence-quadspi.c33
-rw-r--r--drivers/spi/spi-cs42l43.c3
-rw-r--r--drivers/spi/spi-imx.c9
-rw-r--r--drivers/spi/spi-intel-pci.c1
-rw-r--r--drivers/spi/spi-mxs.c3
-rw-r--r--drivers/spi/spi-omap2-mcspi.c137
-rw-r--r--drivers/spi/spi-ppc4xx.c7
-rw-r--r--drivers/ssb/main.c2
-rw-r--r--drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt2
-rw-r--r--drivers/staging/greybus/pwm.c133
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c58
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c52
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c59
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6656/main_usb.c6
-rw-r--r--drivers/target/target_core_configfs.c48
-rw-r--r--drivers/target/target_core_iblock.c18
-rw-r--r--drivers/target/target_core_iblock.h2
-rw-r--r--drivers/target/target_core_pscsi.c31
-rw-r--r--drivers/target/target_core_pscsi.h2
-rw-r--r--drivers/tee/optee/device.c3
-rw-r--r--drivers/tee/tee_core.c2
-rw-r--r--drivers/thermal/intel/intel_hfi.c2
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c2
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c2
-rw-r--r--drivers/thunderbolt/switch.c3
-rw-r--r--drivers/thunderbolt/tb_regs.h2
-rw-r--r--drivers/thunderbolt/usb4.c2
-rw-r--r--drivers/tty/hvc/Kconfig8
-rw-r--r--drivers/tty/serial/8250/8250_dw.c6
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c60
-rw-r--r--drivers/tty/serial/fsl_lpuart.c7
-rw-r--r--drivers/tty/serial/imx.c22
-rw-r--r--drivers/tty/serial/mxs-auart.c5
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c10
-rw-r--r--drivers/tty/serial/serial_port.c25
-rw-r--r--drivers/tty/serial/stm32-usart.c4
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/ufs/core/ufshcd.c9
-rw-r--r--drivers/ufs/host/ufs-qcom.c8
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c8
-rw-r--r--drivers/usb/cdns3/core.c1
-rw-r--r--drivers/usb/cdns3/drd.c13
-rw-r--r--drivers/usb/cdns3/drd.h6
-rw-r--r--drivers/usb/cdns3/host.c16
-rw-r--r--drivers/usb/core/hcd.c23
-rw-r--r--drivers/usb/core/port.c5
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/gadget.c11
-rw-r--r--drivers/usb/dwc3/gadget.h2
-rw-r--r--drivers/usb/gadget/function/f_ncm.c10
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c3
-rw-r--r--drivers/usb/host/uhci-grlib.c1
-rw-r--r--drivers/usb/host/xhci-ring.c8
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c8
-rw-r--r--drivers/usb/roles/class.c29
-rw-r--r--drivers/usb/storage/isd200.c23
-rw-r--r--drivers/usb/storage/scsiglue.c7
-rw-r--r--drivers/usb/storage/uas.c7
-rw-r--r--drivers/usb/typec/altmodes/displayport.c18
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c9
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c1
-rw-r--r--drivers/vhost/net.c91
-rw-r--r--drivers/video/fbdev/core/fbcon.c8
-rw-r--r--drivers/video/fbdev/hyperv_fb.c2
-rw-r--r--drivers/watchdog/Kconfig1
-rw-r--r--drivers/watchdog/s3c2410_wdt.c8
-rw-r--r--drivers/xen/events/events_base.c10
-rw-r--r--drivers/xen/gntalloc.c2
-rw-r--r--drivers/xen/pcpu.c2
-rw-r--r--drivers/xen/privcmd.c15
-rw-r--r--drivers/xen/xen-balloon.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c15
-rw-r--r--drivers/zorro/zorro-driver.c2
-rw-r--r--drivers/zorro/zorro.h2
2061 files changed, 131400 insertions, 31287 deletions
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 9418c73ee8ef..4b0640226986 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -480,9 +480,8 @@ static int ivpu_pci_init(struct ivpu_device *vdev)
/* Clear any pending errors */
pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
- /* VPU 37XX does not require 10m D3hot delay */
- if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
- pdev->d3hot_delay = 0;
+ /* NPU does not require 10m D3hot delay */
+ pdev->d3hot_delay = 0;
ret = pcim_enable_device(pdev);
if (ret) {
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 6576232f3e67..5fa8bd4603d5 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -222,7 +222,6 @@ ivpu_fw_init_wa(struct ivpu_device *vdev)
const struct vpu_firmware_header *fw_hdr = (const void *)vdev->fw->file->data;
if (IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, BOOT, 3, 17) ||
- (ivpu_hw_gen(vdev) > IVPU_HW_37XX) ||
(ivpu_test_mode & IVPU_TEST_MODE_D0I3_MSG_DISABLE))
vdev->wa.disable_d0i3_msg = true;
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index f15a93d83057..89af1006df55 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -510,22 +510,12 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
return ret;
}
-static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
-{
- ivpu_boot_dpu_active_drive(vdev, false);
- ivpu_boot_pwr_island_isolation_drive(vdev, true);
- ivpu_boot_pwr_island_trickle_drive(vdev, false);
- ivpu_boot_pwr_island_drive(vdev, false);
-
- return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
-}
-
static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
{
u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
- val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+ val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
@@ -616,12 +606,37 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)
return 0;
}
+static int ivpu_hw_37xx_ip_reset(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
+ return ret;
+ }
+
+ val = REGB_RD32(VPU_37XX_BUTTRESS_VPU_IP_RESET);
+ val = REG_SET_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+ REGB_WR32(VPU_37XX_BUTTRESS_VPU_IP_RESET, val);
+
+ ret = REGB_POLL_FLD(VPU_37XX_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+
+ return ret;
+}
+
static int ivpu_hw_37xx_reset(struct ivpu_device *vdev)
{
int ret = 0;
- if (ivpu_boot_pwr_domain_disable(vdev)) {
- ivpu_err(vdev, "Failed to disable power domain\n");
+ if (ivpu_hw_37xx_ip_reset(vdev)) {
+ ivpu_err(vdev, "Failed to reset NPU\n");
ret = -EIO;
}
@@ -661,6 +676,11 @@ static int ivpu_hw_37xx_power_up(struct ivpu_device *vdev)
{
int ret;
+ /* PLL requests may fail when powering down, so issue WP 0 here */
+ ret = ivpu_pll_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable PLL: %d\n", ret);
+
ret = ivpu_hw_37xx_d0i3_disable(vdev);
if (ret)
ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
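[Editor's sketch — not part of the patch. The ivpu_hw_37xx_ip_reset() helper added above is an instance of the common poll/trigger/poll idiom for a self-clearing reset bit: wait for the bit to be idle, set it, then wait for hardware to clear it again. A minimal generic rendering, where reg_read()/reg_write()/reg_poll_bit() and the IP_RESET_REG/TRIGGER_BIT/TIMEOUT_US constants are hypothetical stand-ins, not driver APIs:]

#include <linux/types.h>
#include <linux/bits.h>

/* Hypothetical MMIO helpers, assumed for illustration only. */
extern u32  reg_read(u32 reg);
extern void reg_write(u32 reg, u32 val);
extern int  reg_poll_bit(u32 reg, u32 bit, u32 expect, u32 timeout_us);

#define IP_RESET_REG 0x0        /* placeholder register offset */
#define TRIGGER_BIT  BIT(0)     /* self-clearing reset trigger */
#define TIMEOUT_US   10000

static int ip_reset(void)
{
        int ret;

        /* Make sure no earlier reset is still in flight. */
        ret = reg_poll_bit(IP_RESET_REG, TRIGGER_BIT, 0, TIMEOUT_US);
        if (ret)
                return ret;

        /* Pulse TRIGGER; hardware clears it once the reset completes. */
        reg_write(IP_RESET_REG, reg_read(IP_RESET_REG) | TRIGGER_BIT);

        /* Completion is signalled by TRIGGER reading back as zero. */
        return reg_poll_bit(IP_RESET_REG, TRIGGER_BIT, 0, TIMEOUT_US);
}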
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index 704288084f37..a1523d0b1ef3 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -24,7 +24,7 @@
#define SKU_HW_ID_SHIFT 16u
#define SKU_HW_ID_MASK 0xffff0000u
-#define PLL_CONFIG_DEFAULT 0x1
+#define PLL_CONFIG_DEFAULT 0x0
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_REF_CLK_FREQ (50 * 1000000)
@@ -530,7 +530,7 @@ static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);
val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
- val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
+ val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);
val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
@@ -704,7 +704,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
u32 tile_disable;
- u32 tile_enable;
u32 fuse;
fuse = REGB_RD32(VPU_40XX_BUTTRESS_TILE_FUSE);
@@ -725,10 +724,6 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
else
ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", TILE_MAX_NUM);
- tile_enable = (~tile_disable) & TILE_MAX_MASK;
-
- hw->sku = REG_SET_FLD_NUM(SKU, HW_ID, LNL_HW_ID, hw->sku);
- hw->sku = REG_SET_FLD_NUM(SKU, TILE, tile_enable, hw->sku);
hw->tile_fuse = tile_disable;
hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 0440bee3ecaf..e70cfb859339 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -294,7 +294,7 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
return -ENOENT;
if (job->file_priv->has_mmu_faults)
- job_status = VPU_JSM_STATUS_ABORTED;
+ job_status = DRM_IVPU_JOB_STATUS_ABORTED;
job->bos[CMD_BUF_IDX]->job_status = job_status;
dma_fence_signal(job->done_fence);
@@ -315,7 +315,7 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
unsigned long id;
xa_for_each(&vdev->submitted_jobs_xa, id, job)
- ivpu_job_signal_and_destroy(vdev, id, VPU_JSM_STATUS_ABORTED);
+ ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
}
static int ivpu_job_submit(struct ivpu_job *job)
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 9a3122ffce03..91bd640655ab 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -72,10 +72,10 @@
#define IVPU_MMU_Q_COUNT_LOG2 4 /* 16 entries */
#define IVPU_MMU_Q_COUNT ((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
-#define IVPU_MMU_Q_WRAP_BIT (IVPU_MMU_Q_COUNT << 1)
-#define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1)
-#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
+#define IVPU_MMU_Q_WRAP_MASK GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0)
+#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
#define IVPU_MMU_Q_IDX(val) ((val) & IVPU_MMU_Q_IDX_MASK)
+#define IVPU_MMU_Q_WRP(val) ((val) & IVPU_MMU_Q_COUNT)
#define IVPU_MMU_CMDQ_CMD_SIZE 16
#define IVPU_MMU_CMDQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)
@@ -475,20 +475,32 @@ static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
return 0;
}
+static bool ivpu_mmu_queue_is_full(struct ivpu_mmu_queue *q)
+{
+ return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
+ (IVPU_MMU_Q_WRP(q->prod) != IVPU_MMU_Q_WRP(q->cons)));
+}
+
+static bool ivpu_mmu_queue_is_empty(struct ivpu_mmu_queue *q)
+{
+ return ((IVPU_MMU_Q_IDX(q->prod) == IVPU_MMU_Q_IDX(q->cons)) &&
+ (IVPU_MMU_Q_WRP(q->prod) == IVPU_MMU_Q_WRP(q->cons)));
+}
+
static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
{
- struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
- u64 *queue_buffer = q->base;
- int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
+ struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
+ u64 *queue_buffer = cmdq->base;
+ int idx = IVPU_MMU_Q_IDX(cmdq->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
- if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) {
+ if (ivpu_mmu_queue_is_full(cmdq)) {
ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
return -EBUSY;
}
queue_buffer[idx] = data0;
queue_buffer[idx + 1] = data1;
- q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
+ cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);
@@ -560,7 +572,6 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
mmu->cmdq.cons = 0;
memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
- clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
mmu->evtq.prod = 0;
mmu->evtq.cons = 0;
@@ -874,14 +885,10 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
evtq->prod = REGV_RD32(IVPU_MMU_REG_EVTQ_PROD_SEC);
- if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
+ if (ivpu_mmu_queue_is_empty(evtq))
return NULL;
- clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
-
evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
- REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, evtq->cons);
-
return evt;
}
@@ -902,6 +909,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
}
ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+ REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
}
}
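[Editor's sketch — not part of the patch. The ivpu_mmu.c hunks above replace CIRC_SPACE()/CIRC_CNT() with wrap-bit tests: with a power-of-two queue, prod and cons carry log2(N) index bits plus one extra wrap bit, so a completely full queue can be distinguished from an empty one without sacrificing a slot. Equal index with differing wrap bits means full; equal index and equal wrap bits means empty. A self-contained sketch with illustrative names (Q_IDX/Q_WRP are modelled on the IVPU_MMU_Q_* macros, not taken from them):]

#define Q_LOG2      4
#define Q_COUNT     (1u << Q_LOG2)          /* 16 entries                 */
#define Q_WRAP_MASK ((Q_COUNT << 1) - 1)    /* index bits plus wrap bit   */
#define Q_IDX(v)    ((v) & (Q_COUNT - 1))   /* slot within the ring       */
#define Q_WRP(v)    ((v) & Q_COUNT)         /* the wrap bit itself        */

static int q_full(unsigned int prod, unsigned int cons)
{
        /* Same slot but opposite wrap bit: producer lapped the consumer. */
        return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) != Q_WRP(cons);
}

static int q_empty(unsigned int prod, unsigned int cons)
{
        /* Same slot and same wrap bit: nothing outstanding. */
        return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) == Q_WRP(cons);
}

/* Producers and consumers advance with: p = (p + 1) & Q_WRAP_MASK; */

[For Q_LOG2 = 4, the patch's GENMASK(IVPU_MMU_Q_COUNT_LOG2, 0) and the sketch's (Q_COUNT << 1) - 1 are the same mask, 0x1f.]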
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index f501f27ebafd..5f73854234ba 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -58,11 +58,14 @@ static int ivpu_suspend(struct ivpu_device *vdev)
{
int ret;
+ /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
+ pci_save_state(to_pci_dev(vdev->drm.dev));
+
ret = ivpu_shutdown(vdev);
- if (ret) {
+ if (ret)
ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
- return ret;
- }
+
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
return ret;
}
@@ -71,6 +74,9 @@ static int ivpu_resume(struct ivpu_device *vdev)
{
int ret;
+ pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+ pci_restore_state(to_pci_dev(vdev->drm.dev));
+
retry:
ret = ivpu_hw_power_up(vdev);
if (ret) {
@@ -120,15 +126,20 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
ivpu_fw_log_dump(vdev);
-retry:
- ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
- if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
- cond_resched();
- goto retry;
- }
+ atomic_inc(&vdev->pm->reset_counter);
+ atomic_set(&vdev->pm->reset_pending, 1);
+ down_write(&vdev->pm->reset_lock);
+
+ ivpu_suspend(vdev);
+ ivpu_pm_prepare_cold_boot(vdev);
+ ivpu_jobs_abort_all(vdev);
+
+ ret = ivpu_resume(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
- if (ret && ret != -EAGAIN)
- ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
+ up_write(&vdev->pm->reset_lock);
+ atomic_set(&vdev->pm->reset_pending, 0);
kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
pm_runtime_mark_last_busy(vdev->drm.dev);
@@ -200,9 +211,6 @@ int ivpu_pm_suspend_cb(struct device *dev)
ivpu_suspend(vdev);
ivpu_pm_prepare_warm_boot(vdev);
- pci_save_state(to_pci_dev(dev));
- pci_set_power_state(to_pci_dev(dev), PCI_D3hot);
-
ivpu_dbg(vdev, PM, "Suspend done.\n");
return 0;
@@ -216,9 +224,6 @@ int ivpu_pm_resume_cb(struct device *dev)
ivpu_dbg(vdev, PM, "Resume..\n");
- pci_set_power_state(to_pci_dev(dev), PCI_D0);
- pci_restore_state(to_pci_dev(dev));
-
ret = ivpu_resume(vdev);
if (ret)
ivpu_err(vdev, "Failed to resume: %d\n", ret);
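[Editor's sketch — not part of the patch. The ivpu_pm.c hunks move pci_save_state()/pci_set_power_state() out of the system PM callbacks and into ivpu_suspend()/ivpu_resume() themselves, so the new recovery path gets the same handling; per the added comment, config space is saved before shutdown because it can be corrupted once the NPU hangs. The ordering is the standard one, sketched below with hypothetical do_device_shutdown()/do_device_power_up() placeholders around the real PCI core calls:]

#include <linux/pci.h>

extern void do_device_shutdown(void);   /* hypothetical quiesce step  */
extern int  do_device_power_up(void);   /* hypothetical re-init step  */

static int dev_suspend(struct pci_dev *pdev)
{
        pci_save_state(pdev);            /* while config space is sane */
        do_device_shutdown();
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int dev_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);  /* power up first          */
        pci_restore_state(pdev);            /* then replay saved state */
        return do_device_power_up();
}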
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 7b7c605166e0..ab2a82cb1b0b 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -26,7 +26,6 @@
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
-#include <linux/cxl-event.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
@@ -674,78 +673,6 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
schedule_work(&entry->work);
}
-/*
- * Only a single callback can be registered for CXL CPER events.
- */
-static DECLARE_RWSEM(cxl_cper_rw_sem);
-static cxl_cper_callback cper_callback;
-
-/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
-
-/*
- * General Media Event Record
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
- */
-#define CPER_SEC_CXL_GEN_MEDIA_GUID \
- GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \
- 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
-
-/*
- * DRAM Event Record
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
- */
-#define CPER_SEC_CXL_DRAM_GUID \
- GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \
- 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
-
-/*
- * Memory Module Event Record
- * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
- */
-#define CPER_SEC_CXL_MEM_MODULE_GUID \
- GUID_INIT(0xfe927475, 0xdd59, 0x4339, \
- 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
-
-static void cxl_cper_post_event(enum cxl_event_type event_type,
- struct cxl_cper_event_rec *rec)
-{
- if (rec->hdr.length <= sizeof(rec->hdr) ||
- rec->hdr.length > sizeof(*rec)) {
- pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
- rec->hdr.length);
- return;
- }
-
- if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
- pr_err(FW_WARN "CXL CPER invalid event\n");
- return;
- }
-
- guard(rwsem_read)(&cxl_cper_rw_sem);
- if (cper_callback)
- cper_callback(event_type, rec);
-}
-
-int cxl_cper_register_callback(cxl_cper_callback callback)
-{
- guard(rwsem_write)(&cxl_cper_rw_sem);
- if (cper_callback)
- return -EINVAL;
- cper_callback = callback;
- return 0;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_register_callback, CXL);
-
-int cxl_cper_unregister_callback(cxl_cper_callback callback)
-{
- guard(rwsem_write)(&cxl_cper_rw_sem);
- if (callback != cper_callback)
- return -EINVAL;
- cper_callback = NULL;
- return 0;
-}
-EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_callback, CXL);
-
static bool ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@@ -780,22 +707,6 @@ static bool ghes_do_proc(struct ghes *ghes,
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
- } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
- } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
- } else if (guid_equal(sec_type,
- &CPER_SEC_CXL_MEM_MODULE_GUID)) {
- struct cxl_cper_event_rec *rec =
- acpi_hest_get_payload(gdata);
-
- cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
} else {
void *err = acpi_hest_get_payload(gdata);
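
[Note: the removed cxl_cper_post_event() rejected records whose header-declared length either failed to cover more than the header or exceeded the container; that bounds check is the standard defense wherever firmware hands the kernel variable-length records. A generic sketch of the same test:]

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct rec_hdr { uint32_t length; uint32_t validation_bits; };
    struct rec     { struct rec_hdr hdr; uint8_t payload[64]; };

    static int rec_is_sane(const struct rec *r)
    {
    	/* must cover more than the header, and fit in the container */
    	return r->hdr.length > sizeof(r->hdr) &&
    	       r->hdr.length <= sizeof(*r);
    }

    int main(void)
    {
    	struct rec r;

    	memset(&r, 0, sizeof(r));
    	r.hdr.length = sizeof(r.hdr);   /* header only: rejected */
    	printf("%d\n", rec_is_sane(&r));
    	r.hdr.length = sizeof(r);       /* exactly fits: accepted */
    	printf("%d\n", rec_is_sane(&r));
    	return 0;
    }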
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index dbdee2924594..02255795b800 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -525,10 +525,12 @@ static void acpi_ec_clear(struct acpi_ec *ec)
static void acpi_ec_enable_event(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec))
__acpi_ec_enable_event(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
/* Drain additional events if hardware requires that */
if (EC_FLAGS_CLEAR_ON_RESUME)
@@ -544,9 +546,11 @@ static void __acpi_ec_flush_work(void)
static void acpi_ec_disable_event(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
__acpi_ec_disable_event(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
/*
* When ec_freeze_events is true, we need to flush events in
@@ -567,9 +571,10 @@ void acpi_ec_flush_work(void)
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
+ unsigned long flags;
bool guarded;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
/*
* If firmware SCI_EVT clearing timing is "event", we actually
* don't know when the SCI_EVT will be cleared by firmware after
@@ -585,29 +590,31 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec)
guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
ec->event_state != EC_EVENT_READY &&
(!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return guarded;
}
static int ec_transaction_polled(struct acpi_ec *ec)
{
+ unsigned long flags;
int ret = 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
ret = 1;
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
static int ec_transaction_completed(struct acpi_ec *ec)
{
+ unsigned long flags;
int ret = 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
ret = 1;
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return ret;
}
@@ -749,6 +756,7 @@ static int ec_guard(struct acpi_ec *ec)
static int ec_poll(struct acpi_ec *ec)
{
+ unsigned long flags;
int repeat = 5; /* number of command restarts */
while (repeat--) {
@@ -757,14 +765,14 @@ static int ec_poll(struct acpi_ec *ec)
do {
if (!ec_guard(ec))
return 0;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
advance_transaction(ec, false);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
} while (time_before(jiffies, delay));
pr_debug("controller reset, restart transaction\n");
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
start_transaction(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
return -ETIME;
}
@@ -772,10 +780,11 @@ static int ec_poll(struct acpi_ec *ec)
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
struct transaction *t)
{
+ unsigned long tmp;
int ret = 0;
/* start transaction */
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, tmp);
/* Enable GPE for command processing (IBF=0/OBF=1) */
if (!acpi_ec_submit_flushable_request(ec)) {
ret = -EINVAL;
@@ -786,11 +795,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
ec->curr = t;
ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
start_transaction(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, tmp);
ret = ec_poll(ec);
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
acpi_ec_unmask_events(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
@@ -799,7 +808,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
acpi_ec_complete_request(ec);
ec_dbg_ref(ec, "Decrease command");
unlock:
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, tmp);
return ret;
}
@@ -927,7 +936,9 @@ EXPORT_SYMBOL(ec_get_handle);
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
ec_dbg_drv("Starting EC");
/* Enable GPE for event processing (SCI_EVT=1) */
@@ -937,28 +948,31 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
}
ec_log_drv("EC started");
}
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static bool acpi_ec_stopped(struct acpi_ec *ec)
{
+ unsigned long flags;
bool flushed;
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
flushed = acpi_ec_flushed(ec);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
return flushed;
}
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
if (acpi_ec_started(ec)) {
ec_dbg_drv("Stopping EC");
set_bit(EC_FLAGS_STOPPED, &ec->flags);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
wait_event(ec->wait, acpi_ec_stopped(ec));
- spin_lock(&ec->lock);
+ spin_lock_irqsave(&ec->lock, flags);
/* Disable GPE for event processing (SCI_EVT=1) */
if (!suspending) {
acpi_ec_complete_request(ec);
@@ -969,25 +983,29 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
clear_bit(EC_FLAGS_STOPPED, &ec->flags);
ec_log_drv("EC stopped");
}
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_enter_noirq(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
ec->busy_polling = true;
ec->polling_guard = 0;
ec_log_drv("interrupt blocked");
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static void acpi_ec_leave_noirq(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
ec->busy_polling = ec_busy_polling;
ec->polling_guard = ec_polling_guard;
ec_log_drv("interrupt unblocked");
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
void acpi_ec_block_transactions(void)
@@ -1119,9 +1137,9 @@ static void acpi_ec_event_processor(struct work_struct *work)
ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->queries_in_progress--;
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
acpi_ec_put_query_handler(handler);
kfree(q);
@@ -1184,12 +1202,12 @@ static int acpi_ec_submit_query(struct acpi_ec *ec)
*/
ec_dbg_evt("Query(0x%02x) scheduled", value);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->queries_in_progress++;
queue_work(ec_query_wq, &q->work);
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
return 0;
@@ -1205,14 +1223,14 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec_dbg_evt("Event started");
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
while (ec->events_to_process) {
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
acpi_ec_submit_query(ec);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
ec->events_to_process--;
}
@@ -1229,11 +1247,11 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec_dbg_evt("Event stopped");
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
guard_timeout = !!ec_guard(ec);
- spin_lock(&ec->lock);
+ spin_lock_irq(&ec->lock);
/* Take care of SCI_EVT unless someone else is doing that. */
if (guard_timeout && !ec->curr)
@@ -1246,7 +1264,7 @@ static void acpi_ec_event_handler(struct work_struct *work)
ec->events_in_progress--;
- spin_unlock(&ec->lock);
+ spin_unlock_irq(&ec->lock);
}
static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
@@ -1271,11 +1289,13 @@ static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt
static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
{
- spin_lock(&ec->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ec->lock, flags);
clear_gpe_and_advance_transaction(ec, true);
- spin_unlock(&ec->lock);
+ spin_unlock_irqrestore(&ec->lock, flags);
}
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -2085,7 +2105,7 @@ bool acpi_ec_dispatch_gpe(void)
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
- spin_lock(&first_ec->lock);
+ spin_lock_irq(&first_ec->lock);
if (acpi_ec_gpe_status_set(first_ec)) {
pm_pr_dbg("ACPI EC GPE status set\n");
@@ -2094,7 +2114,7 @@ bool acpi_ec_dispatch_gpe(void)
work_in_progress = acpi_ec_work_in_progress(first_ec);
}
- spin_unlock(&first_ec->lock);
+ spin_unlock_irq(&first_ec->lock);
if (!work_in_progress)
return false;
@@ -2107,11 +2127,11 @@ bool acpi_ec_dispatch_gpe(void)
pm_pr_dbg("ACPI EC work flushed\n");
- spin_lock(&first_ec->lock);
+ spin_lock_irq(&first_ec->lock);
work_in_progress = acpi_ec_work_in_progress(first_ec);
- spin_unlock(&first_ec->lock);
+ spin_unlock_irq(&first_ec->lock);
} while (work_in_progress && !pm_wakeup_pending());
return false;
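
[Note: every ec.c hunk above is the same conversion. ec->lock is also taken on the interrupt path (acpi_ec_handle_interrupt), so thread-context users must disable local interrupts while holding it, or an interrupt arriving on the same CPU could spin forever on a lock that CPU already holds. The irqsave form is for callers whose IRQ state is unknown; the plain _irq form suffices where interrupts are known enabled, such as workqueue handlers. Kernel-style sketch of the two idioms, reusing the acpi_ec names for illustration only:]

    static void touch_ec_state_any_context(struct acpi_ec *ec)
    {
    	unsigned long flags;

    	/* Caller's IRQ state is unknown: save it, disable, restore. */
    	spin_lock_irqsave(&ec->lock, flags);
    	/* ... state shared with the interrupt path ... */
    	spin_unlock_irqrestore(&ec->lock, flags);
    }

    static void touch_ec_state_from_work(struct acpi_ec *ec)
    {
    	/* Workqueues run with IRQs enabled, so plain _irq suffices. */
    	spin_lock_irq(&ec->lock);
    	/* ... */
    	spin_unlock_irq(&ec->lock);
    }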
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 42b51c9812a0..928ec93c6b45 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -116,15 +116,14 @@ config SATA_AHCI
If unsure, say N.
config SATA_MOBILE_LPM_POLICY
- int "Default SATA Link Power Management policy for low power chipsets"
+ int "Default SATA Link Power Management policy"
range 0 4
default 0
depends on SATA_AHCI
help
Select the Default SATA Link Power Management (LPM) policy to use
for chipsets / "South Bridges" supporting low-power modes. Such
- chipsets are typically found on most laptops but desktops and
- servers now also widely use chipsets supporting low power modes.
+ chipsets are ubiquitous across laptops, desktops and servers.
The value set has the following meanings:
0 => Keep firmware settings
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index da2e74fce2d9..78570684ff68 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -50,11 +50,18 @@ enum board_ids {
board_ahci,
board_ahci_43bit_dma,
board_ahci_ign_iferr,
- board_ahci_low_power,
board_ahci_no_debounce_delay,
- board_ahci_nomsi,
- board_ahci_noncq,
- board_ahci_nosntf,
+ board_ahci_no_msi,
+ /*
+ * board_ahci_pcs_quirk is for legacy Intel platforms.
+ * Modern Intel platforms should use board_ahci instead.
+ * (Some modern Intel platforms might have been added with
+ * board_ahci_pcs_quirk, however, we cannot change them to board_ahci
+ * without testing that the platform actually works without the quirk.)
+ */
+ board_ahci_pcs_quirk,
+ board_ahci_pcs_quirk_no_devslp,
+ board_ahci_pcs_quirk_no_sntf,
board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
@@ -68,12 +75,6 @@ enum board_ids {
board_ahci_sb700, /* for SB700 and SB800 */
board_ahci_vt8251,
- /*
- * board IDs for Intel chipsets that support more than 6 ports
- * *and* end up needing the PCS quirk.
- */
- board_ahci_pcs7,
-
/* aliases */
board_ahci_mcp_linux = board_ahci_mcp65,
board_ahci_mcp67 = board_ahci_mcp65,
@@ -143,36 +144,38 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_low_power] = {
- AHCI_HFLAGS (AHCI_HFLAG_USE_LPM_POLICY),
+ [board_ahci_no_debounce_delay] = {
.flags = AHCI_FLAG_COMMON,
+ .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_no_debounce_delay] = {
+ [board_ahci_no_msi] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
.flags = AHCI_FLAG_COMMON,
- .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_nomsi] = {
- AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
+ [board_ahci_pcs_quirk] = {
+ AHCI_HFLAGS (AHCI_HFLAG_INTEL_PCS_QUIRK),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_noncq] = {
- AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
+ [board_ahci_pcs_quirk_no_devslp] = {
+ AHCI_HFLAGS (AHCI_HFLAG_INTEL_PCS_QUIRK |
+ AHCI_HFLAG_NO_DEVSLP),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
- [board_ahci_nosntf] = {
- AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
+ [board_ahci_pcs_quirk_no_sntf] = {
+ AHCI_HFLAGS (AHCI_HFLAG_INTEL_PCS_QUIRK |
+ AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -194,6 +197,7 @@ static const struct ata_port_info ahci_port_info[] = {
.port_ops = &ahci_ops,
},
[board_ahci_avn] = {
+ AHCI_HFLAGS (AHCI_HFLAG_INTEL_PCS_QUIRK),
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -252,119 +256,113 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_vt8251_ops,
},
- [board_ahci_pcs7] = {
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
- },
};
static const struct pci_device_id ahci_pci_tbl[] = {
/* Intel */
- { PCI_VDEVICE(INTEL, 0x06d6), board_ahci }, /* Comet Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
- { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
- { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
- { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
- { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
+ { PCI_VDEVICE(INTEL, 0x06d6), board_ahci_pcs_quirk }, /* Comet Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x2652), board_ahci_pcs_quirk }, /* ICH6 */
+ { PCI_VDEVICE(INTEL, 0x2653), board_ahci_pcs_quirk }, /* ICH6M */
+ { PCI_VDEVICE(INTEL, 0x27c1), board_ahci_pcs_quirk }, /* ICH7 */
+ { PCI_VDEVICE(INTEL, 0x27c5), board_ahci_pcs_quirk }, /* ICH7M */
+ { PCI_VDEVICE(INTEL, 0x27c3), board_ahci_pcs_quirk }, /* ICH7R */
{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
- { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
- { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
- { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
- { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
- { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
- { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8/Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
- { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
- { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
- { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2929), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292a), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292b), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292c), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292f), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x294e), board_ahci_low_power }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
- { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
- { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
- { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
- { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
- { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
- { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
- { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_low_power }, /* PCH M AHCI */
- { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_low_power }, /* PCH M RAID */
- { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
- { PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
- { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
- { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_low_power }, /* CPT M AHCI */
- { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_low_power }, /* CPT M RAID */
- { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
- { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
- { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
- { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_low_power }, /* Panther M AHCI */
- { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_low_power }, /* Panther M RAID */
- { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
- { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_low_power }, /* Lynx M AHCI */
- { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_low_power }, /* Lynx M RAID */
- { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_low_power }, /* Lynx M RAID */
- { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_low_power }, /* Lynx M RAID */
- { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_low_power }, /* Lynx LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_low_power }, /* Lynx LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_low_power }, /* Lynx LP RAID */
- { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_low_power }, /* Cannon Lake PCH-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
- { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
- { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x2681), board_ahci_pcs_quirk }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x2682), board_ahci_pcs_quirk }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x2683), board_ahci_pcs_quirk }, /* ESB2 */
+ { PCI_VDEVICE(INTEL, 0x27c6), board_ahci_pcs_quirk }, /* ICH7-M DH */
+ { PCI_VDEVICE(INTEL, 0x2821), board_ahci_pcs_quirk }, /* ICH8 */
+ { PCI_VDEVICE(INTEL, 0x2822), board_ahci_pcs_quirk_no_sntf }, /* ICH8/Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0x2824), board_ahci_pcs_quirk }, /* ICH8 */
+ { PCI_VDEVICE(INTEL, 0x2829), board_ahci_pcs_quirk }, /* ICH8M */
+ { PCI_VDEVICE(INTEL, 0x282a), board_ahci_pcs_quirk }, /* ICH8M */
+ { PCI_VDEVICE(INTEL, 0x2922), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2923), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2924), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2925), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2927), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x2929), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292a), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292b), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292c), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292f), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x294d), board_ahci_pcs_quirk }, /* ICH9 */
+ { PCI_VDEVICE(INTEL, 0x294e), board_ahci_pcs_quirk }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x502a), board_ahci_pcs_quirk }, /* Tolapai */
+ { PCI_VDEVICE(INTEL, 0x502b), board_ahci_pcs_quirk }, /* Tolapai */
+ { PCI_VDEVICE(INTEL, 0x3a05), board_ahci_pcs_quirk }, /* ICH10 */
+ { PCI_VDEVICE(INTEL, 0x3a22), board_ahci_pcs_quirk }, /* ICH10 */
+ { PCI_VDEVICE(INTEL, 0x3a25), board_ahci_pcs_quirk }, /* ICH10 */
+ { PCI_VDEVICE(INTEL, 0x3b22), board_ahci_pcs_quirk }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b23), board_ahci_pcs_quirk }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b24), board_ahci_pcs_quirk }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b25), board_ahci_pcs_quirk }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_pcs_quirk }, /* PCH M AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci_pcs_quirk }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_pcs_quirk }, /* PCH M RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci_pcs_quirk }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci_pcs_quirk }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_pcs_quirk }, /* CPT M AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci_pcs_quirk }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_pcs_quirk }, /* CPT M RAID */
+ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci_pcs_quirk }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci_pcs_quirk }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1d02), board_ahci_pcs_quirk }, /* PBG AHCI */
+ { PCI_VDEVICE(INTEL, 0x1d04), board_ahci_pcs_quirk }, /* PBG RAID */
+ { PCI_VDEVICE(INTEL, 0x1d06), board_ahci_pcs_quirk }, /* PBG RAID */
+ { PCI_VDEVICE(INTEL, 0x2323), board_ahci_pcs_quirk }, /* DH89xxCC AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e02), board_ahci_pcs_quirk }, /* Panther Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_pcs_quirk }, /* Panther M AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e04), board_ahci_pcs_quirk }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e05), board_ahci_pcs_quirk }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci_pcs_quirk }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_pcs_quirk }, /* Panther M RAID */
+ { PCI_VDEVICE(INTEL, 0x1e0e), board_ahci_pcs_quirk }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c02), board_ahci_pcs_quirk }, /* Lynx Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_pcs_quirk }, /* Lynx M AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c04), board_ahci_pcs_quirk }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_pcs_quirk }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c06), board_ahci_pcs_quirk }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_pcs_quirk }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci_pcs_quirk }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_pcs_quirk }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_pcs_quirk }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_pcs_quirk }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_pcs_quirk }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_pcs_quirk }, /* Cannon Lake PCH-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci_pcs_quirk }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f23), board_ahci_pcs_quirk }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f25), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f26), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f27), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci_pcs_quirk }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci_pcs_quirk }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
@@ -373,65 +371,65 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg/Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* *burg SATA0 'RAID' */
- { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* *burg SATA1 'RAID' */
- { PCI_VDEVICE(INTEL, 0x282f), board_ahci }, /* *burg SATA2 'RAID' */
- { PCI_VDEVICE(INTEL, 0x43d4), board_ahci }, /* Rocket Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x43d5), board_ahci }, /* Rocket Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x43d6), board_ahci }, /* Rocket Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x43d7), board_ahci }, /* Rocket Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
- { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
- { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
- { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_low_power }, /* Wildcat LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_low_power }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_low_power }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_low_power }, /* Wildcat LP RAID */
- { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
- { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_low_power }, /* 9 Series M AHCI */
- { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_low_power }, /* 9 Series M RAID */
- { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_low_power }, /* 9 Series M RAID */
- { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_low_power }, /* 9 Series M RAID */
- { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_low_power }, /* Sunrise LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_low_power }, /* Sunrise LP RAID */
- { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_low_power }, /* Sunrise LP RAID */
- { PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci_low_power }, /* Sunrise M AHCI */
- { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa107), board_ahci_low_power }, /* Sunrise M RAID */
- { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
- { PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */
- { PCI_VDEVICE(INTEL, 0xa386), board_ahci }, /* Comet Lake PCH-V RAID */
- { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_low_power }, /* Bay Trail AHCI */
- { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_low_power }, /* Bay Trail AHCI */
- { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_low_power }, /* Cherry Tr. AHCI */
- { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_low_power }, /* ApolloLake AHCI */
- { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
- { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
- { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x2823), board_ahci_pcs_quirk }, /* Wellsburg/Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0x2826), board_ahci_pcs_quirk }, /* *burg SATA0 'RAID' */
+ { PCI_VDEVICE(INTEL, 0x2827), board_ahci_pcs_quirk }, /* *burg SATA1 'RAID' */
+ { PCI_VDEVICE(INTEL, 0x282f), board_ahci_pcs_quirk }, /* *burg SATA2 'RAID' */
+ { PCI_VDEVICE(INTEL, 0x43d4), board_ahci_pcs_quirk }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x43d5), board_ahci_pcs_quirk }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x43d6), board_ahci_pcs_quirk }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x43d7), board_ahci_pcs_quirk }, /* Rocket Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci_pcs_quirk }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d04), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d06), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d62), board_ahci_pcs_quirk }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci_pcs_quirk }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x23a3), board_ahci_pcs_quirk }, /* Coleto Creek AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_pcs_quirk }, /* Wildcat LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_pcs_quirk }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_pcs_quirk }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_pcs_quirk }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci_pcs_quirk }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_pcs_quirk }, /* 9 Series M AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci_pcs_quirk }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_pcs_quirk }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci_pcs_quirk }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_pcs_quirk }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci_pcs_quirk }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_pcs_quirk }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_pcs_quirk }, /* Sunrise LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_pcs_quirk }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_pcs_quirk }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0xa102), board_ahci_pcs_quirk }, /* Sunrise Point-H AHCI */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci_pcs_quirk }, /* Sunrise M AHCI */
+ { PCI_VDEVICE(INTEL, 0xa105), board_ahci_pcs_quirk }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa106), board_ahci_pcs_quirk }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci_pcs_quirk }, /* Sunrise M RAID */
+ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci_pcs_quirk }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa182), board_ahci_pcs_quirk }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0xa186), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa202), board_ahci_pcs_quirk }, /* Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0xa206), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa252), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa256), board_ahci_pcs_quirk }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa356), board_ahci_pcs_quirk }, /* Cannon Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x06d7), board_ahci_pcs_quirk }, /* Comet Lake-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa386), board_ahci_pcs_quirk }, /* Comet Lake PCH-V RAID */
+ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_pcs_quirk }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_pcs_quirk_no_devslp }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_pcs_quirk }, /* Cherry Tr. AHCI */
+ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_pcs_quirk }, /* ApolloLake AHCI */
+ { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_pcs_quirk }, /* Ice Lake LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d3), board_ahci_pcs_quirk }, /* Comet Lake PCH-U AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_pcs_quirk }, /* Comet Lake PCH RAID */
/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
- { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_low_power }, /* Elkhart Lake AHCI */
- { PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_low_power }, /* Alder Lake-P AHCI */
+ { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_pcs_quirk }, /* Elkhart Lake AHCI */
+ { PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_pcs_quirk }, /* Alder Lake-P AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -459,14 +457,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7801), board_ahci_no_debounce_delay }, /* AMD Hudson-2 (AHCI mode) */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
- { PCI_VDEVICE(AMD, 0x7901), board_ahci_low_power }, /* AMD Green Sardine */
+ { PCI_VDEVICE(AMD, 0x7901), board_ahci }, /* AMD Green Sardine */
/* AMD is using RAID class only for ahci controllers */
{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
/* Dell S140/S150 */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_SUBVENDOR_ID_DELL, PCI_ANY_ID,
- PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
+ PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci_pcs_quirk },
/* VIA */
{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
@@ -623,8 +621,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
* Samsung SSDs found on some macbooks. NCQ times out if MSI is
* enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
*/
- { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
- { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
+ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_no_msi },
+ { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_no_msi },
/* Enmotus */
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -671,9 +669,17 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
- if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
- dev_info(&pdev->dev, "ASM1166 has only six ports\n");
- hpriv->saved_port_map = 0x3f;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA) {
+ switch (pdev->device) {
+ case 0x1166:
+ dev_info(&pdev->dev, "ASM1166 has only six ports\n");
+ hpriv->saved_port_map = 0x3f;
+ break;
+ case 0x1064:
+ dev_info(&pdev->dev, "ASM1064 has only four ports\n");
+ hpriv->saved_port_map = 0xf;
+ break;
+ }
}
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
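
[Note: the ASMedia quirk now covers two parts, and the magic masks follow directly from the port count: an N-port controller gets the low N bits of the port map. A one-liner shows where 0x3f and 0xf come from:]

    #include <stdio.h>

    static unsigned int port_mask(unsigned int nports)
    {
    	return (1U << nports) - 1;   /* low N bits set */
    }

    int main(void)
    {
    	printf("ASM1166: 0x%x\n", port_mask(6));  /* 0x3f */
    	printf("ASM1064: 0x%x\n", port_mask(4));  /* 0xf  */
    	return 0;
    }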
@@ -1423,17 +1429,6 @@ static bool ahci_broken_online(struct pci_dev *pdev)
return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
}
-static bool ahci_broken_devslp(struct pci_dev *pdev)
-{
- /* device with broken DEVSLP but still showing SDS capability */
- static const struct pci_device_id ids[] = {
- { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
- {}
- };
-
- return pci_match_id(ids, pdev);
-}
-
#ifdef CONFIG_ATA_ACPI
static void ahci_gtf_filter_workaround(struct ata_host *host)
{
@@ -1642,14 +1637,31 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
}
-static void ahci_update_initial_lpm_policy(struct ata_port *ap,
- struct ahci_host_priv *hpriv)
+static void ahci_mark_external_port(struct ata_port *ap)
{
- int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 tmp;
+ /* mark external ports (hotplug-capable, eSATA) */
+ tmp = readl(port_mmio + PORT_CMD);
+ if (((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)) ||
+ (tmp & PORT_CMD_HPCP))
+ ap->pflags |= ATA_PFLAG_EXTERNAL;
+}
+
+static void ahci_update_initial_lpm_policy(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
- /* Ignore processing for chipsets that don't use policy */
- if (!(hpriv->flags & AHCI_HFLAG_USE_LPM_POLICY))
+ /*
+ * AHCI contains a known incompatibility between LPM and hot-plug
+ * removal events, see 7.3.1 Hot Plug Removal Detection and Power
+ * Management Interaction in AHCI 1.3.1. Therefore, do not enable
+ * LPM if the port advertises itself as an external port.
+ */
+ if (ap->pflags & ATA_PFLAG_EXTERNAL)
return;
/* user modified policy via module param */
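
[Note: ahci_mark_external_port() folds the old libahci eSATA test together with the hot-plug-capable bit, and the LPM helper then refuses to enable LPM on any port flagged external, per the cited AHCI 1.3.1 section on hot plug removal versus power management. A userspace model of the test follows; the bit positions match my reading of the AHCI spec but are shown only for illustration:]

    #include <stdint.h>
    #include <stdio.h>

    #define PORT_CMD_ESP  (1U << 21)  /* external SATA port */
    #define PORT_CMD_HPCP (1U << 18)  /* hot-plug-capable port */
    #define HOST_CAP_SXS  (1U << 5)   /* controller supports eSATA */

    static int port_is_external(uint32_t port_cmd, uint32_t host_cap)
    {
    	return ((port_cmd & PORT_CMD_ESP) && (host_cap & HOST_CAP_SXS)) ||
    	       (port_cmd & PORT_CMD_HPCP);
    }

    int main(void)
    {
    	/* eSATA port on a controller advertising SXS: external,
    	 * so the LPM policy helper leaves LPM disabled for it. */
    	printf("%d\n", port_is_external(PORT_CMD_ESP, HOST_CAP_SXS));
    	/* hot-plug-capable port: external regardless of SXS */
    	printf("%d\n", port_is_external(PORT_CMD_HPCP, 0));
    	return 0;
    }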
@@ -1672,17 +1684,9 @@ update_policy:
static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
{
- const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev);
u16 tmp16;
- /*
- * Only apply the 6-port PCS quirk for known legacy platforms.
- */
- if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
- return;
-
- /* Skip applying the quirk on Denverton and beyond */
- if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
+ if (!(hpriv->flags & AHCI_HFLAG_INTEL_PCS_QUIRK))
return;
/*
@@ -1817,10 +1821,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
&dev_attr_remapped_nvme.attr,
NULL);
- /* must set flag prior to save config in order to take effect */
- if (ahci_broken_devslp(pdev))
- hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
-
#ifdef CONFIG_ARM64
if (pdev->vendor == PCI_VENDOR_ID_HUAWEI &&
pdev->device == 0xa235 &&
@@ -1934,7 +1934,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
- ahci_update_initial_lpm_policy(ap, hpriv);
+ ahci_mark_external_port(ap);
+
+ ahci_update_initial_lpm_policy(ap);
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index df8f8a1a3a34..344c87210d8f 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -241,13 +241,11 @@ enum {
AHCI_HFLAG_YES_ALPM = BIT(23), /* force ALPM cap on */
AHCI_HFLAG_NO_WRITE_TO_RO = BIT(24), /* don't write to read
only registers */
- AHCI_HFLAG_USE_LPM_POLICY = BIT(25), /* chipset that should use
- SATA_MOBILE_LPM_POLICY
- as default lpm_policy */
- AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during
+ AHCI_HFLAG_SUSPEND_PHYS = BIT(25), /* handle PHYs during
suspend/resume */
- AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */
- AHCI_HFLAG_43BIT_ONLY = BIT(29), /* 43bit DMA addr limit */
+ AHCI_HFLAG_NO_SXS = BIT(26), /* SXS not supported */
+ AHCI_HFLAG_43BIT_ONLY = BIT(27), /* 43bit DMA addr limit */
+ AHCI_HFLAG_INTEL_PCS_QUIRK = BIT(28), /* apply Intel PCS quirk */
/* ap->flags bits */
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 64f7f7d6ba84..11a2c199a7c2 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -88,7 +88,6 @@ struct ceva_ahci_priv {
u32 axicc;
bool is_cci_enabled;
int flags;
- struct reset_control *rst;
};
static unsigned int ceva_ahci_read_id(struct ata_device *dev,
@@ -189,6 +188,60 @@ static const struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
+static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
+{
+ int rc, i;
+
+ rc = ahci_platform_enable_regulators(hpriv);
+ if (rc)
+ return rc;
+
+ rc = ahci_platform_enable_clks(hpriv);
+ if (rc)
+ goto disable_regulator;
+
+ /* Assert the controller reset */
+ rc = ahci_platform_assert_rsts(hpriv);
+ if (rc)
+ goto disable_clks;
+
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_init(hpriv->phys[i]);
+ if (rc)
+ goto disable_rsts;
+ }
+
+ /* De-assert the controller reset */
+ ahci_platform_deassert_rsts(hpriv);
+
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_power_on(hpriv->phys[i]);
+ if (rc) {
+ phy_exit(hpriv->phys[i]);
+ goto disable_phys;
+ }
+ }
+
+ return 0;
+
+disable_rsts:
+ ahci_platform_deassert_rsts(hpriv);
+
+disable_phys:
+ while (--i >= 0) {
+ phy_power_off(hpriv->phys[i]);
+ phy_exit(hpriv->phys[i]);
+ }
+
+disable_clks:
+ ahci_platform_disable_clks(hpriv);
+
+disable_regulator:
+ ahci_platform_disable_regulators(hpriv);
+
+ return rc;
+}
+
static int ceva_ahci_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -203,47 +256,19 @@ static int ceva_ahci_probe(struct platform_device *pdev)
return -ENOMEM;
cevapriv->ahci_pdev = pdev;
-
- cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
- NULL);
- if (IS_ERR(cevapriv->rst))
- dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst),
- "failed to get reset\n");
-
hpriv = ahci_platform_get_resources(pdev, 0);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
- if (!cevapriv->rst) {
- rc = ahci_platform_enable_resources(hpriv);
- if (rc)
- return rc;
- } else {
- int i;
+ hpriv->rsts = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ NULL);
+ if (IS_ERR(hpriv->rsts))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hpriv->rsts),
+ "failed to get reset\n");
- rc = ahci_platform_enable_clks(hpriv);
- if (rc)
- return rc;
- /* Assert the controller reset */
- reset_control_assert(cevapriv->rst);
-
- for (i = 0; i < hpriv->nports; i++) {
- rc = phy_init(hpriv->phys[i]);
- if (rc)
- return rc;
- }
-
- /* De-assert the controller reset */
- reset_control_deassert(cevapriv->rst);
-
- for (i = 0; i < hpriv->nports; i++) {
- rc = phy_power_on(hpriv->phys[i]);
- if (rc) {
- phy_exit(hpriv->phys[i]);
- return rc;
- }
- }
- }
+ rc = ceva_ahci_platform_enable_resources(hpriv);
+ if (rc)
+ return rc;
if (of_property_read_bool(np, "ceva,broken-gen2"))
cevapriv->flags = CEVA_FLAG_BROKEN_GEN2;
@@ -252,52 +277,60 @@ static int ceva_ahci_probe(struct platform_device *pdev)
if (of_property_read_u8_array(np, "ceva,p0-cominit-params",
(u8 *)&cevapriv->pp2c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-cominit-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-cominit-params",
(u8 *)&cevapriv->pp2c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-cominit-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/* Read OOB timing value for COMWAKE from device-tree*/
if (of_property_read_u8_array(np, "ceva,p0-comwake-params",
(u8 *)&cevapriv->pp3c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-comwake-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-comwake-params",
(u8 *)&cevapriv->pp3c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-comwake-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/* Read phy BURST timing value from device-tree */
if (of_property_read_u8_array(np, "ceva,p0-burst-params",
(u8 *)&cevapriv->pp4c[0], 4) < 0) {
dev_warn(dev, "ceva,p0-burst-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u8_array(np, "ceva,p1-burst-params",
(u8 *)&cevapriv->pp4c[1], 4) < 0) {
dev_warn(dev, "ceva,p1-burst-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/* Read phy RETRY interval timing value from device-tree */
if (of_property_read_u16_array(np, "ceva,p0-retry-params",
(u16 *)&cevapriv->pp5c[0], 2) < 0) {
dev_warn(dev, "ceva,p0-retry-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
if (of_property_read_u16_array(np, "ceva,p1-retry-params",
(u16 *)&cevapriv->pp5c[1], 2) < 0) {
dev_warn(dev, "ceva,p1-retry-params property not defined\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto disable_resources;
}
/*
@@ -335,7 +368,7 @@ static int __maybe_unused ceva_ahci_resume(struct device *dev)
struct ahci_host_priv *hpriv = host->private_data;
int rc;
- rc = ahci_platform_enable_resources(hpriv);
+ rc = ceva_ahci_platform_enable_resources(hpriv);
if (rc)
return rc;
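
[Note: the new ceva_ahci_platform_enable_resources() acquires resources in bring-up order and unwinds them in reverse on failure, the usual goto-ladder shape; the probe path also hands the reset line to hpriv->rsts so the ahci_platform core manages it. A stubbed, compilable skeleton of that error-unwind shape:]

    #include <stdio.h>

    static int  enable_regulators(void)  { return 0; }
    static int  enable_clks(void)        { return 0; }
    static int  init_phys(void)          { return -1; /* simulate failure */ }
    static void disable_clks(void)       { puts("undo: clocks"); }
    static void disable_regulators(void) { puts("undo: regulators"); }

    static int enable_resources(void)
    {
    	int rc;

    	rc = enable_regulators();
    	if (rc)
    		return rc;
    	rc = enable_clks();
    	if (rc)
    		goto undo_regulators;
    	rc = init_phys();
    	if (rc)
    		goto undo_clks;
    	return 0;

    undo_clks:
    	disable_clks();
    undo_regulators:
    	disable_regulators();
    	return rc;
    }

    int main(void)
    {
    	printf("rc = %d\n", enable_resources());
    	return 0;
    }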
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 1a63200ea437..83431aae74d8 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1280,10 +1280,8 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
{
- struct ahci_host_priv *hpriv = ap->host->private_data;
const char *emsg = NULL;
int rc;
- u32 tmp;
/* make sure port is not active */
rc = ahci_deinit_port(ap, &emsg);
@@ -1291,11 +1289,6 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
dev_warn(dev, "%s (%d)\n", emsg, rc);
ahci_port_clear_pending_irq(ap);
-
- /* mark esata ports */
- tmp = readl(port_mmio + PORT_CMD);
- if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
- ap->pflags |= ATA_PFLAG_EXTERNAL;
}
void ahci_init_controller(struct ata_host *host)
@@ -2627,8 +2620,8 @@ void ahci_print_info(struct ata_host *host, const char *scc_s)
speed_s = "?";
dev_info(host->dev,
- "AHCI %02x%02x.%02x%02x "
- "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
+ "AHCI vers %02x%02x.%02x%02x, "
+ "%u command slots, %s Gbps, %s mode\n"
,
(vers >> 24) & 0xff,
@@ -2637,12 +2630,18 @@ void ahci_print_info(struct ata_host *host, const char *scc_s)
vers & 0xff,
((cap >> 8) & 0x1f) + 1,
- (cap & 0x1f) + 1,
speed_s,
- impl,
scc_s);
dev_info(host->dev,
+ "%u/%u ports implemented (port mask 0x%x)\n"
+ ,
+
+ hweight32(impl),
+ (cap & 0x1f) + 1,
+ impl);
+
+ dev_info(host->dev,
"flags: "
"%s%s%s%s%s%s%s"
"%s%s%s%s%s%s%s"
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 09ed67772fae..be3412cdb22e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2001,6 +2001,33 @@ bool ata_dev_power_init_tf(struct ata_device *dev, struct ata_taskfile *tf,
return true;
}
+static bool ata_dev_power_is_active(struct ata_device *dev)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ ata_tf_init(dev, &tf);
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.command = ATA_CMD_CHK_POWER;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask) {
+ ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
+ err_mask);
+ /*
+ * Assume we are in standby mode so that we always force a
+ * spinup in ata_dev_power_set_active().
+ */
+ return false;
+ }
+
+ ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
+
+ /* Active or idle */
+ return tf.nsect == 0xff;
+}
+
/**
* ata_dev_power_set_standby - Set a device power mode to standby
* @dev: target device
@@ -2017,6 +2044,11 @@ void ata_dev_power_set_standby(struct ata_device *dev)
struct ata_taskfile tf;
unsigned int err_mask;
+ /* If the device is already sleeping or in standby, do nothing. */
+ if ((dev->flags & ATA_DFLAG_SLEEPING) ||
+ !ata_dev_power_is_active(dev))
+ return;
+
/*
* Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
* causing some drives to spin up and down again. For these, do nothing
@@ -2042,33 +2074,6 @@ void ata_dev_power_set_standby(struct ata_device *dev)
err_mask);
}
-static bool ata_dev_power_is_active(struct ata_device *dev)
-{
- struct ata_taskfile tf;
- unsigned int err_mask;
-
- ata_tf_init(dev, &tf);
- tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
- tf.protocol = ATA_PROT_NODATA;
- tf.command = ATA_CMD_CHK_POWER;
-
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
- if (err_mask) {
- ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
- err_mask);
- /*
- * Assume we are in standby mode so that we always force a
- * spinup in ata_dev_power_set_active().
- */
- return false;
- }
-
- ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
-
- /* Active or idle */
- return tf.nsect == 0xff;
-}
-
/**
* ata_dev_power_set_active - Set a device power mode to active
* @dev: target device
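
[Note: ata_dev_power_is_active() moves above its new caller: set-standby now issues CHECK POWER MODE first and bails out if the drive is already sleeping or not active, avoiding redundant spindown commands. Per the helper, a returned count field of 0xff means active or idle; anything else is treated as not active. Minimal model:]

    #include <stdint.h>
    #include <stdio.h>

    static int power_is_active(uint8_t count)
    {
    	/* CHECK POWER MODE: 0xff reports active/idle; other values
    	 * (e.g. 0x00 standby) are treated as not active here. */
    	return count == 0xff;
    }

    int main(void)
    {
    	printf("0xff -> %d\n", power_is_active(0xff));
    	printf("0x00 -> %d\n", power_is_active(0x00));
    	return 0;
    }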
diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
index a7adfdcb5e27..9a2cb9ca9d1d 100644
--- a/drivers/ata/pata_parport/pata_parport.c
+++ b/drivers/ata/pata_parport/pata_parport.c
@@ -464,7 +464,7 @@ static void pata_parport_bus_release(struct device *dev)
/* nothing to do here but required to avoid warning on device removal */
}
-static struct bus_type pata_parport_bus_type = {
+static const struct bus_type pata_parport_bus_type = {
.name = DRV_NAME,
};
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 50d8ce20ae5b..9fb1575f8d88 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2550,14 +2550,12 @@ static int fore200e_sba_probe(struct platform_device *op)
return 0;
}
-static int fore200e_sba_remove(struct platform_device *op)
+static void fore200e_sba_remove(struct platform_device *op)
{
struct fore200e *fore200e = dev_get_drvdata(&op->dev);
fore200e_shutdown(fore200e);
kfree(fore200e);
-
- return 0;
}
static const struct of_device_id fore200e_sba_match[] = {
@@ -2574,7 +2572,7 @@ static struct platform_driver fore200e_sba_driver = {
.of_match_table = fore200e_sba_match,
},
.probe = fore200e_sba_probe,
- .remove = fore200e_sba_remove,
+ .remove_new = fore200e_sba_remove,
};
#endif
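
[Note: fore200e is one of many drivers converted from the int-returning .remove to the void .remove_new platform callback; the driver core never acted on the return value, so the void signature is honest about removal being unable to fail. Kernel-style sketch of the converted shape, with hypothetical example_* names:]

    static void example_remove(struct platform_device *pdev)
    {
    	struct example *priv = dev_get_drvdata(&pdev->dev);

    	example_shutdown(priv);   /* hypothetical teardown helper */
    	kfree(priv);
    	/* nothing to return: removal cannot fail */
    }

    static struct platform_driver example_driver = {
    	.driver     = { .name = "example" },
    	.probe      = example_probe,
    	.remove_new = example_remove,
    };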
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index e327a0229dc1..e7f713cd70d3 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -2930,6 +2930,8 @@ open_card_ubr0(struct idt77252_dev *card)
vc->scq = alloc_scq(card, vc->class);
if (!vc->scq) {
printk("%s: can't get SCQ.\n", card->name);
+ kfree(card->vcs[0]);
+ card->vcs[0] = NULL;
return -ENOMEM;
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 018ac202de34..024b78a0cfc1 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -431,9 +431,6 @@ init_cpu_capacity_callback(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
int cpu;
- if (!raw_capacity)
- return 0;
-
if (val != CPUFREQ_CREATE_POLICY)
return 0;
@@ -450,9 +447,11 @@ init_cpu_capacity_callback(struct notifier_block *nb,
}
if (cpumask_empty(cpus_to_visit)) {
- topology_normalize_cpu_scale();
- schedule_work(&update_topology_flags_work);
- free_raw_capacity();
+ if (raw_capacity) {
+ topology_normalize_cpu_scale();
+ schedule_work(&update_topology_flags_work);
+ free_raw_capacity();
+ }
pr_debug("cpu_capacity: parsing done\n");
schedule_work(&parsing_done_work);
}
@@ -472,7 +471,7 @@ static int __init register_cpufreq_notifier(void)
* On ACPI-based systems skip registering cpufreq notifier as cpufreq
* information is not needed for cpu capacity initialization.
*/
- if (!acpi_disabled || !raw_capacity)
+ if (!acpi_disabled)
return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
diff --git a/drivers/base/base.h b/drivers/base/base.h
index eb4c0ace9242..0738ccad08b2 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -207,7 +207,7 @@ static inline int devtmpfs_init(void) { return 0; }
#endif
#ifdef CONFIG_BLOCK
-extern struct class block_class;
+extern const struct class block_class;
static inline bool is_blockdev(struct device *dev)
{
return dev->class == &block_class;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 14d46af40f9a..9828da9b933c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -125,7 +125,7 @@ static void __fwnode_link_del(struct fwnode_link *link)
*/
static void __fwnode_link_cycle(struct fwnode_link *link)
{
- pr_debug("%pfwf: Relaxing link with %pfwf\n",
+ pr_debug("%pfwf: cycle: depends on %pfwf\n",
link->consumer, link->supplier);
link->flags |= FWLINK_FLAG_CYCLE;
}
@@ -284,10 +284,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
return false;
}
+#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
+ DL_FLAG_CYCLE | \
+ DL_FLAG_MANAGED)
static inline bool device_link_flag_is_sync_state_only(u32 flags)
{
- return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
+ return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
}
/**
@@ -1943,6 +1945,7 @@ static bool __fw_devlink_relax_cycles(struct device *con,
/* Termination condition. */
if (sup_dev == con) {
+ pr_debug("----- cycle: start -----\n");
ret = true;
goto out;
}
@@ -1974,8 +1977,11 @@ static bool __fw_devlink_relax_cycles(struct device *con,
else
par_dev = fwnode_get_next_parent_dev(sup_handle);
- if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode))
+ if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) {
+ pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle,
+ par_dev->fwnode);
ret = true;
+ }
if (!sup_dev)
goto out;
@@ -1991,6 +1997,8 @@ static bool __fw_devlink_relax_cycles(struct device *con,
if (__fw_devlink_relax_cycles(con,
dev_link->supplier->fwnode)) {
+ pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle,
+ dev_link->supplier->fwnode);
fw_devlink_relax_link(dev_link);
dev_link->flags |= DL_FLAG_CYCLE;
ret = true;
@@ -2058,13 +2066,19 @@ static int fw_devlink_create_devlink(struct device *con,
/*
 * SYNC_STATE_ONLY device links don't block probing and support cycles.
- * So cycle detection isn't necessary and shouldn't be done.
+ * So, one might expect that cycle detection isn't necessary for them.
+ * However, if the device link was marked as SYNC_STATE_ONLY because
+ * it's part of a cycle, then we still need to do cycle detection. This
+ * is because the consumer and supplier might be part of multiple cycles
+ * and we need to detect all those cycles.
*/
- if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ if (!device_link_flag_is_sync_state_only(flags) ||
+ flags & DL_FLAG_CYCLE) {
device_links_write_lock();
if (__fw_devlink_relax_cycles(con, sup_handle)) {
__fwnode_link_cycle(link);
flags = fw_devlink_get_flags(link->flags);
+ pr_debug("----- cycle: end -----\n");
dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
sup_handle);
}
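
The DL_MARKER_FLAGS change above widens the check to ignore the MANAGED marker as well: the link qualifies if, after masking out all marker flags, exactly SYNC_STATE_ONLY remains. A standalone model of that masking, with illustrative bit values rather than the kernel's actual assignments:

    #include <stdbool.h>
    #include <stdio.h>

    #define DL_FLAG_SYNC_STATE_ONLY (1u << 0)
    #define DL_FLAG_INFERRED        (1u << 1)
    #define DL_FLAG_CYCLE           (1u << 2)
    #define DL_FLAG_MANAGED         (1u << 3)
    #define DL_MARKER_FLAGS (DL_FLAG_INFERRED | DL_FLAG_CYCLE | DL_FLAG_MANAGED)

    static bool is_sync_state_only(unsigned int flags)
    {
            /* Marker flags are ignored; the remaining bits must be
             * exactly SYNC_STATE_ONLY. */
            return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
    }

    int main(void)
    {
            /* A cycle-marked, managed SYNC_STATE_ONLY link still qualifies. */
            printf("%d\n", is_sync_state_only(DL_FLAG_SYNC_STATE_ONLY |
                                              DL_FLAG_CYCLE | DL_FLAG_MANAGED));
            /* Any other functional bit disqualifies it. */
            printf("%d\n", is_sync_state_only(DL_FLAG_SYNC_STATE_ONLY | (1u << 4)));
            return 0;
    }
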
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 47de0f140ba6..0b33e81f9c9b 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -588,6 +588,7 @@ CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
+CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -602,6 +603,7 @@ static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -617,6 +619,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_retbleed.attr,
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_gather_data_sampling.attr,
+ &dev_attr_reg_file_data_sampling.attr,
NULL
};
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index f37ad34c80ec..0d01890160f3 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -13,6 +13,8 @@
#include <linux/msi.h>
#include <linux/slab.h>
+/* Beginning of removal area. Once everything is converted over, clean up the includes too! */
+
#define DEV_ID_SHIFT 21
#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT))
@@ -204,8 +206,8 @@ static void platform_msi_free_priv_data(struct device *dev)
* Returns:
* Zero for success, or an error code in case of failure
*/
-int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
+static int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
{
int err;
@@ -219,18 +221,6 @@ int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
return err;
}
-EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
-
-/**
- * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
- * @dev: The device for which to free interrupts
- */
-void platform_msi_domain_free_irqs(struct device *dev)
-{
- msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
- platform_msi_free_priv_data(dev);
-}
-EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
/**
* platform_msi_get_host_data - Query the private data associated with
@@ -350,3 +340,104 @@ int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int vir
return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg);
}
+
+/* End of removal area */
+
+/* Real per device domain interfaces */
+
+/*
+ * This indirection can go away when platform_device_msi_init_and_alloc_irqs()
+ * is switched to a proper irq_chip::irq_write_msi_msg() callback. Keep it
+ * simple for now.
+ */
+static void platform_msi_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ irq_write_msi_msg_t cb = d->chip_data;
+
+ cb(irq_data_get_msi_desc(d), msg);
+}
+
+static void platform_msi_set_desc_byindex(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = desc->msi_index;
+}
+
+static const struct msi_domain_template platform_msi_template = {
+ .chip = {
+ .name = "pMSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_write_msi_msg = platform_msi_write_msi_msg,
+ /* The rest is filled in by the platform MSI parent */
+ },
+
+ .ops = {
+ .set_desc = platform_msi_set_desc_byindex,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_DEVICE_MSI,
+ },
+};
+
+/**
+ * platform_device_msi_init_and_alloc_irqs - Initialize platform device MSI
+ * and allocate interrupts for @dev
+ * @dev: The device for which to allocate interrupts
+ * @nvec: The number of interrupts to allocate
+ * @write_msi_msg: Callback to write an interrupt message for @dev
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ *
+ * This creates a MSI domain on @dev which has @dev->msi.domain as
+ * parent. The parent domain sets up the new domain. The domain has
+ * a fixed size of @nvec. The domain is managed by devres and will
+ * be removed when the device is removed.
+ *
+ * Note: For migration purposes this falls back to the original
+ * platform_msi code until all platforms have been converted to the
+ * MSI parent model.
+ */
+int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
+{
+ struct irq_domain *domain = dev->msi.domain;
+
+ if (!domain || !write_msi_msg)
+ return -EINVAL;
+
+ /* Migration support. Will go away once everything is converted */
+ if (!irq_domain_is_msi_parent(domain))
+ return platform_msi_domain_alloc_irqs(dev, nvec, write_msi_msg);
+
+ /*
+ * @write_msi_msg is stored in the resulting msi_domain_info::data.
+ * The underlying domain creation mechanism will assign that
+ * callback to the resulting irq chip.
+ */
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
+ &platform_msi_template,
+ nvec, NULL, write_msi_msg))
+ return -ENODEV;
+
+ return msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1);
+}
+EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs);
+
+/**
+ * platform_device_msi_free_irqs_all - Free all interrupts for @dev
+ * @dev: The device for which to free interrupts
+ */
+void platform_device_msi_free_irqs_all(struct device *dev)
+{
+ struct irq_domain *domain = dev->msi.domain;
+
+ msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
+
+ /* Migration support. Will go away once everything is converted */
+ if (!irq_domain_is_msi_parent(domain))
+ platform_msi_free_priv_data(dev);
+}
+EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all);
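
A hedged sketch of how a consumer might use the two exported interfaces above, based only on the signatures in this hunk; the device specifics and the body of the message writer are hypothetical:

    #include <linux/device.h>
    #include <linux/msi.h>

    /* Hypothetical writer: program the address/data pair for the vector
     * identified by desc->msi_index into device registers. */
    static void demo_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
    {
            /* device-specific register writes would go here */
    }

    static int demo_setup_irqs(struct device *dev)
    {
            int ret;

            /* Creates the per-device MSI domain and allocates 4 vectors. */
            ret = platform_device_msi_init_and_alloc_irqs(dev, 4,
                                                          demo_write_msi_msg);
            if (ret)
                    return ret;

            /* Linux IRQ numbers can now be looked up, e.g. via msi_get_virq(). */
            return 0;
    }

    static void demo_teardown_irqs(struct device *dev)
    {
            /* Frees the vectors and tears the per-device domain down. */
            platform_device_msi_free_irqs_all(dev);
    }
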
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 583dd5d7d46b..bcdb25bec77c 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -93,6 +93,7 @@ struct regmap {
#endif
unsigned int max_register;
+ bool max_register_is_set;
bool (*writeable_reg)(struct device *dev, unsigned int reg);
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index b7e4b2464102..9b17c77dec9d 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -23,7 +23,7 @@ static int regcache_flat_init(struct regmap *map)
int i;
unsigned int *cache;
- if (!map || map->reg_stride_order < 0 || !map->max_register)
+ if (!map || map->reg_stride_order < 0 || !map->max_register_is_set)
return -EINVAL;
map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index ac63a73ccdaa..2e41cb12b8e2 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -187,8 +187,10 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
return 0;
}
- if (!map->max_register && map->num_reg_defaults_raw)
+ if (!map->max_register_is_set && map->num_reg_defaults_raw) {
map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
+ map->max_register_is_set = true;
+ }
if (map->cache_ops->init) {
dev_dbg(map->dev, "Initializing %s cache\n",
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 026bdcb45127..bb2ab6129f38 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -9,6 +9,23 @@
#define BLOCK_TEST_SIZE 12
+static void get_changed_bytes(void *orig, void *new, size_t size)
+{
+ char *o = orig;
+ char *n = new;
+ int i;
+
+ get_random_bytes(new, size);
+
+	/*
+	 * This could be nicer and more efficient, but efficiency is
+	 * not a concern for this test helper.
+	 */
+ for (i = 0; i < size; i++)
+ while (n[i] == o[i])
+ get_random_bytes(&n[i], 1);
+}
+
static const struct regmap_config test_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
.reg_stride = 1,
@@ -1202,7 +1219,8 @@ static void raw_noinc_write(struct kunit *test)
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
- unsigned int val, val_test, val_last;
+ unsigned int val;
+ u16 val_test, val_last;
u16 val_array[BLOCK_TEST_SIZE];
config = raw_regmap_config;
@@ -1251,7 +1269,7 @@ static void raw_sync(struct kunit *test)
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
- u16 val[2];
+ u16 val[3];
u16 *hw_buf;
unsigned int rval;
int i;
@@ -1265,17 +1283,13 @@ static void raw_sync(struct kunit *test)
hw_buf = (u16 *)data->vals;
- get_random_bytes(&val, sizeof(val));
+ get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));
/* Do a regular write and a raw write in cache only mode */
regcache_cache_only(map, true);
- KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
- if (config.val_format_endian == REGMAP_ENDIAN_BIG)
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
- be16_to_cpu(val[0])));
- else
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
- le16_to_cpu(val[0])));
+ KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
+ sizeof(u16) * 2));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));
/* We should read back the new values, and defaults for the rest */
for (i = 0; i < config.max_register + 1; i++) {
@@ -1284,24 +1298,34 @@ static void raw_sync(struct kunit *test)
switch (i) {
case 2:
case 3:
- case 6:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i % 2]));
+ be16_to_cpu(val[i - 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i % 2]));
+ le16_to_cpu(val[i - 2]));
}
break;
+ case 4:
+ KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
+ break;
default:
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
break;
}
}
+
+ /*
+	 * The value written via _write() was translated by the core;
+	 * translate the original copy for comparison purposes.
+ */
+ if (config.val_format_endian == REGMAP_ENDIAN_BIG)
+ val[2] = cpu_to_be16(val[2]);
+ else
+ val[2] = cpu_to_le16(val[2]);
/* The values should not appear in the "hardware" */
- KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
- KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));
+ KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
for (i = 0; i < config.max_register + 1; i++)
data->written[i] = false;
@@ -1312,8 +1336,72 @@ static void raw_sync(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The values should now appear in the "hardware" */
- KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
- KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));
+ KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
+
+ regmap_exit(map);
+}
+
+static void raw_ranges(struct kunit *test)
+{
+ struct raw_test_types *t = (struct raw_test_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ int i;
+
+ config = raw_regmap_config;
+ config.volatile_reg = test_range_all_volatile;
+ config.ranges = &test_range;
+ config.num_ranges = 1;
+ config.max_register = test_range.range_max;
+
+ map = gen_raw_regmap(&config, t, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Reset the page to a non-zero value to trigger a change */
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
+ test_range.range_max));
+
+ /* Check we set the page and use the window for writes */
+ data->written[test_range.selector_reg] = false;
+ data->written[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
+
+ data->written[test_range.selector_reg] = false;
+ data->written[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
+ test_range.range_min +
+ test_range.window_len,
+ 0));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
+
+ /* Same for reads */
+ data->written[test_range.selector_reg] = false;
+ data->read[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
+
+ data->written[test_range.selector_reg] = false;
+ data->read[test_range.window_start] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
+ test_range.range_min +
+ test_range.window_len,
+ &val));
+ KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
+ KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
+
+ /* No physical access triggered in the virtual range */
+ for (i = test_range.range_min; i < test_range.range_max; i++) {
+ KUNIT_EXPECT_FALSE(test, data->read[i]);
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+ }
regmap_exit(map);
}
@@ -1345,6 +1433,7 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
+ KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
{}
};
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 6db77d8e45f9..5cb425f6f02d 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(regmap_check_range_table);
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
- if (map->max_register && reg > map->max_register)
+ if (map->max_register_is_set && reg > map->max_register)
return false;
if (map->writeable_reg)
@@ -112,7 +112,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg)
if (!map->cache_ops)
return false;
- if (map->max_register && reg > map->max_register)
+ if (map->max_register_is_set && reg > map->max_register)
return false;
map->lock(map->lock_arg);
@@ -129,7 +129,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
if (!map->reg_read)
return false;
- if (map->max_register && reg > map->max_register)
+ if (map->max_register_is_set && reg > map->max_register)
return false;
if (map->format.format_write)
@@ -787,6 +787,7 @@ struct regmap *__regmap_init(struct device *dev,
map->bus = bus;
map->bus_context = bus_context;
map->max_register = config->max_register;
+ map->max_register_is_set = map->max_register ?: config->max_register_is_0;
map->wr_table = config->wr_table;
map->rd_table = config->rd_table;
map->volatile_table = config->volatile_table;
@@ -1412,6 +1413,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
regmap_debugfs_exit(map);
map->max_register = config->max_register;
+ map->max_register_is_set = map->max_register ?: config->max_register_is_0;
map->writeable_reg = config->writeable_reg;
map->readable_reg = config->readable_reg;
map->volatile_reg = config->volatile_reg;
@@ -3383,7 +3385,7 @@ EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
*/
int regmap_get_max_register(struct regmap *map)
{
- return map->max_register ? map->max_register : -EINVAL;
+ return map->max_register_is_set ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);
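
The max_register_is_set tracking distinguishes "max_register not configured" from "the highest register is register 0", which were previously indistinguishable. A hedged sketch of a config that needs the max_register_is_0 field referenced above:

    #include <linux/regmap.h>

    /* A device whose only register sits at address 0. Without the new
     * flag, max_register == 0 used to read as "no limit configured". */
    static const struct regmap_config demo_single_reg_config = {
            .reg_bits               = 8,
            .val_bits               = 8,
            .max_register           = 0,
            .max_register_is_0      = true,
    };
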
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 7061d3ee836a..6b5d34919c72 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -68,7 +68,7 @@ static struct attribute *bcma_device_attrs[] = {
};
ATTRIBUTE_GROUPS(bcma_device);
-static struct bus_type bcma_bus_type = {
+static const struct bus_type bcma_bus_type = {
.name = "bcma",
.match = bcma_bus_match,
.probe = bcma_device_probe,
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 2b98114a9fe0..a25414228e47 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1779,7 +1779,7 @@ static int fd_alloc_disk(int drive, int system)
struct gendisk *disk;
int err;
- disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);
+ disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index b1b47d88f5db..b6dac8cee70f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -24,8 +24,8 @@ static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
static struct dentry *aoe_debugfs_dir;
-/* GPFS needs a larger value than the default. */
-static int aoe_maxsectors;
+/* random default picked from the historic block max_sectors cap */
+static int aoe_maxsectors = 2560;
module_param(aoe_maxsectors, int, 0644);
MODULE_PARM_DESC(aoe_maxsectors,
"When nonzero, set the maximum number of sectors per I/O request");
@@ -334,6 +334,10 @@ aoeblk_gdalloc(void *vp)
mempool_t *mp;
struct blk_mq_tag_set *set;
sector_t ssize;
+ struct queue_limits lim = {
+ .max_hw_sectors = aoe_maxsectors,
+ .io_opt = SZ_2M,
+ };
ulong flags;
int late = 0;
int err;
@@ -371,7 +375,7 @@ aoeblk_gdalloc(void *vp)
goto err_mempool;
}
- gd = blk_mq_alloc_disk(set, d);
+ gd = blk_mq_alloc_disk(set, &lim, d);
if (IS_ERR(gd)) {
pr_err("aoe: cannot allocate block queue for %ld.%d\n",
d->aoemajor, d->aoeminor);
@@ -384,14 +388,9 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->flags & DEVFL_TKILL);
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
- /* random number picked from the history block max_sectors cap */
- blk_queue_max_hw_sectors(gd->queue, 2560u);
- blk_queue_io_opt(gd->queue, SZ_2M);
d->bufpool = mp;
d->blkq = gd->queue;
d->gd = gd;
- if (aoe_maxsectors)
- blk_queue_max_hw_sectors(gd->queue, aoe_maxsectors);
gd->major = AOE_MAJOR;
gd->first_minor = d->sysminor;
gd->minors = AOE_PARTITIONS;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d7317425be51..cc9077b588d7 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -419,13 +419,16 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
rcu_read_lock();
for_each_netdev_rcu(&init_net, ifp) {
dev_hold(ifp);
- if (!is_aoe_netif(ifp))
- goto cont;
+ if (!is_aoe_netif(ifp)) {
+ dev_put(ifp);
+ continue;
+ }
skb = new_skb(sizeof *h + sizeof *ch);
if (skb == NULL) {
printk(KERN_INFO "aoe: skb alloc failure\n");
- goto cont;
+ dev_put(ifp);
+ continue;
}
skb_put(skb, sizeof *h + sizeof *ch);
skb->dev = ifp;
@@ -440,9 +443,6 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
h->major = cpu_to_be16(aoemajor);
h->minor = aoeminor;
h->cmd = AOECMD_CFG;
-
-cont:
- dev_put(ifp);
}
rcu_read_unlock();
}
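
The aoecmd change restores the dev_hold()/dev_put() balance: the reference is dropped on the skip and allocation-failure paths, while on success the queued skb keeps it for the transmit path (see the aoenet.c hunk below) to drop. A standalone model of that discipline, with simplified stand-in types:

    /* Simplified stand-in for a reference-counted netdev. */
    struct demo_dev { int refs; };

    static void demo_hold(struct demo_dev *d) { d->refs++; }
    static void demo_put(struct demo_dev *d) { d->refs--; }

    /* Queue for transmit: the queued entry owns the reference and the
     * transmit path calls demo_put() when it is done with the device. */
    static void demo_queue_tx(struct demo_dev *d) { }

    static void demo_walk(struct demo_dev *devs, int n,
                          int (*usable)(struct demo_dev *))
    {
            int i;

            for (i = 0; i < n; i++) {
                    struct demo_dev *d = &devs[i];

                    demo_hold(d);
                    if (!usable(d)) {
                            demo_put(d);    /* early exit: drop the hold here */
                            continue;
                    }
                    demo_queue_tx(d);       /* success: hand the hold over */
            }
    }
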
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index c51ea95bc2ce..923a134fd766 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -63,6 +63,7 @@ tx(int id) __must_hold(&txlock)
pr_warn("aoe: packet could not be sent on %s. %s\n",
ifp ? ifp->name : "netif",
"consider increasing tx_queue_len");
+ dev_put(ifp);
spin_lock_irq(&txlock);
}
return 0;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 50949207798d..cacc4ba942a8 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1994,7 +1994,7 @@ static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
{
struct gendisk *disk;
- disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);
+ disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 970bd6ff38c4..e322cef6596b 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -318,6 +318,16 @@ static int brd_alloc(int i)
struct gendisk *disk;
char buf[DISK_NAME_LEN];
int err = -ENOMEM;
+ struct queue_limits lim = {
+ /*
+	 * This is so fdisk will align partitions on 4k, because the
+	 * direct_access API needs 4k alignment when returning a PFN.
+	 * (This is only a problem on very small devices <= 4M;
+	 * otherwise fdisk will align on 1M. Regardless, this setting
+	 * is harmless.)
+ */
+ .physical_block_size = PAGE_SIZE,
+ };
list_for_each_entry(brd, &brd_devices, brd_list)
if (brd->brd_number == i)
@@ -335,10 +345,11 @@ static int brd_alloc(int i)
debugfs_create_u64(buf, 0444, brd_debugfs_dir,
&brd->brd_nr_pages);
- disk = brd->brd_disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!disk)
+ disk = brd->brd_disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(disk)) {
+ err = PTR_ERR(disk);
goto out_free_dev;
-
+ }
disk->major = RAMDISK_MAJOR;
disk->first_minor = i * max_part;
disk->minors = max_part;
@@ -347,15 +358,6 @@ static int brd_alloc(int i)
strscpy(disk->disk_name, buf, DISK_NAME_LEN);
set_capacity(disk, rd_size * 2);
- /*
- * This is so fdisk will align partitions on 4k, because of
- * direct_access API needing 4k alignment, returning a PFN
- * (This is only a problem on very small devices <= 4M,
- * otherwise fdisk will align on 1M. Regardless this call
- * is harmless)
- */
- blk_queue_physical_block_size(disk->queue, PAGE_SIZE);
-
/* Tell the block layer that this is not a rotational device */
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c21e3732759e..94dc0a235919 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -524,9 +524,9 @@ struct drbd_md {
struct drbd_backing_dev {
struct block_device *backing_bdev;
- struct bdev_handle *backing_bdev_handle;
+ struct file *backing_bdev_file;
struct block_device *md_bdev;
- struct bdev_handle *md_bdev_handle;
+ struct file *f_md_bdev;
struct drbd_md md;
struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
sector_t known_size; /* last known size of that backing device */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 6bc86106c7b2..113b441d4d36 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2690,6 +2690,14 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
int id;
int vnr = adm_ctx->volume;
enum drbd_ret_code err = ERR_NOMEM;
+ struct queue_limits lim = {
+ /*
+ * Setting the max_hw_sectors to an odd value of 8kibyte here.
+		 * Setting the max_hw_sectors to an odd value of 8 KiB here.
+ * connect.
+ */
+ .max_hw_sectors = DRBD_MAX_BIO_SIZE_SAFE >> 8,
+ };
device = minor_to_device(minor);
if (device)
@@ -2708,9 +2716,11 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
drbd_init_set_defaults(device);
- disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!disk)
+ disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(disk)) {
+ err = PTR_ERR(disk);
goto out_no_disk;
+ }
device->vdisk = disk;
device->rq_queue = disk->queue;
@@ -2727,9 +2737,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
blk_queue_write_cache(disk->queue, true, true);
- /* Setting the max_hw_sectors to an odd value of 8kibyte here
- This triggers a max_bio_size message upon first attach or connect */
- blk_queue_max_hw_sectors(disk->queue, DRBD_MAX_BIO_SIZE_SAFE >> 8);
device->md_io.page = alloc_page(GFP_KERNEL);
if (!device->md_io.page)
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 43747a1aae43..5d65c9754d83 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1189,9 +1189,31 @@ static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
return 0;
}
-static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
+static unsigned int drbd_max_peer_bio_size(struct drbd_device *device)
{
- q->limits.discard_granularity = granularity;
+ /*
+ * We may ignore peer limits if the peer is modern enough. From 8.3.8
+ * onwards the peer can use multiple BIOs for a single peer_request.
+ */
+ if (device->state.conn < C_WF_REPORT_PARAMS)
+ return device->peer_max_bio_size;
+
+ if (first_peer_device(device)->connection->agreed_pro_version < 94)
+ return min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+
+ /*
+ * Correct old drbd (up to 8.3.7) if it believes it can do more than
+ * 32KiB.
+ */
+ if (first_peer_device(device)->connection->agreed_pro_version == 94)
+ return DRBD_MAX_SIZE_H80_PACKET;
+
+ /*
+ * drbd 8.3.8 onwards, before 8.4.0
+ */
+ if (first_peer_device(device)->connection->agreed_pro_version < 100)
+ return DRBD_MAX_BIO_SIZE_P95;
+ return DRBD_MAX_BIO_SIZE;
}
static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
@@ -1204,149 +1226,119 @@ static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
return AL_EXTENT_SIZE >> 9;
}
-static void decide_on_discard_support(struct drbd_device *device,
+static bool drbd_discard_supported(struct drbd_connection *connection,
struct drbd_backing_dev *bdev)
{
- struct drbd_connection *connection =
- first_peer_device(device)->connection;
- struct request_queue *q = device->rq_queue;
- unsigned int max_discard_sectors;
-
if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev))
- goto not_supported;
+ return false;
if (connection->cstate >= C_CONNECTED &&
!(connection->agreed_features & DRBD_FF_TRIM)) {
drbd_info(connection,
"peer DRBD too old, does not support TRIM: disabling discards\n");
- goto not_supported;
+ return false;
}
- /*
- * We don't care for the granularity, really.
- *
- * Stacking limits below should fix it for the local device. Whether or
- * not it is a suitable granularity on the remote device is not our
- * problem, really. If you care, you need to use devices with similar
- * topology on all peers.
- */
- blk_queue_discard_granularity(q, 512);
- max_discard_sectors = drbd_max_discard_sectors(connection);
- blk_queue_max_discard_sectors(q, max_discard_sectors);
- blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
- return;
-
-not_supported:
- blk_queue_discard_granularity(q, 0);
- blk_queue_max_discard_sectors(q, 0);
+ return true;
}
-static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
+/* This is the workaround for "bio would need to, but cannot, be split" */
+static unsigned int drbd_backing_dev_max_segments(struct drbd_device *device)
{
- /* Fixup max_write_zeroes_sectors after blk_stack_limits():
- * if we can handle "zeroes" efficiently on the protocol,
- * we want to do that, even if our backend does not announce
- * max_write_zeroes_sectors itself. */
- struct drbd_connection *connection = first_peer_device(device)->connection;
- /* If the peer announces WZEROES support, use it. Otherwise, rather
- * send explicit zeroes than rely on some discard-zeroes-data magic. */
- if (connection->agreed_features & DRBD_FF_WZEROES)
- q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
- else
- q->limits.max_write_zeroes_sectors = 0;
-}
+ unsigned int max_segments;
-static void fixup_discard_support(struct drbd_device *device, struct request_queue *q)
-{
- unsigned int max_discard = device->rq_queue->limits.max_discard_sectors;
- unsigned int discard_granularity =
- device->rq_queue->limits.discard_granularity >> SECTOR_SHIFT;
+ rcu_read_lock();
+ max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
+ rcu_read_unlock();
- if (discard_granularity > max_discard) {
- blk_queue_discard_granularity(q, 0);
- blk_queue_max_discard_sectors(q, 0);
- }
+ if (!max_segments)
+ return BLK_MAX_SEGMENTS;
+ return max_segments;
}
-static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
- unsigned int max_bio_size, struct o_qlim *o)
+void drbd_reconsider_queue_parameters(struct drbd_device *device,
+ struct drbd_backing_dev *bdev, struct o_qlim *o)
{
+ struct drbd_connection *connection =
+ first_peer_device(device)->connection;
struct request_queue * const q = device->rq_queue;
- unsigned int max_hw_sectors = max_bio_size >> 9;
- unsigned int max_segments = 0;
+ unsigned int now = queue_max_hw_sectors(q) << 9;
+ struct queue_limits lim;
struct request_queue *b = NULL;
- struct disk_conf *dc;
+ unsigned int new;
if (bdev) {
b = bdev->backing_bdev->bd_disk->queue;
- max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
- rcu_read_lock();
- dc = rcu_dereference(device->ldev->disk_conf);
- max_segments = dc->max_bio_bvecs;
- rcu_read_unlock();
-
- blk_set_stacking_limits(&q->limits);
+ device->local_max_bio_size =
+ queue_max_hw_sectors(b) << SECTOR_SHIFT;
}
- blk_queue_max_hw_sectors(q, max_hw_sectors);
- /* This is the workaround for "bio would need to, but cannot, be split" */
- blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
- blk_queue_segment_boundary(q, PAGE_SIZE-1);
- decide_on_discard_support(device, bdev);
-
- if (b) {
- blk_stack_limits(&q->limits, &b->limits, 0);
- disk_update_readahead(device->vdisk);
+ /*
+ * We may later detach and re-attach on a disconnected Primary. Avoid
+ * decreasing the value in this case.
+ *
+ * We want to store what we know the peer DRBD can handle, not what the
+ * peer IO backend can handle.
+ */
+ new = min3(DRBD_MAX_BIO_SIZE, device->local_max_bio_size,
+ max(drbd_max_peer_bio_size(device), device->peer_max_bio_size));
+ if (new != now) {
+ if (device->state.role == R_PRIMARY && new < now)
+ drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n",
+ new, now);
+ drbd_info(device, "max BIO size = %u\n", new);
}
- fixup_write_zeroes(device, q);
- fixup_discard_support(device, q);
-}
-
-void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
-{
- unsigned int now, new, local, peer;
-
- now = queue_max_hw_sectors(device->rq_queue) << 9;
- local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
- peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */
+ lim = queue_limits_start_update(q);
if (bdev) {
- local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
- device->local_max_bio_size = local;
+ blk_set_stacking_limits(&lim);
+ lim.max_segments = drbd_backing_dev_max_segments(device);
+ } else {
+ lim.max_segments = BLK_MAX_SEGMENTS;
}
- local = min(local, DRBD_MAX_BIO_SIZE);
- /* We may ignore peer limits if the peer is modern enough.
- Because new from 8.3.8 onwards the peer can use multiple
- BIOs for a single peer_request */
- if (device->state.conn >= C_WF_REPORT_PARAMS) {
- if (first_peer_device(device)->connection->agreed_pro_version < 94)
- peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
- /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
- else if (first_peer_device(device)->connection->agreed_pro_version == 94)
- peer = DRBD_MAX_SIZE_H80_PACKET;
- else if (first_peer_device(device)->connection->agreed_pro_version < 100)
- peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
- else
- peer = DRBD_MAX_BIO_SIZE;
+ lim.max_hw_sectors = new >> SECTOR_SHIFT;
+ lim.seg_boundary_mask = PAGE_SIZE - 1;
- /* We may later detach and re-attach on a disconnected Primary.
- * Avoid this setting to jump back in that case.
- * We want to store what we know the peer DRBD can handle,
- * not what the peer IO backend can handle. */
- if (peer > device->peer_max_bio_size)
- device->peer_max_bio_size = peer;
+ /*
+ * We don't care for the granularity, really.
+ *
+ * Stacking limits below should fix it for the local device. Whether or
+ * not it is a suitable granularity on the remote device is not our
+ * problem, really. If you care, you need to use devices with similar
+ * topology on all peers.
+ */
+ if (drbd_discard_supported(connection, bdev)) {
+ lim.discard_granularity = 512;
+ lim.max_hw_discard_sectors =
+ drbd_max_discard_sectors(connection);
+ } else {
+ lim.discard_granularity = 0;
+ lim.max_hw_discard_sectors = 0;
}
- new = min(local, peer);
- if (device->state.role == R_PRIMARY && new < now)
- drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
+ if (bdev)
+ blk_stack_limits(&lim, &b->limits, 0);
- if (new != now)
- drbd_info(device, "max BIO size = %u\n", new);
+ /*
+ * If we can handle "zeroes" efficiently on the protocol, we want to do
+ * that, even if our backend does not announce max_write_zeroes_sectors
+ * itself.
+ */
+ if (connection->agreed_features & DRBD_FF_WZEROES)
+ lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
+ else
+ lim.max_write_zeroes_sectors = 0;
+
+ if ((lim.discard_granularity >> SECTOR_SHIFT) >
+ lim.max_hw_discard_sectors) {
+ lim.discard_granularity = 0;
+ lim.max_hw_discard_sectors = 0;
+ }
- drbd_setup_queue_param(device, bdev, new, o);
+ if (queue_limits_commit_update(q, &lim))
+ drbd_err(device, "setting new queue limits failed\n");
}
/* Starts the worker thread */
@@ -1635,45 +1627,45 @@ success:
return 0;
}
-static struct bdev_handle *open_backing_dev(struct drbd_device *device,
+static struct file *open_backing_dev(struct drbd_device *device,
const char *bdev_path, void *claim_ptr, bool do_bd_link)
{
- struct bdev_handle *handle;
+ struct file *file;
int err = 0;
- handle = bdev_open_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
- claim_ptr, NULL);
- if (IS_ERR(handle)) {
+ file = bdev_file_open_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ claim_ptr, NULL);
+ if (IS_ERR(file)) {
drbd_err(device, "open(\"%s\") failed with %ld\n",
- bdev_path, PTR_ERR(handle));
- return handle;
+ bdev_path, PTR_ERR(file));
+ return file;
}
if (!do_bd_link)
- return handle;
+ return file;
- err = bd_link_disk_holder(handle->bdev, device->vdisk);
+ err = bd_link_disk_holder(file_bdev(file), device->vdisk);
if (err) {
- bdev_release(handle);
+ fput(file);
drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
bdev_path, err);
- handle = ERR_PTR(err);
+ file = ERR_PTR(err);
}
- return handle;
+ return file;
}
static int open_backing_devices(struct drbd_device *device,
struct disk_conf *new_disk_conf,
struct drbd_backing_dev *nbc)
{
- struct bdev_handle *handle;
+ struct file *file;
- handle = open_backing_dev(device, new_disk_conf->backing_dev, device,
+ file = open_backing_dev(device, new_disk_conf->backing_dev, device,
true);
- if (IS_ERR(handle))
+ if (IS_ERR(file))
return ERR_OPEN_DISK;
- nbc->backing_bdev = handle->bdev;
- nbc->backing_bdev_handle = handle;
+ nbc->backing_bdev = file_bdev(file);
+ nbc->backing_bdev_file = file;
/*
* meta_dev_idx >= 0: external fixed size, possibly multiple
@@ -1683,7 +1675,7 @@ static int open_backing_devices(struct drbd_device *device,
* should check it for you already; but if you don't, or
* someone fooled it, we need to double check here)
*/
- handle = open_backing_dev(device, new_disk_conf->meta_dev,
+ file = open_backing_dev(device, new_disk_conf->meta_dev,
/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
* if potentially shared with other drbd minors */
(new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
@@ -1691,21 +1683,21 @@ static int open_backing_devices(struct drbd_device *device,
* as would happen with internal metadata. */
(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
- if (IS_ERR(handle))
+ if (IS_ERR(file))
return ERR_OPEN_MD_DISK;
- nbc->md_bdev = handle->bdev;
- nbc->md_bdev_handle = handle;
+ nbc->md_bdev = file_bdev(file);
+ nbc->f_md_bdev = file;
return NO_ERROR;
}
static void close_backing_dev(struct drbd_device *device,
- struct bdev_handle *handle, bool do_bd_unlink)
+ struct file *bdev_file, bool do_bd_unlink)
{
- if (!handle)
+ if (!bdev_file)
return;
if (do_bd_unlink)
- bd_unlink_disk_holder(handle->bdev, device->vdisk);
- bdev_release(handle);
+ bd_unlink_disk_holder(file_bdev(bdev_file), device->vdisk);
+ fput(bdev_file);
}
void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
@@ -1713,9 +1705,9 @@ void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *
if (ldev == NULL)
return;
- close_backing_dev(device, ldev->md_bdev_handle,
+ close_backing_dev(device, ldev->f_md_bdev,
ldev->md_bdev != ldev->backing_bdev);
- close_backing_dev(device, ldev->backing_bdev_handle, true);
+ close_backing_dev(device, ldev->backing_bdev_file, true);
kfree(ldev->disk_conf);
kfree(ldev);
@@ -2131,9 +2123,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
fail:
conn_reconfig_done(connection);
if (nbc) {
- close_backing_dev(device, nbc->md_bdev_handle,
+ close_backing_dev(device, nbc->f_md_bdev,
nbc->md_bdev != nbc->backing_bdev);
- close_backing_dev(device, nbc->backing_bdev_handle, true);
+ close_backing_dev(device, nbc->backing_bdev_file, true);
kfree(nbc);
}
kfree(new_disk_conf);
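
The drbd conversion swaps struct bdev_handle for a struct file: the device is opened with bdev_file_open_by_path(), the block device behind it is reached via file_bdev(), and a plain fput() releases the claim. A minimal sketch of that pairing, with hypothetical wrapper names:

    #include <linux/blkdev.h>
    #include <linux/err.h>
    #include <linux/file.h>

    static struct block_device *demo_open_bdev(const char *path, void *holder,
                                               struct file **bdev_file)
    {
            struct file *file;

            file = bdev_file_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
                                          holder, NULL);
            if (IS_ERR(file))
                    return ERR_CAST(file);

            *bdev_file = file;              /* keep for the eventual fput() */
            return file_bdev(file);         /* block device behind the file */
    }

    static void demo_close_bdev(struct file *bdev_file)
    {
            fput(bdev_file);                /* drops the exclusive claim too */
    }
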
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 287a8d1d3f70..e858e7e0383f 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1542,9 +1542,10 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
int notify_resource_state_change(struct sk_buff *skb,
unsigned int seq,
- struct drbd_resource_state_change *resource_state_change,
+ void *state_change,
enum drbd_notification_type type)
{
+ struct drbd_resource_state_change *resource_state_change = state_change;
struct drbd_resource *resource = resource_state_change->resource;
struct resource_info resource_info = {
.res_role = resource_state_change->role[NEW],
@@ -1558,13 +1559,14 @@ int notify_resource_state_change(struct sk_buff *skb,
int notify_connection_state_change(struct sk_buff *skb,
unsigned int seq,
- struct drbd_connection_state_change *connection_state_change,
+ void *state_change,
enum drbd_notification_type type)
{
- struct drbd_connection *connection = connection_state_change->connection;
+ struct drbd_connection_state_change *p = state_change;
+ struct drbd_connection *connection = p->connection;
struct connection_info connection_info = {
- .conn_connection_state = connection_state_change->cstate[NEW],
- .conn_role = connection_state_change->peer_role[NEW],
+ .conn_connection_state = p->cstate[NEW],
+ .conn_role = p->peer_role[NEW],
};
return notify_connection_state(skb, seq, connection, &connection_info, type);
@@ -1572,9 +1574,10 @@ int notify_connection_state_change(struct sk_buff *skb,
int notify_device_state_change(struct sk_buff *skb,
unsigned int seq,
- struct drbd_device_state_change *device_state_change,
+ void *state_change,
enum drbd_notification_type type)
{
+ struct drbd_device_state_change *device_state_change = state_change;
struct drbd_device *device = device_state_change->device;
struct device_info device_info = {
.dev_disk_state = device_state_change->disk_state[NEW],
@@ -1585,9 +1588,10 @@ int notify_device_state_change(struct sk_buff *skb,
int notify_peer_device_state_change(struct sk_buff *skb,
unsigned int seq,
- struct drbd_peer_device_state_change *p,
+ void *state_change,
enum drbd_notification_type type)
{
+ struct drbd_peer_device_state_change *p = state_change;
struct drbd_peer_device *peer_device = p->peer_device;
struct peer_device_info peer_device_info = {
.peer_repl_state = p->repl_state[NEW],
@@ -1605,8 +1609,8 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
bool resource_state_has_changed;
unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
- int (*last_func)(struct sk_buff *, unsigned int, void *,
- enum drbd_notification_type) = NULL;
+ int (*last_func)(struct sk_buff *, unsigned int,
+ void *, enum drbd_notification_type) = NULL;
void *last_arg = NULL;
#define HAS_CHANGED(state) ((state)[OLD] != (state)[NEW])
@@ -1616,7 +1620,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
})
#define REMEMBER_STATE_CHANGE(func, arg, type) \
({ FINAL_STATE_CHANGE(type | NOTIFY_CONTINUES); \
- last_func = (typeof(last_func))func; \
+ last_func = func; \
last_arg = arg; \
})
diff --git a/drivers/block/drbd/drbd_state_change.h b/drivers/block/drbd/drbd_state_change.h
index 9d78d8e3912e..a56a57d67686 100644
--- a/drivers/block/drbd/drbd_state_change.h
+++ b/drivers/block/drbd/drbd_state_change.h
@@ -46,19 +46,19 @@ extern void forget_state_change(struct drbd_state_change *);
extern int notify_resource_state_change(struct sk_buff *,
unsigned int,
- struct drbd_resource_state_change *,
+ void *,
enum drbd_notification_type type);
extern int notify_connection_state_change(struct sk_buff *,
unsigned int,
- struct drbd_connection_state_change *,
+ void *,
enum drbd_notification_type type);
extern int notify_device_state_change(struct sk_buff *,
unsigned int,
- struct drbd_device_state_change *,
+ void *,
enum drbd_notification_type type);
extern int notify_peer_device_state_change(struct sk_buff *,
unsigned int,
- struct drbd_peer_device_state_change *,
+ void *,
enum drbd_notification_type type);
#endif /* DRBD_STATE_CHANGE_H */
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index d0e41d52d6a9..1b399ec8c07d 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -530,14 +530,13 @@ static struct format_descr format_req;
static char *floppy_track_buffer;
static int max_buffer_sectors;
-typedef void (*done_f)(int);
static const struct cont_t {
void (*interrupt)(void);
/* this is called after the interrupt of the
* main command */
void (*redo)(void); /* this is called to retry the operation */
void (*error)(void); /* this is called to tally an error */
- done_f done; /* this is called to say if the operation has
+ void (*done)(int); /* this is called to say if the operation has
* succeeded/failed */
} *cont;
@@ -985,6 +984,10 @@ static void empty(void)
{
}
+static void empty_done(int result)
+{
+}
+
static void (*floppy_work_fn)(void);
static void floppy_work_workfn(struct work_struct *work)
@@ -1998,14 +2001,14 @@ static const struct cont_t wakeup_cont = {
.interrupt = empty,
.redo = do_wakeup,
.error = empty,
- .done = (done_f)empty
+ .done = empty_done,
};
static const struct cont_t intr_cont = {
.interrupt = empty,
.redo = process_fd_request,
.error = empty,
- .done = (done_f)empty
+ .done = empty_done,
};
/* schedules handler, waiting for completion. May be interrupted, will then
@@ -4513,13 +4516,15 @@ static bool floppy_available(int drive)
static int floppy_alloc_disk(unsigned int drive, unsigned int type)
{
+ struct queue_limits lim = {
+ .max_hw_sectors = 64,
+ };
struct gendisk *disk;
- disk = blk_mq_alloc_disk(&tag_sets[drive], NULL);
+ disk = blk_mq_alloc_disk(&tag_sets[drive], &lim, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
- blk_queue_max_hw_sectors(disk->queue, 64);
disk->major = FLOPPY_MAJOR;
disk->first_minor = TOMINOR(drive) | (type << 2);
disk->minors = 1;
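
The floppy change removes the (done_f)empty casts: calling a function through a pointer whose prototype differs from the function's is undefined behaviour and trips kernel CFI. A standalone model of the fix, giving each callback slot a function with an exactly matching prototype:

    typedef void (*done_fn)(int);

    static void empty(void)
    {
    }

    static void empty_done(int result)
    {
            /* Matches done_fn exactly, so indirect calls are well-defined. */
    }

    struct cont {
            void (*interrupt)(void);
            done_fn done;
    };

    static const struct cont wakeup_cont = {
            .interrupt = empty,
            .done = empty_done,     /* not: (done_fn)empty */
    };
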
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f8145499da38..28a95fd366fe 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -750,12 +750,13 @@ static void loop_sysfs_exit(struct loop_device *lo)
&loop_attribute_group);
}
-static void loop_config_discard(struct loop_device *lo)
+static void loop_config_discard(struct loop_device *lo,
+ struct queue_limits *lim)
{
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
- struct request_queue *q = lo->lo_queue;
- u32 granularity, max_discard_sectors;
+ u32 granularity = 0, max_discard_sectors = 0;
+ struct kstatfs sbuf;
/*
* If the backing device is a block device, mirror its zeroing
@@ -775,29 +776,17 @@ static void loop_config_discard(struct loop_device *lo)
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard.
*/
- } else if (!file->f_op->fallocate) {
- max_discard_sectors = 0;
- granularity = 0;
-
- } else {
- struct kstatfs sbuf;
-
+ } else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) {
max_discard_sectors = UINT_MAX >> 9;
- if (!vfs_statfs(&file->f_path, &sbuf))
- granularity = sbuf.f_bsize;
- else
- max_discard_sectors = 0;
+ granularity = sbuf.f_bsize;
}
- if (max_discard_sectors) {
- q->limits.discard_granularity = granularity;
- blk_queue_max_discard_sectors(q, max_discard_sectors);
- blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
- } else {
- q->limits.discard_granularity = 0;
- blk_queue_max_discard_sectors(q, 0);
- blk_queue_max_write_zeroes_sectors(q, 0);
- }
+ lim->max_hw_discard_sectors = max_discard_sectors;
+ lim->max_write_zeroes_sectors = max_discard_sectors;
+ if (max_discard_sectors)
+ lim->discard_granularity = granularity;
+ else
+ lim->discard_granularity = 0;
}
struct loop_worker {
@@ -986,6 +975,20 @@ loop_set_status_from_info(struct loop_device *lo,
return 0;
}
+static int loop_reconfigure_limits(struct loop_device *lo, unsigned short bsize,
+ bool update_discard_settings)
+{
+ struct queue_limits lim;
+
+ lim = queue_limits_start_update(lo->lo_queue);
+ lim.logical_block_size = bsize;
+ lim.physical_block_size = bsize;
+ lim.io_min = bsize;
+ if (update_discard_settings)
+ loop_config_discard(lo, &lim);
+ return queue_limits_commit_update(lo->lo_queue, &lim);
+}
+
static int loop_configure(struct loop_device *lo, blk_mode_t mode,
struct block_device *bdev,
const struct loop_config *config)
@@ -1083,11 +1086,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
else
bsize = 512;
- blk_queue_logical_block_size(lo->lo_queue, bsize);
- blk_queue_physical_block_size(lo->lo_queue, bsize);
- blk_queue_io_min(lo->lo_queue, bsize);
+ error = loop_reconfigure_limits(lo, bsize, true);
+ if (WARN_ON_ONCE(error))
+ goto out_unlock;
- loop_config_discard(lo);
loop_update_rotational(lo);
loop_update_dio(lo);
loop_sysfs_init(lo);
@@ -1154,9 +1156,7 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- blk_queue_logical_block_size(lo->lo_queue, 512);
- blk_queue_physical_block_size(lo->lo_queue, 512);
- blk_queue_io_min(lo->lo_queue, 512);
+ loop_reconfigure_limits(lo, 512, false);
invalidate_disk(lo->lo_disk);
loop_sysfs_exit(lo);
/* let user-space know about this change */
@@ -1488,9 +1488,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
invalidate_bdev(lo->lo_device);
blk_mq_freeze_queue(lo->lo_queue);
- blk_queue_logical_block_size(lo->lo_queue, arg);
- blk_queue_physical_block_size(lo->lo_queue, arg);
- blk_queue_io_min(lo->lo_queue, arg);
+ err = loop_reconfigure_limits(lo, arg, false);
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
@@ -1982,6 +1980,12 @@ static const struct blk_mq_ops loop_mq_ops = {
static int loop_add(int i)
{
+ struct queue_limits lim = {
+ /*
+ * Random number picked from the historic block max_sectors cap.
+ */
+ .max_hw_sectors = 2560u,
+ };
struct loop_device *lo;
struct gendisk *disk;
int err;
@@ -2025,16 +2029,13 @@ static int loop_add(int i)
if (err)
goto out_free_idr;
- disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, lo);
+ disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, &lim, lo);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_cleanup_tags;
}
lo->lo_queue = lo->lo_disk->queue;
- /* random number picked from the history block max_sectors cap */
- blk_queue_max_hw_sectors(lo->lo_queue, 2560u);
-
/*
* By default, we do buffer IO, so it doesn't make sense to enable
* merge because the I/O submitted to backing file is handled page by
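
loop's conversion uses the queue_limits update API seen throughout this series: snapshot the limits, modify the fields of interest, then commit them back in one validated step. A hedged sketch of that pattern against an already-existing queue:

    #include <linux/blkdev.h>

    static int demo_set_block_size(struct request_queue *q, unsigned int bsize)
    {
            struct queue_limits lim;

            /* Take a snapshot of the current limits... */
            lim = queue_limits_start_update(q);

            /* ...adjust the fields of interest... */
            lim.logical_block_size = bsize;
            lim.physical_block_size = bsize;
            lim.io_min = bsize;

            /* ...and commit them back atomically; a nonzero return means
             * the new limits failed validation. */
            return queue_limits_commit_update(q, &lim);
    }
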
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index b200950e8fb5..43a187609ef7 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3401,6 +3401,12 @@ static const struct blk_mq_ops mtip_mq_ops = {
*/
static int mtip_block_initialize(struct driver_data *dd)
{
+ struct queue_limits lim = {
+ .physical_block_size = 4096,
+ .max_hw_sectors = 0xffff,
+ .max_segments = MTIP_MAX_SG,
+ .max_segment_size = 0x400000,
+ };
int rv = 0, wait_for_rebuild = 0;
sector_t capacity;
unsigned int index = 0;
@@ -3431,7 +3437,7 @@ static int mtip_block_initialize(struct driver_data *dd)
goto block_queue_alloc_tag_error;
}
- dd->disk = blk_mq_alloc_disk(&dd->tags, dd);
+ dd->disk = blk_mq_alloc_disk(&dd->tags, &lim, dd);
if (IS_ERR(dd->disk)) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
@@ -3481,12 +3487,7 @@ skip_create_disk:
/* Set device limits. */
blk_queue_flag_set(QUEUE_FLAG_NONROT, dd->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, dd->queue);
- blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
- blk_queue_physical_block_size(dd->queue, 4096);
- blk_queue_max_hw_sectors(dd->queue, 0xffff);
- blk_queue_max_segment_size(dd->queue, 0x400000);
dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
- blk_queue_io_min(dd->queue, 4096);
/* Set the capacity of the device in 512 byte sectors. */
if (!(mtip_hw_get_capacity(dd, &capacity))) {
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index d914156db2d8..27b2187e7a6d 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -114,6 +114,10 @@ static const struct block_device_operations n64cart_fops = {
*/
static int __init n64cart_probe(struct platform_device *pdev)
{
+ struct queue_limits lim = {
+ .physical_block_size = 4096,
+ .logical_block_size = 4096,
+ };
struct gendisk *disk;
int err = -ENOMEM;
@@ -131,9 +135,11 @@ static int __init n64cart_probe(struct platform_device *pdev)
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
- disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!disk)
+ disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(disk)) {
+ err = PTR_ERR(disk);
goto out;
+ }
disk->first_minor = 0;
disk->flags = GENHD_FL_NO_PART;
@@ -145,8 +151,6 @@ static int __init n64cart_probe(struct platform_device *pdev)
set_disk_ro(disk, 1);
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- blk_queue_physical_block_size(disk->queue, 4096);
- blk_queue_logical_block_size(disk->queue, 4096);
err = add_disk(disk);
if (err)
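
n64cart now hands its limits to blk_alloc_disk() instead of calling blk_queue_*() setters on disk->queue afterwards; note that the allocator returns an ERR_PTR on failure rather than NULL. A hedged sketch of the new allocation pattern:

    #include <linux/blkdev.h>

    static struct gendisk *demo_alloc_disk(void)
    {
            /* Limits are applied at allocation time; the old per-field
             * blk_queue_*() setter calls are no longer needed. */
            struct queue_limits lim = {
                    .logical_block_size     = 4096,
                    .physical_block_size    = 4096,
            };

            /* Returns ERR_PTR() on failure, so check with IS_ERR(). */
            return blk_alloc_disk(&lim, NUMA_NO_NODE);
    }
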
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 33a8f37bb6a1..9d4ec9273bf9 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -316,9 +316,12 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
nsock->sent = 0;
}
-static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
+static int __nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
loff_t blksize)
{
+ struct queue_limits lim;
+ int error;
+
if (!blksize)
blksize = 1u << NBD_DEF_BLKSIZE_BITS;
@@ -334,10 +337,16 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
if (!nbd->pid)
return 0;
+ lim = queue_limits_start_update(nbd->disk->queue);
if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
- blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
- blk_queue_logical_block_size(nbd->disk->queue, blksize);
- blk_queue_physical_block_size(nbd->disk->queue, blksize);
+ lim.max_hw_discard_sectors = UINT_MAX;
+ else
+ lim.max_hw_discard_sectors = 0;
+ lim.logical_block_size = blksize;
+ lim.physical_block_size = blksize;
+ error = queue_limits_commit_update(nbd->disk->queue, &lim);
+ if (error)
+ return error;
if (max_part)
set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
@@ -346,6 +355,18 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
return 0;
}
+static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
+ loff_t blksize)
+{
+ int error;
+
+ blk_mq_freeze_queue(nbd->disk->queue);
+ error = __nbd_set_size(nbd, bytesize, blksize);
+ blk_mq_unfreeze_queue(nbd->disk->queue);
+
+ return error;
+}
+
static void nbd_complete_rq(struct request *req)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
@@ -1351,7 +1372,6 @@ static void nbd_config_put(struct nbd_device *nbd)
nbd->config = NULL;
nbd->tag_set.timeout = 0;
- blk_queue_max_discard_sectors(nbd->disk->queue, 0);
mutex_unlock(&nbd->config_lock);
nbd_put(nbd);
@@ -1783,6 +1803,12 @@ static const struct blk_mq_ops nbd_mq_ops = {
static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
{
+ struct queue_limits lim = {
+ .max_hw_sectors = 65536,
+ .max_user_sectors = 256,
+ .max_segments = USHRT_MAX,
+ .max_segment_size = UINT_MAX,
+ };
struct nbd_device *nbd;
struct gendisk *disk;
int err = -ENOMEM;
@@ -1823,7 +1849,7 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
if (err < 0)
goto out_free_tags;
- disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
+ disk = blk_mq_alloc_disk(&nbd->tag_set, &lim, NULL);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_free_idr;
@@ -1843,11 +1869,6 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
* Tell the block layer that we are not a rotational device
*/
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
- blk_queue_max_discard_sectors(disk->queue, 0);
- blk_queue_max_segment_size(disk->queue, UINT_MAX);
- blk_queue_max_segments(disk->queue, USHRT_MAX);
- blk_queue_max_hw_sectors(disk->queue, 65536);
- disk->queue->limits.max_sectors = 256;
mutex_init(&nbd->config_lock);
refcount_set(&nbd->config_refs, 0);
@@ -2433,6 +2454,12 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
}
dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
+ if (!dev_list) {
+ nlmsg_free(reply);
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
if (index == -1) {
ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
if (ret) {
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 36755f263e8e..71c39bcd872c 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -115,6 +115,18 @@ module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
#endif
+/*
+ * Historic queue modes.
+ *
+ * These days nothing but NULL_Q_MQ is actually supported, but we keep the
+ * enum for error reporting.
+ */
+enum {
+ NULL_Q_BIO = 0,
+ NULL_Q_RQ = 1,
+ NULL_Q_MQ = 2,
+};
+
static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
@@ -165,8 +177,8 @@ static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
-static bool shared_tags;
-module_param(shared_tags, bool, 0444);
+static bool g_shared_tags;
+module_param_named(shared_tags, g_shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
static bool g_shared_tag_bitmap;
@@ -426,6 +438,7 @@ NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
NULLB_DEVICE_ATTR(no_sched, bool, NULL);
+NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
@@ -571,6 +584,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_zone_offline,
&nullb_device_attr_virt_boundary,
&nullb_device_attr_no_sched,
+ &nullb_device_attr_shared_tags,
&nullb_device_attr_shared_tag_bitmap,
NULL,
};
@@ -653,10 +667,11 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
"badblocks,blocking,blocksize,cache_size,"
"completion_nsec,discard,home_node,hw_queue_depth,"
"irqmode,max_sectors,mbps,memory_backed,no_sched,"
- "poll_queues,power,queue_mode,shared_tag_bitmap,size,"
- "submit_queues,use_per_node_hctx,virt_boundary,zoned,"
- "zone_capacity,zone_max_active,zone_max_open,"
- "zone_nr_conv,zone_offline,zone_readonly,zone_size\n");
+ "poll_queues,power,queue_mode,shared_tag_bitmap,"
+ "shared_tags,size,submit_queues,use_per_node_hctx,"
+ "virt_boundary,zoned,zone_capacity,zone_max_active,"
+ "zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
+ "zone_size\n");
}
CONFIGFS_ATTR_RO(memb_group_, features);
@@ -738,6 +753,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_max_active = g_zone_max_active;
dev->virt_boundary = g_virt_boundary;
dev->no_sched = g_no_sched;
+ dev->shared_tags = g_shared_tags;
dev->shared_tag_bitmap = g_shared_tag_bitmap;
return dev;
}
@@ -752,98 +768,11 @@ static void null_free_dev(struct nullb_device *dev)
kfree(dev);
}
-static void put_tag(struct nullb_queue *nq, unsigned int tag)
-{
- clear_bit_unlock(tag, nq->tag_map);
-
- if (waitqueue_active(&nq->wait))
- wake_up(&nq->wait);
-}
-
-static unsigned int get_tag(struct nullb_queue *nq)
-{
- unsigned int tag;
-
- do {
- tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
- if (tag >= nq->queue_depth)
- return -1U;
- } while (test_and_set_bit_lock(tag, nq->tag_map));
-
- return tag;
-}
-
-static void free_cmd(struct nullb_cmd *cmd)
-{
- put_tag(cmd->nq, cmd->tag);
-}
-
-static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
-
-static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
-{
- struct nullb_cmd *cmd;
- unsigned int tag;
-
- tag = get_tag(nq);
- if (tag != -1U) {
- cmd = &nq->cmds[tag];
- cmd->tag = tag;
- cmd->error = BLK_STS_OK;
- cmd->nq = nq;
- if (nq->dev->irqmode == NULL_IRQ_TIMER) {
- hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- cmd->timer.function = null_cmd_timer_expired;
- }
- return cmd;
- }
-
- return NULL;
-}
-
-static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, struct bio *bio)
-{
- struct nullb_cmd *cmd;
- DEFINE_WAIT(wait);
-
- do {
- /*
- * This avoids multiple return statements, multiple calls to
- * __alloc_cmd() and a fast path call to prepare_to_wait().
- */
- cmd = __alloc_cmd(nq);
- if (cmd) {
- cmd->bio = bio;
- return cmd;
- }
- prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
- io_schedule();
- finish_wait(&nq->wait, &wait);
- } while (1);
-}
-
-static void end_cmd(struct nullb_cmd *cmd)
-{
- int queue_mode = cmd->nq->dev->queue_mode;
-
- switch (queue_mode) {
- case NULL_Q_MQ:
- blk_mq_end_request(cmd->rq, cmd->error);
- return;
- case NULL_Q_BIO:
- cmd->bio->bi_status = cmd->error;
- bio_endio(cmd->bio);
- break;
- }
-
- free_cmd(cmd);
-}
-
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
- end_cmd(container_of(timer, struct nullb_cmd, timer));
+ struct nullb_cmd *cmd = container_of(timer, struct nullb_cmd, timer);
+ blk_mq_end_request(blk_mq_rq_from_pdu(cmd), cmd->error);
return HRTIMER_NORESTART;
}
@@ -856,7 +785,9 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
static void null_complete_rq(struct request *rq)
{
- end_cmd(blk_mq_rq_to_pdu(rq));
+ struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
+ blk_mq_end_request(rq, cmd->error);
}
static struct nullb_page *null_alloc_page(void)
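With the bio path gone, every nullb_cmd lives behind a request in the same allocation (tag_set.cmd_size = sizeof(struct nullb_cmd)), which is why the cmd->rq back pointer can be deleted later in this patch. The two conversions used instead, sketched:

	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* request -> PDU */
	struct request *req = blk_mq_rq_from_pdu(cmd);	/* PDU -> request */

Both are pointer arithmetic on the shared allocation, so neither can fail and no extra state needs to be stored.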
@@ -1273,7 +1204,7 @@ static int null_transfer(struct nullb *nullb, struct page *page,
static int null_handle_rq(struct nullb_cmd *cmd)
{
- struct request *rq = cmd->rq;
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
struct nullb *nullb = cmd->nq->dev->nullb;
int err;
unsigned int len;
@@ -1298,63 +1229,21 @@ static int null_handle_rq(struct nullb_cmd *cmd)
return 0;
}
-static int null_handle_bio(struct nullb_cmd *cmd)
-{
- struct bio *bio = cmd->bio;
- struct nullb *nullb = cmd->nq->dev->nullb;
- int err;
- unsigned int len;
- sector_t sector = bio->bi_iter.bi_sector;
- struct bio_vec bvec;
- struct bvec_iter iter;
-
- spin_lock_irq(&nullb->lock);
- bio_for_each_segment(bvec, bio, iter) {
- len = bvec.bv_len;
- err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
- op_is_write(bio_op(bio)), sector,
- bio->bi_opf & REQ_FUA);
- if (err) {
- spin_unlock_irq(&nullb->lock);
- return err;
- }
- sector += len >> SECTOR_SHIFT;
- }
- spin_unlock_irq(&nullb->lock);
- return 0;
-}
-
-static void null_stop_queue(struct nullb *nullb)
-{
- struct request_queue *q = nullb->q;
-
- if (nullb->dev->queue_mode == NULL_Q_MQ)
- blk_mq_stop_hw_queues(q);
-}
-
-static void null_restart_queue_async(struct nullb *nullb)
-{
- struct request_queue *q = nullb->q;
-
- if (nullb->dev->queue_mode == NULL_Q_MQ)
- blk_mq_start_stopped_hw_queues(q, true);
-}
-
static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
blk_status_t sts = BLK_STS_OK;
- struct request *rq = cmd->rq;
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
if (!hrtimer_active(&nullb->bw_timer))
hrtimer_restart(&nullb->bw_timer);
if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
- null_stop_queue(nullb);
+ blk_mq_stop_hw_queues(nullb->q);
/* race with timer */
if (atomic_long_read(&nullb->cur_bytes) > 0)
- null_restart_queue_async(nullb);
+ blk_mq_start_stopped_hw_queues(nullb->q, true);
/* requeue request */
sts = BLK_STS_DEV_RESOURCE;
}
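Dropping the NULL_Q_MQ checks leaves the bare blk-mq throttling idiom: stop the hardware queues once the byte budget underflows and report BLK_STS_DEV_RESOURCE so the block layer requeues the request; the bandwidth timer refills the budget and restarts the queues. Sketched from the hunk above:

	if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
		blk_mq_stop_hw_queues(nullb->q);
		/* nullb_bwtimer_fn() will restart the stopped queues */
		sts = BLK_STS_DEV_RESOURCE;	/* requeued, retried later */
	}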
@@ -1381,37 +1270,29 @@ static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
sector_t nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
- int err;
if (op == REQ_OP_DISCARD)
return null_handle_discard(dev, sector, nr_sectors);
+ return errno_to_blk_status(null_handle_rq(cmd));
- if (dev->queue_mode == NULL_Q_BIO)
- err = null_handle_bio(cmd);
- else
- err = null_handle_rq(cmd);
-
- return errno_to_blk_status(err);
}
static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
struct nullb_device *dev = cmd->nq->dev;
struct bio *bio;
- if (dev->memory_backed)
- return;
-
- if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) {
- zero_fill_bio(cmd->bio);
- } else if (req_op(cmd->rq) == REQ_OP_READ) {
- __rq_for_each_bio(bio, cmd->rq)
+ if (!dev->memory_backed && req_op(rq) == REQ_OP_READ) {
+ __rq_for_each_bio(bio, rq)
zero_fill_bio(bio);
}
}
static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
{
+ struct request *rq = blk_mq_rq_from_pdu(cmd);
+
/*
* Since root privileges are required to configure the null_blk
* driver, it is fine that this driver does not initialize the
@@ -1425,20 +1306,10 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
/* Complete IO by inline, softirq or timer */
switch (cmd->nq->dev->irqmode) {
case NULL_IRQ_SOFTIRQ:
- switch (cmd->nq->dev->queue_mode) {
- case NULL_Q_MQ:
- blk_mq_complete_request(cmd->rq);
- break;
- case NULL_Q_BIO:
- /*
- * XXX: no proper submitting cpu information available.
- */
- end_cmd(cmd);
- break;
- }
+ blk_mq_complete_request(rq);
break;
case NULL_IRQ_NONE:
- end_cmd(cmd);
+ blk_mq_end_request(rq, cmd->error);
break;
case NULL_IRQ_TIMER:
null_cmd_end_timer(cmd);
@@ -1499,7 +1370,7 @@ static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
return HRTIMER_NORESTART;
atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
- null_restart_queue_async(nullb);
+ blk_mq_start_stopped_hw_queues(nullb->q, true);
hrtimer_forward_now(&nullb->bw_timer, timer_interval);
@@ -1516,26 +1387,6 @@ static void nullb_setup_bwtimer(struct nullb *nullb)
hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
-static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
-{
- int index = 0;
-
- if (nullb->nr_queues != 1)
- index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
-
- return &nullb->queues[index];
-}
-
-static void null_submit_bio(struct bio *bio)
-{
- sector_t sector = bio->bi_iter.bi_sector;
- sector_t nr_sectors = bio_sectors(bio);
- struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
- struct nullb_queue *nq = nullb_to_queue(nullb);
-
- null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio));
-}
-
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool should_timeout_request(struct request *rq)
@@ -1655,7 +1506,7 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
blk_rq_sectors(req));
if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
blk_mq_end_request_batch))
- end_cmd(cmd);
+ blk_mq_end_request(req, cmd->error);
nr++;
}
@@ -1711,7 +1562,6 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
- cmd->rq = rq;
cmd->error = BLK_STS_OK;
cmd->nq = nq;
cmd->fake_timeout = should_timeout_request(rq) ||
@@ -1770,34 +1620,8 @@ static void null_queue_rqs(struct request **rqlist)
*rqlist = requeue_list;
}
-static void cleanup_queue(struct nullb_queue *nq)
-{
- bitmap_free(nq->tag_map);
- kfree(nq->cmds);
-}
-
-static void cleanup_queues(struct nullb *nullb)
-{
- int i;
-
- for (i = 0; i < nullb->nr_queues; i++)
- cleanup_queue(&nullb->queues[i]);
-
- kfree(nullb->queues);
-}
-
-static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
-{
- struct nullb_queue *nq = hctx->driver_data;
- struct nullb *nullb = nq->dev->nullb;
-
- nullb->nr_queues--;
-}
-
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
- init_waitqueue_head(&nq->wait);
- nq->queue_depth = nullb->queue_depth;
nq->dev = nullb->dev;
INIT_LIST_HEAD(&nq->poll_list);
spin_lock_init(&nq->poll_lock);
@@ -1815,7 +1639,6 @@ static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
nq = &nullb->queues[hctx_idx];
hctx->driver_data = nq;
null_init_queue(nullb, nq);
- nullb->nr_queues++;
return 0;
}
@@ -1828,7 +1651,6 @@ static const struct blk_mq_ops null_mq_ops = {
.poll = null_poll,
.map_queues = null_map_queues,
.init_hctx = null_init_hctx,
- .exit_hctx = null_exit_hctx,
};
static void null_del_dev(struct nullb *nullb)
@@ -1849,21 +1671,20 @@ static void null_del_dev(struct nullb *nullb)
if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
hrtimer_cancel(&nullb->bw_timer);
atomic_long_set(&nullb->cur_bytes, LONG_MAX);
- null_restart_queue_async(nullb);
+ blk_mq_start_stopped_hw_queues(nullb->q, true);
}
put_disk(nullb->disk);
- if (dev->queue_mode == NULL_Q_MQ &&
- nullb->tag_set == &nullb->__tag_set)
+ if (nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
- cleanup_queues(nullb);
+ kfree(nullb->queues);
if (null_cache_active(nullb))
null_free_device_storage(nullb->dev, true);
kfree(nullb);
dev->nullb = NULL;
}
-static void null_config_discard(struct nullb *nullb)
+static void null_config_discard(struct nullb *nullb, struct queue_limits *lim)
{
if (nullb->dev->discard == false)
return;
@@ -1880,43 +1701,14 @@ static void null_config_discard(struct nullb *nullb)
return;
}
- blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
+ lim->max_hw_discard_sectors = UINT_MAX >> 9;
}
-static const struct block_device_operations null_bio_ops = {
- .owner = THIS_MODULE,
- .submit_bio = null_submit_bio,
- .report_zones = null_report_zones,
-};
-
-static const struct block_device_operations null_rq_ops = {
+static const struct block_device_operations null_ops = {
.owner = THIS_MODULE,
.report_zones = null_report_zones,
};
-static int setup_commands(struct nullb_queue *nq)
-{
- struct nullb_cmd *cmd;
- int i;
-
- nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
- if (!nq->cmds)
- return -ENOMEM;
-
- nq->tag_map = bitmap_zalloc(nq->queue_depth, GFP_KERNEL);
- if (!nq->tag_map) {
- kfree(nq->cmds);
- return -ENOMEM;
- }
-
- for (i = 0; i < nq->queue_depth; i++) {
- cmd = &nq->cmds[i];
- cmd->tag = -1U;
- }
-
- return 0;
-}
-
static int setup_queues(struct nullb *nullb)
{
int nqueues = nr_cpu_ids;
@@ -1929,101 +1721,66 @@ static int setup_queues(struct nullb *nullb)
if (!nullb->queues)
return -ENOMEM;
- nullb->queue_depth = nullb->dev->hw_queue_depth;
return 0;
}
-static int init_driver_queues(struct nullb *nullb)
+static int null_init_tag_set(struct blk_mq_tag_set *set, int poll_queues)
{
- struct nullb_queue *nq;
- int i, ret = 0;
-
- for (i = 0; i < nullb->dev->submit_queues; i++) {
- nq = &nullb->queues[i];
-
- null_init_queue(nullb, nq);
-
- ret = setup_commands(nq);
- if (ret)
- return ret;
- nullb->nr_queues++;
+ set->ops = &null_mq_ops;
+ set->cmd_size = sizeof(struct nullb_cmd);
+ set->timeout = 5 * HZ;
+ set->nr_maps = 1;
+ if (poll_queues) {
+ set->nr_hw_queues += poll_queues;
+ set->nr_maps += 2;
}
- return 0;
+ return blk_mq_alloc_tag_set(set);
}
-static int null_gendisk_register(struct nullb *nullb)
+static int null_init_global_tag_set(void)
{
- sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
- struct gendisk *disk = nullb->disk;
+ int error;
- set_capacity(disk, size);
-
- disk->major = null_major;
- disk->first_minor = nullb->index;
- disk->minors = 1;
- if (queue_is_mq(nullb->q))
- disk->fops = &null_rq_ops;
- else
- disk->fops = &null_bio_ops;
- disk->private_data = nullb;
- strscpy_pad(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+ if (tag_set.ops)
+ return 0;
- if (nullb->dev->zoned) {
- int ret = null_register_zoned_dev(nullb);
+ tag_set.nr_hw_queues = g_submit_queues;
+ tag_set.queue_depth = g_hw_queue_depth;
+ tag_set.numa_node = g_home_node;
+ tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ if (g_no_sched)
+ tag_set.flags |= BLK_MQ_F_NO_SCHED;
+ if (g_shared_tag_bitmap)
+ tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ if (g_blocking)
+ tag_set.flags |= BLK_MQ_F_BLOCKING;
- if (ret)
- return ret;
- }
-
- return add_disk(disk);
+ error = null_init_tag_set(&tag_set, g_poll_queues);
+ if (error)
+ tag_set.ops = NULL;
+ return error;
}
-static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
+static int null_setup_tagset(struct nullb *nullb)
{
- unsigned int flags = BLK_MQ_F_SHOULD_MERGE;
- int hw_queues, numa_node;
- unsigned int queue_depth;
- int poll_queues;
-
- if (nullb) {
- hw_queues = nullb->dev->submit_queues;
- poll_queues = nullb->dev->poll_queues;
- queue_depth = nullb->dev->hw_queue_depth;
- numa_node = nullb->dev->home_node;
- if (nullb->dev->no_sched)
- flags |= BLK_MQ_F_NO_SCHED;
- if (nullb->dev->shared_tag_bitmap)
- flags |= BLK_MQ_F_TAG_HCTX_SHARED;
- if (nullb->dev->blocking)
- flags |= BLK_MQ_F_BLOCKING;
- } else {
- hw_queues = g_submit_queues;
- poll_queues = g_poll_queues;
- queue_depth = g_hw_queue_depth;
- numa_node = g_home_node;
- if (g_no_sched)
- flags |= BLK_MQ_F_NO_SCHED;
- if (g_shared_tag_bitmap)
- flags |= BLK_MQ_F_TAG_HCTX_SHARED;
- if (g_blocking)
- flags |= BLK_MQ_F_BLOCKING;
- }
-
- set->ops = &null_mq_ops;
- set->cmd_size = sizeof(struct nullb_cmd);
- set->flags = flags;
- set->driver_data = nullb;
- set->nr_hw_queues = hw_queues;
- set->queue_depth = queue_depth;
- set->numa_node = numa_node;
- if (poll_queues) {
- set->nr_hw_queues += poll_queues;
- set->nr_maps = 3;
- } else {
- set->nr_maps = 1;
+ if (nullb->dev->shared_tags) {
+ nullb->tag_set = &tag_set;
+ return null_init_global_tag_set();
}
- return blk_mq_alloc_tag_set(set);
+ nullb->tag_set = &nullb->__tag_set;
+ nullb->tag_set->driver_data = nullb;
+ nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
+ nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
+ nullb->tag_set->numa_node = nullb->dev->home_node;
+ nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
+ if (nullb->dev->no_sched)
+ nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
+ if (nullb->dev->shared_tag_bitmap)
+ nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
+ if (nullb->dev->blocking)
+ nullb->tag_set->flags |= BLK_MQ_F_BLOCKING;
+ return null_init_tag_set(nullb->tag_set, nullb->dev->poll_queues);
}
static int null_validate_conf(struct nullb_device *dev)
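null_init_global_tag_set() reuses tag_set.ops as the "already initialized" marker for the module-wide set: it is NULL in the zeroed static struct, set by null_init_tag_set() on success, and reset on failure so a later device can retry; null_exit() checks the same field before freeing. The idiom, reduced:

	static struct blk_mq_tag_set tag_set;	/* zeroed: .ops == NULL */

	if (tag_set.ops)			/* set up by an earlier device */
		return 0;

	tag_set.ops = &null_mq_ops;
	/* ... fill in nr_hw_queues, queue_depth, flags ... */
	error = blk_mq_alloc_tag_set(&tag_set);
	if (error)
		tag_set.ops = NULL;		/* leave it retryable */
	return error;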
@@ -2032,11 +1789,15 @@ static int null_validate_conf(struct nullb_device *dev)
pr_err("legacy IO path is no longer available\n");
return -EINVAL;
}
+ if (dev->queue_mode == NULL_Q_BIO) {
+ pr_err("BIO-based IO path is no longer available, using blk-mq instead.\n");
+ dev->queue_mode = NULL_Q_MQ;
+ }
dev->blocksize = round_down(dev->blocksize, 512);
dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
- if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
+ if (dev->use_per_node_hctx) {
if (dev->submit_queues != nr_online_nodes)
dev->submit_queues = nr_online_nodes;
} else if (dev->submit_queues > nr_cpu_ids)
@@ -2048,8 +1809,6 @@ static int null_validate_conf(struct nullb_device *dev)
if (dev->poll_queues > g_poll_queues)
dev->poll_queues = g_poll_queues;
dev->prev_poll_queues = dev->poll_queues;
-
- dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
/* Do memory allocation, so set blocking */
@@ -2060,9 +1819,6 @@ static int null_validate_conf(struct nullb_device *dev)
dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
dev->cache_size);
dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
- /* can not stop a queue */
- if (dev->queue_mode == NULL_Q_BIO)
- dev->mbps = 0;
if (dev->zoned &&
(!dev->zone_size || !is_power_of_2(dev->zone_size))) {
@@ -2102,6 +1858,12 @@ static bool null_setup_fault(void)
static int null_add_dev(struct nullb_device *dev)
{
+ struct queue_limits lim = {
+ .logical_block_size = dev->blocksize,
+ .physical_block_size = dev->blocksize,
+ .max_hw_sectors = dev->max_sectors,
+ };
+
struct nullb *nullb;
int rv;
@@ -2123,36 +1885,25 @@ static int null_add_dev(struct nullb_device *dev)
if (rv)
goto out_free_nullb;
- if (dev->queue_mode == NULL_Q_MQ) {
- if (shared_tags) {
- nullb->tag_set = &tag_set;
- rv = 0;
- } else {
- nullb->tag_set = &nullb->__tag_set;
- rv = null_init_tag_set(nullb, nullb->tag_set);
- }
+ rv = null_setup_tagset(nullb);
+ if (rv)
+ goto out_cleanup_queues;
+ if (dev->virt_boundary)
+ lim.virt_boundary_mask = PAGE_SIZE - 1;
+ null_config_discard(nullb, &lim);
+ if (dev->zoned) {
+ rv = null_init_zoned_dev(dev, &lim);
if (rv)
- goto out_cleanup_queues;
-
- nullb->tag_set->timeout = 5 * HZ;
- nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb);
- if (IS_ERR(nullb->disk)) {
- rv = PTR_ERR(nullb->disk);
goto out_cleanup_tags;
- }
- nullb->q = nullb->disk->queue;
- } else if (dev->queue_mode == NULL_Q_BIO) {
- rv = -ENOMEM;
- nullb->disk = blk_alloc_disk(nullb->dev->home_node);
- if (!nullb->disk)
- goto out_cleanup_queues;
+ }
- nullb->q = nullb->disk->queue;
- rv = init_driver_queues(nullb);
- if (rv)
- goto out_cleanup_disk;
+ nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
+ if (IS_ERR(nullb->disk)) {
+ rv = PTR_ERR(nullb->disk);
+ goto out_cleanup_zone;
}
+ nullb->q = nullb->disk->queue;
if (dev->mbps) {
set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
@@ -2164,12 +1915,6 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_write_cache(nullb->q, true, true);
}
- if (dev->zoned) {
- rv = null_init_zoned_dev(dev, nullb->q);
- if (rv)
- goto out_cleanup_disk;
- }
-
nullb->q->queuedata = nullb;
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
@@ -2177,22 +1922,12 @@ static int null_add_dev(struct nullb_device *dev)
rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
if (rv < 0) {
mutex_unlock(&lock);
- goto out_cleanup_zone;
+ goto out_cleanup_disk;
}
nullb->index = rv;
dev->index = rv;
mutex_unlock(&lock);
- blk_queue_logical_block_size(nullb->q, dev->blocksize);
- blk_queue_physical_block_size(nullb->q, dev->blocksize);
- if (dev->max_sectors)
- blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
-
- if (dev->virt_boundary)
- blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
-
- null_config_discard(nullb);
-
if (config_item_name(&dev->group.cg_item)) {
/* Use configfs dir name as the device name */
snprintf(nullb->disk_name, sizeof(nullb->disk_name),
@@ -2201,7 +1936,22 @@ static int null_add_dev(struct nullb_device *dev)
sprintf(nullb->disk_name, "nullb%d", nullb->index);
}
- rv = null_gendisk_register(nullb);
+ set_capacity(nullb->disk,
+ ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT);
+ nullb->disk->major = null_major;
+ nullb->disk->first_minor = nullb->index;
+ nullb->disk->minors = 1;
+ nullb->disk->fops = &null_ops;
+ nullb->disk->private_data = nullb;
+ strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+
+ if (nullb->dev->zoned) {
+ rv = null_register_zoned_dev(nullb);
+ if (rv)
+ goto out_ida_free;
+ }
+
+ rv = add_disk(nullb->disk);
if (rv)
goto out_ida_free;
@@ -2220,10 +1970,10 @@ out_cleanup_zone:
out_cleanup_disk:
put_disk(nullb->disk);
out_cleanup_tags:
- if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+ if (nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
- cleanup_queues(nullb);
+ kfree(nullb->queues);
out_free_nullb:
kfree(nullb);
dev->nullb = NULL;
@@ -2299,7 +2049,7 @@ static int __init null_init(void)
return -EINVAL;
}
- if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
+ if (g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("submit_queues param is set to %u.\n",
nr_online_nodes);
@@ -2311,18 +2061,12 @@ static int __init null_init(void)
g_submit_queues = 1;
}
- if (g_queue_mode == NULL_Q_MQ && shared_tags) {
- ret = null_init_tag_set(NULL, &tag_set);
- if (ret)
- return ret;
- }
-
config_group_init(&nullb_subsys.su_group);
mutex_init(&nullb_subsys.su_mutex);
ret = configfs_register_subsystem(&nullb_subsys);
if (ret)
- goto err_tagset;
+ return ret;
mutex_init(&lock);
@@ -2349,9 +2093,6 @@ err_dev:
unregister_blkdev(null_major, "nullb");
err_conf:
configfs_unregister_subsystem(&nullb_subsys);
-err_tagset:
- if (g_queue_mode == NULL_Q_MQ && shared_tags)
- blk_mq_free_tag_set(&tag_set);
return ret;
}
@@ -2370,7 +2111,7 @@ static void __exit null_exit(void)
}
mutex_unlock(&lock);
- if (g_queue_mode == NULL_Q_MQ && shared_tags)
+ if (tag_set.ops)
blk_mq_free_tag_set(&tag_set);
}
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 929f659dd255..477b97746823 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -16,11 +16,6 @@
#include <linux/mutex.h>
struct nullb_cmd {
- union {
- struct request *rq;
- struct bio *bio;
- };
- unsigned int tag;
blk_status_t error;
bool fake_timeout;
struct nullb_queue *nq;
@@ -28,16 +23,11 @@ struct nullb_cmd {
};
struct nullb_queue {
- unsigned long *tag_map;
- wait_queue_head_t wait;
- unsigned int queue_depth;
struct nullb_device *dev;
unsigned int requeue_selection;
struct list_head poll_list;
spinlock_t poll_lock;
-
- struct nullb_cmd *cmds;
};
struct nullb_zone {
@@ -60,13 +50,6 @@ struct nullb_zone {
unsigned int capacity;
};
-/* Queue modes */
-enum {
- NULL_Q_BIO = 0,
- NULL_Q_RQ = 1,
- NULL_Q_MQ = 2,
-};
-
struct nullb_device {
struct nullb *nullb;
struct config_group group;
@@ -119,6 +102,7 @@ struct nullb_device {
bool zoned; /* if device is zoned */
bool virt_boundary; /* virtual boundary on/off for the device */
bool no_sched; /* no IO scheduler for the device */
+ bool shared_tags; /* share tag set between devices for blk-mq */
bool shared_tag_bitmap; /* use hostwide shared tags */
};
@@ -130,14 +114,12 @@ struct nullb {
struct gendisk *disk;
struct blk_mq_tag_set *tag_set;
struct blk_mq_tag_set __tag_set;
- unsigned int queue_depth;
atomic_long_t cur_bytes;
struct hrtimer bw_timer;
unsigned long cache_flush_pos;
spinlock_t lock;
struct nullb_queue *queues;
- unsigned int nr_queues;
char disk_name[DISK_NAME_LEN];
};
@@ -147,7 +129,7 @@ blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, unsigned int nr_sectors);
#ifdef CONFIG_BLK_DEV_ZONED
-int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
+int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
@@ -160,7 +142,7 @@ ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
size_t count, enum blk_zone_cond cond);
#else
static inline int null_init_zoned_dev(struct nullb_device *dev,
- struct request_queue *q)
+ struct queue_limits *lim)
{
pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
return -EINVAL;
diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h
index 6b2b370e786f..ef2d05d5f0df 100644
--- a/drivers/block/null_blk/trace.h
+++ b/drivers/block/null_blk/trace.h
@@ -41,10 +41,11 @@ TRACE_EVENT(nullb_zone_op,
__field(unsigned int, zone_cond)
),
TP_fast_assign(
- __entry->op = req_op(cmd->rq);
+ __entry->op = req_op(blk_mq_rq_from_pdu(cmd));
__entry->zone_no = zone_no;
__entry->zone_cond = zone_cond;
- __assign_disk_name(__entry->disk, cmd->rq->q->disk);
+ __assign_disk_name(__entry->disk,
+ blk_mq_rq_from_pdu(cmd)->q->disk);
),
TP_printk("%s req=%-15s zone_no=%u zone_cond=%-10s",
__print_disk_name(__entry->disk),
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 6f5e0994862e..1689e2584104 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -58,7 +58,8 @@ static inline void null_unlock_zone(struct nullb_device *dev,
mutex_unlock(&zone->mutex);
}
-int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
+int null_init_zoned_dev(struct nullb_device *dev,
+ struct queue_limits *lim)
{
sector_t dev_capacity_sects, zone_capacity_sects;
struct nullb_zone *zone;
@@ -151,27 +152,22 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
sector += dev->zone_size_sects;
}
+ lim->zoned = true;
+ lim->chunk_sectors = dev->zone_size_sects;
+ lim->max_zone_append_sectors = dev->zone_size_sects;
+ lim->max_open_zones = dev->zone_max_open;
+ lim->max_active_zones = dev->zone_max_active;
return 0;
}
int null_register_zoned_dev(struct nullb *nullb)
{
- struct nullb_device *dev = nullb->dev;
struct request_queue *q = nullb->q;
- disk_set_zoned(nullb->disk);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
- blk_queue_chunk_sectors(q, dev->zone_size_sects);
nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
- blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
- disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
- disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
-
- if (queue_is_mq(q))
- return blk_revalidate_disk_zones(nullb->disk, NULL);
-
- return 0;
+ return blk_revalidate_disk_zones(nullb->disk, NULL);
}
void null_free_zoned_dev(struct nullb_device *dev)
@@ -394,10 +390,7 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
*/
if (append) {
sector = zone->wp;
- if (dev->queue_mode == NULL_Q_MQ)
- cmd->rq->__sector = sector;
- else
- cmd->bio->bi_iter.bi_sector = sector;
+ blk_mq_rq_from_pdu(cmd)->__sector = sector;
} else if (sector != zone->wp) {
ret = BLK_STS_IOERR;
goto unlock;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d56d972aadb3..21728e9ea5c3 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -340,8 +340,8 @@ static ssize_t device_map_show(const struct class *c, const struct class_attribu
n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
pd->disk->disk_name,
MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
- MAJOR(pd->bdev_handle->bdev->bd_dev),
- MINOR(pd->bdev_handle->bdev->bd_dev));
+ MAJOR(file_bdev(pd->bdev_file)->bd_dev),
+ MINOR(file_bdev(pd->bdev_file)->bd_dev));
}
mutex_unlock(&ctl_mutex);
return n;
@@ -438,7 +438,7 @@ static int pkt_seq_show(struct seq_file *m, void *p)
int states[PACKET_NUM_STATES];
seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name,
- pd->bdev_handle->bdev);
+ file_bdev(pd->bdev_file));
seq_printf(m, "\nSettings:\n");
seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
@@ -715,7 +715,7 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
*/
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
- struct request_queue *q = bdev_get_queue(pd->bdev_handle->bdev);
+ struct request_queue *q = bdev_get_queue(file_bdev(pd->bdev_file));
struct scsi_cmnd *scmd;
struct request *rq;
int ret = 0;
@@ -828,6 +828,12 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
*/
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
+ /*
+ * Some CDRW drives cannot handle writes larger than one packet,
+ * even if the size is a multiple of the packet size.
+ */
+ bio->bi_opf |= REQ_NOMERGE;
+
spin_lock(&pd->iosched.lock);
if (bio_data_dir(bio) == READ)
bio_list_add(&pd->iosched.read_queue, bio);
@@ -1048,7 +1054,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
continue;
bio = pkt->r_bios[f];
- bio_init(bio, pd->bdev_handle->bdev, bio->bi_inline_vecs, 1,
+ bio_init(bio, file_bdev(pd->bdev_file), bio->bi_inline_vecs, 1,
REQ_OP_READ);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_end_io = pkt_end_io_read;
@@ -1264,7 +1270,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
struct device *ddev = disk_to_dev(pd->disk);
int f;
- bio_init(pkt->w_bio, pd->bdev_handle->bdev, pkt->w_bio->bi_inline_vecs,
+ bio_init(pkt->w_bio, file_bdev(pd->bdev_file), pkt->w_bio->bi_inline_vecs,
pkt->frames, REQ_OP_WRITE);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
@@ -2162,20 +2168,20 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
int ret;
long lba;
struct request_queue *q;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
/*
* We need to re-open the cdrom device without O_NONBLOCK to be able
* to read/write from/to it. It is already opened in O_NONBLOCK mode
* so open should not fail.
*/
- bdev_handle = bdev_open_by_dev(pd->bdev_handle->bdev->bd_dev,
+ bdev_file = bdev_file_open_by_dev(file_bdev(pd->bdev_file)->bd_dev,
BLK_OPEN_READ, pd, NULL);
- if (IS_ERR(bdev_handle)) {
- ret = PTR_ERR(bdev_handle);
+ if (IS_ERR(bdev_file)) {
+ ret = PTR_ERR(bdev_file);
goto out;
}
- pd->open_bdev_handle = bdev_handle;
+ pd->f_open_bdev = bdev_file;
ret = pkt_get_last_written(pd, &lba);
if (ret) {
@@ -2184,18 +2190,13 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
}
set_capacity(pd->disk, lba << 2);
- set_capacity_and_notify(pd->bdev_handle->bdev->bd_disk, lba << 2);
+ set_capacity_and_notify(file_bdev(pd->bdev_file)->bd_disk, lba << 2);
- q = bdev_get_queue(pd->bdev_handle->bdev);
+ q = bdev_get_queue(file_bdev(pd->bdev_file));
if (write) {
ret = pkt_open_write(pd);
if (ret)
goto out_putdev;
- /*
- * Some CDRW drives can not handle writes larger than one packet,
- * even if the size is a multiple of the packet size.
- */
- blk_queue_max_hw_sectors(q, pd->settings.size);
set_bit(PACKET_WRITABLE, &pd->flags);
} else {
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
@@ -2218,7 +2219,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
return 0;
out_putdev:
- bdev_release(bdev_handle);
+ fput(bdev_file);
out:
return ret;
}
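All pktcdvd accesses follow the same bdev_handle-to-file translation: the opened struct file owns the block-device reference, file_bdev() dereferences it while held, and fput() is now the single release primitive. The lifecycle, sketched with placeholder devt and holder:

	struct file *bdev_file;

	bdev_file = bdev_file_open_by_dev(devt, BLK_OPEN_READ, holder, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	/* ... use file_bdev(bdev_file) while the file is held ... */

	fput(bdev_file);	/* drops the open reference */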
@@ -2237,8 +2238,8 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
pkt_lock_door(pd, 0);
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- bdev_release(pd->open_bdev_handle);
- pd->open_bdev_handle = NULL;
+ fput(pd->f_open_bdev);
+ pd->f_open_bdev = NULL;
pkt_shrink_pktlist(pd);
}
@@ -2326,7 +2327,7 @@ static void pkt_end_io_read_cloned(struct bio *bio)
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
- struct bio *cloned_bio = bio_alloc_clone(pd->bdev_handle->bdev, bio,
+ struct bio *cloned_bio = bio_alloc_clone(file_bdev(pd->bdev_file), bio,
GFP_NOIO, &pkt_bio_set);
struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
@@ -2338,9 +2339,9 @@ static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
pkt_queue_bio(pd, cloned_bio);
}
-static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+static void pkt_make_request_write(struct bio *bio)
{
- struct pktcdvd_device *pd = q->queuedata;
+ struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
sector_t zone;
struct packet_data *pkt;
int was_empty, blocked_bio;
@@ -2432,7 +2433,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
static void pkt_submit_bio(struct bio *bio)
{
- struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
+ struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
struct device *ddev = disk_to_dev(pd->disk);
struct bio *split;
@@ -2476,7 +2477,7 @@ static void pkt_submit_bio(struct bio *bio)
split = bio;
}
- pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
+ pkt_make_request_write(split);
} while (split != bio);
return;
@@ -2484,20 +2485,11 @@ end_io:
bio_io_error(bio);
}
-static void pkt_init_queue(struct pktcdvd_device *pd)
-{
- struct request_queue *q = pd->disk->queue;
-
- blk_queue_logical_block_size(q, CD_FRAMESIZE);
- blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
- q->queuedata = pd;
-}
-
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
struct device *ddev = disk_to_dev(pd->disk);
int i;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct scsi_device *sdev;
if (pd->pkt_dev == dev) {
@@ -2508,9 +2500,9 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
struct pktcdvd_device *pd2 = pkt_devs[i];
if (!pd2)
continue;
- if (pd2->bdev_handle->bdev->bd_dev == dev) {
+ if (file_bdev(pd2->bdev_file)->bd_dev == dev) {
dev_err(ddev, "%pg already setup\n",
- pd2->bdev_handle->bdev);
+ file_bdev(pd2->bdev_file));
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
@@ -2519,13 +2511,13 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
}
- bdev_handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
+ bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
NULL, NULL);
- if (IS_ERR(bdev_handle))
- return PTR_ERR(bdev_handle);
- sdev = scsi_device_from_queue(bdev_handle->bdev->bd_disk->queue);
+ if (IS_ERR(bdev_file))
+ return PTR_ERR(bdev_file);
+ sdev = scsi_device_from_queue(file_bdev(bdev_file)->bd_disk->queue);
if (!sdev) {
- bdev_release(bdev_handle);
+ fput(bdev_file);
return -EINVAL;
}
put_device(&sdev->sdev_gendev);
@@ -2533,10 +2525,8 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
- pd->bdev_handle = bdev_handle;
- set_blocksize(bdev_handle->bdev, CD_FRAMESIZE);
-
- pkt_init_queue(pd);
+ pd->bdev_file = bdev_file;
+ set_blocksize(file_bdev(bdev_file), CD_FRAMESIZE);
atomic_set(&pd->cdrw.pending_bios, 0);
pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
@@ -2546,11 +2536,11 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
- dev_notice(ddev, "writer mapped to %pg\n", bdev_handle->bdev);
+ dev_notice(ddev, "writer mapped to %pg\n", file_bdev(bdev_file));
return 0;
out_mem:
- bdev_release(bdev_handle);
+ fput(bdev_file);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return -ENOMEM;
@@ -2605,9 +2595,9 @@ static unsigned int pkt_check_events(struct gendisk *disk,
if (!pd)
return 0;
- if (!pd->bdev_handle)
+ if (!pd->bdev_file)
return 0;
- attached_disk = pd->bdev_handle->bdev->bd_disk;
+ attached_disk = file_bdev(pd->bdev_file)->bd_disk;
if (!attached_disk || !attached_disk->fops->check_events)
return 0;
return attached_disk->fops->check_events(attached_disk, clearing);
@@ -2634,6 +2624,10 @@ static const struct block_device_operations pktcdvd_ops = {
*/
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
{
+ struct queue_limits lim = {
+ .max_hw_sectors = PACKET_MAX_SECTORS,
+ .logical_block_size = CD_FRAMESIZE,
+ };
int idx;
int ret = -ENOMEM;
struct pktcdvd_device *pd;
@@ -2673,10 +2667,11 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->write_congestion_on = write_congestion_on;
pd->write_congestion_off = write_congestion_off;
- ret = -ENOMEM;
- disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!disk)
+ disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(disk)) {
+ ret = PTR_ERR(disk);
goto out_mem;
+ }
pd->disk = disk;
disk->major = pktdev_major;
disk->first_minor = idx;
@@ -2692,7 +2687,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
goto out_mem2;
/* inherit events of the host device */
- disk->events = pd->bdev_handle->bdev->bd_disk->events;
+ disk->events = file_bdev(pd->bdev_file)->bd_disk->events;
ret = add_disk(disk);
if (ret)
@@ -2757,7 +2752,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
- bdev_release(pd->bdev_handle);
+ fput(pd->bdev_file);
remove_proc_entry(pd->disk->disk_name, pkt_proc);
dev_notice(ddev, "writer unmapped\n");
@@ -2784,7 +2779,7 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
if (pd) {
- ctrl_cmd->dev = new_encode_dev(pd->bdev_handle->bdev->bd_dev);
+ ctrl_cmd->dev = new_encode_dev(file_bdev(pd->bdev_file)->bd_dev);
ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
} else {
ctrl_cmd->dev = 0;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 36d7b36c60c7..b810ac0a5c4b 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -382,6 +382,14 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
struct ps3disk_private *priv;
int error;
unsigned int devidx;
+ struct queue_limits lim = {
+ .logical_block_size = dev->blk_size,
+ .max_hw_sectors = dev->bounce_size >> 9,
+ .max_segments = -1,
+ .max_segment_size = dev->bounce_size,
+ .dma_alignment = dev->blk_size - 1,
+ };
+
struct request_queue *queue;
struct gendisk *gendisk;
@@ -431,7 +439,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
if (error)
goto fail_teardown;
- gendisk = blk_mq_alloc_disk(&priv->tag_set, dev);
+ gendisk = blk_mq_alloc_disk(&priv->tag_set, &lim, dev);
if (IS_ERR(gendisk)) {
dev_err(&dev->sbd.core, "%s:%u: blk_mq_alloc_disk failed\n",
__func__, __LINE__);
@@ -441,15 +449,8 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
queue = gendisk->queue;
- blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9);
- blk_queue_dma_alignment(queue, dev->blk_size-1);
- blk_queue_logical_block_size(queue, dev->blk_size);
-
blk_queue_write_cache(queue, true, false);
- blk_queue_max_segments(queue, -1);
- blk_queue_max_segment_size(queue, dev->bounce_size);
-
priv->gendisk = gendisk;
gendisk->major = ps3disk_major;
gendisk->first_minor = devidx * PS3DISK_MINORS;
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 38d42af01b25..bdcf083b45e2 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -730,10 +730,10 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
ps3vram_proc_init(dev);
- gendisk = blk_alloc_disk(NUMA_NO_NODE);
- if (!gendisk) {
+ gendisk = blk_alloc_disk(NULL, NUMA_NO_NODE);
+ if (IS_ERR(gendisk)) {
dev_err(&dev->core, "blk_alloc_disk failed\n");
- error = -ENOMEM;
+ error = PTR_ERR(gendisk);
goto out_cache_cleanup;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 12b5d53ec856..26ff5cd2bf0a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -575,7 +575,7 @@ static const struct attribute_group rbd_bus_group = {
};
__ATTRIBUTE_GROUPS(rbd_bus);
-static struct bus_type rbd_bus_type = {
+static const struct bus_type rbd_bus_type = {
.name = "rbd",
.bus_groups = rbd_bus_groups,
};
@@ -4952,6 +4952,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
struct request_queue *q;
unsigned int objset_bytes =
rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
+ struct queue_limits lim = {
+ .max_hw_sectors = objset_bytes >> SECTOR_SHIFT,
+ .max_user_sectors = objset_bytes >> SECTOR_SHIFT,
+ .io_min = rbd_dev->opts->alloc_size,
+ .io_opt = rbd_dev->opts->alloc_size,
+ .max_segments = USHRT_MAX,
+ .max_segment_size = UINT_MAX,
+ };
int err;
memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
@@ -4966,7 +4974,13 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
if (err)
return err;
- disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev);
+ if (rbd_dev->opts->trim) {
+ lim.discard_granularity = rbd_dev->opts->alloc_size;
+ lim.max_hw_discard_sectors = objset_bytes >> SECTOR_SHIFT;
+ lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT;
+ }
+
+ disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev);
if (IS_ERR(disk)) {
err = PTR_ERR(disk);
goto out_tag_set;
@@ -4987,19 +5001,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
- blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
- q->limits.max_sectors = queue_max_hw_sectors(q);
- blk_queue_max_segments(q, USHRT_MAX);
- blk_queue_max_segment_size(q, UINT_MAX);
- blk_queue_io_min(q, rbd_dev->opts->alloc_size);
- blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
-
- if (rbd_dev->opts->trim) {
- q->limits.discard_granularity = rbd_dev->opts->alloc_size;
- blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
- blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
- }
-
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
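Since struct queue_limits is an ordinary stack value until blk_mq_alloc_disk() consumes it, optional features are enabled by plain assignments between the initializer and the allocation, with no locking or ordering concerns. Sketch of rbd's trim case (trim_enabled and alloc_size are placeholders for the option values):

	struct queue_limits lim = {
		.io_min	= alloc_size,
		.io_opt	= alloc_size,
	};

	if (trim_enabled) {
		lim.discard_granularity = alloc_size;
		lim.max_hw_discard_sectors = objset_bytes >> SECTOR_SHIFT;
		lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT;
	}

	disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev);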
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 4044c369d22a..b7ffe03c6160 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1329,43 +1329,6 @@ static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
}
}
-static void setup_request_queue(struct rnbd_clt_dev *dev,
- struct rnbd_msg_open_rsp *rsp)
-{
- blk_queue_logical_block_size(dev->queue,
- le16_to_cpu(rsp->logical_block_size));
- blk_queue_physical_block_size(dev->queue,
- le16_to_cpu(rsp->physical_block_size));
- blk_queue_max_hw_sectors(dev->queue,
- dev->sess->max_io_size / SECTOR_SIZE);
-
- /*
- * we don't support discards to "discontiguous" segments
- * in on request
- */
- blk_queue_max_discard_segments(dev->queue, 1);
-
- blk_queue_max_discard_sectors(dev->queue,
- le32_to_cpu(rsp->max_discard_sectors));
- dev->queue->limits.discard_granularity =
- le32_to_cpu(rsp->discard_granularity);
- dev->queue->limits.discard_alignment =
- le32_to_cpu(rsp->discard_alignment);
- if (le16_to_cpu(rsp->secure_discard))
- blk_queue_max_secure_erase_sectors(dev->queue,
- le32_to_cpu(rsp->max_discard_sectors));
- blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
- blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
- blk_queue_max_segments(dev->queue, dev->sess->max_segments);
- blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
- blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
- blk_queue_write_cache(dev->queue,
- !!(rsp->cache_policy & RNBD_WRITEBACK),
- !!(rsp->cache_policy & RNBD_FUA));
- blk_queue_max_write_zeroes_sectors(dev->queue,
- le32_to_cpu(rsp->max_write_zeroes_sectors));
-}
-
static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
struct rnbd_msg_open_rsp *rsp, int idx)
{
@@ -1403,18 +1366,41 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
static int rnbd_client_setup_device(struct rnbd_clt_dev *dev,
struct rnbd_msg_open_rsp *rsp)
{
+ struct queue_limits lim = {
+ .logical_block_size = le16_to_cpu(rsp->logical_block_size),
+ .physical_block_size = le16_to_cpu(rsp->physical_block_size),
+ .io_opt = dev->sess->max_io_size,
+ .max_hw_sectors = dev->sess->max_io_size / SECTOR_SIZE,
+ .max_hw_discard_sectors = le32_to_cpu(rsp->max_discard_sectors),
+ .discard_granularity = le32_to_cpu(rsp->discard_granularity),
+ .discard_alignment = le32_to_cpu(rsp->discard_alignment),
+ .max_segments = dev->sess->max_segments,
+ .virt_boundary_mask = SZ_4K - 1,
+ .max_write_zeroes_sectors =
+ le32_to_cpu(rsp->max_write_zeroes_sectors),
+ };
int idx = dev->clt_device_id;
dev->size = le64_to_cpu(rsp->nsectors) *
le16_to_cpu(rsp->logical_block_size);
- dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, dev);
+ if (rsp->secure_discard) {
+ lim.max_secure_erase_sectors =
+ le32_to_cpu(rsp->max_discard_sectors);
+ }
+
+ dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, &lim, dev);
if (IS_ERR(dev->gd))
return PTR_ERR(dev->gd);
dev->queue = dev->gd->queue;
rnbd_init_mq_hw_queues(dev);
- setup_request_queue(dev, rsp);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
+ blk_queue_write_cache(dev->queue,
+ !!(rsp->cache_policy & RNBD_WRITEBACK),
+ !!(rsp->cache_policy & RNBD_FUA));
+
return rnbd_clt_setup_gen_disk(dev, rsp, idx);
}
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index 3a0d5dcec6f2..f6e3a3c4b76c 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -145,7 +145,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
priv->sess_dev = sess_dev;
priv->id = id;
- bio = bio_alloc(sess_dev->bdev_handle->bdev, 1,
+ bio = bio_alloc(file_bdev(sess_dev->bdev_file), 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
if (bio_add_page(bio, virt_to_page(data), datalen,
offset_in_page(data)) != datalen) {
@@ -219,7 +219,7 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
rnbd_put_sess_dev(sess_dev);
wait_for_completion(&dc); /* wait for inflights to drop to zero */
- bdev_release(sess_dev->bdev_handle);
+ fput(sess_dev->bdev_file);
mutex_lock(&sess_dev->dev->lock);
list_del(&sess_dev->dev_list);
if (!sess_dev->readonly)
@@ -534,7 +534,7 @@ rnbd_srv_get_or_create_srv_dev(struct block_device *bdev,
static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
struct rnbd_srv_sess_dev *sess_dev)
{
- struct block_device *bdev = sess_dev->bdev_handle->bdev;
+ struct block_device *bdev = file_bdev(sess_dev->bdev_file);
rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
rsp->device_id = cpu_to_le32(sess_dev->device_id);
@@ -560,7 +560,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
static struct rnbd_srv_sess_dev *
rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
const struct rnbd_msg_open *open_msg,
- struct bdev_handle *handle, bool readonly,
+ struct file *bdev_file, bool readonly,
struct rnbd_srv_dev *srv_dev)
{
struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);
@@ -572,7 +572,7 @@ rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
strscpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname));
- sdev->bdev_handle = handle;
+ sdev->bdev_file = bdev_file;
sdev->sess = srv_sess;
sdev->dev = srv_dev;
sdev->readonly = readonly;
@@ -678,7 +678,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
struct rnbd_srv_dev *srv_dev;
struct rnbd_srv_sess_dev *srv_sess_dev;
const struct rnbd_msg_open *open_msg = msg;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
blk_mode_t open_flags = BLK_OPEN_READ;
char *full_path;
struct rnbd_msg_open_rsp *rsp = data;
@@ -716,15 +716,15 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto reject;
}
- bdev_handle = bdev_open_by_path(full_path, open_flags, NULL, NULL);
- if (IS_ERR(bdev_handle)) {
- ret = PTR_ERR(bdev_handle);
+ bdev_file = bdev_file_open_by_path(full_path, open_flags, NULL, NULL);
+ if (IS_ERR(bdev_file)) {
+ ret = PTR_ERR(bdev_file);
pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %pe\n",
- full_path, srv_sess->sessname, bdev_handle);
+ full_path, srv_sess->sessname, bdev_file);
goto free_path;
}
- srv_dev = rnbd_srv_get_or_create_srv_dev(bdev_handle->bdev, srv_sess,
+ srv_dev = rnbd_srv_get_or_create_srv_dev(file_bdev(bdev_file), srv_sess,
open_msg->access_mode);
if (IS_ERR(srv_dev)) {
pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %pe\n",
@@ -734,7 +734,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
}
srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg,
- bdev_handle,
+ bdev_file,
open_msg->access_mode == RNBD_ACCESS_RO,
srv_dev);
if (IS_ERR(srv_sess_dev)) {
@@ -750,7 +750,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
*/
mutex_lock(&srv_dev->lock);
if (!srv_dev->dev_kobj.state_in_sysfs) {
- ret = rnbd_srv_create_dev_sysfs(srv_dev, bdev_handle->bdev);
+ ret = rnbd_srv_create_dev_sysfs(srv_dev, file_bdev(bdev_file));
if (ret) {
mutex_unlock(&srv_dev->lock);
rnbd_srv_err(srv_sess_dev,
@@ -793,7 +793,7 @@ srv_dev_put:
}
rnbd_put_srv_dev(srv_dev);
blkdev_put:
- bdev_release(bdev_handle);
+ fput(bdev_file);
free_path:
kfree(full_path);
reject:
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index 343cc682b617..18d873808b8d 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -46,7 +46,7 @@ struct rnbd_srv_dev {
struct rnbd_srv_sess_dev {
/* Entry inside rnbd_srv_dev struct */
struct list_head dev_list;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct rnbd_srv_session *sess;
struct rnbd_srv_dev *dev;
struct kobject kobj;
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 7bf4b48e2282..c99dd6698977 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -784,6 +784,14 @@ static const struct blk_mq_ops vdc_mq_ops = {
static int probe_disk(struct vdc_port *port)
{
+ struct queue_limits lim = {
+ .physical_block_size = port->vdisk_phys_blksz,
+ .max_hw_sectors = port->max_xfer_size,
+ /* Each segment in a request is up to an aligned page in size. */
+ .seg_boundary_mask = PAGE_SIZE - 1,
+ .max_segment_size = PAGE_SIZE,
+ .max_segments = port->ring_cookies,
+ };
struct request_queue *q;
struct gendisk *g;
int err;
@@ -824,7 +832,7 @@ static int probe_disk(struct vdc_port *port)
if (err)
return err;
- g = blk_mq_alloc_disk(&port->tag_set, port);
+ g = blk_mq_alloc_disk(&port->tag_set, &lim, port);
if (IS_ERR(g)) {
printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
port->vio.name);
@@ -835,12 +843,6 @@ static int probe_disk(struct vdc_port *port)
port->disk = g;
q = g->queue;
- /* Each segment in a request is up to an aligned page in size. */
- blk_queue_segment_boundary(q, PAGE_SIZE - 1);
- blk_queue_max_segment_size(q, PAGE_SIZE);
-
- blk_queue_max_segments(q, port->ring_cookies);
- blk_queue_max_hw_sectors(q, port->max_xfer_size);
g->major = vdc_major;
g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
g->minors = 1 << PARTITION_SHIFT;
@@ -872,8 +874,6 @@ static int probe_disk(struct vdc_port *port)
}
}
- blk_queue_physical_block_size(q, port->vdisk_phys_blksz);
-
pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
g->disk_name,
port->vdisk_size, (port->vdisk_size >> (20 - 9)),
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index f85b6af414b4..6731678f3a41 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -820,7 +820,7 @@ static int swim_floppy_init(struct swim_priv *swd)
goto exit_put_disks;
swd->unit[drive].disk =
- blk_mq_alloc_disk(&swd->unit[drive].tag_set,
+ blk_mq_alloc_disk(&swd->unit[drive].tag_set, NULL,
&swd->unit[drive]);
if (IS_ERR(swd->unit[drive].disk)) {
blk_mq_free_tag_set(&swd->unit[drive].tag_set);
@@ -916,7 +916,7 @@ out:
return ret;
}
-static int swim_remove(struct platform_device *dev)
+static void swim_remove(struct platform_device *dev)
{
struct swim_priv *swd = platform_get_drvdata(dev);
int drive;
@@ -937,13 +937,11 @@ static int swim_remove(struct platform_device *dev)
release_mem_region(res->start, resource_size(res));
kfree(swd);
-
- return 0;
}
static struct platform_driver swim_driver = {
.probe = swim_probe,
- .remove = swim_remove,
+ .remove_new = swim_remove,
.driver = {
.name = CARDNAME,
},
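swim is also switched to the void-returning platform remove callback: the driver core ignores the old int return anyway, so .remove_new makes that explicit during the transition period. The converted shape, with illustrative names:

	static void foo_remove(struct platform_device *pdev)
	{
		/* tear down unconditionally; no error can be reported */
	}

	static struct platform_driver foo_driver = {
		.probe		= foo_probe,
		.remove_new	= foo_remove,
		.driver		= { .name = "foo" },
	};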
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index c2bc85826358..a04756ac778e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1210,7 +1210,7 @@ static int swim3_attach(struct macio_dev *mdev,
if (rc)
goto out_unregister;
- disk = blk_mq_alloc_disk(&fs->tag_set, fs);
+ disk = blk_mq_alloc_disk(&fs->tag_set, NULL, fs);
if (IS_ERR(disk)) {
rc = PTR_ERR(disk);
goto out_free_tag_set;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 1dfb2e77898b..bea3d5cf8a83 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -246,21 +246,12 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
return 0;
}
-static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
+static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
- const struct ublk_param_zoned *p = &ub->params.zoned;
-
- disk_set_zoned(ub->ub_disk);
blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
blk_queue_required_elevator_features(ub->ub_disk->queue,
ELEVATOR_F_ZBD_SEQ_WRITE);
- disk_set_max_active_zones(ub->ub_disk, p->max_active_zones);
- disk_set_max_open_zones(ub->ub_disk, p->max_open_zones);
- blk_queue_max_zone_append_sectors(ub->ub_disk->queue, p->max_zone_append_sectors);
-
ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
-
- return 0;
}
/* Based on virtblk_alloc_report_buffer */
@@ -432,9 +423,8 @@ static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
return -EOPNOTSUPP;
}
-static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
+static void ublk_dev_param_zoned_apply(struct ublk_device *ub)
{
- return -EOPNOTSUPP;
}
static int ublk_revalidate_disk_zones(struct ublk_device *ub)
@@ -498,11 +488,6 @@ static void ublk_dev_param_basic_apply(struct ublk_device *ub)
struct request_queue *q = ub->ub_disk->queue;
const struct ublk_param_basic *p = &ub->params.basic;
- blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
- blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
- blk_queue_io_min(q, 1 << p->io_min_shift);
- blk_queue_io_opt(q, 1 << p->io_opt_shift);
-
blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
p->attrs & UBLK_ATTR_FUA);
if (p->attrs & UBLK_ATTR_ROTATIONAL)
@@ -510,29 +495,12 @@ static void ublk_dev_param_basic_apply(struct ublk_device *ub)
else
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- blk_queue_max_hw_sectors(q, p->max_sectors);
- blk_queue_chunk_sectors(q, p->chunk_sectors);
- blk_queue_virt_boundary(q, p->virt_boundary_mask);
-
if (p->attrs & UBLK_ATTR_READ_ONLY)
set_disk_ro(ub->ub_disk, true);
set_capacity(ub->ub_disk, p->dev_sectors);
}
-static void ublk_dev_param_discard_apply(struct ublk_device *ub)
-{
- struct request_queue *q = ub->ub_disk->queue;
- const struct ublk_param_discard *p = &ub->params.discard;
-
- q->limits.discard_alignment = p->discard_alignment;
- q->limits.discard_granularity = p->discard_granularity;
- blk_queue_max_discard_sectors(q, p->max_discard_sectors);
- blk_queue_max_write_zeroes_sectors(q,
- p->max_write_zeroes_sectors);
- blk_queue_max_discard_segments(q, p->max_discard_segments);
-}
-
static int ublk_validate_params(const struct ublk_device *ub)
{
/* basic param is the only one which must be set */
@@ -576,20 +544,12 @@ static int ublk_validate_params(const struct ublk_device *ub)
return 0;
}
-static int ublk_apply_params(struct ublk_device *ub)
+static void ublk_apply_params(struct ublk_device *ub)
{
- if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
- return -EINVAL;
-
ublk_dev_param_basic_apply(ub);
- if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
- ublk_dev_param_discard_apply(ub);
-
if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
- return ublk_dev_param_zoned_apply(ub);
-
- return 0;
+ ublk_dev_param_zoned_apply(ub);
}
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
@@ -645,14 +605,16 @@ static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_NEED_GET_DATA;
}
-static struct ublk_device *ublk_get_device(struct ublk_device *ub)
+/* Called in the slow path only; keep it noinline for tracing purposes */
+static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
return ub;
return NULL;
}
-static void ublk_put_device(struct ublk_device *ub)
+/* Called in the slow path only; keep it noinline for tracing purposes */
+static noinline void ublk_put_device(struct ublk_device *ub)
{
put_device(&ub->cdev_dev);
}
@@ -711,7 +673,7 @@ static void ublk_free_disk(struct gendisk *disk)
struct ublk_device *ub = disk->private_data;
clear_bit(UB_STATE_USED, &ub->state);
- put_device(&ub->cdev_dev);
+ ublk_put_device(ub);
}
static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
@@ -2182,7 +2144,7 @@ static void ublk_remove(struct ublk_device *ub)
cancel_work_sync(&ub->stop_work);
cancel_work_sync(&ub->quiesce_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
- put_device(&ub->cdev_dev);
+ ublk_put_device(ub);
ublks_added--;
}
@@ -2205,12 +2167,47 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
{
const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
+ const struct ublk_param_basic *p = &ub->params.basic;
int ublksrv_pid = (int)header->data[0];
+ struct queue_limits lim = {
+ .logical_block_size = 1 << p->logical_bs_shift,
+ .physical_block_size = 1 << p->physical_bs_shift,
+ .io_min = 1 << p->io_min_shift,
+ .io_opt = 1 << p->io_opt_shift,
+ .max_hw_sectors = p->max_sectors,
+ .chunk_sectors = p->chunk_sectors,
+ .virt_boundary_mask = p->virt_boundary_mask,
+
+ };
struct gendisk *disk;
int ret = -EINVAL;
if (ublksrv_pid <= 0)
return -EINVAL;
+ if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
+ return -EINVAL;
+
+ if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
+ const struct ublk_param_discard *pd = &ub->params.discard;
+
+ lim.discard_alignment = pd->discard_alignment;
+ lim.discard_granularity = pd->discard_granularity;
+ lim.max_hw_discard_sectors = pd->max_discard_sectors;
+ lim.max_write_zeroes_sectors = pd->max_write_zeroes_sectors;
+ lim.max_discard_segments = pd->max_discard_segments;
+ }
+
+ if (ub->params.types & UBLK_PARAM_TYPE_ZONED) {
+ const struct ublk_param_zoned *p = &ub->params.zoned;
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return -EOPNOTSUPP;
+
+ lim.zoned = true;
+ lim.max_active_zones = p->max_active_zones;
+ lim.max_open_zones = p->max_open_zones;
+ lim.max_zone_append_sectors = p->max_zone_append_sectors;
+ }
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
@@ -2222,7 +2219,7 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
goto out_unlock;
}
- disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
+ disk = blk_mq_alloc_disk(&ub->tag_set, &lim, NULL);
if (IS_ERR(disk)) {
ret = PTR_ERR(disk);
goto out_unlock;
@@ -2234,15 +2231,13 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
ub->dev_info.ublksrv_pid = ublksrv_pid;
ub->ub_disk = disk;
- ret = ublk_apply_params(ub);
- if (ret)
- goto out_put_disk;
+ ublk_apply_params(ub);
/* don't probe partitions if any one ubq daemon is untrusted */
if (ub->nr_privileged_daemon != ub->nr_queues_ready)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
- get_device(&ub->cdev_dev);
+ ublk_get_device(ub);
ub->dev_info.state = UBLK_S_DEV_LIVE;
if (ublk_dev_is_zoned(ub)) {
@@ -2262,7 +2257,6 @@ out_put_cdev:
ub->dev_info.state = UBLK_S_DEV_DEAD;
ublk_put_device(ub);
}
-out_put_disk:
if (ret)
put_disk(disk);
out_unlock:
@@ -2474,7 +2468,7 @@ static inline bool ublk_idr_freed(int id)
return ptr == NULL;
}
-static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
+static int ublk_ctrl_del_dev(struct ublk_device **p_ub, bool wait)
{
struct ublk_device *ub = *p_ub;
int idx = ub->ub_number;
@@ -2508,7 +2502,7 @@ static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
* - the device number is freed already, we will not find this
* device via ublk_get_device_from_id()
*/
- if (wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
+ if (wait && wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
return -EINTR;
return 0;
}
@@ -2907,7 +2901,10 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
ret = ublk_ctrl_add_dev(cmd);
break;
case UBLK_CMD_DEL_DEV:
- ret = ublk_ctrl_del_dev(&ub);
+ ret = ublk_ctrl_del_dev(&ub, true);
+ break;
+ case UBLK_U_CMD_DEL_DEV_ASYNC:
+ ret = ublk_ctrl_del_dev(&ub, false);
break;
case UBLK_CMD_GET_QUEUE_AFFINITY:
ret = ublk_ctrl_get_queue_affinity(ub, cmd);
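
The ublk conversion above shows the intended pattern for devices with user-supplied parameters: every limit, including the optional discard and zoned ones, is collected into a stack queue_limits before the disk exists, so the queue is never live with default limits. A hedged sketch of that shape, with invented parameter names standing in for ublk_params:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

struct mydrv_params {			/* illustrative, not ublk's */
	unsigned int lbs_shift;
	unsigned int max_sectors;
	bool has_discard;
	unsigned int max_discard_sectors;
};

static struct gendisk *mydrv_alloc_disk(struct blk_mq_tag_set *set,
					const struct mydrv_params *p)
{
	struct queue_limits lim = {
		.logical_block_size	= 1 << p->lbs_shift,
		.max_hw_sectors		= p->max_sectors,
	};

	/* Optional feature limits are filled in before allocation. */
	if (p->has_discard)
		lim.max_hw_discard_sectors = p->max_discard_sectors;

	return blk_mq_alloc_disk(set, &lim, NULL);
}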
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5bf98fd6a651..42dea7601d87 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -720,25 +720,24 @@ fail_report:
return ret;
}
-static int virtblk_probe_zoned_device(struct virtio_device *vdev,
- struct virtio_blk *vblk,
- struct request_queue *q)
+static int virtblk_read_zoned_limits(struct virtio_blk *vblk,
+ struct queue_limits *lim)
{
+ struct virtio_device *vdev = vblk->vdev;
u32 v, wg;
dev_dbg(&vdev->dev, "probing host-managed zoned device\n");
- disk_set_zoned(vblk->disk);
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ lim->zoned = true;
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_open_zones, &v);
- disk_set_max_open_zones(vblk->disk, v);
+ lim->max_open_zones = v;
dev_dbg(&vdev->dev, "max open zones = %u\n", v);
virtio_cread(vdev, struct virtio_blk_config,
zoned.max_active_zones, &v);
- disk_set_max_active_zones(vblk->disk, v);
+ lim->max_active_zones = v;
dev_dbg(&vdev->dev, "max active zones = %u\n", v);
virtio_cread(vdev, struct virtio_blk_config,
@@ -747,8 +746,8 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
dev_warn(&vdev->dev, "zero write granularity reported\n");
return -ENODEV;
}
- blk_queue_physical_block_size(q, wg);
- blk_queue_io_min(q, wg);
+ lim->physical_block_size = wg;
+ lim->io_min = wg;
dev_dbg(&vdev->dev, "write granularity = %u\n", wg);
@@ -764,13 +763,13 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
vblk->zone_sectors);
return -ENODEV;
}
- blk_queue_chunk_sectors(q, vblk->zone_sectors);
+ lim->chunk_sectors = vblk->zone_sectors;
dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
dev_warn(&vblk->vdev->dev,
"ignoring negotiated F_DISCARD for zoned device\n");
- blk_queue_max_discard_sectors(q, 0);
+ lim->max_hw_discard_sectors = 0;
}
virtio_cread(vdev, struct virtio_blk_config,
@@ -785,25 +784,21 @@ static int virtblk_probe_zoned_device(struct virtio_device *vdev,
wg, v);
return -ENODEV;
}
- blk_queue_max_zone_append_sectors(q, v);
+ lim->max_zone_append_sectors = v;
dev_dbg(&vdev->dev, "max append sectors = %u\n", v);
- return blk_revalidate_disk_zones(vblk->disk, NULL);
+ return 0;
}
-
#else
-
/*
- * Zoned block device support is not configured in this kernel.
- * Host-managed zoned devices can't be supported, but others are
- * good to go as regular block devices.
+ * Zoned block device support is not configured in this kernel; host-managed
+ * zoned devices can't be supported.
*/
#define virtblk_report_zones NULL
-
-static inline int virtblk_probe_zoned_device(struct virtio_device *vdev,
- struct virtio_blk *vblk, struct request_queue *q)
+static inline int virtblk_read_zoned_limits(struct virtio_blk *vblk,
+ struct queue_limits *lim)
{
- dev_err(&vdev->dev,
+ dev_err(&vblk->vdev->dev,
"virtio_blk: zoned devices are not supported");
return -EOPNOTSUPP;
}
@@ -1248,31 +1243,17 @@ static const struct blk_mq_ops virtio_mq_ops = {
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
-static int virtblk_probe(struct virtio_device *vdev)
+static int virtblk_read_limits(struct virtio_blk *vblk,
+ struct queue_limits *lim)
{
- struct virtio_blk *vblk;
- struct request_queue *q;
- int err, index;
-
+ struct virtio_device *vdev = vblk->vdev;
u32 v, blk_size, max_size, sg_elems, opt_io_size;
u32 max_discard_segs = 0;
u32 discard_granularity = 0;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
- unsigned int queue_depth;
size_t max_dma_size;
-
- if (!vdev->config->get) {
- dev_err(&vdev->dev, "%s failure: config access disabled\n",
- __func__);
- return -EINVAL;
- }
-
- err = ida_alloc_range(&vd_index_ida, 0,
- minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
- if (err < 0)
- goto out;
- index = err;
+ int err;
/* We need to know how many segments before we allocate. */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
@@ -1286,78 +1267,11 @@ static int virtblk_probe(struct virtio_device *vdev)
/* Prevent integer overflows and honor max vq size */
sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
- vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
- if (!vblk) {
- err = -ENOMEM;
- goto out_free_index;
- }
-
- mutex_init(&vblk->vdev_mutex);
-
- vblk->vdev = vdev;
-
- INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
-
- err = init_vq(vblk);
- if (err)
- goto out_free_vblk;
-
- /* Default queue sizing is to fill the ring. */
- if (!virtblk_queue_depth) {
- queue_depth = vblk->vqs[0].vq->num_free;
- /* ... but without indirect descs, we use 2 descs per req */
- if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
- queue_depth /= 2;
- } else {
- queue_depth = virtblk_queue_depth;
- }
-
- memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
- vblk->tag_set.ops = &virtio_mq_ops;
- vblk->tag_set.queue_depth = queue_depth;
- vblk->tag_set.numa_node = NUMA_NO_NODE;
- vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- vblk->tag_set.cmd_size =
- sizeof(struct virtblk_req) +
- sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
- vblk->tag_set.driver_data = vblk;
- vblk->tag_set.nr_hw_queues = vblk->num_vqs;
- vblk->tag_set.nr_maps = 1;
- if (vblk->io_queues[HCTX_TYPE_POLL])
- vblk->tag_set.nr_maps = 3;
-
- err = blk_mq_alloc_tag_set(&vblk->tag_set);
- if (err)
- goto out_free_vq;
-
- vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
- if (IS_ERR(vblk->disk)) {
- err = PTR_ERR(vblk->disk);
- goto out_free_tags;
- }
- q = vblk->disk->queue;
-
- virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
-
- vblk->disk->major = major;
- vblk->disk->first_minor = index_to_minor(index);
- vblk->disk->minors = 1 << PART_BITS;
- vblk->disk->private_data = vblk;
- vblk->disk->fops = &virtblk_fops;
- vblk->index = index;
-
- /* configure queue flush support */
- virtblk_update_cache_mode(vdev);
-
- /* If disk is read-only in the host, the guest should obey */
- if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
- set_disk_ro(vblk->disk, 1);
-
/* We can handle whatever the host told us to handle. */
- blk_queue_max_segments(q, sg_elems);
+ lim->max_segments = sg_elems;
/* No real sector limit. */
- blk_queue_max_hw_sectors(q, UINT_MAX);
+ lim->max_hw_sectors = UINT_MAX;
max_dma_size = virtio_max_dma_size(vdev);
max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
@@ -1369,7 +1283,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!err)
max_size = min(max_size, v);
- blk_queue_max_segment_size(q, max_size);
+ lim->max_segment_size = max_size;
/* Host can optionally specify the block size of the device */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
@@ -1381,38 +1295,37 @@ static int virtblk_probe(struct virtio_device *vdev)
dev_err(&vdev->dev,
"virtio_blk: invalid block size: 0x%x\n",
blk_size);
- goto out_cleanup_disk;
+ return err;
}
- blk_queue_logical_block_size(q, blk_size);
+ lim->logical_block_size = blk_size;
} else
- blk_size = queue_logical_block_size(q);
+ blk_size = lim->logical_block_size;
/* Use topology information if available */
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, physical_block_exp,
&physical_block_exp);
if (!err && physical_block_exp)
- blk_queue_physical_block_size(q,
- blk_size * (1 << physical_block_exp));
+ lim->physical_block_size = blk_size * (1 << physical_block_exp);
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, alignment_offset,
&alignment_offset);
if (!err && alignment_offset)
- blk_queue_alignment_offset(q, blk_size * alignment_offset);
+ lim->alignment_offset = blk_size * alignment_offset;
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, min_io_size,
&min_io_size);
if (!err && min_io_size)
- blk_queue_io_min(q, blk_size * min_io_size);
+ lim->io_min = blk_size * min_io_size;
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
struct virtio_blk_config, opt_io_size,
&opt_io_size);
if (!err && opt_io_size)
- blk_queue_io_opt(q, blk_size * opt_io_size);
+ lim->io_opt = blk_size * opt_io_size;
if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
virtio_cread(vdev, struct virtio_blk_config,
@@ -1420,7 +1333,7 @@ static int virtblk_probe(struct virtio_device *vdev)
virtio_cread(vdev, struct virtio_blk_config,
max_discard_sectors, &v);
- blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);
+ lim->max_hw_discard_sectors = v ? v : UINT_MAX;
virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
&max_discard_segs);
@@ -1429,7 +1342,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
virtio_cread(vdev, struct virtio_blk_config,
max_write_zeroes_sectors, &v);
- blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
+ lim->max_write_zeroes_sectors = v ? v : UINT_MAX;
}
/* The discard and secure erase limits are combined since the Linux
@@ -1455,8 +1368,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!v) {
dev_err(&vdev->dev,
"virtio_blk: secure_erase_sector_alignment can't be 0\n");
- err = -EINVAL;
- goto out_cleanup_disk;
+ return -EINVAL;
}
discard_granularity = min_not_zero(discard_granularity, v);
@@ -1470,11 +1382,10 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!v) {
dev_err(&vdev->dev,
"virtio_blk: max_secure_erase_sectors can't be 0\n");
- err = -EINVAL;
- goto out_cleanup_disk;
+ return -EINVAL;
}
- blk_queue_max_secure_erase_sectors(q, v);
+ lim->max_secure_erase_sectors = v;
virtio_cread(vdev, struct virtio_blk_config,
max_secure_erase_seg, &v);
@@ -1485,8 +1396,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!v) {
dev_err(&vdev->dev,
"virtio_blk: max_secure_erase_seg can't be 0\n");
- err = -EINVAL;
- goto out_cleanup_disk;
+ return -EINVAL;
}
max_discard_segs = min_not_zero(max_discard_segs, v);
@@ -1502,45 +1412,142 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!max_discard_segs)
max_discard_segs = sg_elems;
- blk_queue_max_discard_segments(q,
- min(max_discard_segs, MAX_DISCARD_SEGMENTS));
+ lim->max_discard_segments =
+ min(max_discard_segs, MAX_DISCARD_SEGMENTS);
if (discard_granularity)
- q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT;
+ lim->discard_granularity =
+ discard_granularity << SECTOR_SHIFT;
else
- q->limits.discard_granularity = blk_size;
+ lim->discard_granularity = blk_size;
}
- virtblk_update_capacity(vblk, false);
- virtio_device_ready(vdev);
-
- /*
- * All steps that follow use the VQs therefore they need to be
- * placed after the virtio_device_ready() call above.
- */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_ZONED)) {
u8 model;
- virtio_cread(vdev, struct virtio_blk_config, zoned.model,
- &model);
+ virtio_cread(vdev, struct virtio_blk_config, zoned.model, &model);
switch (model) {
case VIRTIO_BLK_Z_NONE:
case VIRTIO_BLK_Z_HA:
- /* Present the host-aware device as non-zoned */
- break;
+ /* treat host-aware devices as non-zoned */
+ return 0;
case VIRTIO_BLK_Z_HM:
- err = virtblk_probe_zoned_device(vdev, vblk, q);
+ err = virtblk_read_zoned_limits(vblk, lim);
if (err)
- goto out_cleanup_disk;
+ return err;
break;
default:
- dev_err(&vdev->dev, "unsupported zone model %d\n",
- model);
- err = -EINVAL;
- goto out_cleanup_disk;
+ dev_err(&vdev->dev, "unsupported zone model %d\n", model);
+ return -EINVAL;
}
}
+ return 0;
+}
+
+static int virtblk_probe(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk;
+ struct queue_limits lim = { };
+ int err, index;
+ unsigned int queue_depth;
+
+ if (!vdev->config->get) {
+ dev_err(&vdev->dev, "%s failure: config access disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ err = ida_alloc_range(&vd_index_ida, 0,
+ minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
+ if (err < 0)
+ goto out;
+ index = err;
+
+ vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
+ if (!vblk) {
+ err = -ENOMEM;
+ goto out_free_index;
+ }
+
+ mutex_init(&vblk->vdev_mutex);
+
+ vblk->vdev = vdev;
+
+ INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
+
+ err = init_vq(vblk);
+ if (err)
+ goto out_free_vblk;
+
+ /* Default queue sizing is to fill the ring. */
+ if (!virtblk_queue_depth) {
+ queue_depth = vblk->vqs[0].vq->num_free;
+ /* ... but without indirect descs, we use 2 descs per req */
+ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
+ queue_depth /= 2;
+ } else {
+ queue_depth = virtblk_queue_depth;
+ }
+
+ memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
+ vblk->tag_set.ops = &virtio_mq_ops;
+ vblk->tag_set.queue_depth = queue_depth;
+ vblk->tag_set.numa_node = NUMA_NO_NODE;
+ vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ vblk->tag_set.cmd_size =
+ sizeof(struct virtblk_req) +
+ sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
+ vblk->tag_set.driver_data = vblk;
+ vblk->tag_set.nr_hw_queues = vblk->num_vqs;
+ vblk->tag_set.nr_maps = 1;
+ if (vblk->io_queues[HCTX_TYPE_POLL])
+ vblk->tag_set.nr_maps = 3;
+
+ err = blk_mq_alloc_tag_set(&vblk->tag_set);
+ if (err)
+ goto out_free_vq;
+
+ err = virtblk_read_limits(vblk, &lim);
+ if (err)
+ goto out_free_tags;
+
+ vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, &lim, vblk);
+ if (IS_ERR(vblk->disk)) {
+ err = PTR_ERR(vblk->disk);
+ goto out_free_tags;
+ }
+
+ virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
+
+ vblk->disk->major = major;
+ vblk->disk->first_minor = index_to_minor(index);
+ vblk->disk->minors = 1 << PART_BITS;
+ vblk->disk->private_data = vblk;
+ vblk->disk->fops = &virtblk_fops;
+ vblk->index = index;
+
+ /* configure queue flush support */
+ virtblk_update_cache_mode(vdev);
+
+ /* If disk is read-only in the host, the guest should obey */
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
+ set_disk_ro(vblk->disk, 1);
+
+ virtblk_update_capacity(vblk, false);
+ virtio_device_ready(vdev);
+
+ /*
+ * All steps that follow use the VQs, so they need to be
+ * placed after the virtio_device_ready() call above.
+ */
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) {
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue);
+ err = blk_revalidate_disk_zones(vblk->disk, NULL);
+ if (err)
+ goto out_cleanup_disk;
+ }
+
err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
if (err)
goto out_cleanup_disk;
@@ -1593,14 +1600,15 @@ static int virtblk_freeze(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
+ /* Ensure no requests remain in the virtqueues before deleting the vqs. */
+ blk_mq_freeze_queue(vblk->disk->queue);
+
/* Ensure we don't receive any more interrupts */
virtio_reset_device(vdev);
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
- blk_mq_quiesce_queue(vblk->disk->queue);
-
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
@@ -1618,7 +1626,7 @@ static int virtblk_restore(struct virtio_device *vdev)
virtio_device_ready(vdev);
- blk_mq_unquiesce_queue(vblk->disk->queue);
+ blk_mq_unfreeze_queue(vblk->disk->queue);
return 0;
}
#endif
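
The virtio_blk freeze/restore change above swaps queue quiescing for queue freezing: freezing waits for in-flight requests to complete rather than merely stopping dispatch, so the virtqueues are guaranteed empty before del_vqs() tears them down. A condensed sketch of the pairing, assuming q is the driver's request queue:

#include <linux/blk-mq.h>

static void mydrv_freeze(struct request_queue *q)
{
	blk_mq_freeze_queue(q);		/* drain in-flight, block new I/O */
	/* ... reset the device and delete its queues here ... */
}

static void mydrv_restore(struct request_queue *q)
{
	/* ... re-create the device queues and mark the device ready ... */
	blk_mq_unfreeze_queue(q);	/* I/O submission resumes */
}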
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4defd7f387c7..944576d582fb 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -465,7 +465,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
}
req->dev = vbd->pdevice;
- req->bdev = vbd->bdev_handle->bdev;
+ req->bdev = file_bdev(vbd->bdev_file);
rc = 0;
out:
@@ -969,7 +969,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
int err = 0;
int status = BLKIF_RSP_OKAY;
struct xen_blkif *blkif = ring->blkif;
- struct block_device *bdev = blkif->vbd.bdev_handle->bdev;
+ struct block_device *bdev = file_bdev(blkif->vbd.bdev_file);
struct phys_req preq;
xen_blkif_get(blkif);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 1432c83183d0..b427d54bc120 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -221,7 +221,7 @@ struct xen_vbd {
unsigned char type;
/* phys device that this vbd maps to. */
u32 pdevice;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
/* Cached size parameter. */
sector_t size;
unsigned int flush_support:1;
@@ -360,7 +360,7 @@ struct pending_req {
};
-#define vbd_sz(_v) bdev_nr_sectors((_v)->bdev_handle->bdev)
+#define vbd_sz(_v) bdev_nr_sectors(file_bdev((_v)->bdev_file))
#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b) \
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index e34219ea2b05..0621878940ae 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -81,7 +81,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
int i;
/* Not ready to connect? */
- if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_handle)
+ if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_file)
return;
/* Already connected? */
@@ -99,13 +99,12 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
return;
}
- err = sync_blockdev(blkif->vbd.bdev_handle->bdev);
+ err = sync_blockdev(file_bdev(blkif->vbd.bdev_file));
if (err) {
xenbus_dev_error(blkif->be->dev, err, "block flush");
return;
}
- invalidate_inode_pages2(
- blkif->vbd.bdev_handle->bdev->bd_inode->i_mapping);
+ invalidate_inode_pages2(blkif->vbd.bdev_file->f_mapping);
for (i = 0; i < blkif->nr_rings; i++) {
ring = &blkif->rings[i];
@@ -473,9 +472,9 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev)
static void xen_vbd_free(struct xen_vbd *vbd)
{
- if (vbd->bdev_handle)
- bdev_release(vbd->bdev_handle);
- vbd->bdev_handle = NULL;
+ if (vbd->bdev_file)
+ fput(vbd->bdev_file);
+ vbd->bdev_file = NULL;
}
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
@@ -483,7 +482,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
int cdrom)
{
struct xen_vbd *vbd;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
vbd = &blkif->vbd;
vbd->handle = handle;
@@ -492,17 +491,17 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
vbd->pdevice = MKDEV(major, minor);
- bdev_handle = bdev_open_by_dev(vbd->pdevice, vbd->readonly ?
+ bdev_file = bdev_file_open_by_dev(vbd->pdevice, vbd->readonly ?
BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL);
- if (IS_ERR(bdev_handle)) {
+ if (IS_ERR(bdev_file)) {
pr_warn("xen_vbd_create: device %08x could not be opened\n",
vbd->pdevice);
return -ENOENT;
}
- vbd->bdev_handle = bdev_handle;
- if (vbd->bdev_handle->bdev->bd_disk == NULL) {
+ vbd->bdev_file = bdev_file;
+ if (file_bdev(vbd->bdev_file)->bd_disk == NULL) {
pr_warn("xen_vbd_create: device %08x doesn't exist\n",
vbd->pdevice);
xen_vbd_free(vbd);
@@ -510,14 +509,14 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
}
vbd->size = vbd_sz(vbd);
- if (cdrom || disk_to_cdi(vbd->bdev_handle->bdev->bd_disk))
+ if (cdrom || disk_to_cdi(file_bdev(vbd->bdev_file)->bd_disk))
vbd->type |= VDISK_CDROM;
- if (vbd->bdev_handle->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
+ if (file_bdev(vbd->bdev_file)->bd_disk->flags & GENHD_FL_REMOVABLE)
vbd->type |= VDISK_REMOVABLE;
- if (bdev_write_cache(bdev_handle->bdev))
+ if (bdev_write_cache(file_bdev(bdev_file)))
vbd->flush_support = true;
- if (bdev_max_secure_erase_sectors(bdev_handle->bdev))
+ if (bdev_max_secure_erase_sectors(file_bdev(bdev_file)))
vbd->discard_secure = true;
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
@@ -570,7 +569,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
struct xen_blkif *blkif = be->blkif;
int err;
int state = 0;
- struct block_device *bdev = be->blkif->vbd.bdev_handle->bdev;
+ struct block_device *bdev = file_bdev(be->blkif->vbd.bdev_file);
if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
return;
@@ -932,7 +931,7 @@ again:
}
err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
(unsigned long)bdev_logical_block_size(
- be->blkif->vbd.bdev_handle->bdev));
+ file_bdev(be->blkif->vbd.bdev_file)));
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/sector-size",
dev->nodename);
@@ -940,7 +939,7 @@ again:
}
err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
bdev_physical_block_size(
- be->blkif->vbd.bdev_handle->bdev));
+ file_bdev(be->blkif->vbd.bdev_file)));
if (err)
xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
dev->nodename);
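
The xen-blkback hunks above are a straight substitution of the bdev_handle API with the struct file based one: bdev_file_open_by_dev() replaces bdev_open_by_dev(), file_bdev() digs out the block_device, and fput() replaces bdev_release(). A self-contained sketch of the new open/use/close lifecycle (error handling condensed, names illustrative):

#include <linux/blkdev.h>
#include <linux/file.h>

static int mydrv_attach(dev_t devt)
{
	struct file *bdev_file;
	struct block_device *bdev;

	bdev_file = bdev_file_open_by_dev(devt,
					  BLK_OPEN_READ | BLK_OPEN_WRITE,
					  NULL, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	bdev = file_bdev(bdev_file);	/* underlying block device */
	pr_info("backing device has %llu sectors\n",
		(unsigned long long)bdev_nr_sectors(bdev));

	fput(bdev_file);		/* drop the reference when done */
	return 0;
}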
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 434fab306777..fd7c0ff2139c 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -941,39 +941,35 @@ static const struct blk_mq_ops blkfront_mq_ops = {
.complete = blkif_complete_rq,
};
-static void blkif_set_queue_limits(struct blkfront_info *info)
+static void blkif_set_queue_limits(const struct blkfront_info *info,
+ struct queue_limits *lim)
{
- struct request_queue *rq = info->rq;
- struct gendisk *gd = info->gd;
unsigned int segments = info->max_indirect_segments ? :
BLKIF_MAX_SEGMENTS_PER_REQUEST;
- blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
-
if (info->feature_discard) {
- blk_queue_max_discard_sectors(rq, get_capacity(gd));
- rq->limits.discard_granularity = info->discard_granularity ?:
- info->physical_sector_size;
- rq->limits.discard_alignment = info->discard_alignment;
+ lim->max_hw_discard_sectors = UINT_MAX;
+ if (info->discard_granularity)
+ lim->discard_granularity = info->discard_granularity;
+ lim->discard_alignment = info->discard_alignment;
if (info->feature_secdiscard)
- blk_queue_max_secure_erase_sectors(rq,
- get_capacity(gd));
+ lim->max_secure_erase_sectors = UINT_MAX;
}
/* Hard sector size and max sectors impersonate the equiv. hardware. */
- blk_queue_logical_block_size(rq, info->sector_size);
- blk_queue_physical_block_size(rq, info->physical_sector_size);
- blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
+ lim->logical_block_size = info->sector_size;
+ lim->physical_block_size = info->physical_sector_size;
+ lim->max_hw_sectors = (segments * XEN_PAGE_SIZE) / 512;
/* Each segment in a request is up to an aligned page in size. */
- blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
- blk_queue_max_segment_size(rq, PAGE_SIZE);
+ lim->seg_boundary_mask = PAGE_SIZE - 1;
+ lim->max_segment_size = PAGE_SIZE;
/* Ensure a merged request will fit in a single I/O ring slot. */
- blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
+ lim->max_segments = segments / GRANTS_PER_PSEG;
/* Make sure buffer addresses are sector-aligned. */
- blk_queue_dma_alignment(rq, 511);
+ lim->dma_alignment = 511;
}
static const char *flush_info(struct blkfront_info *info)
@@ -1070,6 +1066,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
struct blkfront_info *info, u16 sector_size,
unsigned int physical_sector_size)
{
+ struct queue_limits lim = {};
struct gendisk *gd;
int nr_minors = 1;
int err;
@@ -1136,11 +1133,13 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
if (err)
goto out_release_minors;
- gd = blk_mq_alloc_disk(&info->tag_set, info);
+ blkif_set_queue_limits(info, &lim);
+ gd = blk_mq_alloc_disk(&info->tag_set, &lim, info);
if (IS_ERR(gd)) {
err = PTR_ERR(gd);
goto out_free_tag_set;
}
+ blk_queue_flag_set(QUEUE_FLAG_VIRT, gd->queue);
strcpy(gd->disk_name, DEV_NAME);
ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
@@ -1162,7 +1161,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
info->gd = gd;
info->sector_size = sector_size;
info->physical_sector_size = physical_sector_size;
- blkif_set_queue_limits(info);
xlvbd_flush(info);
@@ -2006,18 +2004,19 @@ static int blkfront_probe(struct xenbus_device *dev,
static int blkif_recover(struct blkfront_info *info)
{
+ struct queue_limits lim;
unsigned int r_index;
struct request *req, *n;
int rc;
struct bio *bio;
- unsigned int segs;
struct blkfront_ring_info *rinfo;
+ lim = queue_limits_start_update(info->rq);
blkfront_gather_backend_features(info);
- /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
- blkif_set_queue_limits(info);
- segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
- blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
+ blkif_set_queue_limits(info, &lim);
+ rc = queue_limits_commit_update(info->rq, &lim);
+ if (rc)
+ return rc;
for_each_rinfo(info, rinfo, r_index) {
rc = blkfront_setup_indirect(rinfo);
@@ -2037,7 +2036,9 @@ static int blkif_recover(struct blkfront_info *info)
list_for_each_entry_safe(req, n, &info->requests, queuelist) {
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
- BUG_ON(req->nr_phys_segments > segs);
+ BUG_ON(req->nr_phys_segments >
+ (info->max_indirect_segments ? :
+ BLKIF_MAX_SEGMENTS_PER_REQUEST));
blk_mq_requeue_request(req, false);
}
blk_mq_start_stopped_hw_queues(info->rq, true);
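
blkif_recover() above also demonstrates the new runtime-update API: queue_limits_start_update() hands back a snapshot of the live limits, the driver edits the copy, and queue_limits_commit_update() applies it atomically or fails without side effects. A minimal sketch:

#include <linux/blkdev.h>

static int mydrv_update_segments(struct request_queue *q,
				 unsigned short max_segments)
{
	struct queue_limits lim;

	lim = queue_limits_start_update(q);	/* snapshot current limits */
	lim.max_segments = max_segments;
	return queue_limits_commit_update(q, &lim);
}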
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 11493167b0a8..7c5f4e4d9b50 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -318,7 +318,7 @@ static int z2ram_register_disk(int minor)
struct gendisk *disk;
int err;
- disk = blk_mq_alloc_disk(&tag_set, NULL);
+ disk = blk_mq_alloc_disk(&tag_set, NULL, NULL);
if (IS_ERR(disk))
return PTR_ERR(disk);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 6772e0c654fa..da7a20fa6152 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -426,11 +426,11 @@ static void reset_bdev(struct zram *zram)
if (!zram->backing_dev)
return;
- bdev_release(zram->bdev_handle);
+ fput(zram->bdev_file);
/* hope filp_close flush all of IO */
filp_close(zram->backing_dev, NULL);
zram->backing_dev = NULL;
- zram->bdev_handle = NULL;
+ zram->bdev_file = NULL;
zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
zram->bitmap = NULL;
@@ -476,7 +476,7 @@ static ssize_t backing_dev_store(struct device *dev,
struct address_space *mapping;
unsigned int bitmap_sz;
unsigned long nr_pages, *bitmap = NULL;
- struct bdev_handle *bdev_handle = NULL;
+ struct file *bdev_file = NULL;
int err;
struct zram *zram = dev_to_zram(dev);
@@ -513,11 +513,11 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- bdev_handle = bdev_open_by_dev(inode->i_rdev,
+ bdev_file = bdev_file_open_by_dev(inode->i_rdev,
BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL);
- if (IS_ERR(bdev_handle)) {
- err = PTR_ERR(bdev_handle);
- bdev_handle = NULL;
+ if (IS_ERR(bdev_file)) {
+ err = PTR_ERR(bdev_file);
+ bdev_file = NULL;
goto out;
}
@@ -531,7 +531,7 @@ static ssize_t backing_dev_store(struct device *dev,
reset_bdev(zram);
- zram->bdev_handle = bdev_handle;
+ zram->bdev_file = bdev_file;
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
@@ -544,8 +544,8 @@ static ssize_t backing_dev_store(struct device *dev,
out:
kvfree(bitmap);
- if (bdev_handle)
- bdev_release(bdev_handle);
+ if (bdev_file)
+ fput(bdev_file);
if (backing_dev)
filp_close(backing_dev, NULL);
@@ -587,7 +587,7 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
{
struct bio *bio;
- bio = bio_alloc(zram->bdev_handle->bdev, 1, parent->bi_opf, GFP_NOIO);
+ bio = bio_alloc(file_bdev(zram->bdev_file), 1, parent->bi_opf, GFP_NOIO);
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
__bio_add_page(bio, page, PAGE_SIZE, 0);
bio_chain(bio, parent);
@@ -703,7 +703,7 @@ static ssize_t writeback_store(struct device *dev,
continue;
}
- bio_init(&bio, zram->bdev_handle->bdev, &bio_vec, 1,
+ bio_init(&bio, file_bdev(zram->bdev_file), &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
__bio_add_page(&bio, page, PAGE_SIZE, 0);
@@ -785,7 +785,7 @@ static void zram_sync_read(struct work_struct *work)
struct bio_vec bv;
struct bio bio;
- bio_init(&bio, zw->zram->bdev_handle->bdev, &bv, 1, REQ_OP_READ);
+ bio_init(&bio, file_bdev(zw->zram->bdev_file), &bv, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
zw->error = submit_bio_wait(&bio);
@@ -2177,6 +2177,28 @@ ATTRIBUTE_GROUPS(zram_disk);
*/
static int zram_add(void)
{
+ struct queue_limits lim = {
+ .logical_block_size = ZRAM_LOGICAL_BLOCK_SIZE,
+ /*
+ * To ensure that we always get PAGE_SIZE-aligned and
+ * n*PAGE_SIZE-sized I/O requests.
+ */
+ .physical_block_size = PAGE_SIZE,
+ .io_min = PAGE_SIZE,
+ .io_opt = PAGE_SIZE,
+ .max_hw_discard_sectors = UINT_MAX,
+ /*
+ * zram_bio_discard() will clear all logical blocks if the logical
+ * block size is identical to the physical block size (PAGE_SIZE).
+ * But if they differ, discarding is skipped for the parts of the
+ * request range that are not aligned to the physical block size,
+ * so we can't ensure that all discarded logical blocks are zeroed.
+ */
+#if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
+ .max_write_zeroes_sectors = UINT_MAX,
+#endif
+ };
struct zram *zram;
int ret, device_id;
@@ -2195,11 +2217,11 @@ static int zram_add(void)
#endif
/* gendisk structure */
- zram->disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!zram->disk) {
+ zram->disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(zram->disk)) {
pr_err("Error allocating disk structure for device %d\n",
device_id);
- ret = -ENOMEM;
+ ret = PTR_ERR(zram->disk);
goto out_free_idr;
}
@@ -2216,29 +2238,6 @@ static int zram_add(void)
/* zram devices sort of resembles non-rotational disks */
blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
-
- /*
- * To ensure that we always get PAGE_SIZE aligned
- * and n*PAGE_SIZED sized I/O requests.
- */
- blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
- blk_queue_logical_block_size(zram->disk->queue,
- ZRAM_LOGICAL_BLOCK_SIZE);
- blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
- blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
- blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
-
- /*
- * zram_bio_discard() will clear all logical blocks if logical block
- * size is identical with physical block size(PAGE_SIZE). But if it is
- * different, we will skip discarding some parts of logical blocks in
- * the part of the request range which isn't aligned to physical block
- * size. So we can't ensure that all discarded logical blocks are
- * zeroed.
- */
- if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
- blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
-
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 3b94d12f41b4..37bf29f34d26 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -132,7 +132,7 @@ struct zram {
spinlock_t wb_limit_lock;
bool wb_limit_enable;
u64 bd_wb_limit;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
unsigned long *bitmap;
unsigned long nr_pages;
#endif
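
For BIO-based drivers like zram the analogous entry point is blk_alloc_disk(), which now also takes the limits up front and, as the error-path change above shows, reports failure via ERR_PTR() instead of NULL. A short sketch with representative limits:

#include <linux/blkdev.h>

static struct gendisk *mydrv_alloc(void)
{
	struct queue_limits lim = {
		.logical_block_size	= 512,		/* illustrative */
		.physical_block_size	= PAGE_SIZE,
		.io_min			= PAGE_SIZE,
	};
	struct gendisk *disk;

	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(disk))
		pr_err("disk allocation failed (%ld)\n", PTR_ERR(disk));
	return disk;	/* caller checks IS_ERR(), not NULL */
}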
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 0a5445ac5e1b..f9a7c790d7e2 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -11,6 +11,7 @@
#include <linux/firmware.h>
#include <linux/dmi.h>
#include <linux/of.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -543,8 +544,6 @@ static const char *btbcm_get_board_name(struct device *dev)
struct device_node *root;
char *board_type;
const char *tmp;
- int len;
- int i;
root = of_find_node_by_path("/");
if (!root)
@@ -554,13 +553,8 @@ static const char *btbcm_get_board_name(struct device *dev)
return NULL;
/* get rid of any '/' in the compatible string */
- len = strlen(tmp) + 1;
- board_type = devm_kzalloc(dev, len, GFP_KERNEL);
- strscpy(board_type, tmp, len);
- for (i = 0; i < len; i++) {
- if (board_type[i] == '/')
- board_type[i] = '-';
- }
+ board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ strreplace(board_type, '/', '-');
of_node_put(root);
return board_type;
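
The btbcm cleanup above replaces an open-coded allocate/copy/scan loop with two stock helpers. A sketch of the same idiom, with an explicit NULL check added since devm_kstrdup() can fail (a robust caller would check before calling strreplace()):

#include <linux/device.h>
#include <linux/string.h>

static const char *dup_board_name(struct device *dev, const char *compat)
{
	char *name;

	name = devm_kstrdup(dev, compat, GFP_KERNEL);
	if (!name)			/* allocation can fail */
		return NULL;

	/* strreplace() edits in place; '/' is not valid in firmware paths */
	strreplace(name, '/', '-');
	return name;
}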
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index cdc5c08824a0..6ba7f5d1b837 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -441,7 +441,7 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
return PTR_ERR(skb);
}
- if (skb->len != sizeof(*ver)) {
+ if (!skb || skb->len != sizeof(*ver)) {
bt_dev_err(hdev, "Intel version event size mismatch");
kfree_skb(skb);
return -EILSEQ;
@@ -2670,6 +2670,119 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
}
}
+static void btintel_print_fseq_info(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ u8 *p;
+ u32 val;
+ const char *str;
+
+ skb = __hci_cmd_sync(hdev, 0xfcb3, 0, NULL, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_dbg(hdev, "Reading fseq status command failed (%ld)",
+ PTR_ERR(skb));
+ return;
+ }
+
+ if (skb->len < (sizeof(u32) * 16 + 2)) {
+ bt_dev_dbg(hdev, "Malformed packet of length %u received",
+ skb->len);
+ kfree_skb(skb);
+ return;
+ }
+
+ p = skb_pull_data(skb, 1);
+ if (*p) {
+ bt_dev_dbg(hdev, "Failed to get fseq status (0x%2.2x)", *p);
+ kfree_skb(skb);
+ return;
+ }
+
+ p = skb_pull_data(skb, 1);
+ switch (*p) {
+ case 0:
+ str = "Success";
+ break;
+ case 1:
+ str = "Fatal error";
+ break;
+ case 2:
+ str = "Semaphore acquire error";
+ break;
+ default:
+ str = "Unknown error";
+ break;
+ }
+
+ if (*p) {
+ bt_dev_err(hdev, "Fseq status: %s (0x%2.2x)", str, *p);
+ kfree_skb(skb);
+ return;
+ }
+
+ bt_dev_info(hdev, "Fseq status: %s (0x%2.2x)", str, *p);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Reason: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Global version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Installed version: 0x%8.8x", val);
+
+ p = skb->data;
+ skb_pull_data(skb, 4);
+ bt_dev_info(hdev, "Fseq executed: %2.2u.%2.2u.%2.2u.%2.2u", p[0], p[1],
+ p[2], p[3]);
+
+ p = skb->data;
+ skb_pull_data(skb, 4);
+ bt_dev_info(hdev, "Fseq BT Top: %2.2u.%2.2u.%2.2u.%2.2u", p[0], p[1],
+ p[2], p[3]);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Top init version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Cnvio init version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq MBX Wifi file version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq BT version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Top reset address: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq MBX timeout: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq MBX ack: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq CNVi id: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq CNVr id: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Error handle: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq Magic noalive indication: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq OTP version: 0x%8.8x", val);
+
+ val = get_unaligned_le32(skb_pull_data(skb, 4));
+ bt_dev_dbg(hdev, "Fseq MBX otp version: 0x%8.8x", val);
+
+ kfree_skb(skb);
+}
+
static int btintel_setup_combined(struct hci_dev *hdev)
{
const u8 param[1] = { 0xFF };
@@ -2902,6 +3015,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
btintel_register_devcoredump_support(hdev);
+ btintel_print_fseq_info(hdev);
break;
default:
bt_dev_err(hdev, "Unsupported Intel hw variant (%u)",
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index aaabb732082c..ac8ebccd3507 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -372,8 +372,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
struct btmediatek_data *data = hci_get_priv(hdev);
int err;
- if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
+ kfree_skb(skb);
return 0;
+ }
switch (data->cd_info.state) {
case HCI_DEVCOREDUMP_IDLE:
@@ -420,5 +422,6 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7622);
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);
+MODULE_FIRMWARE(FIRMWARE_MT7922);
MODULE_FIRMWARE(FIRMWARE_MT7961);
MODULE_FIRMWARE(FIRMWARE_MT7925);
diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
index 56f5502baadf..cbcdb99a22e6 100644
--- a/drivers/bluetooth/btmtk.h
+++ b/drivers/bluetooth/btmtk.h
@@ -4,6 +4,7 @@
#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
+#define FIRMWARE_MT7922 "mediatek/BT_RAM_CODE_MT7922_1_1_hdr.bin"
#define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
#define FIRMWARE_MT7925 "mediatek/mt7925/BT_RAM_CODE_MT7925_1_1_hdr.bin"
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 1d592ac413d1..0b93c2ff29e4 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -126,6 +126,7 @@ struct ps_data {
struct hci_dev *hdev;
struct work_struct work;
struct timer_list ps_timer;
+ struct mutex ps_lock;
};
struct wakeup_cmd_payload {
@@ -317,6 +318,9 @@ static void ps_start_timer(struct btnxpuart_dev *nxpdev)
if (psdata->cur_psmode == PS_MODE_ENABLE)
mod_timer(&psdata->ps_timer, jiffies + msecs_to_jiffies(psdata->h2c_ps_interval));
+
+ if (psdata->ps_state == PS_STATE_AWAKE && psdata->ps_cmd == PS_CMD_ENTER_PS)
+ cancel_work_sync(&psdata->work);
}
static void ps_cancel_timer(struct btnxpuart_dev *nxpdev)
@@ -337,6 +341,7 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
!test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state))
return;
+ mutex_lock(&psdata->ps_lock);
switch (psdata->cur_h2c_wakeupmode) {
case WAKEUP_METHOD_DTR:
if (ps_state == PS_STATE_AWAKE)
@@ -350,12 +355,15 @@ static void ps_control(struct hci_dev *hdev, u8 ps_state)
status = serdev_device_break_ctl(nxpdev->serdev, 0);
else
status = serdev_device_break_ctl(nxpdev->serdev, -1);
+ msleep(20); /* Allow chip to detect UART-break and enter sleep */
bt_dev_dbg(hdev, "Set UART break: %s, status=%d",
str_on_off(ps_state == PS_STATE_SLEEP), status);
break;
}
if (!status)
psdata->ps_state = ps_state;
+ mutex_unlock(&psdata->ps_lock);
+
if (ps_state == PS_STATE_AWAKE)
btnxpuart_tx_wakeup(nxpdev);
}
@@ -391,17 +399,25 @@ static void ps_setup(struct hci_dev *hdev)
psdata->hdev = hdev;
INIT_WORK(&psdata->work, ps_work_func);
+ mutex_init(&psdata->ps_lock);
timer_setup(&psdata->ps_timer, ps_timeout_func, 0);
}
-static void ps_wakeup(struct btnxpuart_dev *nxpdev)
+static bool ps_wakeup(struct btnxpuart_dev *nxpdev)
{
struct ps_data *psdata = &nxpdev->psdata;
+ u8 ps_state;
+
+ mutex_lock(&psdata->ps_lock);
+ ps_state = psdata->ps_state;
+ mutex_unlock(&psdata->ps_lock);
- if (psdata->ps_state != PS_STATE_AWAKE) {
+ if (ps_state != PS_STATE_AWAKE) {
psdata->ps_cmd = PS_CMD_EXIT_PS;
schedule_work(&psdata->work);
+ return true;
}
+ return false;
}
static int send_ps_cmd(struct hci_dev *hdev, void *data)
@@ -1171,7 +1187,6 @@ static struct sk_buff *nxp_dequeue(void *data)
{
struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)data;
- ps_wakeup(nxpdev);
ps_start_timer(nxpdev);
return skb_dequeue(&nxpdev->txq);
}
@@ -1186,6 +1201,9 @@ static void btnxpuart_tx_work(struct work_struct *work)
struct sk_buff *skb;
int len;
+ if (ps_wakeup(nxpdev))
+ return;
+
while ((skb = nxp_dequeue(nxpdev))) {
len = serdev_device_write_buf(serdev, skb->data, skb->len);
hdev->stat.byte_tx += len;
@@ -1234,6 +1252,9 @@ static int btnxpuart_close(struct hci_dev *hdev)
ps_wakeup(nxpdev);
serdev_device_close(nxpdev->serdev);
+ skb_queue_purge(&nxpdev->txq);
+ kfree_skb(nxpdev->rx_skb);
+ nxpdev->rx_skb = NULL;
clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
return 0;
}
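
The btnxpuart changes above serialize the power-save state behind the new ps_lock and make ps_wakeup() report whether a wakeup had to be queued, so the TX worker can bail out and retry once the chip is awake. A structural sketch of that handoff (the types are invented):

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct ps_ctx {
	struct mutex lock;		/* guards 'awake' */
	struct work_struct work;	/* sends the wakeup sequence */
	bool awake;
};

static bool ps_wakeup_if_needed(struct ps_ctx *ps)
{
	bool awake;

	mutex_lock(&ps->lock);
	awake = ps->awake;
	mutex_unlock(&ps->lock);

	if (!awake) {
		schedule_work(&ps->work);	/* wake the chip first */
		return true;			/* caller retries later */
	}
	return false;
}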
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index fdb0fae88d1c..b40b32fa7f1c 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -152,7 +152,7 @@ static int qca_send_patch_config_cmd(struct hci_dev *hdev)
bt_dev_dbg(hdev, "QCA Patch config");
skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd),
- cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
+ cmd, 0, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err);
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 277d039ecbb4..cc50de69e8dc 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -69,6 +69,7 @@ enum btrtl_chip_id {
CHIP_ID_8852B = 20,
CHIP_ID_8852C = 25,
CHIP_ID_8851B = 36,
+ CHIP_ID_8852BT = 47,
};
struct id_table {
@@ -307,6 +308,15 @@ static const struct id_table ic_id_table[] = {
.fw_name = "rtl_bt/rtl8851bu_fw",
.cfg_name = "rtl_bt/rtl8851bu_config",
.hw_info = "rtl8851bu" },
+
+ /* 8852BT/8852BE-VT */
+ { IC_INFO(RTL_ROM_LMP_8852A, 0x87, 0xc, HCI_USB),
+ .config_needed = false,
+ .has_rom_version = true,
+ .has_msft_ext = true,
+ .fw_name = "rtl_bt/rtl8852btu_fw",
+ .cfg_name = "rtl_bt/rtl8852btu_config",
+ .hw_info = "rtl8852btu" },
};
static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
@@ -645,6 +655,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8852A, 20 }, /* 8852B */
{ RTL_ROM_LMP_8852A, 25 }, /* 8852C */
{ RTL_ROM_LMP_8851B, 36 }, /* 8851B */
+ { RTL_ROM_LMP_8852A, 47 }, /* 8852BT */
};
if (btrtl_dev->fw_len <= 8)
@@ -1275,6 +1286,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8852B:
case CHIP_ID_8852C:
case CHIP_ID_8851B:
+ case CHIP_ID_8852BT:
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
@@ -1505,6 +1517,8 @@ MODULE_FIRMWARE("rtl_bt/rtl8852bs_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bs_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852btu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8852btu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw_v2.bin");
MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index d31edad7a056..06e915b57283 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -553,6 +553,9 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x13d3, 0x3572), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Realtek 8852BT/8852BE-VT Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_REALTEK },
@@ -655,6 +658,11 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ /* Additional MediaTek MT7925 Bluetooth devices */
+ { USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+
/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
@@ -3080,7 +3088,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
int err, status;
u32 dev_id = 0;
char fw_bin_name[64];
- u32 fw_version = 0;
+ u32 fw_version = 0, fw_flavor = 0;
u8 param;
struct btmediatek_data *mediatek;
@@ -3103,6 +3111,11 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
bt_dev_err(hdev, "Failed to get fw version (%d)", err);
return err;
}
+ err = btusb_mtk_id_get(data, 0x70010020, &fw_flavor);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to get fw flavor (%d)", err);
+ return err;
+ }
}
mediatek = hci_get_priv(hdev);
@@ -3127,6 +3140,10 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
dev_id & 0xffff, dev_id & 0xffff, (fw_version & 0xff) + 1);
+ else if (dev_id == 0x7961 && fw_flavor)
+ snprintf(fw_bin_name, sizeof(fw_bin_name),
+ "mediatek/BT_RAM_CODE_MT%04x_1a_%x_hdr.bin",
+ dev_id & 0xffff, (fw_version & 0xff) + 1);
else
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
@@ -3273,7 +3290,6 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btusb_data *data = hci_get_drvdata(hdev);
u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
- struct sk_buff *skb_cd;
switch (handle) {
case 0xfc6f: /* Firmware dump from device */
@@ -3286,9 +3302,12 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
* for backward compatibility, so we have to clone the packet
* additionally for the in-kernel coredump support.
*/
- skb_cd = skb_clone(skb, GFP_ATOMIC);
- if (skb_cd)
- btmtk_process_coredump(hdev, skb_cd);
+ if (IS_ENABLED(CONFIG_DEV_COREDUMP)) {
+ struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC);
+
+ if (skb_cd)
+ btmtk_process_coredump(hdev, skb_cd);
+ }
fallthrough;
case 0x05ff: /* Firmware debug logging 1 */
@@ -4481,6 +4500,7 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
}
if (!reset)
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index a61757835695..9a7243d5db71 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -1417,7 +1417,7 @@ static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
- set_bit(HCI_QUIRK_INVALID_BDADDR, &bcm4377->hdev->quirks);
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
kfree_skb(skb);
return 0;
@@ -2368,7 +2368,6 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hdev->set_bdaddr = bcm4377_hci_set_bdaddr;
hdev->setup = bcm4377_hci_setup;
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
if (bcm4377->hw->broken_mws_transport_config)
set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
if (bcm4377->hw->broken_ext_scan)
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 71e748a9477e..c0436881a533 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -113,6 +113,7 @@ struct h5_vnd {
int (*suspend)(struct h5 *h5);
int (*resume)(struct h5 *h5);
const struct acpi_gpio_mapping *acpi_gpio_map;
+ int sizeof_priv;
};
struct h5_device_data {
@@ -863,7 +864,8 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (IS_ERR(h5->device_wake_gpio))
return PTR_ERR(h5->device_wake_gpio);
- return hci_uart_register_device(&h5->serdev_hu, &h5p);
+ return hci_uart_register_device_priv(&h5->serdev_hu, &h5p,
+ h5->vnd->sizeof_priv);
}
static void h5_serdev_remove(struct serdev_device *serdev)
@@ -1070,6 +1072,7 @@ static struct h5_vnd rtl_vnd = {
.suspend = h5_btrtl_suspend,
.resume = h5_btrtl_resume,
.acpi_gpio_map = acpi_btrtl_gpios,
+ .sizeof_priv = sizeof(struct btrealtek_data),
};
static const struct h5_device_data h5_data_rtl8822cs = {
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 94b8c406f0c0..8a60ad7acd70 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -7,6 +7,7 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Acknowledgements:
* This file is based on hci_ll.c, which was...
@@ -1806,13 +1807,12 @@ static int qca_power_on(struct hci_dev *hdev)
static void hci_coredump_qca(struct hci_dev *hdev)
{
+ int err;
static const u8 param[] = { 0x26 };
- struct sk_buff *skb;
- skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
- bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb));
- kfree_skb(skb);
+ err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+ if (err < 0)
+ bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
}
static int qca_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
@@ -1904,7 +1904,17 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
+ /* Set BDA quirk bit for reading BDA value from fwnode property
+ * only if that property exists in the DT.
+ */
+ if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
+ } else {
+ bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA");
+ }
+
hci_set_aosp_capable(hdev);
ret = qca_read_soc_version(hdev, &ver, soc_type);
@@ -2316,7 +2326,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR_OR_NULL(qcadev->bt_en) &&
+ if (IS_ERR(qcadev->bt_en) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
@@ -2325,7 +2335,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
- if (IS_ERR_OR_NULL(qcadev->sw_ctrl) &&
+ if (IS_ERR(qcadev->sw_ctrl) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855 ||
data->soc_type == QCA_WCN7850))
@@ -2347,7 +2357,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
default:
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR_OR_NULL(qcadev->bt_en)) {
+ if (IS_ERR(qcadev->bt_en)) {
dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
power_ctrl_enabled = false;
}
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 39c8b567da3c..214fff876eae 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -300,8 +300,9 @@ static const struct serdev_device_ops hci_serdev_client_ops = {
.write_wakeup = hci_uart_write_wakeup,
};
-int hci_uart_register_device(struct hci_uart *hu,
- const struct hci_uart_proto *p)
+int hci_uart_register_device_priv(struct hci_uart *hu,
+ const struct hci_uart_proto *p,
+ int sizeof_priv)
{
int err;
struct hci_dev *hdev;
@@ -325,7 +326,7 @@ int hci_uart_register_device(struct hci_uart *hu,
set_bit(HCI_UART_PROTO_READY, &hu->flags);
/* Initialize and register HCI device */
- hdev = hci_alloc_dev();
+ hdev = hci_alloc_dev_priv(sizeof_priv);
if (!hdev) {
BT_ERR("Can't allocate HCI device");
err = -ENOMEM;
@@ -394,7 +395,7 @@ err_rwsem:
percpu_free_rwsem(&hu->proto_lock);
return err;
}
-EXPORT_SYMBOL_GPL(hci_uart_register_device);
+EXPORT_SYMBOL_GPL(hci_uart_register_device_priv);
void hci_uart_unregister_device(struct hci_uart *hu)
{
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index fb4a2d0d8cc8..68c8c7e95d64 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -97,7 +97,17 @@ struct hci_uart {
int hci_uart_register_proto(const struct hci_uart_proto *p);
int hci_uart_unregister_proto(const struct hci_uart_proto *p);
-int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p);
+
+int hci_uart_register_device_priv(struct hci_uart *hu,
+ const struct hci_uart_proto *p,
+ int sizeof_priv);
+
+static inline int hci_uart_register_device(struct hci_uart *hu,
+ const struct hci_uart_proto *p)
+{
+ return hci_uart_register_device_priv(hu, p, 0);
+}
+
void hci_uart_unregister_device(struct hci_uart *hu);
int hci_uart_tx_wakeup(struct hci_uart *hu);
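
A line-discipline driver that needs per-hdev private storage can now pass its size through the new helper, while existing hci_uart_register_device() callers are unaffected because the inline wrapper passes 0. A hedged sketch of a caller (the struct name is hypothetical; the private area is retrieved with hci_get_priv()):

	struct my_proto_data {
		unsigned long flags;
	};

	err = hci_uart_register_device_priv(hu, &my_proto,
					    sizeof(struct my_proto_data));
	if (err)
		return err;

	/* later, from any hdev callback: */
	struct my_proto_data *data = hci_get_priv(hu->hdev);
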
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index e6742998f372..d5e7fa9173a1 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -186,11 +186,12 @@ config SUNXI_RSB
config TEGRA_ACONNECT
tristate "Tegra ACONNECT Bus Driver"
- depends on ARCH_TEGRA_210_SOC
+ depends on ARCH_TEGRA
depends on OF && PM
help
Driver for the Tegra ACONNECT bus which is used to interface with
- the devices inside the Audio Processing Engine (APE) for Tegra210.
+ the devices inside the Audio Processing Engine (APE) for
+ Tegra210 and later.
config TEGRA_GMI
tristate "Tegra Generic Memory Interface bus driver"
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 6b5da73c8541..837bf9d51c6e 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -120,7 +120,7 @@ static int imx_weim_gpr_setup(struct platform_device *pdev)
i++;
}
- if (i == 0 || i % 4)
+ if (i == 0)
goto err;
for (i = 0; i < ARRAY_SIZE(gprvals); i++) {
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index fd3e9d82340a..1e29ba76615d 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -128,7 +128,7 @@ struct sunxi_rsb {
};
/* bus / slave device related functions */
-static struct bus_type sunxi_rsb_bus;
+static const struct bus_type sunxi_rsb_bus;
static int sunxi_rsb_device_match(struct device *dev, struct device_driver *drv)
{
@@ -177,7 +177,7 @@ static int sunxi_rsb_device_modalias(const struct device *dev, struct kobj_ueven
return of_device_uevent_modalias(dev, env);
}
-static struct bus_type sunxi_rsb_bus = {
+static const struct bus_type sunxi_rsb_bus = {
.name = RSB_CTRL_NAME,
.match = sunxi_rsb_device_match,
.probe = sunxi_rsb_device_probe,
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 245e5e827d0d..41d33f39efe5 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -2400,7 +2400,7 @@ static int sysc_child_add_clocks(struct sysc *ddata,
return 0;
}
-static struct device_type sysc_device_type = {
+static const struct device_type sysc_device_type = {
};
static struct sysc *sysc_child_to_parent(struct device *dev)
diff --git a/drivers/cache/ax45mp_cache.c b/drivers/cache/ax45mp_cache.c
index 57186c58dc84..1d7dd3d2c101 100644
--- a/drivers/cache/ax45mp_cache.c
+++ b/drivers/cache/ax45mp_cache.c
@@ -129,8 +129,12 @@ static void ax45mp_dma_cache_wback(phys_addr_t paddr, size_t size)
unsigned long line_size;
unsigned long flags;
+ if (unlikely(start == end))
+ return;
+
line_size = ax45mp_priv.ax45mp_cache_line_size;
start = start & (~(line_size - 1));
+ end = ((end + line_size - 1) & (~(line_size - 1)));
local_irq_save(flags);
ax45mp_cpu_dcache_wb_range(start, end);
local_irq_restore(flags);
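
The rounding added above is the standard power-of-two alignment trick; a worked example with a 64-byte line (values hypothetical):

	/*
	 * start = 0x1005 & ~(64 - 1)        = 0x1000  (rounded down)
	 * end   = (0x1043 + 63) & ~(64 - 1) = 0x1080  (rounded up)
	 *
	 * Every cache line touched by [0x1005, 0x1043) is now written
	 * back, and the new zero-length early return avoids a spurious
	 * rounded-up writeback when the caller passes an empty range.
	 */
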
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index d668b174ace9..eefdd422ad8e 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -724,11 +724,6 @@ static void probe_gdrom_setupdisk(void)
static int probe_gdrom_setupqueue(void)
{
- blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
- /* using DMA so memory will need to be contiguous */
- blk_queue_max_segments(gd.gdrom_rq, 1);
- /* set a large max size to get most from DMA */
- blk_queue_max_segment_size(gd.gdrom_rq, 0x40000);
gd.disk->queue = gd.gdrom_rq;
return gdrom_init_dma_mode();
}
@@ -743,6 +738,13 @@ static const struct blk_mq_ops gdrom_mq_ops = {
*/
static int probe_gdrom(struct platform_device *devptr)
{
+ struct queue_limits lim = {
+ .logical_block_size = GDROM_HARD_SECTOR,
+ /* using DMA so memory will need to be contiguous */
+ .max_segments = 1,
+ /* set a large max size to get most from DMA */
+ .max_segment_size = 0x40000,
+ };
int err;
/*
@@ -778,7 +780,7 @@ static int probe_gdrom(struct platform_device *devptr)
if (err)
goto probe_fail_free_cd_info;
- gd.disk = blk_mq_alloc_disk(&gd.tag_set, NULL);
+ gd.disk = blk_mq_alloc_disk(&gd.tag_set, &lim, NULL);
if (IS_ERR(gd.disk)) {
err = PTR_ERR(gd.disk);
goto probe_fail_free_tag_set;
@@ -829,7 +831,7 @@ probe_fail_no_mem:
return err;
}
-static int remove_gdrom(struct platform_device *devptr)
+static void remove_gdrom(struct platform_device *devptr)
{
blk_mq_free_tag_set(&gd.tag_set);
free_irq(HW_EVENT_GDROM_CMD, &gd);
@@ -840,13 +842,11 @@ static int remove_gdrom(struct platform_device *devptr)
unregister_cdrom(gd.cd_info);
kfree(gd.cd_info);
kfree(gd.toc);
-
- return 0;
}
static struct platform_driver gdrom_driver = {
.probe = probe_gdrom,
- .remove = remove_gdrom,
+ .remove_new = remove_gdrom,
.driver = {
.name = GDROM_DEV_NAME,
},
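
The gdrom conversion follows the queue_limits pattern used throughout this series: limits that used to be applied piecemeal with blk_queue_*() setters after allocation are now declared up front and handed to blk_mq_alloc_disk(), which applies them when the queue is created. A minimal sketch of the pattern (values illustrative):

	struct queue_limits lim = {
		.logical_block_size = 2048,
		.max_segments       = 1,	/* contiguous DMA buffer */
		.max_segment_size   = 0x40000,
	};
	struct gendisk *disk;

	disk = blk_mq_alloc_disk(&tag_set, &lim, NULL);
	if (IS_ERR(disk))
		return PTR_ERR(disk);
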
diff --git a/drivers/clk/rockchip/clk-rk3588.c b/drivers/clk/rockchip/clk-rk3588.c
index 6994165e0395..0b60ae78f9d8 100644
--- a/drivers/clk/rockchip/clk-rk3588.c
+++ b/drivers/clk/rockchip/clk-rk3588.c
@@ -2458,15 +2458,18 @@ static struct rockchip_clk_branch rk3588_clk_branches[] __initdata = {
static void __init rk3588_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3588_clk_branches,
+ ARRAY_SIZE(rk3588_clk_branches)) + 1;
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 4059d9365ae6..73d2cbdc716b 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -429,6 +429,23 @@ void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_plls);
+unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
+ unsigned int nr_clk)
+{
+ unsigned long max = 0;
+ unsigned int idx;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ if (list->id > max)
+ max = list->id;
+ if (list->child && list->child->id > max)
+			max = list->child->id;
+ }
+
+ return max;
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_find_max_clk_id);
+
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 758ebaf2236b..fd3b476dedda 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -973,6 +973,8 @@ struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
void __iomem *base, unsigned long nr_clks);
void rockchip_clk_of_add_provider(struct device_node *np,
struct rockchip_clk_provider *ctx);
+unsigned long rockchip_clk_find_max_clk_id(struct rockchip_clk_branch *list,
+ unsigned int nr_clk);
void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk);
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index 0964bb11657f..782993951fff 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -2475,7 +2475,7 @@ static const struct samsung_cmu_info misc_cmu_info __initconst = {
.nr_clk_ids = CLKS_NR_MISC,
.clk_regs = misc_clk_regs,
.nr_clk_regs = ARRAY_SIZE(misc_clk_regs),
- .clk_name = "dout_cmu_misc_bus",
+ .clk_name = "bus",
};
/* ---- platform_driver ----------------------------------------------------- */
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index e054de92de91..8d4a52056684 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1807,7 +1807,7 @@ TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif
int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts,
- struct clocksource **cs)
+ enum clocksource_ids *cs_id)
{
struct arm_smccc_res hvc_res;
u32 ptp_counter;
@@ -1831,8 +1831,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts,
*ts = ktime_to_timespec64(ktime);
if (cycle)
*cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3;
- if (cs)
- *cs = &clocksource_counter;
+ if (cs_id)
+ *cs_id = CSID_ARM_ARCH_COUNTER;
return 0;
}
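
Returning a stable enum clocksource_ids instead of a struct clocksource pointer lets callers identify the counter without the clocksource object being exported. A hypothetical consumer-side sketch:

	u64 cycle;
	struct timespec64 ts;
	enum clocksource_ids cs_id;

	if (!kvm_arch_ptp_get_crosststamp(&cycle, &ts, &cs_id) &&
	    cs_id == CSID_ARM_ARCH_COUNTER) {
		/* cross-timestamp was taken against the arch counter */
	}
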
diff --git a/drivers/comedi/drivers/comedi_8255.c b/drivers/comedi/drivers/comedi_8255.c
index e4974b508328..a933ef53845a 100644
--- a/drivers/comedi/drivers/comedi_8255.c
+++ b/drivers/comedi/drivers/comedi_8255.c
@@ -159,6 +159,7 @@ static int __subdev_8255_init(struct comedi_device *dev,
return -ENOMEM;
spriv->context = context;
+ spriv->io = io;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
index 30ea8b53ebf8..05ae9122823f 100644
--- a/drivers/comedi/drivers/comedi_test.c
+++ b/drivers/comedi/drivers/comedi_test.c
@@ -87,6 +87,8 @@ struct waveform_private {
struct comedi_device *dev; /* parent comedi device */
u64 ao_last_scan_time; /* time of previous AO scan in usec */
unsigned int ao_scan_period; /* AO scan period in usec */
+ bool ai_timer_enable:1; /* should AI timer be running? */
+ bool ao_timer_enable:1; /* should AO timer be running? */
unsigned short ao_loopbacks[N_CHANS];
};
@@ -236,8 +238,12 @@ static void waveform_ai_timer(struct timer_list *t)
time_increment = devpriv->ai_convert_time - now;
else
time_increment = 1;
- mod_timer(&devpriv->ai_timer,
- jiffies + usecs_to_jiffies(time_increment));
+ spin_lock(&dev->spinlock);
+ if (devpriv->ai_timer_enable) {
+ mod_timer(&devpriv->ai_timer,
+ jiffies + usecs_to_jiffies(time_increment));
+ }
+ spin_unlock(&dev->spinlock);
}
overrun:
@@ -393,9 +399,12 @@ static int waveform_ai_cmd(struct comedi_device *dev,
* Seem to need an extra jiffy here, otherwise timer expires slightly
* early!
*/
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ai_timer_enable = true;
devpriv->ai_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
add_timer(&devpriv->ai_timer);
+ spin_unlock_bh(&dev->spinlock);
return 0;
}
@@ -404,6 +413,9 @@ static int waveform_ai_cancel(struct comedi_device *dev,
{
struct waveform_private *devpriv = dev->private;
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ai_timer_enable = false;
+ spin_unlock_bh(&dev->spinlock);
if (in_softirq()) {
/* Assume we were called from the timer routine itself. */
del_timer(&devpriv->ai_timer);
@@ -495,8 +507,12 @@ static void waveform_ao_timer(struct timer_list *t)
unsigned int time_inc = devpriv->ao_last_scan_time +
devpriv->ao_scan_period - now;
- mod_timer(&devpriv->ao_timer,
- jiffies + usecs_to_jiffies(time_inc));
+ spin_lock(&dev->spinlock);
+ if (devpriv->ao_timer_enable) {
+ mod_timer(&devpriv->ao_timer,
+ jiffies + usecs_to_jiffies(time_inc));
+ }
+ spin_unlock(&dev->spinlock);
}
underrun:
@@ -517,9 +533,12 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
async->inttrig = NULL;
devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ao_timer_enable = true;
devpriv->ao_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
add_timer(&devpriv->ao_timer);
+ spin_unlock_bh(&dev->spinlock);
return 1;
}
@@ -604,6 +623,9 @@ static int waveform_ao_cancel(struct comedi_device *dev,
struct waveform_private *devpriv = dev->private;
s->async->inttrig = NULL;
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ao_timer_enable = false;
+ spin_unlock_bh(&dev->spinlock);
if (in_softirq()) {
/* Assume we were called from the timer routine itself. */
del_timer(&devpriv->ao_timer);
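
The comedi_test changes close a rearm-after-cancel race: the timer handler may already be running when *_cancel() calls del_timer(), and without a flag it could mod_timer() itself back to life afterwards. The pattern, reduced to its essentials (a sketch, not the driver code):

	/* cancel path */
	spin_lock_bh(&dev->spinlock);
	devpriv->timer_enable = false;
	spin_unlock_bh(&dev->spinlock);
	del_timer(&devpriv->timer);

	/* timer handler */
	spin_lock(&dev->spinlock);
	if (devpriv->timer_enable)
		mod_timer(&devpriv->timer, jiffies + delay);
	spin_unlock(&dev->spinlock);
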
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 3d5e6d705fc6..44b19e696176 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -108,9 +108,8 @@ static inline void send_msg(struct cn_msg *msg)
filter_data[1] = 0;
}
- if (cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
- cn_filter, (void *)filter_data) == -ESRCH)
- atomic_set(&proc_event_num_listeners, 0);
+ cn_netlink_send_mult(msg, msg->len, 0, CN_IDX_PROC, GFP_NOWAIT,
+ cn_filter, (void *)filter_data);
local_unlock(&local_event.lock);
}
diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c
index 09c77afb33ca..3f24481fc04a 100644
--- a/drivers/counter/counter-core.c
+++ b/drivers/counter/counter-core.c
@@ -31,10 +31,11 @@ struct counter_device_allochelper {
struct counter_device counter;
/*
- * This is cache line aligned to ensure private data behaves like if it
- * were kmalloced separately.
+	 * This ensures private data behaves as if it were kmalloced
+	 * separately. It also ensures the minimum alignment for safe DMA
+ * operations (which may or may not mean cache alignment).
*/
- unsigned long privdata[] ____cacheline_aligned;
+ unsigned long privdata[] __aligned(ARCH_DMA_MINALIGN);
};
static void counter_device_release(struct device *dev)
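
The __aligned(ARCH_DMA_MINALIGN) flexible array keeps the single-allocation layout while guaranteeing the private area is safe to hand to DMA on every architecture, which ____cacheline_aligned only happened to provide where the cache line size met the DMA minimum. A sketch of how the private pointer falls out of the wrapper (the element count is illustrative):

	struct counter_device_allochelper *ach =
		kzalloc(struct_size(ach, privdata, num_longs), GFP_KERNEL);

	void *priv = &ach->privdata[0];	/* ARCH_DMA_MINALIGN aligned */
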
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ca94e60e705a..79619227ea51 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2987,6 +2987,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
if (min_pstate < cpu->min_perf_ratio)
min_pstate = cpu->min_perf_ratio;
+ if (min_pstate > cpu->max_perf_ratio)
+ min_pstate = cpu->max_perf_ratio;
+
max_pstate = min(cap_pstate, cpu->max_perf_ratio);
if (max_pstate < min_pstate)
max_pstate = min_pstate;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 1262a7773ef3..de50c00ba218 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -299,22 +299,6 @@ theend:
return err;
}
-static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
-{
- struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
- struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
- struct sun8i_ce_dev *ce = op->ce;
- struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
- int flow, err;
-
- flow = rctx->flow;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
- local_bh_disable();
- crypto_finalize_skcipher_request(engine, breq, err);
- local_bh_enable();
-}
-
static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
void *async_req)
{
@@ -360,6 +344,23 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}
+static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+ int flow, err;
+
+ flow = rctx->flow;
+ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
+ sun8i_ce_cipher_unprepare(engine, areq);
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
+}
+
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
int err = sun8i_ce_cipher_prepare(engine, areq);
@@ -368,7 +369,6 @@ int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
return err;
sun8i_ce_cipher_run(engine, areq);
- sun8i_ce_cipher_unprepare(engine, areq);
return 0;
}
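
The reordering matters because crypto_finalize_skcipher_request() signals completion to the caller, who may immediately free or reuse the request; any DMA unmapping therefore has to happen before finalize, never after. In outline:

	err = run_hw_task(...);
	unprepare(engine, areq);	/* unmap DMA, release resources */
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);	/* req may vanish here */
	local_bh_enable();

The rk3288_crypto_ahash change further down applies the same ordering fix.
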
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 32268e239bf1..f394e45e11ab 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -38,7 +38,7 @@ config CRYPTO_DEV_CCP_CRYPTO
config CRYPTO_DEV_SP_PSP
bool "Platform Security Processor (PSP) device"
default y
- depends on CRYPTO_DEV_CCP_DD && X86_64
+ depends on CRYPTO_DEV_CCP_DD && X86_64 && AMD_IOMMU
help
Provide support for the AMD Platform Security Processor (PSP).
The PSP is a dedicated processor that provides support for key
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index e4d3f45242f6..f44efbb89c34 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -21,14 +21,18 @@
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
+#include <linux/panic_notifier.h>
#include <linux/gfp.h>
#include <linux/cpufeature.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/psp.h>
+#include <linux/amd-iommu.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
+#include <asm/e820/types.h>
+#include <asm/sev.h>
#include "psp-dev.h"
#include "sev-dev.h"
@@ -37,6 +41,19 @@
#define SEV_FW_FILE "amd/sev.fw"
#define SEV_FW_NAME_SIZE 64
+/* Minimum firmware version required for the SEV-SNP support */
+#define SNP_MIN_API_MAJOR 1
+#define SNP_MIN_API_MINOR 51
+
+/*
+ * Maximum number of firmware-writable buffers that might be specified
+ * in the parameters of a legacy SEV command buffer.
+ */
+#define CMD_BUF_FW_WRITABLE_MAX 2
+
+/* Leave room in the descriptor array for an end-of-list indicator. */
+#define CMD_BUF_DESC_MAX (CMD_BUF_FW_WRITABLE_MAX + 1)
+
static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;
@@ -68,9 +85,14 @@ static int psp_timeout;
* The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
* to allocate the memory, which will return aligned memory for the specified
* allocation order.
+ *
+ * When SEV-SNP is enabled the TMR needs to be 2MB aligned and 2MB sized.
*/
-#define SEV_ES_TMR_SIZE (1024 * 1024)
+#define SEV_TMR_SIZE (1024 * 1024)
+#define SNP_TMR_SIZE (2 * 1024 * 1024)
+
static void *sev_es_tmr;
+static size_t sev_es_tmr_size = SEV_TMR_SIZE;
/* INIT_EX NV Storage:
* The NV Storage is a 32Kb area and must be 4Kb page aligned. Use the page
@@ -80,6 +102,13 @@ static void *sev_es_tmr;
#define NV_LENGTH (32 * 1024)
static void *sev_init_ex_buffer;
+/*
+ * SEV_DATA_RANGE_LIST:
+ * Array containing range of pages that firmware transitions to HV-fixed
+ * page state.
+ */
+static struct sev_data_range_list *snp_range_list;
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@@ -115,6 +144,25 @@ static int sev_wait_cmd_ioc(struct sev_device *sev,
{
int ret;
+ /*
+ * If invoked during panic handling, local interrupts are disabled,
+ * so the PSP command completion interrupt can't be used. Poll for
+ * PSP command completion instead.
+ */
+ if (irqs_disabled()) {
+ unsigned long timeout_usecs = (timeout * USEC_PER_SEC) / 10;
+
+ /* Poll for SEV command completion: */
+ while (timeout_usecs--) {
+ *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+ if (*reg & PSP_CMDRESP_RESP)
+ return 0;
+
+ udelay(10);
+ }
+ return -ETIMEDOUT;
+ }
+
ret = wait_event_timeout(sev->int_queue,
sev->int_rcvd, timeout * HZ);
if (!ret)
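
The poll bound above works out to the same wall-clock budget as the interrupt path: (timeout * USEC_PER_SEC) / 10 iterations of udelay(10) is timeout seconds, e.g. a 2-second timeout gives 2 * 1000000 / 10 = 200,000 polls of 10 microseconds each.

	/* interrupt path: wait up to timeout * HZ jiffies
	 * panic path:    (timeout * USEC_PER_SEC / 10) polls * 10us
	 *                = timeout * USEC_PER_SEC microseconds
	 *                = timeout seconds
	 */
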
@@ -130,6 +178,8 @@ static int sev_cmd_buffer_len(int cmd)
switch (cmd) {
case SEV_CMD_INIT: return sizeof(struct sev_data_init);
case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex);
+ case SEV_CMD_SNP_SHUTDOWN_EX: return sizeof(struct sev_data_snp_shutdown_ex);
+ case SEV_CMD_SNP_INIT_EX: return sizeof(struct sev_data_snp_init_ex);
case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
@@ -158,23 +208,27 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report);
case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel);
+ case SEV_CMD_SNP_GCTX_CREATE: return sizeof(struct sev_data_snp_addr);
+ case SEV_CMD_SNP_LAUNCH_START: return sizeof(struct sev_data_snp_launch_start);
+ case SEV_CMD_SNP_LAUNCH_UPDATE: return sizeof(struct sev_data_snp_launch_update);
+ case SEV_CMD_SNP_ACTIVATE: return sizeof(struct sev_data_snp_activate);
+ case SEV_CMD_SNP_DECOMMISSION: return sizeof(struct sev_data_snp_addr);
+ case SEV_CMD_SNP_PAGE_RECLAIM: return sizeof(struct sev_data_snp_page_reclaim);
+ case SEV_CMD_SNP_GUEST_STATUS: return sizeof(struct sev_data_snp_guest_status);
+ case SEV_CMD_SNP_LAUNCH_FINISH: return sizeof(struct sev_data_snp_launch_finish);
+ case SEV_CMD_SNP_DBG_DECRYPT: return sizeof(struct sev_data_snp_dbg);
+ case SEV_CMD_SNP_DBG_ENCRYPT: return sizeof(struct sev_data_snp_dbg);
+ case SEV_CMD_SNP_PAGE_UNSMASH: return sizeof(struct sev_data_snp_page_unsmash);
+ case SEV_CMD_SNP_PLATFORM_STATUS: return sizeof(struct sev_data_snp_addr);
+ case SEV_CMD_SNP_GUEST_REQUEST: return sizeof(struct sev_data_snp_guest_request);
+ case SEV_CMD_SNP_CONFIG: return sizeof(struct sev_user_data_snp_config);
+ case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit);
default: return 0;
}
return 0;
}
-static void *sev_fw_alloc(unsigned long len)
-{
- struct page *page;
-
- page = alloc_pages(GFP_KERNEL, get_order(len));
- if (!page)
- return NULL;
-
- return page_address(page);
-}
-
static struct file *open_file_as_root(const char *filename, int flags, umode_t mode)
{
struct file *fp;
@@ -305,13 +359,485 @@ static int sev_write_init_ex_file_if_required(int cmd_id)
return sev_write_init_ex_file();
}
+/*
+ * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked()
+ * needs snp_reclaim_pages(), so a forward declaration is needed.
+ */
+static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
+
+static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
+{
+ int ret, err, i;
+
+ paddr = __sme_clr(ALIGN_DOWN(paddr, PAGE_SIZE));
+
+ for (i = 0; i < npages; i++, paddr += PAGE_SIZE) {
+ struct sev_data_snp_page_reclaim data = {0};
+
+ data.paddr = paddr;
+
+ if (locked)
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
+ else
+ ret = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
+
+ if (ret)
+ goto cleanup;
+
+ ret = rmp_make_shared(__phys_to_pfn(paddr), PG_LEVEL_4K);
+ if (ret)
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ /*
+ * If there was a failure reclaiming the page then it is no longer safe
+ * to release it back to the system; leak it instead.
+ */
+ snp_leak_pages(__phys_to_pfn(paddr), npages - i);
+ return ret;
+}
+
+static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked)
+{
+ unsigned long pfn = __sme_clr(paddr) >> PAGE_SHIFT;
+ int rc, i;
+
+ for (i = 0; i < npages; i++, pfn++) {
+ rc = rmp_make_private(pfn, 0, PG_LEVEL_4K, 0, true);
+ if (rc)
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ /*
+ * Try unrolling the firmware state changes by
+ * reclaiming the pages which were already changed to the
+ * firmware state.
+ */
+ snp_reclaim_pages(paddr, i, locked);
+
+ return rc;
+}
+
+static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
+{
+ unsigned long npages = 1ul << order, paddr;
+ struct sev_device *sev;
+ struct page *page;
+
+ if (!psp_master || !psp_master->sev_data)
+ return NULL;
+
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
+ return NULL;
+
+	/* If SEV-SNP is initialized then add the page to the RMP table. */
+ sev = psp_master->sev_data;
+ if (!sev->snp_initialized)
+ return page;
+
+ paddr = __pa((unsigned long)page_address(page));
+ if (rmp_mark_pages_firmware(paddr, npages, false))
+ return NULL;
+
+ return page;
+}
+
+void *snp_alloc_firmware_page(gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = __snp_alloc_firmware_pages(gfp_mask, 0);
+
+ return page ? page_address(page) : NULL;
+}
+EXPORT_SYMBOL_GPL(snp_alloc_firmware_page);
+
+static void __snp_free_firmware_pages(struct page *page, int order, bool locked)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ unsigned long paddr, npages = 1ul << order;
+
+ if (!page)
+ return;
+
+ paddr = __pa((unsigned long)page_address(page));
+ if (sev->snp_initialized &&
+ snp_reclaim_pages(paddr, npages, locked))
+ return;
+
+ __free_pages(page, order);
+}
+
+void snp_free_firmware_page(void *addr)
+{
+ if (!addr)
+ return;
+
+ __snp_free_firmware_pages(virt_to_page(addr), 0, false);
+}
+EXPORT_SYMBOL_GPL(snp_free_firmware_page);
+
+static void *sev_fw_alloc(unsigned long len)
+{
+ struct page *page;
+
+ page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len));
+ if (!page)
+ return NULL;
+
+ return page_address(page);
+}
+
+/**
+ * struct cmd_buf_desc - descriptors for managing legacy SEV command address
+ * parameters corresponding to buffers that may be written to by firmware.
+ *
+ * @paddr_ptr: pointer to the address parameter in the command buffer which may
+ * need to be saved/restored depending on whether a bounce buffer
+ * is used. In the case of a bounce buffer, the command buffer
+ * needs to be updated with the address of the new bounce buffer
+ * snp_map_cmd_buf_desc() has allocated specifically for it. Must
+ * be NULL if this descriptor is only an end-of-list indicator.
+ *
+ * @paddr_orig: storage for the original address parameter, which can be used to
+ * restore the original value in @paddr_ptr in cases where it is
+ * replaced with the address of a bounce buffer.
+ *
+ * @len: length of buffer located at the address originally stored at @paddr_ptr
+ *
+ * @guest_owned: true if the address corresponds to guest-owned pages, in which
+ * case bounce buffers are not needed.
+ */
+struct cmd_buf_desc {
+ u64 *paddr_ptr;
+ u64 paddr_orig;
+ u32 len;
+ bool guest_owned;
+};
+
+/*
+ * If a legacy SEV command parameter is a memory address, those pages in
+ * turn need to be transitioned to/from firmware-owned before/after
+ * executing the firmware command.
+ *
+ * Additionally, in cases where those pages are not guest-owned, a bounce
+ * buffer is needed in place of the original memory address parameter.
+ *
+ * A set of descriptors are used to keep track of this handling, and
+ * initialized here based on the specific commands being executed.
+ */
+static void snp_populate_cmd_buf_desc_list(int cmd, void *cmd_buf,
+ struct cmd_buf_desc *desc_list)
+{
+ switch (cmd) {
+ case SEV_CMD_PDH_CERT_EXPORT: {
+ struct sev_data_pdh_cert_export *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->pdh_cert_address;
+ desc_list[0].len = data->pdh_cert_len;
+ desc_list[1].paddr_ptr = &data->cert_chain_address;
+ desc_list[1].len = data->cert_chain_len;
+ break;
+ }
+ case SEV_CMD_GET_ID: {
+ struct sev_data_get_id *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ break;
+ }
+ case SEV_CMD_PEK_CSR: {
+ struct sev_data_pek_csr *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ break;
+ }
+ case SEV_CMD_LAUNCH_UPDATE_DATA: {
+ struct sev_data_launch_update_data *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_LAUNCH_UPDATE_VMSA: {
+ struct sev_data_launch_update_vmsa *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_LAUNCH_MEASURE: {
+ struct sev_data_launch_measure *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ break;
+ }
+ case SEV_CMD_LAUNCH_UPDATE_SECRET: {
+ struct sev_data_launch_secret *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->guest_address;
+ desc_list[0].len = data->guest_len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_DBG_DECRYPT: {
+ struct sev_data_dbg *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->dst_addr;
+ desc_list[0].len = data->len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_DBG_ENCRYPT: {
+ struct sev_data_dbg *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->dst_addr;
+ desc_list[0].len = data->len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_ATTESTATION_REPORT: {
+ struct sev_data_attestation_report *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ break;
+ }
+ case SEV_CMD_SEND_START: {
+ struct sev_data_send_start *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->session_address;
+ desc_list[0].len = data->session_len;
+ break;
+ }
+ case SEV_CMD_SEND_UPDATE_DATA: {
+ struct sev_data_send_update_data *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->hdr_address;
+ desc_list[0].len = data->hdr_len;
+ desc_list[1].paddr_ptr = &data->trans_address;
+ desc_list[1].len = data->trans_len;
+ break;
+ }
+ case SEV_CMD_SEND_UPDATE_VMSA: {
+ struct sev_data_send_update_vmsa *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->hdr_address;
+ desc_list[0].len = data->hdr_len;
+ desc_list[1].paddr_ptr = &data->trans_address;
+ desc_list[1].len = data->trans_len;
+ break;
+ }
+ case SEV_CMD_RECEIVE_UPDATE_DATA: {
+ struct sev_data_receive_update_data *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->guest_address;
+ desc_list[0].len = data->guest_len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_RECEIVE_UPDATE_VMSA: {
+ struct sev_data_receive_update_vmsa *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->guest_address;
+ desc_list[0].len = data->guest_len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static int snp_map_cmd_buf_desc(struct cmd_buf_desc *desc)
+{
+ unsigned int npages;
+
+ if (!desc->len)
+ return 0;
+
+ /* Allocate a bounce buffer if this isn't a guest owned page. */
+ if (!desc->guest_owned) {
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(desc->len));
+ if (!page) {
+ pr_warn("Failed to allocate bounce buffer for SEV legacy command.\n");
+ return -ENOMEM;
+ }
+
+ desc->paddr_orig = *desc->paddr_ptr;
+ *desc->paddr_ptr = __psp_pa(page_to_virt(page));
+ }
+
+ npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT;
+
+ /* Transition the buffer to firmware-owned. */
+ if (rmp_mark_pages_firmware(*desc->paddr_ptr, npages, true)) {
+ pr_warn("Error moving pages to firmware-owned state for SEV legacy command.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int snp_unmap_cmd_buf_desc(struct cmd_buf_desc *desc)
+{
+ unsigned int npages;
+
+ if (!desc->len)
+ return 0;
+
+ npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT;
+
+ /* Transition the buffers back to hypervisor-owned. */
+ if (snp_reclaim_pages(*desc->paddr_ptr, npages, true)) {
+ pr_warn("Failed to reclaim firmware-owned pages while issuing SEV legacy command.\n");
+ return -EFAULT;
+ }
+
+ /* Copy data from bounce buffer and then free it. */
+ if (!desc->guest_owned) {
+ void *bounce_buf = __va(__sme_clr(*desc->paddr_ptr));
+ void *dst_buf = __va(__sme_clr(desc->paddr_orig));
+
+ memcpy(dst_buf, bounce_buf, desc->len);
+ __free_pages(virt_to_page(bounce_buf), get_order(desc->len));
+
+ /* Restore the original address in the command buffer. */
+ *desc->paddr_ptr = desc->paddr_orig;
+ }
+
+ return 0;
+}
+
+static int snp_map_cmd_buf_desc_list(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list)
+{
+ int i;
+
+ snp_populate_cmd_buf_desc_list(cmd, cmd_buf, desc_list);
+
+ for (i = 0; i < CMD_BUF_DESC_MAX; i++) {
+ struct cmd_buf_desc *desc = &desc_list[i];
+
+ if (!desc->paddr_ptr)
+ break;
+
+ if (snp_map_cmd_buf_desc(desc))
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ for (i--; i >= 0; i--)
+ snp_unmap_cmd_buf_desc(&desc_list[i]);
+
+ return -EFAULT;
+}
+
+static int snp_unmap_cmd_buf_desc_list(struct cmd_buf_desc *desc_list)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < CMD_BUF_DESC_MAX; i++) {
+ struct cmd_buf_desc *desc = &desc_list[i];
+
+ if (!desc->paddr_ptr)
+ break;
+
+ if (snp_unmap_cmd_buf_desc(&desc_list[i]))
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+static bool sev_cmd_buf_writable(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_PLATFORM_STATUS:
+ case SEV_CMD_GUEST_STATUS:
+ case SEV_CMD_LAUNCH_START:
+ case SEV_CMD_RECEIVE_START:
+ case SEV_CMD_LAUNCH_MEASURE:
+ case SEV_CMD_SEND_START:
+ case SEV_CMD_SEND_UPDATE_DATA:
+ case SEV_CMD_SEND_UPDATE_VMSA:
+ case SEV_CMD_PEK_CSR:
+ case SEV_CMD_PDH_CERT_EXPORT:
+ case SEV_CMD_GET_ID:
+ case SEV_CMD_ATTESTATION_REPORT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* After SNP is INIT'ed, the behavior of legacy SEV commands is changed. */
+static bool snp_legacy_handling_needed(int cmd)
+{
+ struct sev_device *sev = psp_master->sev_data;
+
+ return cmd < SEV_CMD_SNP_INIT && sev->snp_initialized;
+}
+
+static int snp_prep_cmd_buf(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list)
+{
+ if (!snp_legacy_handling_needed(cmd))
+ return 0;
+
+ if (snp_map_cmd_buf_desc_list(cmd, cmd_buf, desc_list))
+ return -EFAULT;
+
+ /*
+ * Before command execution, the command buffer needs to be put into
+ * the firmware-owned state.
+ */
+ if (sev_cmd_buf_writable(cmd)) {
+ if (rmp_mark_pages_firmware(__pa(cmd_buf), 1, true))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf)
+{
+ if (!snp_legacy_handling_needed(cmd))
+ return 0;
+
+ /*
+ * After command completion, the command buffer needs to be put back
+ * into the hypervisor-owned state.
+ */
+ if (sev_cmd_buf_writable(cmd))
+ if (snp_reclaim_pages(__pa(cmd_buf), 1, true))
+ return -EFAULT;
+
+ return 0;
+}
+
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
+ struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0};
struct psp_device *psp = psp_master;
struct sev_device *sev;
unsigned int cmdbuff_hi, cmdbuff_lo;
unsigned int phys_lsb, phys_msb;
unsigned int reg, ret = 0;
+ void *cmd_buf;
int buf_len;
if (!psp || !psp->sev_data)
@@ -331,12 +857,47 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
* work for some memory, e.g. vmalloc'd addresses, and @data may not be
* physically contiguous.
*/
- if (data)
- memcpy(sev->cmd_buf, data, buf_len);
+ if (data) {
+ /*
+ * Commands are generally issued one at a time and require the
+ * sev_cmd_mutex, but there could be recursive firmware requests
+ * due to SEV_CMD_SNP_PAGE_RECLAIM needing to be issued while
+ * preparing buffers for another command. This is the only known
+ * case of nesting in the current code, so exactly one
+ * additional command buffer is available for that purpose.
+ */
+ if (!sev->cmd_buf_active) {
+ cmd_buf = sev->cmd_buf;
+ sev->cmd_buf_active = true;
+ } else if (!sev->cmd_buf_backup_active) {
+ cmd_buf = sev->cmd_buf_backup;
+ sev->cmd_buf_backup_active = true;
+ } else {
+ dev_err(sev->dev,
+ "SEV: too many firmware commands in progress, no command buffers available.\n");
+ return -EBUSY;
+ }
+
+ memcpy(cmd_buf, data, buf_len);
+
+ /*
+ * The behavior of the SEV-legacy commands is altered when the
+ * SNP firmware is in the INIT state.
+ */
+ ret = snp_prep_cmd_buf(cmd, cmd_buf, desc_list);
+ if (ret) {
+ dev_err(sev->dev,
+ "SEV: failed to prepare buffer for legacy command 0x%x. Error: %d\n",
+ cmd, ret);
+ return ret;
+ }
+ } else {
+ cmd_buf = sev->cmd_buf;
+ }
/* Get the physical address of the command buffer */
- phys_lsb = data ? lower_32_bits(__psp_pa(sev->cmd_buf)) : 0;
- phys_msb = data ? upper_32_bits(__psp_pa(sev->cmd_buf)) : 0;
+ phys_lsb = data ? lower_32_bits(__psp_pa(cmd_buf)) : 0;
+ phys_msb = data ? upper_32_bits(__psp_pa(cmd_buf)) : 0;
dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
cmd, phys_msb, phys_lsb, psp_timeout);
@@ -390,20 +951,41 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
ret = sev_write_init_ex_file_if_required(cmd);
}
- print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- buf_len, false);
-
/*
* Copy potential output from the PSP back to data. Do this even on
* failure in case the caller wants to glean something from the error.
*/
- if (data)
- memcpy(data, sev->cmd_buf, buf_len);
+ if (data) {
+ int ret_reclaim;
+ /*
+ * Restore the page state after the command completes.
+ */
+ ret_reclaim = snp_reclaim_cmd_buf(cmd, cmd_buf);
+ if (ret_reclaim) {
+ dev_err(sev->dev,
+ "SEV: failed to reclaim buffer for legacy command %#x. Error: %d\n",
+ cmd, ret_reclaim);
+ return ret_reclaim;
+ }
+
+ memcpy(data, cmd_buf, buf_len);
+
+ if (sev->cmd_buf_backup_active)
+ sev->cmd_buf_backup_active = false;
+ else
+ sev->cmd_buf_active = false;
+
+ if (snp_unmap_cmd_buf_desc_list(desc_list))
+ return -EFAULT;
+ }
+
+ print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ buf_len, false);
return ret;
}
-static int sev_do_cmd(int cmd, void *data, int *psp_ret)
+int sev_do_cmd(int cmd, void *data, int *psp_ret)
{
int rc;
@@ -413,6 +995,7 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret)
return rc;
}
+EXPORT_SYMBOL_GPL(sev_do_cmd);
static int __sev_init_locked(int *error)
{
@@ -427,7 +1010,7 @@ static int __sev_init_locked(int *error)
data.tmr_address = __pa(sev_es_tmr);
data.flags |= SEV_INIT_FLAGS_SEV_ES;
- data.tmr_len = SEV_ES_TMR_SIZE;
+ data.tmr_len = sev_es_tmr_size;
}
return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
@@ -450,7 +1033,7 @@ static int __sev_init_ex_locked(int *error)
data.tmr_address = __pa(sev_es_tmr);
data.flags |= SEV_INIT_FLAGS_SEV_ES;
- data.tmr_len = SEV_ES_TMR_SIZE;
+ data.tmr_len = sev_es_tmr_size;
}
return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error);
@@ -464,25 +1047,217 @@ static inline int __sev_do_init_locked(int *psp_ret)
return __sev_init_locked(psp_ret);
}
-static int __sev_platform_init_locked(int *error)
+static void snp_set_hsave_pa(void *arg)
+{
+ wrmsrl(MSR_VM_HSAVE_PA, 0);
+}
+
+static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
+{
+ struct sev_data_range_list *range_list = arg;
+ struct sev_data_range *range = &range_list->ranges[range_list->num_elements];
+ size_t size;
+
+ /*
+ * Ensure the list of HV_FIXED pages that will be passed to firmware
+ * do not exceed the page-sized argument buffer.
+ */
+ if ((range_list->num_elements * sizeof(struct sev_data_range) +
+ sizeof(struct sev_data_range_list)) > PAGE_SIZE)
+ return -E2BIG;
+
+ switch (rs->desc) {
+ case E820_TYPE_RESERVED:
+ case E820_TYPE_PMEM:
+ case E820_TYPE_ACPI:
+ range->base = rs->start & PAGE_MASK;
+ size = PAGE_ALIGN((rs->end + 1) - rs->start);
+ range->page_count = size >> PAGE_SHIFT;
+ range_list->num_elements++;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
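
The PAGE_SIZE guard above bounds how many ranges fit in the single-page argument buffer. Assuming the layout in the psp-sev ABI headers (a 16-byte struct sev_data_range and an 8-byte list header), the capacity works out as:

	/* (PAGE_SIZE - sizeof(struct sev_data_range_list)) /
	 *  sizeof(struct sev_data_range)
	 * = (4096 - 8) / 16 = 255 HV-fixed ranges per SNP_INIT_EX call
	 */
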
+
+static int __sev_snp_init_locked(int *error)
{
- int rc = 0, psp_ret = SEV_RET_NO_FW_CALL;
struct psp_device *psp = psp_master;
+ struct sev_data_snp_init_ex data;
struct sev_device *sev;
+ void *arg = &data;
+ int cmd, rc = 0;
- if (!psp || !psp->sev_data)
+ if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
return -ENODEV;
sev = psp->sev_data;
- if (sev->state == SEV_STATE_INIT)
+ if (sev->snp_initialized)
return 0;
- if (sev_init_ex_buffer) {
- rc = sev_read_init_ex_file();
- if (rc)
+ if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) {
+ dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n",
+ SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR);
+ return 0;
+ }
+
+ /* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */
+ on_each_cpu(snp_set_hsave_pa, NULL, 1);
+
+ /*
+ * Starting in SNP firmware v1.52, the SNP_INIT_EX command takes a list
+ * of system physical address ranges to convert into HV-fixed page
+ * states during the RMP initialization. For instance, the memory that
+	 * UEFI reserves should be included in that list. This allows system
+ * components that occasionally write to memory (e.g. logging to UEFI
+ * reserved regions) to not fail due to RMP initialization and SNP
+ * enablement.
+ */
+ if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) {
+ /*
+ * Firmware checks that the pages containing the ranges enumerated
+ * in the RANGES structure are either in the default page state or in the
+ * firmware page state.
+ */
+ snp_range_list = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!snp_range_list) {
+ dev_err(sev->dev,
+ "SEV: SNP_INIT_EX range list memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Retrieve all reserved memory regions from the e820 memory map
+ * to be setup as HV-fixed pages.
+ */
+ rc = walk_iomem_res_desc(IORES_DESC_NONE, IORESOURCE_MEM, 0, ~0,
+ snp_range_list, snp_filter_reserved_mem_regions);
+ if (rc) {
+ dev_err(sev->dev,
+ "SEV: SNP_INIT_EX walk_iomem_res_desc failed rc = %d\n", rc);
return rc;
+ }
+
+ memset(&data, 0, sizeof(data));
+ data.init_rmp = 1;
+ data.list_paddr_en = 1;
+ data.list_paddr = __psp_pa(snp_range_list);
+ cmd = SEV_CMD_SNP_INIT_EX;
+ } else {
+ cmd = SEV_CMD_SNP_INIT;
+ arg = NULL;
+ }
+
+ /*
+ * The following sequence must be issued before launching the first SNP
+ * guest to ensure all dirty cache lines are flushed, including from
+ * updates to the RMP table itself via the RMPUPDATE instruction:
+ *
+ * - WBINVD on all running CPUs
+ * - SEV_CMD_SNP_INIT[_EX] firmware command
+ * - WBINVD on all running CPUs
+ * - SEV_CMD_SNP_DF_FLUSH firmware command
+ */
+ wbinvd_on_all_cpus();
+
+ rc = __sev_do_cmd_locked(cmd, arg, error);
+ if (rc)
+ return rc;
+
+ /* Prepare for first SNP guest launch after INIT. */
+ wbinvd_on_all_cpus();
+ rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error);
+ if (rc)
+ return rc;
+
+ sev->snp_initialized = true;
+ dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+
+ sev_es_tmr_size = SNP_TMR_SIZE;
+
+ return rc;
+}
+
+static void __sev_platform_init_handle_tmr(struct sev_device *sev)
+{
+ if (sev_es_tmr)
+ return;
+
+ /* Obtain the TMR memory area for SEV-ES use */
+ sev_es_tmr = sev_fw_alloc(sev_es_tmr_size);
+ if (sev_es_tmr) {
+ /* Must flush the cache before giving it to the firmware */
+ if (!sev->snp_initialized)
+ clflush_cache_range(sev_es_tmr, sev_es_tmr_size);
+ } else {
+ dev_warn(sev->dev, "SEV: TMR allocation failed, SEV-ES support unavailable\n");
}
+}
+
+/*
+ * If an init_ex_path is provided, allocate a buffer for the file and
+ * read in the contents. Additionally, if SNP is initialized, convert
+ * the buffer pages to firmware pages.
+ */
+static int __sev_platform_init_handle_init_ex_path(struct sev_device *sev)
+{
+ struct page *page;
+ int rc;
+
+ if (!init_ex_path)
+ return 0;
+
+ if (sev_init_ex_buffer)
+ return 0;
+
+ page = alloc_pages(GFP_KERNEL, get_order(NV_LENGTH));
+ if (!page) {
+ dev_err(sev->dev, "SEV: INIT_EX NV memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ sev_init_ex_buffer = page_address(page);
+
+ rc = sev_read_init_ex_file();
+ if (rc)
+ return rc;
+
+ /* If SEV-SNP is initialized, transition to firmware page. */
+ if (sev->snp_initialized) {
+ unsigned long npages;
+
+ npages = 1UL << get_order(NV_LENGTH);
+ if (rmp_mark_pages_firmware(__pa(sev_init_ex_buffer), npages, false)) {
+ dev_err(sev->dev, "SEV: INIT_EX NV memory page state change failed.\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int __sev_platform_init_locked(int *error)
+{
+ int rc, psp_ret = SEV_RET_NO_FW_CALL;
+ struct sev_device *sev;
+
+ if (!psp_master || !psp_master->sev_data)
+ return -ENODEV;
+
+ sev = psp_master->sev_data;
+
+ if (sev->state == SEV_STATE_INIT)
+ return 0;
+
+ __sev_platform_init_handle_tmr(sev);
+
+ rc = __sev_platform_init_handle_init_ex_path(sev);
+ if (rc)
+ return rc;
rc = __sev_do_init_locked(&psp_ret);
if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
@@ -520,12 +1295,46 @@ static int __sev_platform_init_locked(int *error)
return 0;
}
-int sev_platform_init(int *error)
+static int _sev_platform_init_locked(struct sev_platform_init_args *args)
+{
+ struct sev_device *sev;
+ int rc;
+
+ if (!psp_master || !psp_master->sev_data)
+ return -ENODEV;
+
+ sev = psp_master->sev_data;
+
+ if (sev->state == SEV_STATE_INIT)
+ return 0;
+
+ /*
+ * Legacy guests cannot be running while SNP_INIT(_EX) is executing,
+ * so perform SEV-SNP initialization at probe time.
+ */
+ rc = __sev_snp_init_locked(&args->error);
+ if (rc && rc != -ENODEV) {
+ /*
+ * Don't abort the probe if SNP INIT failed,
+ * continue to initialize the legacy SEV firmware.
+ */
+ dev_err(sev->dev, "SEV-SNP: failed to INIT rc %d, error %#x\n",
+ rc, args->error);
+ }
+
+ /* Defer legacy SEV/SEV-ES support if allowed by caller/module. */
+ if (args->probe && !psp_init_on_probe)
+ return 0;
+
+ return __sev_platform_init_locked(&args->error);
+}
+
+int sev_platform_init(struct sev_platform_init_args *args)
{
int rc;
mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_init_locked(error);
+ rc = _sev_platform_init_locked(args);
mutex_unlock(&sev_cmd_mutex);
return rc;
@@ -534,10 +1343,16 @@ EXPORT_SYMBOL_GPL(sev_platform_init);
static int __sev_platform_shutdown_locked(int *error)
{
- struct sev_device *sev = psp_master->sev_data;
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
int ret;
- if (!sev || sev->state == SEV_STATE_UNINIT)
+ if (!psp || !psp->sev_data)
+ return 0;
+
+ sev = psp->sev_data;
+
+ if (sev->state == SEV_STATE_UNINIT)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
@@ -550,17 +1365,6 @@ static int __sev_platform_shutdown_locked(int *error)
return ret;
}
-static int sev_platform_shutdown(int *error)
-{
- int rc;
-
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_shutdown_locked(NULL);
- mutex_unlock(&sev_cmd_mutex);
-
- return rc;
-}
-
static int sev_get_platform_state(int *state, int *error)
{
struct sev_user_data_status data;
@@ -836,6 +1640,72 @@ fw_err:
return ret;
}
+static int __sev_snp_shutdown_locked(int *error, bool panic)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_data_snp_shutdown_ex data;
+ int ret;
+
+ if (!sev->snp_initialized)
+ return 0;
+
+ memset(&data, 0, sizeof(data));
+ data.len = sizeof(data);
+ data.iommu_snp_shutdown = 1;
+
+ /*
+ * If invoked during panic handling, local interrupts are disabled
+ * and all CPUs are stopped, so wbinvd_on_all_cpus() can't be called.
+ * In that case, a wbinvd() is done on remote CPUs via the NMI
+ * callback, so only a local wbinvd() is needed here.
+ */
+ if (!panic)
+ wbinvd_on_all_cpus();
+ else
+ wbinvd();
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error);
+ /* SHUTDOWN may require DF_FLUSH */
+ if (*error == SEV_RET_DFFLUSH_REQUIRED) {
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, NULL);
+ if (ret) {
+ dev_err(sev->dev, "SEV-SNP DF_FLUSH failed\n");
+ return ret;
+ }
+ /* reissue the shutdown command */
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data,
+ error);
+ }
+ if (ret) {
+ dev_err(sev->dev, "SEV-SNP firmware shutdown failed\n");
+ return ret;
+ }
+
+ /*
+ * SNP_SHUTDOWN_EX with IOMMU_SNP_SHUTDOWN set to 1 disables SNP
+ * enforcement by the IOMMU and also transitions all pages
+ * associated with the IOMMU to the Reclaim state.
+	 * Firmware versions earlier than 1.53 transitioned the IOMMU pages
+	 * to the Hypervisor state instead, but accounted for the number of
+	 * assigned 4kB pages in a 2MB page incorrectly because those pages
+	 * were never moved to the Reclaim state. That caused an RMP #PF
+	 * when the containing 2MB page was later accessed during kexec
+	 * boot. Firmware 1.53 and above therefore transition these pages
+	 * to the Reclaim state, and the hypervisor must transition them
+	 * back to the shared state; kexec boot requires SNP firmware 1.53+.
+ */
+ ret = amd_iommu_snp_disable();
+ if (ret) {
+ dev_err(sev->dev, "SNP IOMMU shutdown failed\n");
+ return ret;
+ }
+
+ sev->snp_initialized = false;
+ dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
+
+ return ret;
+}
+
static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
@@ -1078,6 +1948,85 @@ e_free_pdh:
return ret;
}
+static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_data_snp_addr buf;
+ struct page *status_page;
+ void *data;
+ int ret;
+
+ if (!sev->snp_initialized || !argp->data)
+ return -EINVAL;
+
+ status_page = alloc_page(GFP_KERNEL_ACCOUNT);
+ if (!status_page)
+ return -ENOMEM;
+
+ data = page_address(status_page);
+
+ /*
+ * Firmware expects status page to be in firmware-owned state, otherwise
+ * it will report firmware error code INVALID_PAGE_STATE (0x1A).
+ */
+ if (rmp_mark_pages_firmware(__pa(data), 1, true)) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ buf.address = __psp_pa(data);
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &argp->error);
+
+ /*
+ * Status page will be transitioned to Reclaim state upon success, or
+	 * left in Firmware state on failure. Use snp_reclaim_pages() to
+ * transition either case back to Hypervisor-owned state.
+ */
+ if (snp_reclaim_pages(__pa(data), 1, true))
+ return -EFAULT;
+
+ if (ret)
+ goto cleanup;
+
+ if (copy_to_user((void __user *)argp->data, data,
+ sizeof(struct sev_user_data_snp_status)))
+ ret = -EFAULT;
+
+cleanup:
+ __free_pages(status_page, 0);
+ return ret;
+}
+
+static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_data_snp_commit buf;
+
+ if (!sev->snp_initialized)
+ return -EINVAL;
+
+ buf.len = sizeof(buf);
+
+ return __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+}
+
+static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_snp_config config;
+
+ if (!sev->snp_initialized || !argp->data)
+ return -EINVAL;
+
+ if (!writable)
+ return -EPERM;
+
+ if (copy_from_user(&config, (void __user *)argp->data, sizeof(config)))
+ return -EFAULT;
+
+ return __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+}
+
static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
void __user *argp = (void __user *)arg;
@@ -1129,6 +2078,15 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
case SEV_GET_ID2:
ret = sev_ioctl_do_get_id2(&input);
break;
+ case SNP_PLATFORM_STATUS:
+ ret = sev_ioctl_do_snp_platform_status(&input);
+ break;
+ case SNP_COMMIT:
+ ret = sev_ioctl_do_snp_commit(&input);
+ break;
+ case SNP_SET_CONFIG:
+ ret = sev_ioctl_do_snp_set_config(&input, writable);
+ break;
default:
ret = -EINVAL;
goto out;
@@ -1239,10 +2197,12 @@ int sev_dev_init(struct psp_device *psp)
if (!sev)
goto e_err;
- sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 1);
if (!sev->cmd_buf)
goto e_sev;
+ sev->cmd_buf_backup = (uint8_t *)sev->cmd_buf + PAGE_SIZE;
+
psp->sev_data = sev;
sev->dev = dev;
@@ -1281,24 +2241,51 @@ e_err:
return ret;
}
-static void sev_firmware_shutdown(struct sev_device *sev)
+static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
{
- sev_platform_shutdown(NULL);
+ int error;
+
+ __sev_platform_shutdown_locked(NULL);
if (sev_es_tmr) {
- /* The TMR area was encrypted, flush it from the cache */
- wbinvd_on_all_cpus();
+ /*
+ * The TMR area was encrypted, flush it from the cache.
+ *
+ * If invoked during panic handling, local interrupts are
+ * disabled and all CPUs are stopped, so wbinvd_on_all_cpus()
+ * can't be used. In that case, wbinvd() is done on remote CPUs
+ * via the NMI callback, and done for this CPU later during
+ * SNP shutdown, so wbinvd_on_all_cpus() can be skipped.
+ */
+ if (!panic)
+ wbinvd_on_all_cpus();
- free_pages((unsigned long)sev_es_tmr,
- get_order(SEV_ES_TMR_SIZE));
+ __snp_free_firmware_pages(virt_to_page(sev_es_tmr),
+ get_order(sev_es_tmr_size),
+ true);
sev_es_tmr = NULL;
}
if (sev_init_ex_buffer) {
- free_pages((unsigned long)sev_init_ex_buffer,
- get_order(NV_LENGTH));
+ __snp_free_firmware_pages(virt_to_page(sev_init_ex_buffer),
+ get_order(NV_LENGTH),
+ true);
sev_init_ex_buffer = NULL;
}
+
+ if (snp_range_list) {
+ kfree(snp_range_list);
+ snp_range_list = NULL;
+ }
+
+ __sev_snp_shutdown_locked(&error, panic);
+}
+
+static void sev_firmware_shutdown(struct sev_device *sev)
+{
+ mutex_lock(&sev_cmd_mutex);
+ __sev_firmware_shutdown(sev, false);
+ mutex_unlock(&sev_cmd_mutex);
}
void sev_dev_destroy(struct psp_device *psp)
@@ -1316,6 +2303,29 @@ void sev_dev_destroy(struct psp_device *psp)
psp_clear_sev_irq_handler(psp);
}
+static int snp_shutdown_on_panic(struct notifier_block *nb,
+ unsigned long reason, void *arg)
+{
+ struct sev_device *sev = psp_master->sev_data;
+
+ /*
+ * If sev_cmd_mutex is already acquired, then it's likely
+ * another PSP command is in flight and issuing a shutdown
+ * would fail in unexpected ways. Rather than create even
+ * more confusion during a panic, just bail out here.
+ */
+ if (mutex_is_locked(&sev_cmd_mutex))
+ return NOTIFY_DONE;
+
+ __sev_firmware_shutdown(sev, true);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block snp_panic_notifier = {
+ .notifier_call = snp_shutdown_on_panic,
+};
+
int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
void *data, int *error)
{
@@ -1329,7 +2339,8 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
- int error, rc;
+ struct sev_platform_init_args args = {0};
+ int rc;
if (!sev)
return;
@@ -1342,36 +2353,18 @@ void sev_pci_init(void)
if (sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
- /* If an init_ex_path is provided rely on INIT_EX for PSP initialization
- * instead of INIT.
- */
- if (init_ex_path) {
- sev_init_ex_buffer = sev_fw_alloc(NV_LENGTH);
- if (!sev_init_ex_buffer) {
- dev_err(sev->dev,
- "SEV: INIT_EX NV memory allocation failed\n");
- goto err;
- }
- }
-
- /* Obtain the TMR memory area for SEV-ES use */
- sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE);
- if (sev_es_tmr)
- /* Must flush the cache before giving it to the firmware */
- clflush_cache_range(sev_es_tmr, SEV_ES_TMR_SIZE);
- else
- dev_warn(sev->dev,
- "SEV: TMR allocation failed, SEV-ES support unavailable\n");
-
- if (!psp_init_on_probe)
- return;
-
/* Initialize the platform */
- rc = sev_platform_init(&error);
+ args.probe = true;
+ rc = sev_platform_init(&args);
if (rc)
dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
- error, rc);
+ args.error, rc);
+ dev_info(sev->dev, "SEV%s API:%d.%d build:%d\n", sev->snp_initialized ?
+ "-SNP" : "", sev->api_major, sev->api_minor, sev->build);
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &snp_panic_notifier);
return;
err:
@@ -1386,4 +2379,7 @@ void sev_pci_exit(void)
return;
sev_firmware_shutdown(sev);
+
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &snp_panic_notifier);
}
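
With sev_platform_init() now taking an argument struct, callers choose between probe-time and on-demand initialization and receive the firmware error out-of-band. A hedged caller sketch (field names as introduced above):

	struct sev_platform_init_args args = {0};
	int rc;

	args.probe = false;	/* full INIT now, not deferred to first use */
	rc = sev_platform_init(&args);
	if (rc)
		pr_err("SEV init failed: rc %d, fw error %#x\n",
		       rc, args.error);
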
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index 778c95155e74..3e4e5574e88a 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -52,6 +52,11 @@ struct sev_device {
u8 build;
void *cmd_buf;
+ void *cmd_buf_backup;
+ bool cmd_buf_active;
+ bool cmd_buf_backup_active;
+
+ bool snp_initialized;
};
int sev_dev_init(struct psp_device *psp);
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 1b13b4aa16ec..a235e6c300f1 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -332,12 +332,12 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
theend:
pm_runtime_put_autosuspend(rkc->dev);
+ rk_hash_unprepare(engine, breq);
+
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
local_bh_enable();
- rk_hash_unprepare(engine, breq);
-
return 0;
}
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 2621ff8a9376..de53eddf6796 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -104,7 +104,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
}
static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
- struct virtio_crypto_ctrl_header *header, void *para,
+ struct virtio_crypto_ctrl_header *header,
+ struct virtio_crypto_akcipher_session_para *para,
const uint8_t *key, unsigned int keylen)
{
struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
@@ -128,7 +129,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
ctrl = &vc_ctrl_req->ctrl;
memcpy(&ctrl->header, header, sizeof(ctrl->header));
- memcpy(&ctrl->u, para, sizeof(ctrl->u));
+ memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
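
The virtio-crypto change is an overread fix: para used to arrive as a void pointer to a session-parameter struct but was copied with sizeof(ctrl->u), the size of the whole union, reading past the end of the source object. Typing the parameter and copying sizeof(*para) makes the copy exactly as large as the object. In miniature:

	union u { struct big b; struct small s; };

	void fill(union u *dst, struct small *para)
	{
		memcpy(&dst->s, para, sizeof(*para));	/* correct */
		/* memcpy(&dst->s, para, sizeof(*dst)) would overread para */
	}
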
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index dcf2b39e1048..1a3e6aafbdcc 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -316,31 +316,27 @@ static const struct cxl_root_ops acpi_root_ops = {
.qos_class = cxl_acpi_qos_class,
};
-static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
- const unsigned long end)
+static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
+ struct cxl_cfmws_context *ctx)
{
int target_map[CXL_DECODER_MAX_INTERLEAVE];
- struct cxl_cfmws_context *ctx = arg;
struct cxl_port *root_port = ctx->root_port;
struct resource *cxl_res = ctx->cxl_res;
struct cxl_cxims_context cxims_ctx;
struct cxl_root_decoder *cxlrd;
struct device *dev = ctx->dev;
- struct acpi_cedt_cfmws *cfmws;
cxl_calc_hb_fn cxl_calc_hb;
struct cxl_decoder *cxld;
unsigned int ways, i, ig;
struct resource *res;
int rc;
- cfmws = (struct acpi_cedt_cfmws *) header;
-
rc = cxl_acpi_cfmws_verify(dev, cfmws);
if (rc) {
dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1);
- return 0;
+ return rc;
}
rc = eiw_to_ways(cfmws->interleave_ways, &ways);
@@ -376,7 +372,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
if (IS_ERR(cxlrd))
- return 0;
+ return PTR_ERR(cxlrd);
cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
@@ -420,16 +416,7 @@ err_xormap:
put_device(&cxld->dev);
else
rc = cxl_decoder_autoremove(dev, cxld);
- if (rc) {
- dev_err(dev, "Failed to add decode range: %pr", res);
- return rc;
- }
- dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
- dev_name(&cxld->dev),
- phys_to_target_node(cxld->hpa_range.start),
- cxld->hpa_range.start, cxld->hpa_range.end);
-
- return 0;
+ return rc;
err_insert:
kfree(res->name);
@@ -438,6 +425,29 @@ err_name:
return -ENOMEM;
}
+static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
+ struct cxl_cfmws_context *ctx = arg;
+ struct device *dev = ctx->dev;
+ int rc;
+
+ rc = __cxl_parse_cfmws(cfmws, ctx);
+ if (rc)
+ dev_err(dev,
+ "Failed to add decode range: [%#llx - %#llx] (%d)\n",
+ cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1, rc);
+ else
+ dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
+ phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1);
+
+ /* never fail cxl_acpi load for a single window failure */
+ return 0;
+}
+
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
struct device *dev)
{
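
With the parse logic split out, __cxl_parse_cfmws() can return real errors while the subtable callback logs and swallows them, so a single bad window no longer aborts cxl_acpi loading. The shape of the pattern, as a sketch with stand-in types:

#include <linux/errno.h>
#include <linux/printk.h>

struct window { unsigned long long base, size; };	/* stand-in */

static int parse_window(const struct window *w)
{
	if (!w->size)
		return -EINVAL;		/* the real parser can fail */
	return 0;
}

/* Subtable-walk callback: report, but never fail the whole scan. */
static int parse_window_cb(const struct window *w)
{
	int rc = parse_window(w);

	if (rc)
		pr_err("failed to add decode range [%#llx - %#llx] (%d)\n",
		       w->base, w->base + w->size - 1, rc);
	return 0;
}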
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index 6fe11546889f..08fd0baea7a0 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -210,19 +210,12 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
return 0;
}
-static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
- struct list_head *list)
+static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
+ struct cxl_dpa_perf *dpa_perf)
{
- struct cxl_dpa_perf *dpa_perf;
-
- dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL);
- if (!dpa_perf)
- return;
-
dpa_perf->dpa_range = dent->dpa_range;
dpa_perf->coord = dent->coord;
dpa_perf->qos_class = dent->qos_class;
- list_add_tail(&dpa_perf->list, list);
dev_dbg(dev,
"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
dent->dpa_range.start, dpa_perf->qos_class,
@@ -230,20 +223,6 @@ static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
dent->coord.read_latency, dent->coord.write_latency);
}
-static void free_perf_ents(void *data)
-{
- struct cxl_memdev_state *mds = data;
- struct cxl_dpa_perf *dpa_perf, *n;
- LIST_HEAD(discard);
-
- list_splice_tail_init(&mds->ram_perf_list, &discard);
- list_splice_tail_init(&mds->pmem_perf_list, &discard);
- list_for_each_entry_safe(dpa_perf, n, &discard, list) {
- list_del(&dpa_perf->list);
- kfree(dpa_perf);
- }
-}
-
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
struct xarray *dsmas_xa)
{
@@ -263,16 +242,14 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
xa_for_each(dsmas_xa, index, dent) {
if (resource_size(&cxlds->ram_res) &&
range_contains(&ram_range, &dent->dpa_range))
- add_perf_entry(dev, dent, &mds->ram_perf_list);
+ update_perf_entry(dev, dent, &mds->ram_perf);
else if (resource_size(&cxlds->pmem_res) &&
range_contains(&pmem_range, &dent->dpa_range))
- add_perf_entry(dev, dent, &mds->pmem_perf_list);
+ update_perf_entry(dev, dent, &mds->pmem_perf);
else
dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
dent->dpa_range.start);
}
-
- devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds);
}
static int match_cxlrd_qos_class(struct device *dev, void *data)
@@ -293,24 +270,24 @@ static int match_cxlrd_qos_class(struct device *dev, void *data)
return 0;
}
-static void cxl_qos_match(struct cxl_port *root_port,
- struct list_head *work_list,
- struct list_head *discard_list)
+static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
{
- struct cxl_dpa_perf *dpa_perf, *n;
+ *dpa_perf = (struct cxl_dpa_perf) {
+ .qos_class = CXL_QOS_CLASS_INVALID,
+ };
+}
- list_for_each_entry_safe(dpa_perf, n, work_list, list) {
- int rc;
+static bool cxl_qos_match(struct cxl_port *root_port,
+ struct cxl_dpa_perf *dpa_perf)
+{
+ if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
+ return false;
- if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
- return;
+ if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
+ match_cxlrd_qos_class))
+ return false;
- rc = device_for_each_child(&root_port->dev,
- (void *)&dpa_perf->qos_class,
- match_cxlrd_qos_class);
- if (!rc)
- list_move_tail(&dpa_perf->list, discard_list);
- }
+ return true;
}
static int match_cxlrd_hb(struct device *dev, void *data)
@@ -334,23 +311,10 @@ static int match_cxlrd_hb(struct device *dev, void *data)
return 0;
}
-static void discard_dpa_perf(struct list_head *list)
-{
- struct cxl_dpa_perf *dpa_perf, *n;
-
- list_for_each_entry_safe(dpa_perf, n, list, list) {
- list_del(&dpa_perf->list);
- kfree(dpa_perf);
- }
-}
-DEFINE_FREE(dpa_perf, struct list_head *, if (!list_empty(_T)) discard_dpa_perf(_T))
-
static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- LIST_HEAD(__discard);
- struct list_head *discard __free(dpa_perf) = &__discard;
struct cxl_port *root_port;
int rc;
@@ -363,16 +327,17 @@ static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
root_port = &cxl_root->port;
/* Check that the QTG IDs are all sane between end device and root decoders */
- cxl_qos_match(root_port, &mds->ram_perf_list, discard);
- cxl_qos_match(root_port, &mds->pmem_perf_list, discard);
+ if (!cxl_qos_match(root_port, &mds->ram_perf))
+ reset_dpa_perf(&mds->ram_perf);
+ if (!cxl_qos_match(root_port, &mds->pmem_perf))
+ reset_dpa_perf(&mds->pmem_perf);
/* Check to make sure that the device's host bridge is under a root decoder */
rc = device_for_each_child(&root_port->dev,
- (void *)cxlmd->endpoint->host_bridge,
- match_cxlrd_hb);
+ cxlmd->endpoint->host_bridge, match_cxlrd_hb);
if (!rc) {
- list_splice_tail_init(&mds->ram_perf_list, discard);
- list_splice_tail_init(&mds->pmem_perf_list, discard);
+ reset_dpa_perf(&mds->ram_perf);
+ reset_dpa_perf(&mds->pmem_perf);
}
return rc;
@@ -417,6 +382,7 @@ void cxl_endpoint_parse_cdat(struct cxl_port *port)
cxl_memdev_set_qos_class(cxlds, dsmas_xa);
cxl_qos_class_verify(cxlmd);
+ cxl_memdev_update_perf(cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 27166a411705..9adda4795eb7 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1391,8 +1391,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mds->cxlds.reg_map.host = dev;
mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
- INIT_LIST_HEAD(&mds->ram_perf_list);
- INIT_LIST_HEAD(&mds->pmem_perf_list);
+ mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
+ mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
return mds;
}
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index dae8802ecdb0..d4e259f3a7e9 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -447,13 +447,41 @@ static struct attribute *cxl_memdev_attributes[] = {
NULL,
};
+static ssize_t pmem_qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_pmem_qos_class =
+ __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
+
static struct attribute *cxl_memdev_pmem_attributes[] = {
&dev_attr_pmem_size.attr,
+ &dev_attr_pmem_qos_class.attr,
NULL,
};
+static ssize_t ram_qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_ram_qos_class =
+ __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
+
static struct attribute *cxl_memdev_ram_attributes[] = {
&dev_attr_ram_size.attr,
+ &dev_attr_ram_qos_class.attr,
NULL,
};
@@ -477,14 +505,42 @@ static struct attribute_group cxl_memdev_attribute_group = {
.is_visible = cxl_memdev_visible,
};
+static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (a == &dev_attr_ram_qos_class.attr)
+ if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_memdev_ram_attribute_group = {
.name = "ram",
.attrs = cxl_memdev_ram_attributes,
+ .is_visible = cxl_ram_visible,
};
+static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (a == &dev_attr_pmem_qos_class.attr)
+ if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_memdev_pmem_attribute_group = {
.name = "pmem",
.attrs = cxl_memdev_pmem_attributes,
+ .is_visible = cxl_pmem_visible,
};
static umode_t cxl_memdev_security_visible(struct kobject *kobj,
@@ -519,6 +575,13 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = {
NULL,
};
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
+{
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, CXL);
+
static const struct device_type cxl_memdev_type = {
.name = "cxl_memdev",
.release = cxl_memdev_release,
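
The new qos_class attributes use the standard conditional-visibility pattern: an .is_visible() hook hides the file while the value is invalid, and sysfs_update_group() re-evaluates visibility once CDAT parsing has filled the data in. A minimal sketch with illustrative names:

#include <linux/device.h>
#include <linux/sysfs.h>

static int qos_class = -1;	/* -1 == invalid, attribute hidden */

static ssize_t qos_class_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", qos_class);
}
static DEVICE_ATTR_RO(qos_class);

static struct attribute *grp_attrs[] = {
	&dev_attr_qos_class.attr,
	NULL,
};

static umode_t grp_visible(struct kobject *kobj, struct attribute *a, int n)
{
	if (a == &dev_attr_qos_class.attr && qos_class < 0)
		return 0;	/* hide until the value is known */
	return a->mode;
}

static const struct attribute_group grp = {
	.attrs = grp_attrs,
	.is_visible = grp_visible,
};

/* Once qos_class becomes valid, re-run the visibility check: */
/*	sysfs_update_group(&dev->kobj, &grp); */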
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 6c9c8d92f8f7..e9e6c81ce034 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -477,9 +477,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
allowed++;
}
- if (!allowed) {
- cxl_set_mem_enable(cxlds, 0);
- info->mem_enabled = 0;
+ if (!allowed && info->mem_enabled) {
+ dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
+ return -ENXIO;
}
/*
@@ -932,11 +932,21 @@ static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
void cxl_cor_error_detected(struct pci_dev *pdev)
{
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct device *dev = &cxlds->cxlmd->dev;
+
+ scoped_guard(device, dev) {
+ if (!dev->driver) {
+ dev_warn(&pdev->dev,
+ "%s: memdev disabled, abort error handling\n",
+ dev_name(dev));
+ return;
+ }
- if (cxlds->rcd)
- cxl_handle_rdport_errors(cxlds);
+ if (cxlds->rcd)
+ cxl_handle_rdport_errors(cxlds);
- cxl_handle_endpoint_cor_ras(cxlds);
+ cxl_handle_endpoint_cor_ras(cxlds);
+ }
}
EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
@@ -948,16 +958,25 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
struct device *dev = &cxlmd->dev;
bool ue;
- if (cxlds->rcd)
- cxl_handle_rdport_errors(cxlds);
+ scoped_guard(device, dev) {
+ if (!dev->driver) {
+ dev_warn(&pdev->dev,
+ "%s: memdev disabled, abort error handling\n",
+ dev_name(dev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (cxlds->rcd)
+ cxl_handle_rdport_errors(cxlds);
+ /*
+ * A frozen channel indicates an impending reset which is fatal to
+ * CXL.mem operation, and will likely crash the system. On the off
+	 * chance the situation is recoverable, dump the status of the RAS
+ * capability registers and bounce the active state of the memdev.
+ */
+ ue = cxl_handle_endpoint_ras(cxlds);
+ }
- /*
- * A frozen channel indicates an impending reset which is fatal to
- * CXL.mem operation, and will likely crash the system. On the off
- * chance the situation is recoverable dump the status of the RAS
- * capability registers and bounce the active state of the memdev.
- */
- ue = cxl_handle_endpoint_ras(cxlds);
switch (state) {
case pci_channel_io_normal:
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index ce0e2d82bb2b..4c7fd2d5cccb 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -730,12 +730,17 @@ static int match_auto_decoder(struct device *dev, void *data)
return 0;
}
-static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
- struct cxl_region *cxlr)
+static struct cxl_decoder *
+cxl_region_find_decoder(struct cxl_port *port,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region *cxlr)
{
struct device *dev;
int id = 0;
+ if (port == cxled_to_port(cxled))
+ return &cxled->cxld;
+
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
dev = device_find_child(&port->dev, &cxlr->params,
match_auto_decoder);
@@ -753,8 +758,31 @@ static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
return to_cxl_decoder(dev);
}
-static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
- struct cxl_region *cxlr)
+static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
+ struct cxl_decoder *cxld)
+{
+ struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter);
+ struct cxl_decoder *cxld_iter = rr->decoder;
+
+ /*
+	 * Allow out-of-order assembly of auto-discovered regions.
+	 * Per CXL Spec 3.1 8.2.4.20.12, software must commit decoders
+ * in HPA order. Confirm that the decoder with the lesser HPA
+ * starting address has the lesser id.
+ */
+ dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n",
+ dev_name(&cxld->dev), cxld->id,
+ dev_name(&cxld_iter->dev), cxld_iter->id);
+
+ if (cxld_iter->id > cxld->id)
+ return true;
+
+ return false;
+}
+
+static struct cxl_region_ref *
+alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_region_ref *cxl_rr, *iter;
@@ -764,16 +792,21 @@ static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
xa_for_each(&port->regions, index, iter) {
struct cxl_region_params *ip = &iter->region->params;
- if (!ip->res)
+ if (!ip->res || ip->res->start < p->res->start)
continue;
- if (ip->res->start > p->res->start) {
- dev_dbg(&cxlr->dev,
- "%s: HPA order violation %s:%pr vs %pr\n",
- dev_name(&port->dev),
- dev_name(&iter->region->dev), ip->res, p->res);
- return ERR_PTR(-EBUSY);
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
+ struct cxl_decoder *cxld;
+
+ cxld = cxl_region_find_decoder(port, cxled, cxlr);
+ if (auto_order_ok(port, iter->region, cxld))
+ continue;
}
+ dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n",
+ dev_name(&port->dev),
+ dev_name(&iter->region->dev), ip->res, p->res);
+
+ return ERR_PTR(-EBUSY);
}
cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
@@ -853,10 +886,7 @@ static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
{
struct cxl_decoder *cxld;
- if (port == cxled_to_port(cxled))
- cxld = &cxled->cxld;
- else
- cxld = cxl_region_find_decoder(port, cxlr);
+ cxld = cxl_region_find_decoder(port, cxled, cxlr);
if (!cxld) {
dev_dbg(&cxlr->dev, "%s: no decoder available\n",
dev_name(&port->dev));
@@ -953,7 +983,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
nr_targets_inc = true;
}
} else {
- cxl_rr = alloc_region_ref(port, cxlr);
+ cxl_rr = alloc_region_ref(port, cxlr, cxled);
if (IS_ERR(cxl_rr)) {
dev_dbg(&cxlr->dev,
"%s: failed to allocate region reference\n",
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 89445435303a..bdf117a33744 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -338,7 +338,7 @@ TRACE_EVENT(cxl_general_media,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
- memcpy(&__entry->hdr_uuid, &CXL_EVENT_GEN_MEDIA_UUID, sizeof(uuid_t));
+ __entry->hdr_uuid = CXL_EVENT_GEN_MEDIA_UUID;
/* General Media */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@@ -425,7 +425,7 @@ TRACE_EVENT(cxl_dram,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
- memcpy(&__entry->hdr_uuid, &CXL_EVENT_DRAM_UUID, sizeof(uuid_t));
+ __entry->hdr_uuid = CXL_EVENT_DRAM_UUID;
/* DRAM */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@@ -573,7 +573,7 @@ TRACE_EVENT(cxl_memory_module,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
- memcpy(&__entry->hdr_uuid, &CXL_EVENT_MEM_MODULE_UUID, sizeof(uuid_t));
+ __entry->hdr_uuid = CXL_EVENT_MEM_MODULE_UUID;
/* Memory Module Event */
__entry->event_type = rec->event_type;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index b6017c0c57b4..003feebab79b 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -880,6 +880,8 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
+
/*
* Unit test builds overrides this to __weak, find the 'strong' version
* of these symbols in tools/testing/cxl/.
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 5303d6942b88..20fb3b35e89e 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -395,13 +395,11 @@ enum cxl_devtype {
/**
* struct cxl_dpa_perf - DPA performance property entry
- * @list - list entry
* @dpa_range - range for DPA address
* @coord - QoS performance data (i.e. latency, bandwidth)
* @qos_class - QoS Class cookies
*/
struct cxl_dpa_perf {
- struct list_head list;
struct range dpa_range;
struct access_coordinate coord;
int qos_class;
@@ -471,8 +469,8 @@ struct cxl_dev_state {
* @security: security driver state info
* @fw: firmware upload / activation state
* @mbox_send: @dev specific transport for transmitting mailbox commands
- * @ram_perf_list: performance data entries matched to RAM
- * @pmem_perf_list: performance data entries matched to PMEM
+ * @ram_perf: performance data entry matched to RAM partition
+ * @pmem_perf: performance data entry matched to PMEM partition
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -494,8 +492,8 @@ struct cxl_memdev_state {
u64 next_volatile_bytes;
u64 next_persistent_bytes;
- struct list_head ram_perf_list;
- struct list_head pmem_perf_list;
+ struct cxl_dpa_perf ram_perf;
+ struct cxl_dpa_perf pmem_perf;
struct cxl_event_state event;
struct cxl_poison_state poison;
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index c5c9d8e0d88d..0c79d9ce877c 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -215,52 +215,6 @@ static ssize_t trigger_poison_list_store(struct device *dev,
}
static DEVICE_ATTR_WO(trigger_poison_list);
-static ssize_t ram_qos_class_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- struct cxl_dpa_perf *dpa_perf;
-
- if (!dev->driver)
- return -ENOENT;
-
- if (list_empty(&mds->ram_perf_list))
- return -ENOENT;
-
- dpa_perf = list_first_entry(&mds->ram_perf_list, struct cxl_dpa_perf,
- list);
-
- return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
-}
-
-static struct device_attribute dev_attr_ram_qos_class =
- __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
-
-static ssize_t pmem_qos_class_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
- struct cxl_dpa_perf *dpa_perf;
-
- if (!dev->driver)
- return -ENOENT;
-
- if (list_empty(&mds->pmem_perf_list))
- return -ENOENT;
-
- dpa_perf = list_first_entry(&mds->pmem_perf_list, struct cxl_dpa_perf,
- list);
-
- return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
-}
-
-static struct device_attribute dev_attr_pmem_qos_class =
- __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
-
static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
{
struct device *dev = kobj_to_dev(kobj);
@@ -272,21 +226,11 @@ static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
mds->poison.enabled_cmds))
return 0;
- if (a == &dev_attr_pmem_qos_class.attr)
- if (list_empty(&mds->pmem_perf_list))
- return 0;
-
- if (a == &dev_attr_ram_qos_class.attr)
- if (list_empty(&mds->ram_perf_list))
- return 0;
-
return a->mode;
}
static struct attribute *cxl_mem_attrs[] = {
&dev_attr_trigger_poison_list.attr,
- &dev_attr_ram_qos_class.attr,
- &dev_attr_pmem_qos_class.attr,
NULL
};
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 233e7c42c161..2ff361e756d6 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -974,61 +974,6 @@ static struct pci_driver cxl_pci_driver = {
},
};
-#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0)
-static void cxl_cper_event_call(enum cxl_event_type ev_type,
- struct cxl_cper_event_rec *rec)
-{
- struct cper_cxl_event_devid *device_id = &rec->hdr.device_id;
- struct pci_dev *pdev __free(pci_dev_put) = NULL;
- enum cxl_event_log_type log_type;
- struct cxl_dev_state *cxlds;
- unsigned int devfn;
- u32 hdr_flags;
-
- devfn = PCI_DEVFN(device_id->device_num, device_id->func_num);
- pdev = pci_get_domain_bus_and_slot(device_id->segment_num,
- device_id->bus_num, devfn);
- if (!pdev)
- return;
-
- guard(pci_dev)(pdev);
- if (pdev->driver != &cxl_pci_driver)
- return;
-
- cxlds = pci_get_drvdata(pdev);
- if (!cxlds)
- return;
-
- /* Fabricate a log type */
- hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags);
- log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags);
-
- cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type,
- &uuid_null, &rec->event);
-}
-
-static int __init cxl_pci_driver_init(void)
-{
- int rc;
-
- rc = cxl_cper_register_callback(cxl_cper_event_call);
- if (rc)
- return rc;
-
- rc = pci_register_driver(&cxl_pci_driver);
- if (rc)
- cxl_cper_unregister_callback(cxl_cper_event_call);
-
- return rc;
-}
-
-static void __exit cxl_pci_driver_exit(void)
-{
- pci_unregister_driver(&cxl_pci_driver);
- cxl_cper_unregister_callback(cxl_cper_event_call);
-}
-
-module_init(cxl_pci_driver_init);
-module_exit(cxl_pci_driver_exit);
+module_pci_driver(cxl_pci_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
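
With the CPER callback registration moved out of this file, the hand-rolled init/exit pair reduces to module_pci_driver(). Roughly, the macro expands to:

/* What module_pci_driver(cxl_pci_driver) stands for, approximately: */
static int __init cxl_pci_driver_init(void)
{
	return pci_register_driver(&cxl_pci_driver);
}
module_init(cxl_pci_driver_init);

static void __exit cxl_pci_driver_exit(void)
{
	pci_unregister_driver(&cxl_pci_driver);
}
module_exit(cxl_pci_driver_exit);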
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index f4b635526345..a0244f6bb44b 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -532,8 +532,7 @@ static int dax_fs_init(void)
int rc;
dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
- (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
- SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+ SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
init_once);
if (!dax_cache)
return -ENOMEM;
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index b38786f0ad79..b75fdaffad9a 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -346,6 +346,20 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
+static void dw_edma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+ /*
+ * In case of remote eDMA engine setup, the DW PCIe RP/EP internal
+ * configuration registers and application memory are normally accessed
+	 * over different buses. Ensure the LL data reaches memory before the
+	 * doorbell register is toggled, by issuing a dummy read from the
+	 * remote LL memory, in the hope that the MRd TLP will return only
+	 * after the last MWr TLP has completed.
+ */
+ if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ readl(chunk->ll_region.vaddr.io);
+}
+
static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
@@ -412,6 +426,9 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
SET_CH_32(dw, chan->dir, chan->id, llp.msb,
upper_32_bits(chunk->ll_region.paddr));
}
+
+ dw_edma_v0_sync_ll_data(chunk);
+
/* Doorbell */
SET_RW_32(dw, chan->dir, doorbell,
FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
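
The dummy read works because PCIe ordering forbids a read completion from passing earlier posted writes on the same path: once the MRd to the LL region completes, every MWr carrying descriptor data has landed. Condensed into a sketch; the function and register names are assumptions:

#include <linux/io.h>

static void ring_doorbell(void __iomem *ll, void __iomem *doorbell, u32 ch)
{
	/*
	 * For a remote engine the LL memory and the doorbell register sit
	 * on different buses; flush the posted descriptor writes by reading
	 * the LL region back before the doorbell write.
	 */
	(void)readl(ll);
	writel(ch, doorbell);
}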
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
index 00b735a0202a..10e8f0715114 100644
--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
@@ -65,18 +65,12 @@ static void dw_hdma_v0_core_off(struct dw_edma *dw)
static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
- u32 num_ch = 0;
- int id;
-
- for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
- if (GET_CH_32(dw, id, dir, ch_en) & BIT(0))
- num_ch++;
- }
-
- if (num_ch > HDMA_V0_MAX_NR_CH)
- num_ch = HDMA_V0_MAX_NR_CH;
-
- return (u16)num_ch;
+ /*
+	 * The HDMA IP has no way to report the number of hardware channels
+	 * available, so return the maximum and let the platform set the
+	 * right number of channels.
+ */
+ return HDMA_V0_MAX_NR_CH;
}
static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
@@ -228,6 +222,20 @@ static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
+static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
+{
+ /*
+ * In case of remote HDMA engine setup, the DW PCIe RP/EP internal
+ * configuration registers and application memory are normally accessed
+	 * over different buses. Ensure the LL data reaches memory before the
+	 * doorbell register is toggled, by issuing a dummy read from the
+	 * remote LL memory, in the hope that the MRd TLP will return only
+	 * after the last MWr TLP has completed.
+ */
+ if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ readl(chunk->ll_region.vaddr.io);
+}
+
static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
@@ -242,7 +250,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
/* Interrupt enable&unmask - done, abort */
tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
- HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_STOP_INT_EN;
+ HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+ if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
/* Channel control */
SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
@@ -256,6 +266,9 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
/* Set consumer cycle */
SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
+
+ dw_hdma_v0_sync_ll_data(chunk);
+
/* Doorbell */
SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
}
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-regs.h b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
index a974abdf8aaf..eab5fd7177e5 100644
--- a/drivers/dma/dw-edma/dw-hdma-v0-regs.h
+++ b/drivers/dma/dw-edma/dw-hdma-v0-regs.h
@@ -15,7 +15,7 @@
#define HDMA_V0_LOCAL_ABORT_INT_EN BIT(6)
#define HDMA_V0_REMOTE_ABORT_INT_EN BIT(5)
#define HDMA_V0_LOCAL_STOP_INT_EN BIT(4)
-#define HDMA_V0_REMOTEL_STOP_INT_EN BIT(3)
+#define HDMA_V0_REMOTE_STOP_INT_EN BIT(3)
#define HDMA_V0_ABORT_INT_MASK BIT(2)
#define HDMA_V0_STOP_INT_MASK BIT(0)
#define HDMA_V0_LINKLIST_EN BIT(0)
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index b53f46245c37..793f1a7ad5e3 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -503,7 +503,7 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
if (fsl_chan->is_multi_fifo) {
/* set mloff to support multiple fifo */
burst = cfg->direction == DMA_DEV_TO_MEM ?
- cfg->src_addr_width : cfg->dst_addr_width;
+ cfg->src_maxburst : cfg->dst_maxburst;
nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
/* enable DMLOE/SMLOE */
if (cfg->direction == DMA_MEM_TO_DEV) {
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index bb5221158a77..f5e216b157c7 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -30,8 +30,9 @@
#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
-#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
-#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
+#define EDMA_TCD_ITER_MASK GENMASK(14, 0)
+#define EDMA_TCD_CITER_CITER(x) ((x) & EDMA_TCD_ITER_MASK)
+#define EDMA_TCD_BITER_BITER(x) ((x) & EDMA_TCD_ITER_MASK)
#define EDMA_TCD_CSR_START BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 45cc419b1b4a..d36e28b9c767 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -10,6 +10,7 @@
*/
#include <dt-bindings/dma/fsl-edma.h>
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
@@ -582,7 +583,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
DMAENGINE_ALIGN_32_BYTES;
/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
- dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+ dma_set_max_seg_size(fsl_edma->dma_dev.dev,
+ FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));
fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index f405c77060ad..5005e138fc23 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -109,6 +109,7 @@
#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
#define FSL_QDMA_CMD_DSEN_OFFSET 19
#define FSL_QDMA_CMD_LWC_OFFSET 16
+#define FSL_QDMA_CMD_PF BIT(17)
/* Field definition for Descriptor status */
#define QDMA_CCDF_STATUS_RTE BIT(5)
@@ -160,6 +161,10 @@ struct fsl_qdma_format {
u8 __reserved1[2];
u8 cfg8b_w1;
} __packed;
+ struct {
+ __le32 __reserved2;
+ __le32 cmd;
+ } __packed;
__le64 data;
};
} __packed;
@@ -354,7 +359,6 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
dma_addr_t dst, dma_addr_t src, u32 len)
{
- u32 cmd;
struct fsl_qdma_format *sdf, *ddf;
struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
@@ -383,14 +387,11 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
/* This entry is the last entry. */
qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
- cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
- sdf->data = QDMA_SDDF_CMD(cmd);
-
- cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
- FSL_QDMA_CMD_RWTTYPE_OFFSET);
- cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
- ddf->data = QDMA_SDDF_CMD(cmd);
+ sdf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ FSL_QDMA_CMD_PF);
+
+ ddf->cmd = cpu_to_le32((FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+ (FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET));
}
/*
@@ -624,7 +625,7 @@ static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
- void *block,
+				 void __iomem *block,
int id)
{
bool duplicate;
@@ -1196,10 +1197,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->queue)
return -ENOMEM;
- ret = fsl_qdma_irq_init(pdev, fsl_qdma);
- if (ret)
- return ret;
-
fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
if (fsl_qdma->irq_base < 0)
return fsl_qdma->irq_base;
@@ -1238,16 +1235,19 @@ static int fsl_qdma_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsl_qdma);
- ret = dma_async_device_register(&fsl_qdma->dma_dev);
+ ret = fsl_qdma_reg_init(fsl_qdma);
if (ret) {
- dev_err(&pdev->dev,
- "Can't register NXP Layerscape qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
return ret;
}
- ret = fsl_qdma_reg_init(fsl_qdma);
+ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+ if (ret)
+ return ret;
+
+ ret = dma_async_device_register(&fsl_qdma->dma_dev);
if (ret) {
- dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
+ dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
return ret;
}
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 77f8885cf407..e5a94a93a3cc 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -345,7 +345,7 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
spin_lock(&evl->lock);
status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = status.tail;
- h = evl->head;
+ h = status.head;
size = evl->size;
while (h != t) {
diff --git a/drivers/dma/idxd/debugfs.c b/drivers/dma/idxd/debugfs.c
index 9cfbd9b14c4c..f3f25ee676f3 100644
--- a/drivers/dma/idxd/debugfs.c
+++ b/drivers/dma/idxd/debugfs.c
@@ -68,9 +68,9 @@ static int debugfs_evl_show(struct seq_file *s, void *d)
spin_lock(&evl->lock);
- h = evl->head;
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
+ h = evl_status.head;
evl_size = evl->size;
seq_printf(s, "Event Log head %u tail %u interrupt pending %u\n\n",
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 47de3f93ff1e..d0f5db6cf1ed 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -300,7 +300,6 @@ struct idxd_evl {
unsigned int log_size;
/* The number of entries in the event log. */
u16 size;
- u16 head;
unsigned long *bmap;
bool batch_fail[IDXD_MAX_BATCH_IDENT];
};
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 14df1f1347a8..4954adc6bb60 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -343,7 +343,9 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
static int idxd_init_evl(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
+ unsigned int evl_cache_size;
struct idxd_evl *evl;
+ const char *idxd_name;
if (idxd->hw.gen_cap.evl_support == 0)
return 0;
@@ -355,9 +357,16 @@ static int idxd_init_evl(struct idxd_device *idxd)
spin_lock_init(&evl->lock);
evl->size = IDXD_EVL_SIZE_MIN;
- idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
- sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
- 0, 0, NULL);
+ idxd_name = dev_name(idxd_confdev(idxd));
+ evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
+ /*
+	 * Since the completion record in evl_cache will be copied to
+	 * user space when handling a completion record page fault, the
+	 * cache must be created with usercopy support.
+ */
+ idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
+ 0, 0, 0, evl_cache_size,
+ NULL);
if (!idxd->evl_cache) {
kfree(evl);
return -ENOMEM;
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index c8a0aa874b11..348aa21389a9 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -367,9 +367,9 @@ static void process_evl_entries(struct idxd_device *idxd)
/* Clear interrupt pending bit */
iowrite32(evl_status.bits_upper32,
idxd->reg_base + IDXD_EVLSTATUS_OFFSET + sizeof(u32));
- h = evl->head;
evl_status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
t = evl_status.tail;
+ h = evl_status.head;
size = idxd->evl->size;
while (h != t) {
@@ -378,7 +378,6 @@ static void process_evl_entries(struct idxd_device *idxd)
h = (h + 1) % size;
}
- evl->head = h;
evl_status.head = h;
iowrite32(evl_status.bits_lower32, idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
spin_unlock(&evl->lock);
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 1ebfbe88e733..97ebc791a30b 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -747,8 +747,8 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
if (IS_ERR(xor_dev->clk))
return PTR_ERR(xor_dev->clk);
- ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
- mv_xor_v2_set_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, 1,
+ mv_xor_v2_set_msi_msg);
if (ret)
return ret;
@@ -851,7 +851,7 @@ free_hw_desq:
xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
- platform_msi_domain_free_irqs(&pdev->dev);
+ platform_device_msi_free_irqs_all(&pdev->dev);
return ret;
}
@@ -867,7 +867,7 @@ static void mv_xor_v2_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, xor_dev->irq, xor_dev);
- platform_msi_domain_free_irqs(&pdev->dev);
+ platform_device_msi_free_irqs_all(&pdev->dev);
tasklet_kill(&xor_dev->irq_tasklet);
}
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
index 1aa65e5de0f3..f79240734807 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -385,8 +385,6 @@ int pt_dmaengine_register(struct pt_device *pt)
chan->vc.desc_free = pt_do_cleanup;
vchan_init(&chan->vc, dma_dev);
- dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));
-
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_reg;
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index d63b93dc7047..202ac95227cb 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -696,7 +696,7 @@ static void hidma_free_msis(struct hidma_dev *dmadev)
devm_free_irq(dev, virq, &dmadev->lldev);
}
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
#endif
}
@@ -706,8 +706,8 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
#ifdef CONFIG_GENERIC_MSI_IRQ
int rc, i, virq;
- rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
- hidma_write_msi_msg);
+ rc = platform_device_msi_init_and_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+ hidma_write_msi_msg);
if (rc)
return rc;
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index 5152bd1b0daf..64eaca80d736 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -29,6 +29,8 @@ static u32 dpll_pin_xa_id;
WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
#define ASSERT_DPLL_NOT_REGISTERED(d) \
WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
+#define ASSERT_DPLL_PIN_REGISTERED(p) \
+ WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
struct dpll_device_registration {
struct list_head list;
@@ -129,9 +131,9 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
reg = dpll_pin_registration_find(ref, ops, priv);
if (WARN_ON(!reg))
return -EINVAL;
+ list_del(&reg->list);
+ kfree(reg);
if (refcount_dec_and_test(&ref->refcount)) {
- list_del(&reg->list);
- kfree(reg);
xa_erase(xa_pins, i);
WARN_ON(!list_empty(&ref->registration_list));
kfree(ref);
@@ -209,9 +211,9 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
reg = dpll_pin_registration_find(ref, ops, priv);
if (WARN_ON(!reg))
return;
+ list_del(&reg->list);
+ kfree(reg);
if (refcount_dec_and_test(&ref->refcount)) {
- list_del(&reg->list);
- kfree(reg);
xa_erase(xa_dplls, i);
WARN_ON(!list_empty(&ref->registration_list));
kfree(ref);
@@ -508,6 +510,26 @@ err_pin_prop:
return ERR_PTR(ret);
}
+static void dpll_netdev_pin_assign(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+ rtnl_lock();
+ rcu_assign_pointer(dev->dpll_pin, dpll_pin);
+ rtnl_unlock();
+}
+
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin)
+{
+ WARN_ON(!dpll_pin);
+ dpll_netdev_pin_assign(dev, dpll_pin);
+}
+EXPORT_SYMBOL(dpll_netdev_pin_set);
+
+void dpll_netdev_pin_clear(struct net_device *dev)
+{
+ dpll_netdev_pin_assign(dev, NULL);
+}
+EXPORT_SYMBOL(dpll_netdev_pin_clear);
+
/**
* dpll_pin_get - find existing or create new dpll pin
* @clock_id: clock_id of creator
@@ -560,11 +582,11 @@ void dpll_pin_put(struct dpll_pin *pin)
{
mutex_lock(&dpll_lock);
if (refcount_dec_and_test(&pin->refcount)) {
+ xa_erase(&dpll_pin_xa, pin->id);
xa_destroy(&pin->dpll_refs);
xa_destroy(&pin->parent_refs);
- xa_erase(&dpll_pin_xa, pin->id);
dpll_pin_prop_free(&pin->prop);
- kfree(pin);
+ kfree_rcu(pin, rcu);
}
mutex_unlock(&dpll_lock);
}
@@ -631,6 +653,7 @@ static void
__dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
const struct dpll_pin_ops *ops, void *priv)
{
+ ASSERT_DPLL_PIN_REGISTERED(pin);
dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
if (xa_empty(&pin->dpll_refs))
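
The reordering above means every unregister call frees its own registration record; only the shared reference container waits for the final refcount_dec_and_test(). Before, a registration leaked whenever the deleted entry was not also the last reference. The corrected shape, simplified:

#include <linux/bug.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct ref {				/* shared, refcounted container */
	refcount_t refcount;
	struct list_head registrations;
};

struct reg {				/* one per registering caller */
	struct list_head list;
};

static void ref_del(struct ref *ref, struct reg *reg)
{
	/* Always release this caller's registration... */
	list_del(&reg->list);
	kfree(reg);
	/* ...but the container goes only with the last reference. */
	if (refcount_dec_and_test(&ref->refcount)) {
		WARN_ON(!list_empty(&ref->registrations));
		kfree(ref);
	}
}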
diff --git a/drivers/dpll/dpll_core.h b/drivers/dpll/dpll_core.h
index 717f715015c7..2b6d8ef1cdf3 100644
--- a/drivers/dpll/dpll_core.h
+++ b/drivers/dpll/dpll_core.h
@@ -47,6 +47,7 @@ struct dpll_device {
* @prop: pin properties copied from the registerer
* @rclk_dev_name: holds name of device when pin can recover clock from it
* @refcount: refcount
+ * @rcu: rcu_head for kfree_rcu()
**/
struct dpll_pin {
u32 id;
@@ -57,6 +58,7 @@ struct dpll_pin {
struct xarray parent_refs;
struct dpll_pin_properties prop;
refcount_t refcount;
+ struct rcu_head rcu;
};
/**
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 314bb3775465..98e6ad8528d3 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/netdevice.h>
#include <net/genetlink.h>
#include "dpll_core.h"
#include "dpll_netlink.h"
@@ -48,18 +49,6 @@ dpll_msg_add_dev_parent_handle(struct sk_buff *msg, u32 id)
}
/**
- * dpll_msg_pin_handle_size - get size of pin handle attribute for given pin
- * @pin: pin pointer
- *
- * Return: byte size of pin handle attribute for given pin.
- */
-size_t dpll_msg_pin_handle_size(struct dpll_pin *pin)
-{
- return pin ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
-}
-EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
-
-/**
* dpll_msg_add_pin_handle - attach pin handle attribute to a given message
* @msg: pointer to sk_buff message to attach a pin handle
* @pin: pin pointer
@@ -68,7 +57,7 @@ EXPORT_SYMBOL_GPL(dpll_msg_pin_handle_size);
* * 0 - success
* * -EMSGSIZE - no space in message to attach pin handle
*/
-int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
+static int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
{
if (!pin)
return 0;
@@ -76,7 +65,28 @@ int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
return -EMSGSIZE;
return 0;
}
-EXPORT_SYMBOL_GPL(dpll_msg_add_pin_handle);
+
+static struct dpll_pin *dpll_netdev_pin(const struct net_device *dev)
+{
+ return rcu_dereference_rtnl(dev->dpll_pin);
+}
+
+/**
+ * dpll_netdev_pin_handle_size - get size of pin handle attribute of a netdev
+ * @dev: netdev from which to get the pin
+ *
+ * Return: byte size of pin handle attribute, or 0 if @dev has no pin.
+ */
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
+{
+ return dpll_netdev_pin(dev) ? nla_total_size(4) : 0; /* DPLL_A_PIN_ID */
+}
+
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+ const struct net_device *dev)
+{
+ return dpll_msg_add_pin_handle(msg, dpll_netdev_pin(dev));
+}
static int
dpll_msg_add_mode(struct sk_buff *msg, struct dpll_device *dpll,
@@ -121,14 +131,21 @@ dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ enum dpll_lock_status_error status_error = 0;
enum dpll_lock_status status;
int ret;
- ret = ops->lock_status_get(dpll, dpll_priv(dpll), &status, extack);
+ ret = ops->lock_status_get(dpll, dpll_priv(dpll), &status,
+ &status_error, extack);
if (ret)
return ret;
if (nla_put_u32(msg, DPLL_A_LOCK_STATUS, status))
return -EMSGSIZE;
+ if (status_error &&
+ (status == DPLL_LOCK_STATUS_UNLOCKED ||
+ status == DPLL_LOCK_STATUS_HOLDOVER) &&
+ nla_put_u32(msg, DPLL_A_LOCK_STATUS_ERROR, status_error))
+ return -EMSGSIZE;
return 0;
}
@@ -1199,6 +1216,7 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
unsigned long i;
int ret = 0;
+ mutex_lock(&dpll_lock);
xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
ctx->idx) {
if (!dpll_pin_available(pin))
@@ -1218,6 +1236,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
genlmsg_end(skb, hdr);
}
+ mutex_unlock(&dpll_lock);
+
if (ret == -EMSGSIZE) {
ctx->idx = i;
return skb->len;
@@ -1373,6 +1393,7 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
unsigned long i;
int ret = 0;
+ mutex_lock(&dpll_lock);
xa_for_each_marked_start(&dpll_device_xa, i, dpll, DPLL_REGISTERED,
ctx->idx) {
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
@@ -1389,6 +1410,8 @@ int dpll_nl_device_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
}
genlmsg_end(skb, hdr);
}
+ mutex_unlock(&dpll_lock);
+
if (ret == -EMSGSIZE) {
ctx->idx = i;
return skb->len;
@@ -1439,20 +1462,6 @@ dpll_unlock_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
mutex_unlock(&dpll_lock);
}
-int dpll_lock_dumpit(struct netlink_callback *cb)
-{
- mutex_lock(&dpll_lock);
-
- return 0;
-}
-
-int dpll_unlock_dumpit(struct netlink_callback *cb)
-{
- mutex_unlock(&dpll_lock);
-
- return 0;
-}
-
int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info)
{
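
Removing the .start/.done helpers stops dpll_lock from being held across an entire multi-part netlink dump, which can sleep between user reads; each dumpit invocation now scopes the lock itself. Sketched below with the dpll details stubbed out (my_lock stands in for dpll_lock):

#include <linux/mutex.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>

static DEFINE_MUTEX(my_lock);

static int my_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	int ret = 0;

	mutex_lock(&my_lock);
	/* ... fill skb from the registry, resuming from cb state ... */
	mutex_unlock(&my_lock);

	return ret ? ret : skb->len;	/* >0: call again for the next chunk */
}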
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index eaee5be7aa64..1e95f5397cfc 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -95,9 +95,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
},
{
.cmd = DPLL_CMD_DEVICE_GET,
- .start = dpll_lock_dumpit,
.dumpit = dpll_nl_device_get_dumpit,
- .done = dpll_unlock_dumpit,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
},
{
@@ -129,9 +127,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
},
{
.cmd = DPLL_CMD_PIN_GET,
- .start = dpll_lock_dumpit,
.dumpit = dpll_nl_pin_get_dumpit,
- .done = dpll_unlock_dumpit,
.policy = dpll_pin_get_dump_nl_policy,
.maxattr = DPLL_A_PIN_ID,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP,
diff --git a/drivers/dpll/dpll_nl.h b/drivers/dpll/dpll_nl.h
index 92d4c9c4f788..f491262bee4f 100644
--- a/drivers/dpll/dpll_nl.h
+++ b/drivers/dpll/dpll_nl.h
@@ -30,8 +30,6 @@ dpll_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
void
dpll_pin_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info);
-int dpll_lock_dumpit(struct netlink_callback *cb);
-int dpll_unlock_dumpit(struct netlink_callback *cb);
int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info);
int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 5a7f3fabee22..16c8de5050e5 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -78,6 +78,7 @@ config EDAC_GHES
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64)"
depends on AMD_NB && EDAC_DECODE_MCE
+ imply AMD_ATL
help
Support for error detection and correction of DRAM ECC errors on
the AMD64 families (>= K8) of memory controllers.
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 537b9987a431..1f3520d76861 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/ras.h>
#include "amd64_edac.h"
#include <asm/amd_nb.h>
@@ -1051,281 +1052,6 @@ static int fixup_node_id(int node_id, struct mce *m)
return nid - gpu_node_map.base_node_id + 1;
}
-/* Protect the PCI config register pairs used for DF indirect access. */
-static DEFINE_MUTEX(df_indirect_mutex);
-
-/*
- * Data Fabric Indirect Access uses FICAA/FICAD.
- *
- * Fabric Indirect Configuration Access Address (FICAA): Constructed based
- * on the device's Instance Id and the PCI function and register offset of
- * the desired register.
- *
- * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
- * and FICAD HI registers but so far we only need the LO register.
- *
- * Use Instance Id 0xFF to indicate a broadcast read.
- */
-#define DF_BROADCAST 0xFF
-static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
-{
- struct pci_dev *F4;
- u32 ficaa;
- int err = -ENODEV;
-
- if (node >= amd_nb_num())
- goto out;
-
- F4 = node_to_amd_nb(node)->link;
- if (!F4)
- goto out;
-
- ficaa = (instance_id == DF_BROADCAST) ? 0 : 1;
- ficaa |= reg & 0x3FC;
- ficaa |= (func & 0x7) << 11;
- ficaa |= instance_id << 16;
-
- mutex_lock(&df_indirect_mutex);
-
- err = pci_write_config_dword(F4, 0x5C, ficaa);
- if (err) {
- pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
- goto out_unlock;
- }
-
- err = pci_read_config_dword(F4, 0x98, lo);
- if (err)
- pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
-
-out_unlock:
- mutex_unlock(&df_indirect_mutex);
-
-out:
- return err;
-}
-
-static int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
-{
- return __df_indirect_read(node, func, reg, instance_id, lo);
-}
-
-static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
-{
- return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
-}
-
-struct addr_ctx {
- u64 ret_addr;
- u32 tmp;
- u16 nid;
- u8 inst_id;
-};
-
-static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
-{
- u64 dram_base_addr, dram_limit_addr, dram_hole_base;
-
- u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
- u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
- u8 intlv_addr_sel, intlv_addr_bit;
- u8 num_intlv_bits, hashed_bit;
- u8 lgcy_mmio_hole_en, base = 0;
- u8 cs_mask, cs_id = 0;
- bool hash_enabled = false;
-
- struct addr_ctx ctx;
-
- memset(&ctx, 0, sizeof(ctx));
-
- /* Start from the normalized address */
- ctx.ret_addr = norm_addr;
-
- ctx.nid = nid;
- ctx.inst_id = umc;
-
- /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
- if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
- goto out_err;
-
- /* Remove HiAddrOffset from normalized address, if enabled: */
- if (ctx.tmp & BIT(0)) {
- u64 hi_addr_offset = (ctx.tmp & GENMASK_ULL(31, 20)) << 8;
-
- if (norm_addr >= hi_addr_offset) {
- ctx.ret_addr -= hi_addr_offset;
- base = 1;
- }
- }
-
- /* Read D18F0x110 (DramBaseAddress). */
- if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp))
- goto out_err;
-
- /* Check if address range is valid. */
- if (!(ctx.tmp & BIT(0))) {
- pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
- __func__, ctx.tmp);
- goto out_err;
- }
-
- lgcy_mmio_hole_en = ctx.tmp & BIT(1);
- intlv_num_chan = (ctx.tmp >> 4) & 0xF;
- intlv_addr_sel = (ctx.tmp >> 8) & 0x7;
- dram_base_addr = (ctx.tmp & GENMASK_ULL(31, 12)) << 16;
-
- /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
- if (intlv_addr_sel > 3) {
- pr_err("%s: Invalid interleave address select %d.\n",
- __func__, intlv_addr_sel);
- goto out_err;
- }
-
- /* Read D18F0x114 (DramLimitAddress). */
- if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp))
- goto out_err;
-
- intlv_num_sockets = (ctx.tmp >> 8) & 0x1;
- intlv_num_dies = (ctx.tmp >> 10) & 0x3;
- dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
-
- intlv_addr_bit = intlv_addr_sel + 8;
-
- /* Re-use intlv_num_chan by setting it equal to log2(#channels) */
- switch (intlv_num_chan) {
- case 0: intlv_num_chan = 0; break;
- case 1: intlv_num_chan = 1; break;
- case 3: intlv_num_chan = 2; break;
- case 5: intlv_num_chan = 3; break;
- case 7: intlv_num_chan = 4; break;
-
- case 8: intlv_num_chan = 1;
- hash_enabled = true;
- break;
- default:
- pr_err("%s: Invalid number of interleaved channels %d.\n",
- __func__, intlv_num_chan);
- goto out_err;
- }
-
- num_intlv_bits = intlv_num_chan;
-
- if (intlv_num_dies > 2) {
- pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
- __func__, intlv_num_dies);
- goto out_err;
- }
-
- num_intlv_bits += intlv_num_dies;
-
- /* Add a bit if sockets are interleaved. */
- num_intlv_bits += intlv_num_sockets;
-
- /* Assert num_intlv_bits <= 4 */
- if (num_intlv_bits > 4) {
- pr_err("%s: Invalid interleave bits %d.\n",
- __func__, num_intlv_bits);
- goto out_err;
- }
-
- if (num_intlv_bits > 0) {
- u64 temp_addr_x, temp_addr_i, temp_addr_y;
- u8 die_id_bit, sock_id_bit, cs_fabric_id;
-
- /*
- * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
- * This is the fabric id for this coherent slave. Use
- * umc/channel# as instance id of the coherent slave
- * for FICAA.
- */
- if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp))
- goto out_err;
-
- cs_fabric_id = (ctx.tmp >> 8) & 0xFF;
- die_id_bit = 0;
-
- /* If interleaved over more than 1 channel: */
- if (intlv_num_chan) {
- die_id_bit = intlv_num_chan;
- cs_mask = (1 << die_id_bit) - 1;
- cs_id = cs_fabric_id & cs_mask;
- }
-
- sock_id_bit = die_id_bit;
-
- /* Read D18F1x208 (SystemFabricIdMask). */
- if (intlv_num_dies || intlv_num_sockets)
- if (df_indirect_read_broadcast(nid, 1, 0x208, &ctx.tmp))
- goto out_err;
-
- /* If interleaved over more than 1 die. */
- if (intlv_num_dies) {
- sock_id_bit = die_id_bit + intlv_num_dies;
- die_id_shift = (ctx.tmp >> 24) & 0xF;
- die_id_mask = (ctx.tmp >> 8) & 0xFF;
-
- cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
- }
-
- /* If interleaved over more than 1 socket. */
- if (intlv_num_sockets) {
- socket_id_shift = (ctx.tmp >> 28) & 0xF;
- socket_id_mask = (ctx.tmp >> 16) & 0xFF;
-
- cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
- }
-
- /*
- * The pre-interleaved address consists of XXXXXXIIIYYYYY
- * where III is the ID for this CS, and XXXXXXYYYYY are the
- * address bits from the post-interleaved address.
- * "num_intlv_bits" has been calculated to tell us how many "I"
- * bits there are. "intlv_addr_bit" tells us how many "Y" bits
- * there are (where "I" starts).
- */
- temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0);
- temp_addr_i = (cs_id << intlv_addr_bit);
- temp_addr_x = (ctx.ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
- ctx.ret_addr = temp_addr_x | temp_addr_i | temp_addr_y;
- }
-
- /* Add dram base address */
- ctx.ret_addr += dram_base_addr;
-
- /* If legacy MMIO hole enabled */
- if (lgcy_mmio_hole_en) {
- if (df_indirect_read_broadcast(nid, 0, 0x104, &ctx.tmp))
- goto out_err;
-
- dram_hole_base = ctx.tmp & GENMASK(31, 24);
- if (ctx.ret_addr >= dram_hole_base)
- ctx.ret_addr += (BIT_ULL(32) - dram_hole_base);
- }
-
- if (hash_enabled) {
- /* Save some parentheses and grab ls-bit at the end. */
- hashed_bit = (ctx.ret_addr >> 12) ^
- (ctx.ret_addr >> 18) ^
- (ctx.ret_addr >> 21) ^
- (ctx.ret_addr >> 30) ^
- cs_id;
-
- hashed_bit &= BIT(0);
-
- if (hashed_bit != ((ctx.ret_addr >> intlv_addr_bit) & BIT(0)))
- ctx.ret_addr ^= BIT(intlv_addr_bit);
- }
-
- /* Is calculated system address is above DRAM limit address? */
- if (ctx.ret_addr > dram_limit_addr)
- goto out_err;
-
- *sys_addr = ctx.ret_addr;
- return 0;
-
-out_err:
- return -EINVAL;
-}
-
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
@@ -1915,7 +1641,7 @@ ddr3:
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
- u16 mce_nid = topology_die_id(m->extcpu);
+ u16 mce_nid = topology_amd_node_id(m->extcpu);
struct mem_ctl_info *mci;
u8 start_bit = 1;
u8 end_bit = 47;
@@ -3073,9 +2799,10 @@ static void decode_umc_error(int node_id, struct mce *m)
{
u8 ecc_type = (m->status >> 45) & 0x3;
struct mem_ctl_info *mci;
+ unsigned long sys_addr;
struct amd64_pvt *pvt;
+ struct atl_err a_err;
struct err_info err;
- u64 sys_addr;
node_id = fixup_node_id(node_id, m);
@@ -3106,7 +2833,12 @@ static void decode_umc_error(int node_id, struct mce *m)
pvt->ops->get_err_info(m, &err);
- if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+ a_err.addr = m->addr;
+ a_err.ipid = m->ipid;
+ a_err.cpu = m->extcpu;
+
+ sys_addr = amd_convert_umc_mca_addr_to_sys_addr(&a_err);
+ if (IS_ERR_VALUE(sys_addr)) {
err.err_code = ERR_NORM_ADDR;
goto log_error;
}
@@ -3446,7 +3178,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
int cpu;
for_each_online_cpu(cpu)
- if (topology_die_id(cpu) == nid)
+ if (topology_amd_node_id(cpu) == nid)
cpumask_set_cpu(cpu, mask);
}
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 2b83d6de9352..3fd22a1eb1a9 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -951,6 +951,7 @@ static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 2b0ecdeba5cd..cdd8480e7368 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -238,6 +238,7 @@ static struct work_struct ecclog_work;
#define DID_ADL_N_SKU9 0x4678
#define DID_ADL_N_SKU10 0x4679
#define DID_ADL_N_SKU11 0x467c
+#define DID_ADL_N_SKU12 0x4632
/* Compute die IDs for Raptor Lake-P with IBECC */
#define DID_RPL_P_SKU1 0xa706
@@ -583,6 +584,7 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU9), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU10), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU11), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU12), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU1), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU2), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU3), (kernel_ulong_t)&rpl_p_cfg },
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ec8b6c9fedfd..8130c3dc64da 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -584,7 +584,7 @@ static void decode_mc3_mce(struct mce *m)
static void decode_mc4_mce(struct mce *m)
{
unsigned int fam = x86_family(m->cpuid);
- int node_id = topology_die_id(m->extcpu);
+ int node_id = topology_amd_node_id(m->extcpu);
u16 ec = EC(m->status);
u8 xec = XEC(m->status, 0x1f);
u8 offset = 0;
@@ -746,7 +746,7 @@ static void decode_smca_error(struct mce *m)
if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) &&
xec == 0 && decode_dram_ecc)
- decode_dram_ecc(topology_die_id(m->extcpu), m);
+ decode_dram_ecc(topology_amd_node_id(m->extcpu), m);
}
static inline void amd_decode_err_code(u16 ec)
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 709babce43ba..5527055b0964 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -1324,11 +1324,9 @@ static int mc_probe(struct platform_device *pdev)
struct synps_edac_priv *priv;
struct mem_ctl_info *mci;
void __iomem *baseaddr;
- struct resource *res;
int rc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- baseaddr = devm_ioremap_resource(&pdev->dev, res);
+ baseaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(baseaddr))
return PTR_ERR(baseaddr);
diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c
index 62caf454b567..1688a5050f63 100644
--- a/drivers/edac/versal_edac.c
+++ b/drivers/edac/versal_edac.c
@@ -42,8 +42,11 @@
#define ECCW0_FLIP_CTRL 0x109C
#define ECCW0_FLIP0_OFFSET 0x10A0
+#define ECCW0_FLIP0_BITS 31
+#define ECCW0_FLIP1_OFFSET 0x10A4
#define ECCW1_FLIP_CTRL 0x10AC
#define ECCW1_FLIP0_OFFSET 0x10B0
+#define ECCW1_FLIP1_OFFSET 0x10B4
#define ECCR0_CERR_STAT_OFFSET 0x10BC
#define ECCR0_CE_ADDR_LO_OFFSET 0x10C0
#define ECCR0_CE_ADDR_HI_OFFSET 0x10C4
@@ -116,9 +119,6 @@
#define XDDR_BUS_WIDTH_32 1
#define XDDR_BUS_WIDTH_16 2
-#define ECC_CEPOISON_MASK 0x1
-#define ECC_UEPOISON_MASK 0x3
-
#define XDDR_MAX_ROW_CNT 18
#define XDDR_MAX_COL_CNT 10
#define XDDR_MAX_RANK_CNT 2
@@ -133,6 +133,7 @@
* https://docs.xilinx.com/r/en-US/am012-versal-register-reference/PCSR_LOCK-XRAM_SLCR-Register
*/
#define PCSR_UNLOCK_VAL 0xF9E8D7C6
+#define PCSR_LOCK_VAL 1
#define XDDR_ERR_TYPE_CE 0
#define XDDR_ERR_TYPE_UE 1
@@ -142,6 +143,7 @@
#define XILINX_DRAM_SIZE_12G 3
#define XILINX_DRAM_SIZE_16G 4
#define XILINX_DRAM_SIZE_32G 5
+#define NUM_UE_BITPOS 2
/**
* struct ecc_error_info - ECC error log information.
@@ -479,7 +481,7 @@ static void err_callback(const u32 *payload, void *data)
writel(regval, priv->ddrmc_baseaddr + XDDR_ISR_OFFSET);
/* Lock the PCSR registers */
- writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
edac_dbg(3, "Total error count CE %d UE %d\n",
priv->ce_cnt, priv->ue_cnt);
}
@@ -650,7 +652,7 @@ static void enable_intr(struct edac_priv *priv)
writel(XDDR_IRQ_UE_MASK,
priv->ddrmc_baseaddr + XDDR_IRQ1_EN_OFFSET);
/* Lock the PCSR registers */
- writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
}
static void disable_intr(struct edac_priv *priv)
@@ -663,7 +665,7 @@ static void disable_intr(struct edac_priv *priv)
priv->ddrmc_baseaddr + XDDR_IRQ_DIS_OFFSET);
/* Lock the PCSR registers */
- writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
}
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
@@ -734,38 +736,63 @@ static void poison_setup(struct edac_priv *priv)
writel(regval, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC15_OFFSET);
}
-static ssize_t xddr_inject_data_poison_store(struct mem_ctl_info *mci,
- const char __user *data)
+static void xddr_inject_data_ce_store(struct mem_ctl_info *mci, u8 ce_bitpos)
{
+ u32 ecc0_flip0, ecc1_flip0, ecc0_flip1, ecc1_flip1;
struct edac_priv *priv = mci->pvt_info;
- writel(0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
- writel(0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
-
- if (strncmp(data, "CE", 2) == 0) {
- writel(ECC_CEPOISON_MASK, priv->ddrmc_baseaddr +
- ECCW0_FLIP0_OFFSET);
- writel(ECC_CEPOISON_MASK, priv->ddrmc_baseaddr +
- ECCW1_FLIP0_OFFSET);
+ if (ce_bitpos < ECCW0_FLIP0_BITS) {
+ ecc0_flip0 = BIT(ce_bitpos);
+ ecc1_flip0 = BIT(ce_bitpos);
+ ecc0_flip1 = 0;
+ ecc1_flip1 = 0;
} else {
- writel(ECC_UEPOISON_MASK, priv->ddrmc_baseaddr +
- ECCW0_FLIP0_OFFSET);
- writel(ECC_UEPOISON_MASK, priv->ddrmc_baseaddr +
- ECCW1_FLIP0_OFFSET);
+ ce_bitpos = ce_bitpos - ECCW0_FLIP0_BITS;
+ ecc0_flip1 = BIT(ce_bitpos);
+ ecc1_flip1 = BIT(ce_bitpos);
+ ecc0_flip0 = 0;
+ ecc1_flip0 = 0;
}
- /* Lock the PCSR registers */
- writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
-
- return 0;
+ writel(ecc0_flip0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
+ writel(ecc1_flip0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
+ writel(ecc0_flip1, priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET);
+ writel(ecc1_flip1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET);
}
-static ssize_t inject_data_poison_store(struct file *file, const char __user *data,
- size_t count, loff_t *ppos)
+/*
+ * To inject a correctable error, the following steps are needed:
+ *
+ * - Write the correctable error bit position value:
+ * echo <bit_pos val> > /sys/kernel/debug/edac/<controller instance>/inject_ce
+ *
+ * poison_setup() derives the row, column, bank, group and rank and
+ * writes to the ADEC registers based on the address given by the user.
+ *
+ * ADEC12 and ADEC13 are mask registers; write 0 to them so that the
+ * default configuration is in place and no address bits are masked.
+ *
+ * The row, column, bank, group and rank values are written to the
+ * corresponding match bits in ADEC14 and ADEC15 so that errors are
+ * generated at that particular address.
+ *
+ * xddr_inject_data_ce_store() updates the ECC FLIP registers with the
+ * bit to be corrupted, based on the bit position given by the user.
+ *
+ * The error is injected when the poisoned address is subsequently read.
+ */
+static ssize_t inject_data_ce_store(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
{
struct device *dev = file->private_data;
struct mem_ctl_info *mci = to_mci(dev);
struct edac_priv *priv = mci->pvt_info;
+ u8 ce_bitpos;
+ int ret;
+
+ ret = kstrtou8_from_user(data, count, 0, &ce_bitpos);
+ if (ret)
+ return ret;
/* Unlock the PCSR registers */
writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
@@ -773,17 +800,110 @@ static ssize_t inject_data_poison_store(struct file *file, const char __user *da
poison_setup(priv);
+ xddr_inject_data_ce_store(mci, ce_bitpos);
+ ret = count;
+
/* Lock the PCSR registers */
- writel(1, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+
+ return ret;
+}
+
+static const struct file_operations xddr_inject_ce_fops = {
+ .open = simple_open,
+ .write = inject_data_ce_store,
+ .llseek = generic_file_llseek,
+};
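
For reference, a small user-space sketch of the bit-position arithmetic that
xddr_inject_data_ce_store() performs above; the chosen bit position is
illustrative, and ECCW0_FLIP0_BITS is copied from the driver:

	/* Sketch of the CE bit-position to FLIP-register mapping. */
	#include <stdint.h>
	#include <stdio.h>

	#define ECCW0_FLIP0_BITS 31

	int main(void)
	{
		uint8_t ce_bitpos = 40;	/* example value written by the user */
		uint32_t flip0 = 0, flip1 = 0;

		if (ce_bitpos < ECCW0_FLIP0_BITS)
			flip0 = 1u << ce_bitpos;
		else
			flip1 = 1u << (ce_bitpos - ECCW0_FLIP0_BITS);

		/* bit 40 lands in FLIP1 as bit 9: flip0=0, flip1=0x200 */
		printf("flip0=%#x flip1=%#x\n", flip0, flip1);
		return 0;
	}
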
+
+static void xddr_inject_data_ue_store(struct mem_ctl_info *mci, u32 val0, u32 val1)
+{
+ struct edac_priv *priv = mci->pvt_info;
+
+	/* val0 holds the FLIP0 (bits 0-30) mask, val1 the FLIP1 mask */
+	writel(val0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
+	writel(val0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
+	writel(val1, priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET);
+	writel(val1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET);
+}
+
+/*
+ * To inject an uncorrectable error, write the two bit positions to be
+ * flipped, separated by a comma:
+ *	echo <bit_pos1>,<bit_pos2> > /sys/kernel/debug/edac/<controller instance>/inject_ue
+ *
+ * poison_setup() derives the row, column, bank, group and rank and
+ * writes to the ADEC registers based on the address given by the user.
+ *
+ * ADEC12 and ADEC13 are mask registers; write 0 to them so that no
+ * address bits are masked. The row, column, bank, group and rank values
+ * are written to the corresponding match bits in ADEC14 and ADEC15 so
+ * that errors are generated at that particular address.
+ *
+ * xddr_inject_data_ue_store() updates the ECC FLIP registers with the
+ * bits to be corrupted, based on the two bit positions given by the
+ * user; for an uncorrectable error, two bits are flipped.
+ *
+ * The error is injected when the poisoned address is subsequently read.
+ */
+static ssize_t inject_data_ue_store(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = file->private_data;
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct edac_priv *priv = mci->pvt_info;
+ char buf[6], *pbuf, *token[2];
+ u32 val0 = 0, val1 = 0;
+ u8 len, ue0, ue1;
+ int i, ret;
+
+	len = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, data, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ pbuf = &buf[0];
+	for (i = 0; i < NUM_UE_BITPOS; i++)
+		token[i] = strsep(&pbuf, ",");
+
+	/* Two comma-separated bit positions are required */
+	if (!token[0] || !token[1])
+		return -EINVAL;
+
+	ret = kstrtou8(token[0], 0, &ue0);
+ if (ret)
+ return ret;
+
+ ret = kstrtou8(token[1], 0, &ue1);
+ if (ret)
+ return ret;
+
+ if (ue0 < ECCW0_FLIP0_BITS) {
+ val0 = BIT(ue0);
+ } else {
+ ue0 = ue0 - ECCW0_FLIP0_BITS;
+ val1 = BIT(ue0);
+ }
+
+ if (ue1 < ECCW0_FLIP0_BITS) {
+ val0 |= BIT(ue1);
+ } else {
+ ue1 = ue1 - ECCW0_FLIP0_BITS;
+ val1 |= BIT(ue1);
+ }
- xddr_inject_data_poison_store(mci, data);
+ /* Unlock the PCSR registers */
+ writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_UNLOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+ poison_setup(priv);
+
+ xddr_inject_data_ue_store(mci, val0, val1);
+
+ /* Lock the PCSR registers */
+ writel(PCSR_LOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
return count;
}
-static const struct file_operations xddr_inject_enable_fops = {
+static const struct file_operations xddr_inject_ue_fops = {
.open = simple_open,
- .write = inject_data_poison_store,
+ .write = inject_data_ue_store,
.llseek = generic_file_llseek,
};
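
As a worked example of the parsing and bit mapping above (illustrative
input): writing "2,40" to inject_ue gives ue0 = 2, which is below
ECCW0_FLIP0_BITS, so val0 = BIT(2) = 0x4; ue1 = 40 maps to 40 - 31 = 9 in
the FLIP1 registers, so val1 = BIT(9) = 0x200. xddr_inject_data_ue_store()
then arms bits 2 and 40 for flipping in both ECC words, producing a
double-bit (uncorrectable) error on the next read of the poisoned address.
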
@@ -795,8 +915,17 @@ static void create_debugfs_attributes(struct mem_ctl_info *mci)
if (!priv->debugfs)
return;
- edac_debugfs_create_file("inject_error", 0200, priv->debugfs,
- &mci->dev, &xddr_inject_enable_fops);
+ if (!edac_debugfs_create_file("inject_ce", 0200, priv->debugfs,
+ &mci->dev, &xddr_inject_ce_fops)) {
+ debugfs_remove_recursive(priv->debugfs);
+ return;
+ }
+
+ if (!edac_debugfs_create_file("inject_ue", 0200, priv->debugfs,
+ &mci->dev, &xddr_inject_ue_fops)) {
+ debugfs_remove_recursive(priv->debugfs);
+ return;
+ }
debugfs_create_x64("address", 0600, priv->debugfs,
&priv->err_inject_addr);
mci->debugfs = priv->debugfs;
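
A user-space sketch of exercising these files; the "<controller instance>"
directory component is a placeholder exactly as in the comments above, and
the target address and bit positions are illustrative:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void write_str(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return;
		write(fd, val, strlen(val));
		close(fd);
	}

	int main(void)
	{
		const char *dir = "/sys/kernel/debug/edac/<controller instance>";
		char path[256];

		/* Target address consumed by poison_setup() */
		snprintf(path, sizeof(path), "%s/address", dir);
		write_str(path, "0x10000000");

		/* Flip bit 12: correctable error on next read of the address */
		snprintf(path, sizeof(path), "%s/inject_ce", dir);
		write_str(path, "12");

		/* Flip bits 2 and 40: uncorrectable error */
		snprintf(path, sizeof(path), "%s/inject_ue", dir);
		write_str(path, "2,40");
		return 0;
	}
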
@@ -1031,7 +1160,7 @@ free_edac_mc:
return rc;
}
-static int mc_remove(struct platform_device *pdev)
+static void mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
struct edac_priv *priv = mci->pvt_info;
@@ -1049,8 +1178,6 @@ static int mc_remove(struct platform_device *pdev)
XPM_EVENT_ERROR_MASK_DDRMC_NCR, err_callback, mci);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static struct platform_driver xilinx_ddr_edac_mc_driver = {
@@ -1059,7 +1186,7 @@ static struct platform_driver xilinx_ddr_edac_mc_driver = {
.of_match_table = xlnx_edac_match,
},
.probe = mc_probe,
- .remove = mc_remove,
+ .remove_new = mc_remove,
};
module_platform_driver(xilinx_ddr_edac_mc_driver);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 6ac5ff20a2fe..401a77e3b5fa 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -429,7 +429,23 @@ static void bm_work(struct work_struct *work)
*/
card->bm_generation = generation;
- if (root_device == NULL) {
+ if (card->gap_count == 0) {
+ /*
+ * If self IDs have inconsistent gap counts, do a
+ * bus reset ASAP. The config rom read might never
+ * complete, so don't wait for it. However, still
+ * send a PHY configuration packet prior to the
+ * bus reset. The PHY configuration packet might
+ * fail, but 1394-2008 8.4.5.2 explicitly permits
+ * it in this case, so it should be safe to try.
+ */
+ new_root_id = local_id;
+ /*
+ * We must always send a bus reset if the gap count
+ * is inconsistent, so bypass the 5-reset limit.
+ */
+ card->bm_retries = 0;
+ } else if (root_device == NULL) {
/*
* Either link_on is false, or we failed to read the
* config rom. In either case, pick another root.
@@ -484,7 +500,19 @@ static void bm_work(struct work_struct *work)
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
- reset_bus(card, true);
+ /*
+ * Where possible, use a short bus reset to minimize
+ * disruption to isochronous transfers. But in the event
+ * of a gap count inconsistency, use a long bus reset.
+ *
+ * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
+ * may set different gap counts after a bus reset. On a mixed
+ * 1394/1394a bus, a short bus reset can get doubled. Some
+ * nodes may treat the double reset as one bus reset and others
+ * may treat it as two, causing a gap count inconsistency
+ * again. Using a long bus reset prevents this.
+ */
+ reset_bus(card, card->gap_count != 0);
/* Will allocate broadcast channel after the reset. */
goto out;
}
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9db9290c3269..7bc71f4be64a 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3773,6 +3773,7 @@ static int pci_probe(struct pci_dev *dev,
return 0;
fail_msi:
+ devm_free_irq(&dev->dev, dev->irq, ohci);
pci_disable_msi(dev);
return err;
@@ -3800,6 +3801,7 @@ static void pci_remove(struct pci_dev *dev)
software_reset(ohci);
+ devm_free_irq(&dev->dev, dev->irq, ohci);
pci_disable_msi(dev);
dev_notice(&dev->dev, "removing fw-ohci device\n");
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index 1c7940ba5539..2f557e90f2eb 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -105,7 +105,7 @@ static struct attribute *ffa_device_attributes_attrs[] = {
};
ATTRIBUTE_GROUPS(ffa_device_attributes);
-struct bus_type ffa_bus_type = {
+const struct bus_type ffa_bus_type = {
.name = "arm_ffa",
.match = ffa_device_match,
.probe = ffa_device_probe,
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index c15928b8c5cc..77c78be6e79c 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -141,6 +141,17 @@ out:
return ret;
}
+static int scmi_protocol_table_register(const struct scmi_device_id *id_table)
+{
+ int ret = 0;
+ const struct scmi_device_id *entry;
+
+ for (entry = id_table; entry->name && ret == 0; entry++)
+ ret = scmi_protocol_device_request(entry);
+
+ return ret;
+}
+
/**
* scmi_protocol_device_unrequest - Helper to unrequest a device
*
@@ -186,6 +197,15 @@ static void scmi_protocol_device_unrequest(const struct scmi_device_id *id_table
mutex_unlock(&scmi_requested_devices_mtx);
}
+static void
+scmi_protocol_table_unregister(const struct scmi_device_id *id_table)
+{
+ const struct scmi_device_id *entry;
+
+ for (entry = id_table; entry->name; entry++)
+ scmi_protocol_device_unrequest(entry);
+}
+
static const struct scmi_device_id *
scmi_dev_match_id(struct scmi_device *scmi_dev, struct scmi_driver *scmi_drv)
{
@@ -263,7 +283,7 @@ static void scmi_dev_remove(struct device *dev)
scmi_drv->remove(scmi_dev);
}
-struct bus_type scmi_bus_type = {
+const struct bus_type scmi_bus_type = {
.name = "scmi_protocol",
.match = scmi_dev_match,
.probe = scmi_dev_probe,
@@ -279,7 +299,7 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner,
if (!driver->probe)
return -EINVAL;
- retval = scmi_protocol_device_request(driver->id_table);
+ retval = scmi_protocol_table_register(driver->id_table);
if (retval)
return retval;
@@ -299,7 +319,7 @@ EXPORT_SYMBOL_GPL(scmi_driver_register);
void scmi_driver_unregister(struct scmi_driver *driver)
{
driver_unregister(&driver->driver);
- scmi_protocol_device_unrequest(driver->id_table);
+ scmi_protocol_table_unregister(driver->id_table);
}
EXPORT_SYMBOL_GPL(scmi_driver_unregister);
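
scmi_protocol_table_register() and scmi_protocol_table_unregister() both
rely on the driver's id_table ending with an empty entry whose .name is
NULL. A minimal sketch of such a sentinel-terminated table, with
illustrative protocol/name pairs:

	/* Sketch: sentinel-terminated SCMI device id table. */
	static const struct scmi_device_id scmi_id_table[] = {
		{ SCMI_PROTOCOL_CLOCK, "clocks" },
		{ SCMI_PROTOCOL_RESET, "reset" },
		{ },	/* entry->name == NULL terminates the walk */
	};
	MODULE_DEVICE_TABLE(scmi, scmi_id_table);
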
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index e2050adbf85c..134019297d08 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -13,7 +13,7 @@
#include "notify.h"
/* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x20000
+#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x30000
enum scmi_clock_protocol_cmd {
CLOCK_ATTRIBUTES = 0x3,
@@ -28,8 +28,13 @@ enum scmi_clock_protocol_cmd {
CLOCK_POSSIBLE_PARENTS_GET = 0xC,
CLOCK_PARENT_SET = 0xD,
CLOCK_PARENT_GET = 0xE,
+ CLOCK_GET_PERMISSIONS = 0xF,
};
+#define CLOCK_STATE_CONTROL_ALLOWED BIT(31)
+#define CLOCK_PARENT_CONTROL_ALLOWED BIT(30)
+#define CLOCK_RATE_CONTROL_ALLOWED BIT(29)
+
enum clk_state {
CLK_STATE_DISABLE,
CLK_STATE_ENABLE,
@@ -49,6 +54,8 @@ struct scmi_msg_resp_clock_attributes {
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(29))
#define SUPPORTS_PARENT_CLOCK(x) ((x) & BIT(28))
+#define SUPPORTS_EXTENDED_CONFIG(x) ((x) & BIT(27))
+#define SUPPORTS_GET_PERMISSIONS(x) ((x) & BIT(1))
u8 name[SCMI_SHORT_NAME_MAX_SIZE];
__le32 clock_enable_latency;
};
@@ -152,14 +159,18 @@ struct clock_info {
u32 version;
int num_clocks;
int max_async_req;
+ bool notify_rate_changed_cmd;
+ bool notify_rate_change_requested_cmd;
atomic_t cur_async_req;
struct scmi_clock_info *clk;
int (*clock_config_set)(const struct scmi_protocol_handle *ph,
u32 clk_id, enum clk_state state,
- u8 oem_type, u32 oem_val, bool atomic);
+ enum scmi_clock_oem_config oem_type,
+ u32 oem_val, bool atomic);
int (*clock_config_get)(const struct scmi_protocol_handle *ph,
- u32 clk_id, u8 oem_type, u32 *attributes,
- bool *enabled, u32 *oem_val, bool atomic);
+ u32 clk_id, enum scmi_clock_oem_config oem_type,
+ u32 *attributes, bool *enabled, u32 *oem_val,
+ bool atomic);
};
static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
@@ -167,6 +178,15 @@ static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
};
+static inline struct scmi_clock_info *
+scmi_clock_domain_lookup(struct clock_info *ci, u32 clk_id)
+{
+ if (clk_id >= ci->num_clocks)
+ return ERR_PTR(-EINVAL);
+
+ return ci->clk + clk_id;
+}
+
static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
struct clock_info *ci)
@@ -189,6 +209,17 @@ scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ if (!ret) {
+ if (!ph->hops->protocol_msg_check(ph, CLOCK_RATE_NOTIFY, NULL))
+ ci->notify_rate_changed_cmd = true;
+
+ if (!ph->hops->protocol_msg_check(ph,
+ CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
+ NULL))
+ ci->notify_rate_change_requested_cmd = true;
+ }
+
return ret;
}
@@ -284,14 +315,44 @@ static int scmi_clock_possible_parents(const struct scmi_protocol_handle *ph, u3
return ret;
}
+static int
+scmi_clock_get_permissions(const struct scmi_protocol_handle *ph, u32 clk_id,
+ struct scmi_clock_info *clk)
+{
+ struct scmi_xfer *t;
+ u32 perm;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, CLOCK_GET_PERMISSIONS,
+ sizeof(clk_id), sizeof(perm), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(clk_id, t->tx.buf);
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ perm = get_unaligned_le32(t->rx.buf);
+
+ clk->state_ctrl_forbidden = !(perm & CLOCK_STATE_CONTROL_ALLOWED);
+ clk->rate_ctrl_forbidden = !(perm & CLOCK_RATE_CONTROL_ALLOWED);
+ clk->parent_ctrl_forbidden = !(perm & CLOCK_PARENT_CONTROL_ALLOWED);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
- u32 clk_id, struct scmi_clock_info *clk,
+ u32 clk_id, struct clock_info *cinfo,
u32 version)
{
int ret;
u32 attributes;
struct scmi_xfer *t;
struct scmi_msg_resp_clock_attributes *attr;
+ struct scmi_clock_info *clk = cinfo->clk + clk_id;
ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
sizeof(clk_id), sizeof(*attr), &t);
@@ -324,12 +385,20 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
NULL, clk->name,
SCMI_MAX_STR_SIZE);
- if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
+ if (cinfo->notify_rate_changed_cmd &&
+ SUPPORTS_RATE_CHANGED_NOTIF(attributes))
clk->rate_changed_notifications = true;
- if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
+ if (cinfo->notify_rate_change_requested_cmd &&
+ SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
clk->rate_change_requested_notifications = true;
- if (SUPPORTS_PARENT_CLOCK(attributes))
- scmi_clock_possible_parents(ph, clk_id, clk);
+ if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
+ if (SUPPORTS_PARENT_CLOCK(attributes))
+ scmi_clock_possible_parents(ph, clk_id, clk);
+ if (SUPPORTS_GET_PERMISSIONS(attributes))
+ scmi_clock_get_permissions(ph, clk_id, clk);
+ if (SUPPORTS_EXTENDED_CONFIG(attributes))
+ clk->extended_config = true;
+ }
}
return ret;
@@ -502,6 +571,14 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
struct scmi_xfer *t;
struct scmi_clock_set_rate *cfg;
struct clock_info *ci = ph->get_priv(ph);
+ struct scmi_clock_info *clk;
+
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (clk->rate_ctrl_forbidden)
+ return -EACCES;
ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
if (ret)
@@ -543,7 +620,8 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
- enum clk_state state, u8 __unused0, u32 __unused1,
+ enum clk_state state,
+ enum scmi_clock_oem_config __unused0, u32 __unused1,
bool atomic)
{
int ret;
@@ -580,14 +658,16 @@ scmi_clock_set_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
struct clock_info *ci = ph->get_priv(ph);
struct scmi_clock_info *clk;
- if (clk_id >= ci->num_clocks)
- return -EINVAL;
-
- clk = ci->clk + clk_id;
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
if (parent_id >= clk->num_parents)
return -EINVAL;
+ if (clk->parent_ctrl_forbidden)
+ return -EACCES;
+
ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET,
sizeof(*cfg), 0, &t);
if (ret)
@@ -628,10 +708,11 @@ scmi_clock_get_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
return ret;
}
-/* For SCMI clock v2.1 and onwards */
+/* For SCMI clock v3.0 and onwards */
static int
scmi_clock_config_set_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
- enum clk_state state, u8 oem_type, u32 oem_val,
+ enum clk_state state,
+ enum scmi_clock_oem_config oem_type, u32 oem_val,
bool atomic)
{
int ret;
@@ -671,6 +752,14 @@ static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id,
bool atomic)
{
struct clock_info *ci = ph->get_priv(ph);
+ struct scmi_clock_info *clk;
+
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (clk->state_ctrl_forbidden)
+ return -EACCES;
return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE,
NULL_OEM_TYPE, 0, atomic);
@@ -680,16 +769,24 @@ static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id,
bool atomic)
{
struct clock_info *ci = ph->get_priv(ph);
+ struct scmi_clock_info *clk;
+
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (clk->state_ctrl_forbidden)
+ return -EACCES;
return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE,
NULL_OEM_TYPE, 0, atomic);
}
-/* For SCMI clock v2.1 and onwards */
+/* For SCMI clock v3.0 and onwards */
static int
scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
- u8 oem_type, u32 *attributes, bool *enabled,
- u32 *oem_val, bool atomic)
+ enum scmi_clock_oem_config oem_type, u32 *attributes,
+ bool *enabled, u32 *oem_val, bool atomic)
{
int ret;
u32 flags;
@@ -730,8 +827,8 @@ scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
static int
scmi_clock_config_get(const struct scmi_protocol_handle *ph, u32 clk_id,
- u8 oem_type, u32 *attributes, bool *enabled,
- u32 *oem_val, bool atomic)
+ enum scmi_clock_oem_config oem_type, u32 *attributes,
+ bool *enabled, u32 *oem_val, bool atomic)
{
int ret;
struct scmi_xfer *t;
@@ -768,20 +865,38 @@ static int scmi_clock_state_get(const struct scmi_protocol_handle *ph,
}
static int scmi_clock_config_oem_set(const struct scmi_protocol_handle *ph,
- u32 clk_id, u8 oem_type, u32 oem_val,
- bool atomic)
+ u32 clk_id,
+ enum scmi_clock_oem_config oem_type,
+ u32 oem_val, bool atomic)
{
struct clock_info *ci = ph->get_priv(ph);
+ struct scmi_clock_info *clk;
+
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (!clk->extended_config)
+ return -EOPNOTSUPP;
return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED,
oem_type, oem_val, atomic);
}
static int scmi_clock_config_oem_get(const struct scmi_protocol_handle *ph,
- u32 clk_id, u8 oem_type, u32 *oem_val,
- u32 *attributes, bool atomic)
+ u32 clk_id,
+ enum scmi_clock_oem_config oem_type,
+ u32 *oem_val, u32 *attributes, bool atomic)
{
struct clock_info *ci = ph->get_priv(ph);
+ struct scmi_clock_info *clk;
+
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (!clk->extended_config)
+ return -EOPNOTSUPP;
return ci->clock_config_get(ph, clk_id, oem_type, attributes,
NULL, oem_val, atomic);
@@ -800,10 +915,10 @@ scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
struct scmi_clock_info *clk;
struct clock_info *ci = ph->get_priv(ph);
- if (clk_id >= ci->num_clocks)
+ clk = scmi_clock_domain_lookup(ci, clk_id);
+ if (IS_ERR(clk))
return NULL;
- clk = ci->clk + clk_id;
if (!clk->name[0])
return NULL;
@@ -824,6 +939,28 @@ static const struct scmi_clk_proto_ops clk_proto_ops = {
.parent_get = scmi_clock_get_parent,
};
+static bool scmi_clk_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ bool supported;
+ struct scmi_clock_info *clk;
+ struct clock_info *ci = ph->get_priv(ph);
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return false;
+
+ clk = scmi_clock_domain_lookup(ci, src_id);
+ if (IS_ERR(clk))
+ return false;
+
+ if (evt_id == SCMI_EVENT_CLOCK_RATE_CHANGED)
+ supported = clk->rate_changed_notifications;
+ else
+ supported = clk->rate_change_requested_notifications;
+
+ return supported;
+}
+
static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
u32 clk_id, int message_id, bool enable)
{
@@ -908,6 +1045,7 @@ static const struct scmi_event clk_events[] = {
};
static const struct scmi_event_ops clk_event_ops = {
+ .is_notify_supported = scmi_clk_notify_supported,
.get_num_sources = scmi_clk_get_num_sources,
.set_notify_enabled = scmi_clk_set_notify_enabled,
.fill_custom_report = scmi_clk_fill_custom_report,
@@ -949,7 +1087,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
struct scmi_clock_info *clk = cinfo->clk + clkid;
- ret = scmi_clock_attributes_get(ph, clkid, clk, version);
+ ret = scmi_clock_attributes_get(ph, clkid, cinfo, version);
if (!ret)
scmi_clock_describe_rates_get(ph, clkid, clk);
}
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 00b165d1f502..6affbfdd1dec 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -141,7 +141,7 @@ scmi_revision_area_get(const struct scmi_protocol_handle *ph);
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
u8 *prot_imp);
-extern struct bus_type scmi_bus_type;
+extern const struct bus_type scmi_bus_type;
#define SCMI_BUS_NOTIFY_DEVICE_REQUEST 0
#define SCMI_BUS_NOTIFY_DEVICE_UNREQUEST 1
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 3ea64b22cf0d..34d77802c990 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -86,6 +86,12 @@ struct scmi_xfers_info {
* @users: A refcount to track effective users of this protocol.
* @priv: Reference for optional protocol private data.
* @version: Protocol version supported by the platform as detected at runtime.
+ * @negotiated_version: When the platform supports a newer protocol version,
+ * the agent will try to negotiate with the platform the
+ * usage of the newest version known to it, since
+ * backward compatibility is NOT automatically assured.
+ * This field is NON-zero when a successful negotiation
+ * has completed.
* @ph: An embedded protocol handle that will be passed down to protocol
* initialization code to identify this instance.
*
@@ -99,6 +105,7 @@ struct scmi_protocol_instance {
refcount_t users;
void *priv;
unsigned int version;
+ unsigned int negotiated_version;
struct scmi_protocol_handle ph;
};
@@ -1754,10 +1761,44 @@ static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
#endif
}
+/**
+ * scmi_protocol_msg_check - Check protocol message attributes
+ *
+ * @ph: A reference to the protocol handle.
+ * @message_id: The ID of the message to check.
+ * @attributes: A parameter to optionally return the retrieved message
+ * attributes, in case of Success.
+ *
+ * A helper to check protocol message attributes for a specific protocol
+ * and message pair.
+ *
+ * Return: 0 on SUCCESS
+ */
+static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
+ u32 message_id, u32 *attributes)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
+ sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(message_id, t->tx.buf);
+ ret = do_xfer(ph, t);
+ if (!ret && attributes)
+ *attributes = get_unaligned_le32(t->rx.buf);
+ xfer_put(ph, t);
+
+ return ret;
+}
+
static const struct scmi_proto_helpers_ops helpers_ops = {
.extended_name_get = scmi_common_extended_name_get,
.iter_response_init = scmi_iterator_init,
.iter_response_run = scmi_iterator_run,
+ .protocol_msg_check = scmi_protocol_msg_check,
.fastchannel_init = scmi_common_fastchannel_init,
.fastchannel_db_ring = scmi_common_fastchannel_db_ring,
};
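
Protocol implementations consume this helper through @hops; the clock,
perf, power, reset, sensor and powercap changes below all follow the same
pattern. A condensed sketch (EXAMPLE_NOTIFY and the function name are
illustrative, not part of this patch):

	static bool example_notify_supported(const struct scmi_protocol_handle *ph)
	{
		u32 attributes;

		/* Returns 0 when EXAMPLE_NOTIFY is implemented by the platform */
		if (!ph->hops->protocol_msg_check(ph, EXAMPLE_NOTIFY, &attributes))
			return true;

		return false;
	}
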
@@ -1782,6 +1823,44 @@ scmi_revision_area_get(const struct scmi_protocol_handle *ph)
}
/**
+ * scmi_protocol_version_negotiate - Negotiate protocol version
+ *
+ * @ph: A reference to the protocol handle.
+ *
+ * A helper to negotiate a protocol version different from the latest
+ * advertised as supported by the platform: on success, backward
+ * compatibility is assured by the platform.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_protocol_version_negotiate(struct scmi_protocol_handle *ph)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_protocol_instance *pi = ph_to_pi(ph);
+
+ /* At first check if NEGOTIATE_PROTOCOL_VERSION is supported ... */
+ ret = scmi_protocol_msg_check(ph, NEGOTIATE_PROTOCOL_VERSION, NULL);
+ if (ret)
+ return ret;
+
+ /* ... then attempt protocol version negotiation */
+ ret = xfer_get_init(ph, NEGOTIATE_PROTOCOL_VERSION,
+ sizeof(__le32), 0, &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(pi->proto->supported_version, t->tx.buf);
+ ret = do_xfer(ph, t);
+ if (!ret)
+ pi->negotiated_version = pi->proto->supported_version;
+
+ xfer_put(ph, t);
+
+ return ret;
+}
+
+/**
* scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
* instance descriptor.
* @info: The reference to the related SCMI instance.
@@ -1853,11 +1932,21 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info,
devres_close_group(handle->dev, pi->gid);
dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
- if (pi->version > proto->supported_version)
- dev_warn(handle->dev,
- "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X."
- "Backward compatibility is NOT assured.\n",
- pi->version, pi->proto->id);
+ if (pi->version > proto->supported_version) {
+ ret = scmi_protocol_version_negotiate(&pi->ph);
+ if (!ret) {
+ dev_info(handle->dev,
+ "Protocol 0x%X successfully negotiated version 0x%X\n",
+ proto->id, pi->negotiated_version);
+ } else {
+ dev_warn(handle->dev,
+ "Detected UNSUPPORTED higher version 0x%X for protocol 0x%X.\n",
+ pi->version, pi->proto->id);
+ dev_warn(handle->dev,
+ "Trying version 0x%X. Backward compatibility is NOT assured.\n",
+ pi->proto->supported_version);
+ }
+ }
return pi;
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index 0efd20cd9d69..27c52531194d 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -99,6 +99,7 @@
#define PROTO_ID_MASK GENMASK(31, 24)
#define EVT_ID_MASK GENMASK(23, 16)
#define SRC_ID_MASK GENMASK(15, 0)
+#define NOTIF_UNSUPP -1
/*
* Builds an unsigned 32bit key from the given input tuple to be used
@@ -788,6 +789,7 @@ int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
pd->ph = ph;
for (i = 0; i < ee->num_events; i++, evt++) {
+ int id;
struct scmi_registered_event *r_evt;
r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
@@ -809,6 +811,11 @@ int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
if (!r_evt->report)
return -ENOMEM;
+ for (id = 0; id < r_evt->num_sources; id++)
+ if (ee->ops->is_notify_supported &&
+ !ee->ops->is_notify_supported(ph, r_evt->evt->id, id))
+ refcount_set(&r_evt->sources[id], NOTIF_UNSUPP);
+
pd->registered_events[i] = r_evt;
/* Ensure events are updated */
smp_wmb();
@@ -1166,7 +1173,13 @@ static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
int ret = 0;
sid = &r_evt->sources[src_id];
- if (refcount_read(sid) == 0) {
+ if (refcount_read(sid) == NOTIF_UNSUPP) {
+ dev_dbg(r_evt->proto->ph->dev,
+ "Notification NOT supported - proto_id:%d evt_id:%d src_id:%d",
+ r_evt->proto->id, r_evt->evt->id,
+ src_id);
+ ret = -EOPNOTSUPP;
+ } else if (refcount_read(sid) == 0) {
ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
src_id);
if (!ret)
@@ -1179,6 +1192,8 @@ static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
} else {
for (; num_sources; src_id++, num_sources--) {
sid = &r_evt->sources[src_id];
+ if (refcount_read(sid) == NOTIF_UNSUPP)
+ continue;
if (refcount_dec_and_test(sid))
REVT_NOTIFY_DISABLE(r_evt,
r_evt->evt->id, src_id);
diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h
index 4e9b627edfef..76758a736cf4 100644
--- a/drivers/firmware/arm_scmi/notify.h
+++ b/drivers/firmware/arm_scmi/notify.h
@@ -35,6 +35,8 @@ struct scmi_protocol_handle;
/**
* struct scmi_event_ops - Protocol helpers called by the notification core.
+ * @is_notify_supported: Return true if the specified notification for the
+ * specified resource (src_id) is supported.
* @get_num_sources: Returns the number of possible events' sources for this
* protocol
* @set_notify_enabled: Enable/disable the required evt_id/src_id notifications
@@ -50,6 +52,8 @@ struct scmi_protocol_handle;
* process context.
*/
struct scmi_event_ops {
+ bool (*is_notify_supported)(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id);
int (*get_num_sources)(const struct scmi_protocol_handle *ph);
int (*set_notify_enabled)(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enabled);
diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c
index 25bfb465484d..4e7944b91e38 100644
--- a/drivers/firmware/arm_scmi/optee.c
+++ b/drivers/firmware/arm_scmi/optee.c
@@ -109,8 +109,10 @@ enum scmi_optee_pta_cmd {
* @rx_len: Response size
* @mu: Mutex protection on channel access
* @cinfo: SCMI channel information
- * @shmem: Virtual base address of the shared memory
- * @req: Shared memory protocol handle for SCMI request and synchronous response
+ * @req: union for SCMI interface
+ * @req.shmem: Virtual base address of the shared memory
+ * @req.msg: Shared memory protocol handle for SCMI request and
+ * synchronous response
* @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem
* @link: Reference in agent's channel list
*/
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 211e8e0aef2c..981e327e63e3 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -182,6 +182,8 @@ struct scmi_perf_info {
enum scmi_power_scale power_scale;
u64 stats_addr;
u32 stats_size;
+ bool notify_lvl_cmd;
+ bool notify_lim_cmd;
struct perf_dom_info *dom_info;
};
@@ -222,6 +224,15 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ if (!ret) {
+ if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LEVEL, NULL))
+ pi->notify_lvl_cmd = true;
+
+ if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LIMITS, NULL))
+ pi->notify_lim_cmd = true;
+ }
+
return ret;
}
@@ -239,6 +250,7 @@ static void scmi_perf_xa_destroy(void *data)
static int
scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
struct perf_dom_info *dom_info,
+ bool notify_lim_cmd, bool notify_lvl_cmd,
u32 version)
{
int ret;
@@ -260,8 +272,12 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
dom_info->info.set_perf = SUPPORTS_SET_PERF_LVL(flags);
- dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
- dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
+ if (notify_lim_cmd)
+ dom_info->perf_limit_notify =
+ SUPPORTS_PERF_LIMIT_NOTIFY(flags);
+ if (notify_lvl_cmd)
+ dom_info->perf_level_notify =
+ SUPPORTS_PERF_LEVEL_NOTIFY(flags);
dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
if (PROTOCOL_REV_MAJOR(version) >= 0x4)
dom_info->level_indexing_mode =
@@ -270,15 +286,30 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
le32_to_cpu(attr->sustained_freq_khz);
dom_info->sustained_perf_level =
le32_to_cpu(attr->sustained_perf_level);
+ /*
+ * sustained_freq_khz = mult_factor * sustained_perf_level
+	 * mult_factor must be a non-zero positive integer (not a fraction)
+ */
if (!dom_info->sustained_freq_khz ||
!dom_info->sustained_perf_level ||
- dom_info->level_indexing_mode)
+ dom_info->level_indexing_mode) {
/* CPUFreq converts to kHz, hence default 1000 */
dom_info->mult_factor = 1000;
- else
+ } else {
dom_info->mult_factor =
(dom_info->sustained_freq_khz * 1000UL)
/ dom_info->sustained_perf_level;
+ if ((dom_info->sustained_freq_khz * 1000UL) %
+ dom_info->sustained_perf_level)
+ dev_warn(ph->dev,
+ "multiplier for domain %d rounded\n",
+ dom_info->id);
+ }
+ if (!dom_info->mult_factor)
+ dev_warn(ph->dev,
+			 "Wrong sustained perf/frequency (domain %d)\n",
+ dom_info->id);
+
strscpy(dom_info->info.name, attr->name,
SCMI_SHORT_NAME_MAX_SIZE);
}
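
As a worked example of the mult_factor computation above (illustrative
numbers, not taken from any real platform): for sustained_freq_khz =
1228800 and sustained_perf_level = 1024,

	mult_factor = (1228800 * 1000) / 1024 = 1200000 Hz per unit of performance level

and the division is exact, so no rounding warning is emitted. With
sustained_freq_khz = 1000000 and sustained_perf_level = 300 instead,
1000000000 / 300 leaves a remainder and the "multiplier for domain %d
rounded" warning fires. A zero mult_factor, which the "Wrong sustained
perf/frequency" warning flags, can only result from inconsistent
firmware-reported values.
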
@@ -295,9 +326,9 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
dom_info->id, NULL, dom_info->info.name,
SCMI_MAX_STR_SIZE);
+ xa_init(&dom_info->opps_by_lvl);
if (dom_info->level_indexing_mode) {
xa_init(&dom_info->opps_by_idx);
- xa_init(&dom_info->opps_by_lvl);
hash_init(dom_info->opps_by_freq);
}
@@ -340,13 +371,21 @@ static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
}
static inline void
-process_response_opp(struct scmi_opp *opp, unsigned int loop_idx,
+process_response_opp(struct device *dev, struct perf_dom_info *dom,
+ struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels *r)
{
+ int ret;
+
opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
opp->power = le32_to_cpu(r->opp[loop_idx].power);
opp->trans_latency_us =
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
+
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ if (ret)
+ dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n",
+ opp->perf, ret);
}
static inline void
@@ -354,16 +393,21 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
struct scmi_opp *opp, unsigned int loop_idx,
const struct scmi_msg_resp_perf_describe_levels_v4 *r)
{
+ int ret;
+
opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
opp->power = le32_to_cpu(r->opp[loop_idx].power);
opp->trans_latency_us =
le16_to_cpu(r->opp[loop_idx].transition_latency_us);
+ ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+ if (ret)
+ dev_warn(dev, "Failed to add opps_by_lvl at %d - ret:%d\n",
+ opp->perf, ret);
+
/* Note that PERF v4 reports always five 32-bit words */
opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
if (dom->level_indexing_mode) {
- int ret;
-
opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
@@ -373,12 +417,6 @@ process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
"Failed to add opps_by_idx at %d - ret:%d\n",
opp->level_index, ret);
- ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
- if (ret)
- dev_warn(dev,
- "Failed to add opps_by_lvl at %d - ret:%d\n",
- opp->perf, ret);
-
hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
}
}
@@ -393,7 +431,8 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
- process_response_opp(opp, st->loop_idx, response);
+ process_response_opp(ph->dev, p->perf_dom, opp, st->loop_idx,
+ response);
else
process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
response);
@@ -978,6 +1017,27 @@ static const struct scmi_perf_proto_ops perf_proto_ops = {
.power_scale_get = scmi_power_scale_get,
};
+static bool scmi_perf_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ bool supported;
+ struct perf_dom_info *dom;
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+ return false;
+
+ dom = scmi_perf_domain_lookup(ph, src_id);
+ if (IS_ERR(dom))
+ return false;
+
+ if (evt_id == SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED)
+ supported = dom->perf_limit_notify;
+ else
+ supported = dom->perf_level_notify;
+
+ return supported;
+}
+
static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
@@ -995,18 +1055,47 @@ static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
return ret;
}
+static int
+scmi_perf_xlate_opp_to_freq(struct perf_dom_info *dom,
+ unsigned int index, unsigned long *freq)
+{
+ struct scmi_opp *opp;
+
+ if (!dom || !freq)
+ return -EINVAL;
+
+ if (!dom->level_indexing_mode) {
+ opp = xa_load(&dom->opps_by_lvl, index);
+ if (!opp)
+ return -ENODEV;
+
+ *freq = opp->perf * dom->mult_factor;
+ } else {
+ opp = xa_load(&dom->opps_by_idx, index);
+ if (!opp)
+ return -ENODEV;
+
+ *freq = opp->indicative_freq * dom->mult_factor;
+ }
+
+ return 0;
+}
+
static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
u8 evt_id, ktime_t timestamp,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
+ int ret;
void *rep = NULL;
+ struct perf_dom_info *dom;
switch (evt_id) {
case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
{
const struct scmi_perf_limits_notify_payld *p = payld;
struct scmi_perf_limits_report *r = report;
+ unsigned long freq_min, freq_max;
if (sizeof(*p) != payld_sz)
break;
@@ -1016,14 +1105,36 @@ static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
r->domain_id = le32_to_cpu(p->domain_id);
r->range_max = le32_to_cpu(p->range_max);
r->range_min = le32_to_cpu(p->range_min);
+		/* Check if the reported domain exists at all */
+ dom = scmi_perf_domain_lookup(ph, r->domain_id);
+ if (IS_ERR(dom))
+ break;
+ /*
+		 * The event will be reported from this point on, even if
+		 * the translated frequencies cannot be retrieved later.
+ */
*src_id = r->domain_id;
rep = r;
+
+ ret = scmi_perf_xlate_opp_to_freq(dom, r->range_max, &freq_max);
+ if (ret)
+ break;
+
+ ret = scmi_perf_xlate_opp_to_freq(dom, r->range_min, &freq_min);
+ if (ret)
+ break;
+
+ /* Report translated freqs ONLY if both available */
+ r->range_max_freq = freq_max;
+ r->range_min_freq = freq_min;
+
break;
}
case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
{
const struct scmi_perf_level_notify_payld *p = payld;
struct scmi_perf_level_report *r = report;
+ unsigned long freq;
if (sizeof(*p) != payld_sz)
break;
@@ -1031,9 +1142,27 @@ static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
r->timestamp = timestamp;
r->agent_id = le32_to_cpu(p->agent_id);
r->domain_id = le32_to_cpu(p->domain_id);
r->performance_level = le32_to_cpu(p->performance_level);
+		/* Check if the reported domain exists at all */
+ dom = scmi_perf_domain_lookup(ph, r->domain_id);
+ if (IS_ERR(dom))
+ break;
+ /*
+		 * The event will be reported from this point on, even if
+		 * the translated frequencies cannot be retrieved later.
+ */
*src_id = r->domain_id;
rep = r;
+
+ /* Report translated freqs ONLY if available */
+ ret = scmi_perf_xlate_opp_to_freq(dom, r->performance_level,
+ &freq);
+ if (ret)
+ break;
+
+ r->performance_level_freq = freq;
+
break;
}
default:
@@ -1067,6 +1196,7 @@ static const struct scmi_event perf_events[] = {
};
static const struct scmi_event_ops perf_event_ops = {
+ .is_notify_supported = scmi_perf_notify_supported,
.get_num_sources = scmi_perf_get_num_sources,
.set_notify_enabled = scmi_perf_set_notify_enabled,
.fill_custom_report = scmi_perf_fill_custom_report,
@@ -1111,7 +1241,8 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
struct perf_dom_info *dom = pinfo->dom_info + domain;
dom->id = domain;
- scmi_perf_domain_attributes_get(ph, dom, version);
+ scmi_perf_domain_attributes_get(ph, dom, pinfo->notify_lim_cmd,
+ pinfo->notify_lvl_cmd, version);
scmi_perf_describe_levels_get(ph, dom, version);
if (dom->perf_fastchannels)
diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c
index c2e6b9b4d941..49666bd1d8ac 100644
--- a/drivers/firmware/arm_scmi/power.c
+++ b/drivers/firmware/arm_scmi/power.c
@@ -68,6 +68,7 @@ struct power_dom_info {
struct scmi_power_info {
u32 version;
+ bool notify_state_change_cmd;
int num_domains;
u64 stats_addr;
u32 stats_size;
@@ -97,13 +98,18 @@ static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ if (!ret)
+ if (!ph->hops->protocol_msg_check(ph, POWER_STATE_NOTIFY, NULL))
+ pi->notify_state_change_cmd = true;
+
return ret;
}
static int
scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
u32 domain, struct power_dom_info *dom_info,
- u32 version)
+ u32 version, bool notify_state_change_cmd)
{
int ret;
u32 flags;
@@ -122,7 +128,9 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
if (!ret) {
flags = le32_to_cpu(attr->flags);
- dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
+ if (notify_state_change_cmd)
+ dom_info->state_set_notify =
+ SUPPORTS_STATE_SET_NOTIFY(flags);
dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
@@ -231,6 +239,20 @@ static int scmi_power_request_notify(const struct scmi_protocol_handle *ph,
return ret;
}
+static bool scmi_power_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ struct power_dom_info *dom;
+ struct scmi_power_info *pinfo = ph->get_priv(ph);
+
+ if (evt_id != SCMI_EVENT_POWER_STATE_CHANGED ||
+ src_id >= pinfo->num_domains)
+ return false;
+
+ dom = pinfo->dom_info + src_id;
+ return dom->state_set_notify;
+}
+
static int scmi_power_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
@@ -285,6 +307,7 @@ static const struct scmi_event power_events[] = {
};
static const struct scmi_event_ops power_event_ops = {
+ .is_notify_supported = scmi_power_notify_supported,
.get_num_sources = scmi_power_get_num_sources,
.set_notify_enabled = scmi_power_set_notify_enabled,
.fill_custom_report = scmi_power_fill_custom_report,
@@ -326,7 +349,8 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
for (domain = 0; domain < pinfo->num_domains; domain++) {
struct power_dom_info *dom = pinfo->dom_info + domain;
- scmi_power_domain_attributes_get(ph, domain, dom, version);
+ scmi_power_domain_attributes_get(ph, domain, dom, version,
+ pinfo->notify_state_change_cmd);
}
pinfo->version = version;
diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c
index a4c6cd4716fe..2fab92367e42 100644
--- a/drivers/firmware/arm_scmi/powercap.c
+++ b/drivers/firmware/arm_scmi/powercap.c
@@ -124,6 +124,8 @@ struct scmi_powercap_state {
struct powercap_info {
u32 version;
int num_domains;
+ bool notify_cap_cmd;
+ bool notify_measurements_cmd;
struct scmi_powercap_state *states;
struct scmi_powercap_info *powercaps;
};
@@ -157,6 +159,18 @@ scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ if (!ret) {
+ if (!ph->hops->protocol_msg_check(ph,
+ POWERCAP_CAP_NOTIFY, NULL))
+ pi->notify_cap_cmd = true;
+
+ if (!ph->hops->protocol_msg_check(ph,
+ POWERCAP_MEASUREMENTS_NOTIFY,
+ NULL))
+ pi->notify_measurements_cmd = true;
+ }
+
return ret;
}
@@ -200,10 +214,12 @@ scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph,
flags = le32_to_cpu(resp->attributes);
dom_info->id = domain;
- dom_info->notify_powercap_cap_change =
- SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags);
- dom_info->notify_powercap_measurement_change =
- SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags);
+ if (pinfo->notify_cap_cmd)
+ dom_info->notify_powercap_cap_change =
+ SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags);
+ if (pinfo->notify_measurements_cmd)
+ dom_info->notify_powercap_measurement_change =
+ SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags);
dom_info->async_powercap_cap_set =
SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags);
dom_info->powercap_cap_config =
@@ -788,6 +804,26 @@ static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
return ret;
}
+static bool
+scmi_powercap_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ bool supported = false;
+ const struct scmi_powercap_info *dom_info;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains)
+ return false;
+
+ dom_info = pi->powercaps + src_id;
+ if (evt_id == SCMI_EVENT_POWERCAP_CAP_CHANGED)
+ supported = dom_info->notify_powercap_cap_change;
+ else if (evt_id == SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED)
+ supported = dom_info->notify_powercap_measurement_change;
+
+ return supported;
+}
+
static int
scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
@@ -904,6 +940,7 @@ static const struct scmi_event powercap_events[] = {
};
static const struct scmi_event_ops powercap_event_ops = {
+ .is_notify_supported = scmi_powercap_notify_supported,
.get_num_sources = scmi_powercap_get_num_sources,
.set_notify_enabled = scmi_powercap_set_notify_enabled,
.fill_custom_report = scmi_powercap_fill_custom_report,
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index e683c26f24eb..693019fff0f6 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -33,6 +33,7 @@ enum scmi_common_cmd {
PROTOCOL_VERSION = 0x0,
PROTOCOL_ATTRIBUTES = 0x1,
PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
+ NEGOTIATE_PROTOCOL_VERSION = 0x10,
};
/**
@@ -251,6 +252,8 @@ struct scmi_fc_info {
* provided in @ops.
* @iter_response_run: A common helper to trigger the run of a previously
* initialized iterator.
+ * @protocol_msg_check: A common helper to check if a specific protocol message
+ * is supported.
* @fastchannel_init: A common helper used to initialize FC descriptors by
* gathering FC descriptions from the SCMI platform server.
* @fastchannel_db_ring: A common helper to ring a FC doorbell.
@@ -264,6 +267,8 @@ struct scmi_proto_helpers_ops {
unsigned int max_resources, u8 msg_id,
size_t tx_size, void *priv);
int (*iter_response_run)(void *iter);
+ int (*protocol_msg_check)(const struct scmi_protocol_handle *ph,
+ u32 message_id, u32 *attributes);
void (*fastchannel_init)(const struct scmi_protocol_handle *ph,
u8 describe_id, u32 message_id,
u32 valid_size, u32 domain,
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c
index 19970d9f9e36..1b318316535e 100644
--- a/drivers/firmware/arm_scmi/reset.c
+++ b/drivers/firmware/arm_scmi/reset.c
@@ -67,6 +67,7 @@ struct reset_dom_info {
struct scmi_reset_info {
u32 version;
int num_domains;
+ bool notify_reset_cmd;
struct reset_dom_info *dom_info;
};
@@ -89,18 +90,24 @@ static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ if (!ret)
+ if (!ph->hops->protocol_msg_check(ph, RESET_NOTIFY, NULL))
+ pi->notify_reset_cmd = true;
+
return ret;
}
static int
scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
- u32 domain, struct reset_dom_info *dom_info,
- u32 version)
+ struct scmi_reset_info *pinfo,
+ u32 domain, u32 version)
{
int ret;
u32 attributes;
struct scmi_xfer *t;
struct scmi_msg_resp_reset_domain_attributes *attr;
+ struct reset_dom_info *dom_info = pinfo->dom_info + domain;
ret = ph->xops->xfer_get_init(ph, RESET_DOMAIN_ATTRIBUTES,
sizeof(domain), sizeof(*attr), &t);
@@ -115,7 +122,9 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
attributes = le32_to_cpu(attr->attributes);
dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
- dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
+ if (pinfo->notify_reset_cmd)
+ dom_info->reset_notify =
+ SUPPORTS_NOTIFY_RESET(attributes);
dom_info->latency_us = le32_to_cpu(attr->latency);
if (dom_info->latency_us == U32_MAX)
dom_info->latency_us = 0;
@@ -226,6 +235,20 @@ static const struct scmi_reset_proto_ops reset_proto_ops = {
.deassert = scmi_reset_domain_deassert,
};
+static bool scmi_reset_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ struct reset_dom_info *dom;
+ struct scmi_reset_info *pi = ph->get_priv(ph);
+
+ if (evt_id != SCMI_EVENT_RESET_ISSUED || src_id >= pi->num_domains)
+ return false;
+
+ dom = pi->dom_info + src_id;
+
+ return dom->reset_notify;
+}
+
static int scmi_reset_notify(const struct scmi_protocol_handle *ph,
u32 domain_id, bool enable)
{
@@ -301,6 +324,7 @@ static const struct scmi_event reset_events[] = {
};
static const struct scmi_event_ops reset_event_ops = {
+ .is_notify_supported = scmi_reset_notify_supported,
.get_num_sources = scmi_reset_get_num_sources,
.set_notify_enabled = scmi_reset_set_notify_enabled,
.fill_custom_report = scmi_reset_fill_custom_report,
@@ -339,11 +363,8 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
if (!pinfo->dom_info)
return -ENOMEM;
- for (domain = 0; domain < pinfo->num_domains; domain++) {
- struct reset_dom_info *dom = pinfo->dom_info + domain;
-
- scmi_reset_domain_attributes_get(ph, domain, dom, version);
- }
+ for (domain = 0; domain < pinfo->num_domains; domain++)
+ scmi_reset_domain_attributes_get(ph, pinfo, domain, version);
pinfo->version = version;
return ph->set_priv(ph, pinfo, version);
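The new protocol_msg_check helper lets a protocol driver ask the platform, via PROTOCOL_MESSAGE_ATTRIBUTES, whether an optional command is actually implemented, and the reset protocol caches the answer in notify_reset_cmd at init time instead of trusting per-domain attribute bits alone. A minimal user-space sketch of that probe-once-and-cache pattern (the stub check and its return convention are illustrative, not the kernel API):

```c
#include <stdbool.h>
#include <stdio.h>

#define RESET_NOTIFY 0x5	/* SCMI reset protocol message id */

/* Stand-in for ph->hops->protocol_msg_check(); 0 means "supported". */
static int protocol_msg_check(unsigned int message_id)
{
	/* Pretend the platform implements RESET_NOTIFY. */
	return message_id == RESET_NOTIFY ? 0 : -1;
}

struct reset_info {
	bool notify_reset_cmd;
};

int main(void)
{
	struct reset_info pi = { 0 };

	/* Probe once at init, cache the result for later event setup. */
	if (!protocol_msg_check(RESET_NOTIFY))
		pi.notify_reset_cmd = true;

	printf("RESET_NOTIFY %ssupported\n", pi.notify_reset_cmd ? "" : "not ");
	return 0;
}
```

The cached flag is what scmi_reset_notify_supported() later consults, so notification registration can be refused up front rather than failing against the firmware.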
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 311149965370..7fc5535ca34c 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -215,6 +215,8 @@ struct scmi_sensor_update_notify_payld {
struct sensors_info {
u32 version;
+ bool notify_trip_point_cmd;
+ bool notify_continuos_update_cmd;
int num_sensors;
int max_requests;
u64 reg_addr;
@@ -246,6 +248,18 @@ static int scmi_sensor_attributes_get(const struct scmi_protocol_handle *ph,
}
ph->xops->xfer_put(ph, t);
+
+ if (!ret) {
+ if (!ph->hops->protocol_msg_check(ph,
+ SENSOR_TRIP_POINT_NOTIFY, NULL))
+ si->notify_trip_point_cmd = true;
+
+ if (!ph->hops->protocol_msg_check(ph,
+ SENSOR_CONTINUOUS_UPDATE_NOTIFY,
+ NULL))
+ si->notify_continuos_update_cmd = true;
+ }
+
return ret;
}
@@ -594,7 +608,8 @@ iter_sens_descr_process_response(const struct scmi_protocol_handle *ph,
* Such bitfields are assumed to be zeroed on non
* relevant fw versions...assuming fw not buggy !
*/
- s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
+ if (si->notify_continuos_update_cmd)
+ s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
s->timestamped = SUPPORTS_TIMESTAMP(attrl);
if (s->timestamped)
s->tstamp_scale = S32_EXT(SENSOR_TSTAMP_EXP(attrl));
@@ -988,6 +1003,25 @@ static const struct scmi_sensor_proto_ops sensor_proto_ops = {
.config_set = scmi_sensor_config_set,
};
+static bool scmi_sensor_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ bool supported = false;
+ const struct scmi_sensor_info *s;
+ struct sensors_info *sinfo = ph->get_priv(ph);
+
+ s = scmi_sensor_info_get(ph, src_id);
+ if (!s)
+ return false;
+
+ if (evt_id == SCMI_EVENT_SENSOR_TRIP_POINT_EVENT)
+ supported = sinfo->notify_trip_point_cmd;
+ else if (evt_id == SCMI_EVENT_SENSOR_UPDATE)
+ supported = s->update;
+
+ return supported;
+}
+
static int scmi_sensor_set_notify_enabled(const struct scmi_protocol_handle *ph,
u8 evt_id, u32 src_id, bool enable)
{
@@ -1099,6 +1133,7 @@ static const struct scmi_event sensor_events[] = {
};
static const struct scmi_event_ops sensor_event_ops = {
+ .is_notify_supported = scmi_sensor_notify_supported,
.get_num_sources = scmi_sensor_get_num_sources,
.set_notify_enabled = scmi_sensor_set_notify_enabled,
.fill_custom_report = scmi_sensor_fill_custom_report,
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 7611e9665038..39936e1dd30e 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -214,6 +214,13 @@ static int smc_chan_free(int id, void *p, void *data)
struct scmi_chan_info *cinfo = p;
struct scmi_smc *scmi_info = cinfo->transport_info;
+ /*
+ * Different protocols might share the same chan info, so a previous
+ * smc_chan_free call might have already freed the structure.
+ */
+ if (!scmi_info)
+ return 0;
+
/* Ignore any possible further reception on the IRQ path */
if (scmi_info->irq > 0)
free_irq(scmi_info->irq, scmi_info);
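The NULL check makes the teardown idempotent: whichever channel is freed first releases the shared transport state and (later in the full function, not shown in this hunk) clears cinfo->transport_info, so subsequent callers see NULL and return immediately. A stripped-down sketch of the pattern, with free() standing in for the real IRQ/shmem teardown:

```c
#include <stdlib.h>

struct chan_info {
	void *transport_info;	/* may be shared by several channels */
};

/* Idempotent free: safe to call once per channel sharing the state. */
static int chan_free(struct chan_info *cinfo)
{
	void *priv = cinfo->transport_info;

	if (!priv)
		return 0;	/* already freed via another channel */

	free(priv);
	cinfo->transport_info = NULL;
	return 0;
}
```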
diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c
index 1621da97bcbb..b6358c155f7f 100644
--- a/drivers/firmware/arm_scmi/system.c
+++ b/drivers/firmware/arm_scmi/system.c
@@ -36,8 +36,20 @@ struct scmi_system_power_state_notifier_payld {
struct scmi_system_info {
u32 version;
bool graceful_timeout_supported;
+ bool power_state_notify_cmd;
};
+static bool scmi_system_notify_supported(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id)
+{
+ struct scmi_system_info *pinfo = ph->get_priv(ph);
+
+ if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER)
+ return false;
+
+ return pinfo->power_state_notify_cmd;
+}
+
static int scmi_system_request_notify(const struct scmi_protocol_handle *ph,
bool enable)
{
@@ -114,6 +126,7 @@ static const struct scmi_event system_events[] = {
};
static const struct scmi_event_ops system_event_ops = {
+ .is_notify_supported = scmi_system_notify_supported,
.set_notify_enabled = scmi_system_set_notify_enabled,
.fill_custom_report = scmi_system_fill_custom_report,
};
@@ -147,6 +160,9 @@ static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2)
pinfo->graceful_timeout_supported = true;
+ if (!ph->hops->protocol_msg_check(ph, SYSTEM_POWER_STATE_NOTIFY, NULL))
+ pinfo->power_state_notify_cmd = true;
+
return ph->set_priv(ph, pinfo, version);
}
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 83f5bb57fa4c..83092d93f36a 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -107,7 +107,7 @@ static int __init arm_enable_runtime_services(void)
efi_memory_desc_t *md;
for_each_efi_memory_desc(md) {
- int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
struct resource *res;
if (!(md->attribute & EFI_MEMORY_SP))
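The type change matters because md->num_pages is a u64: shifting it by EFI_PAGE_SHIFT yields a 64-bit byte count, and storing that in an int silently truncates for regions of 2 GiB and up. The same fix is applied to riscv-runtime.c further below. A standalone demo of the truncation:

```c
#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12

int main(void)
{
	uint64_t num_pages = 0x80000;	/* 2 GiB worth of 4 KiB pages */

	int bad = num_pages << EFI_PAGE_SHIFT;		/* truncated to 32 bits */
	uint64_t good = num_pages << EFI_PAGE_SHIFT;	/* full 64-bit size */

	printf("int: %d, u64: %llu\n", bad, (unsigned long long)good);
	return 0;
}
```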
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 3e8d4b51a814..97bafb5f7038 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -292,7 +292,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
- cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+ cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
if (!cap_info->phys) {
kfree(cap_info->pages);
kfree(cap_info);
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 35c37f667781..9b3884ff81e6 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -523,6 +523,17 @@ static void cper_print_tstamp(const char *pfx,
}
}
+struct ignore_section {
+ guid_t guid;
+ const char *name;
+};
+
+static const struct ignore_section ignore_sections[] = {
+ { .guid = CPER_SEC_CXL_GEN_MEDIA_GUID, .name = "CXL General Media Event" },
+ { .guid = CPER_SEC_CXL_DRAM_GUID, .name = "CXL DRAM Event" },
+ { .guid = CPER_SEC_CXL_MEM_MODULE_GUID, .name = "CXL Memory Module Event" },
+};
+
static void
cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata,
int sec_no)
@@ -543,6 +554,14 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text);
snprintf(newpfx, sizeof(newpfx), "%s ", pfx);
+
+ for (int i = 0; i < ARRAY_SIZE(ignore_sections); i++) {
+ if (guid_equal(sec_type, &ignore_sections[i].guid)) {
+ printk("%ssection_type: %s\n", newpfx, ignore_sections[i].name);
+ return;
+ }
+ }
+
if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) {
struct cper_sec_proc_generic *proc_err = acpi_hest_get_payload(gdata);
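The skip is table-driven: each entry pairs a section-type GUID with a printable name, and a linear scan decides whether to print just the name and return before the generic decoders run (here, so the CXL event handlers can do the real reporting). A self-contained sketch of the same lookup; the GUID bytes are made up:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint8_t b[16]; } guid_t;

struct ignore_section {
	guid_t guid;
	const char *name;
};

static int guid_equal(const guid_t *a, const guid_t *b)
{
	return !memcmp(a, b, sizeof(*a));
}

int main(void)
{
	static const struct ignore_section ignore_sections[] = {
		{ .guid = { { 0xfb } }, .name = "CXL General Media Event" },
	};
	guid_t sec_type = { { 0xfb } };	/* section type from the record */

	for (size_t i = 0; i < sizeof(ignore_sections) / sizeof(ignore_sections[0]); i++) {
		if (guid_equal(&sec_type, &ignore_sections[i].guid)) {
			printf("section_type: %s\n", ignore_sections[i].name);
			return 0;	/* skip the generic decoders */
		}
	}
	printf("decode section normally\n");
	return 0;
}
```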
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index d4987d013080..a00e07b853f2 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -144,15 +144,6 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
/*
- * Special purpose memory is 'soft reserved', which means it
- * is set aside initially, but can be hotplugged back in or
- * be assigned to the dax driver after boot.
- */
- if (efi_soft_reserve_enabled() &&
- (md->attribute & EFI_MEMORY_SP))
- return false;
-
- /*
* According to the spec, these regions are no longer reserved
* after calling ExitBootServices(). However, we can only use
* them as System RAM if they can be mapped writeback cacheable.
@@ -196,6 +187,16 @@ static __init void reserve_regions(void)
size = npages << PAGE_SHIFT;
if (is_memory(md)) {
+ /*
+ * Special purpose memory is 'soft reserved', which
+ * means it is set aside initially. Don't add a memblock
+ * for it now so that it can be hotplugged back in or
+ * be assigned to the dax driver after boot.
+ */
+ if (efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ continue;
+
early_init_dt_add_memory_arch(paddr, size);
if (!is_usable_memory(md))
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index e7b9ec6f8a86..833cbb995dd3 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -14,16 +14,43 @@ static unsigned int record_size = 1024;
module_param(record_size, uint, 0444);
MODULE_PARM_DESC(record_size, "size of each pstore UEFI var (in bytes, min/default=1024)");
-static bool efivars_pstore_disable =
- IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
-
-module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
-
#define PSTORE_EFI_ATTRIBUTES \
(EFI_VARIABLE_NON_VOLATILE | \
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
EFI_VARIABLE_RUNTIME_ACCESS)
+static bool pstore_disable = IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
+
+static int efivars_pstore_init(void);
+static void efivars_pstore_exit(void);
+
+static int efi_pstore_disable_set(const char *val, const struct kernel_param *kp)
+{
+ int err;
+ bool old_pstore_disable = pstore_disable;
+
+ err = param_set_bool(val, kp);
+ if (err)
+ return err;
+
+ if (old_pstore_disable != pstore_disable) {
+ if (pstore_disable)
+ efivars_pstore_exit();
+ else
+ efivars_pstore_init();
+ }
+
+ return 0;
+}
+
+static const struct kernel_param_ops pstore_disable_ops = {
+ .set = efi_pstore_disable_set,
+ .get = param_get_bool,
+};
+
+module_param_cb(pstore_disable, &pstore_disable_ops, &pstore_disable, 0644);
+__MODULE_PARM_TYPE(pstore_disable, "bool");
+
static int efi_pstore_open(struct pstore_info *psi)
{
int err;
@@ -218,12 +245,12 @@ static struct pstore_info efi_pstore_info = {
.erase = efi_pstore_erase,
};
-static __init int efivars_pstore_init(void)
+static int efivars_pstore_init(void)
{
if (!efivar_supports_writes())
return 0;
- if (efivars_pstore_disable)
+ if (pstore_disable)
return 0;
/*
@@ -250,7 +277,7 @@ static __init int efivars_pstore_init(void)
return 0;
}
-static __exit void efivars_pstore_exit(void)
+static void efivars_pstore_exit(void)
{
if (!efi_pstore_info.bufsize)
return;
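Converting the parameter to module_param_cb() is what allows toggling pstore backend registration at runtime: the custom setter observes the old and new values and starts or stops the subsystem on a transition, instead of merely storing the flag. A minimal kernel-module sketch of that pattern (the feature_init/feature_exit bodies are hypothetical placeholders):

```c
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool feature_disable;

/* Hypothetical subsystem hooks toggled by the parameter. */
static int feature_init(void) { pr_info("feature enabled\n"); return 0; }
static void feature_exit(void) { pr_info("feature disabled\n"); }

static int feature_disable_set(const char *val, const struct kernel_param *kp)
{
	bool old = feature_disable;
	int err = param_set_bool(val, kp);

	if (err)
		return err;

	/* React only to actual transitions, as efi_pstore_disable_set() does. */
	if (old != feature_disable) {
		if (feature_disable)
			feature_exit();
		else
			feature_init();
	}
	return 0;
}

static const struct kernel_param_ops feature_disable_ops = {
	.set = feature_disable_set,
	.get = param_get_bool,
};

module_param_cb(disable, &feature_disable_ops, &feature_disable, 0644);
__MODULE_PARM_TYPE(disable, "bool");
MODULE_LICENSE("GPL");
```

Writes to /sys/module/&lt;module&gt;/parameters/disable then drive the init/exit pair, which is why efivars_pstore_init()/efivars_pstore_exit() above lose their __init/__exit annotations.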
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 06964a3c130f..73f4810f6db3 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
-cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE
+cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax
cflags-$(CONFIG_LOONGARCH) += -fpie
cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
@@ -143,7 +143,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
# exist.
STUBCOPY_FLAGS-$(CONFIG_RISCV) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
-STUBCOPY_RELOC-$(CONFIG_RISCV) := R_RISCV_HI20
+STUBCOPY_RELOC-$(CONFIG_RISCV) := -E R_RISCV_HI20\|R_RISCV_$(BITS)\|R_RISCV_RELAX
# For LoongArch, keep all the symbols in .init section and make sure that no
# absolute symbols references exist.
diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c
index 6b83c492c3b8..31928bd87e0f 100644
--- a/drivers/firmware/efi/libstub/alignedmem.c
+++ b/drivers/firmware/efi/libstub/alignedmem.c
@@ -14,6 +14,7 @@
* @max: the address that the last allocated memory page shall not
* exceed
* @align: minimum alignment of the base of the allocation
+ * @memory_type: the type of memory to allocate
*
* Allocate pages as EFI_LOADER_DATA. The allocated pages are aligned according
* to @align, which should be >= EFI_ALLOC_ALIGN. The last allocated page will
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index bfa30625f5d0..3dc2f9aaf08d 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -24,6 +24,8 @@ static bool efi_noinitrd;
static bool efi_nosoftreserve;
static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
+int efi_mem_encrypt;
+
bool __pure __efi_soft_reserve_enabled(void)
{
return !efi_nosoftreserve;
@@ -75,6 +77,12 @@ efi_status_t efi_parse_options(char const *cmdline)
efi_noinitrd = true;
} else if (IS_ENABLED(CONFIG_X86_64) && !strcmp(param, "no5lvl")) {
efi_no5lvl = true;
+ } else if (IS_ENABLED(CONFIG_ARCH_HAS_MEM_ENCRYPT) &&
+ !strcmp(param, "mem_encrypt") && val) {
+ if (parse_option_str(val, "on"))
+ efi_mem_encrypt = 1;
+ else if (parse_option_str(val, "off"))
+ efi_mem_encrypt = -1;
} else if (!strcmp(param, "efi") && val) {
efi_nochunk = parse_option_str(val, "nochunk");
efi_novamap |= parse_option_str(val, "novamap");
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 212687c30d79..fc18fd649ed7 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -37,8 +37,8 @@ extern bool efi_no5lvl;
extern bool efi_nochunk;
extern bool efi_nokaslr;
extern int efi_loglevel;
+extern int efi_mem_encrypt;
extern bool efi_novamap;
-
extern const efi_system_table_t *efi_system_table;
typedef union efi_dxe_services_table efi_dxe_services_table_t;
@@ -956,7 +956,8 @@ efi_status_t efi_get_random_bytes(unsigned long size, u8 *out);
efi_status_t efi_random_alloc(unsigned long size, unsigned long align,
unsigned long *addr, unsigned long random_seed,
- int memory_type, unsigned long alloc_limit);
+ int memory_type, unsigned long alloc_min,
+ unsigned long alloc_max);
efi_status_t efi_random_get_seed(void);
diff --git a/drivers/firmware/efi/libstub/kaslr.c b/drivers/firmware/efi/libstub/kaslr.c
index 62d63f7a2645..1a9808012abd 100644
--- a/drivers/firmware/efi/libstub/kaslr.c
+++ b/drivers/firmware/efi/libstub/kaslr.c
@@ -119,7 +119,7 @@ efi_status_t efi_kaslr_relocate_kernel(unsigned long *image_addr,
*/
status = efi_random_alloc(*reserve_size, min_kimg_align,
reserve_addr, phys_seed,
- EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+ EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
efi_warn("efi_random_alloc() failed: 0x%lx\n", status);
} else {
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 674a064b8f7a..4e96a855fdf4 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -17,7 +17,7 @@
static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
unsigned long size,
unsigned long align_shift,
- u64 alloc_limit)
+ u64 alloc_min, u64 alloc_max)
{
unsigned long align = 1UL << align_shift;
u64 first_slot, last_slot, region_end;
@@ -30,11 +30,11 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
return 0;
region_end = min(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - 1,
- alloc_limit);
+ alloc_max);
if (region_end < size)
return 0;
- first_slot = round_up(md->phys_addr, align);
+ first_slot = round_up(max(md->phys_addr, alloc_min), align);
last_slot = round_down(region_end - size + 1, align);
if (first_slot > last_slot)
@@ -56,7 +56,8 @@ efi_status_t efi_random_alloc(unsigned long size,
unsigned long *addr,
unsigned long random_seed,
int memory_type,
- unsigned long alloc_limit)
+ unsigned long alloc_min,
+ unsigned long alloc_max)
{
unsigned long total_slots = 0, target_slot;
unsigned long total_mirrored_slots = 0;
@@ -78,7 +79,8 @@ efi_status_t efi_random_alloc(unsigned long size,
efi_memory_desc_t *md = (void *)map->map + map_offset;
unsigned long slots;
- slots = get_entry_num_slots(md, size, ilog2(align), alloc_limit);
+ slots = get_entry_num_slots(md, size, ilog2(align), alloc_min,
+ alloc_max);
MD_NUM_SLOTS(md) = slots;
total_slots += slots;
if (md->attribute & EFI_MEMORY_MORE_RELIABLE)
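With the extra alloc_min bound, the slot counting now clamps the candidate range from both sides: the first aligned slot starts at max(region start, alloc_min) and the last must leave room for the allocation below alloc_max. A user-space rendering of the arithmetic, simplified from get_entry_num_slots() (power-of-two alignment assumed):

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t round_up(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
static uint64_t round_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }

static unsigned long num_slots(uint64_t base, uint64_t len, uint64_t size,
			       uint64_t align, uint64_t min, uint64_t max)
{
	uint64_t region_end = base + len - 1;
	uint64_t first, last;

	if (region_end > max)
		region_end = max;
	if (region_end < size)
		return 0;

	first = round_up(base > min ? base : min, align);
	last = round_down(region_end - size + 1, align);

	return first > last ? 0 : (last - first) / align + 1;
}

int main(void)
{
	/* 16 MiB region at 1 MiB; place 2 MiB, 2 MiB-aligned, at or above 4 MiB. */
	printf("%lu slots\n", num_slots(1ULL << 20, 16ULL << 20, 2ULL << 20,
					2ULL << 20, 4ULL << 20, UINT64_MAX));
	return 0;
}
```

This is what lets the x86 stub pass LOAD_PHYSICAL_ADDR as the lower bound further below, keeping the randomized kernel placement out of the low region.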
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 0d510c9a06a4..0336ed175e67 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -223,8 +223,8 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
}
}
-void efi_adjust_memory_range_protection(unsigned long start,
- unsigned long size)
+efi_status_t efi_adjust_memory_range_protection(unsigned long start,
+ unsigned long size)
{
efi_status_t status;
efi_gcd_memory_space_desc_t desc;
@@ -236,13 +236,17 @@ void efi_adjust_memory_range_protection(unsigned long start,
rounded_end = roundup(start + size, EFI_PAGE_SIZE);
if (memattr != NULL) {
- efi_call_proto(memattr, clear_memory_attributes, rounded_start,
- rounded_end - rounded_start, EFI_MEMORY_XP);
- return;
+ status = efi_call_proto(memattr, clear_memory_attributes,
+ rounded_start,
+ rounded_end - rounded_start,
+ EFI_MEMORY_XP);
+ if (status != EFI_SUCCESS)
+ efi_warn("Failed to clear EFI_MEMORY_XP attribute\n");
+ return status;
}
if (efi_dxe_table == NULL)
- return;
+ return EFI_SUCCESS;
/*
* Don't modify memory region attributes, they are
@@ -255,7 +259,7 @@ void efi_adjust_memory_range_protection(unsigned long start,
status = efi_dxe_call(get_memory_space_descriptor, start, &desc);
if (status != EFI_SUCCESS)
- return;
+ break;
next = desc.base_address + desc.length;
@@ -280,8 +284,10 @@ void efi_adjust_memory_range_protection(unsigned long start,
unprotect_start,
unprotect_start + unprotect_size,
status);
+ break;
}
}
+ return EFI_SUCCESS;
}
static void setup_unaccepted_memory(void)
@@ -793,6 +799,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
status = efi_random_alloc(alloc_size, CONFIG_PHYSICAL_ALIGN, &addr,
seed[0], EFI_LOADER_CODE,
+ LOAD_PHYSICAL_ADDR,
EFI_X86_KERNEL_ALLOC_LIMIT);
if (status != EFI_SUCCESS)
return status;
@@ -805,9 +812,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
*kernel_entry = addr + entry;
- efi_adjust_memory_range_protection(addr, kernel_total_size);
-
- return EFI_SUCCESS;
+ return efi_adjust_memory_range_protection(addr, kernel_total_size);
}
static void __noreturn enter_kernel(unsigned long kernel_addr,
@@ -879,6 +884,9 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
}
}
+ if (efi_mem_encrypt > 0)
+ hdr->xloadflags |= XLF_MEM_ENCRYPTION;
+
status = efi_decompress_kernel(&kernel_entry);
if (status != EFI_SUCCESS) {
efi_err("Failed to decompress kernel\n");
diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h
index 37c5a36b9d8c..1c20e99a6494 100644
--- a/drivers/firmware/efi/libstub/x86-stub.h
+++ b/drivers/firmware/efi/libstub/x86-stub.h
@@ -5,8 +5,8 @@
extern void trampoline_32bit_src(void *, bool);
extern const u16 trampoline_ljmp_imm_offset;
-void efi_adjust_memory_range_protection(unsigned long start,
- unsigned long size);
+efi_status_t efi_adjust_memory_range_protection(unsigned long start,
+ unsigned long size);
#ifdef CONFIG_X86_64
efi_status_t efi_setup_5level_paging(void);
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
index bdb17eac0cb4..1ceace956758 100644
--- a/drivers/firmware/efi/libstub/zboot.c
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -119,7 +119,7 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
}
status = efi_random_alloc(alloc_size, min_kimg_align, &image_base,
- seed, EFI_LOADER_CODE, EFI_ALLOC_LIMIT);
+ seed, EFI_LOADER_CODE, 0, EFI_ALLOC_LIMIT);
if (status != EFI_SUCCESS) {
efi_err("Failed to allocate memory\n");
goto free_cmdline;
diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
index 09525fb5c240..01f0f90ea418 100644
--- a/drivers/firmware/efi/riscv-runtime.c
+++ b/drivers/firmware/efi/riscv-runtime.c
@@ -85,7 +85,7 @@ static int __init riscv_enable_runtime_services(void)
efi_memory_desc_t *md;
for_each_efi_memory_desc(md) {
- int md_size = md->num_pages << EFI_PAGE_SHIFT;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
struct resource *res;
if (!(md->attribute & EFI_MEMORY_SP))
diff --git a/drivers/firmware/google/cbmem.c b/drivers/firmware/google/cbmem.c
index 88e587ba1e0d..c2bffdc352a3 100644
--- a/drivers/firmware/google/cbmem.c
+++ b/drivers/firmware/google/cbmem.c
@@ -114,6 +114,12 @@ static int cbmem_entry_probe(struct coreboot_device *dev)
return 0;
}
+static const struct coreboot_device_id cbmem_ids[] = {
+ { .tag = LB_TAG_CBMEM_ENTRY },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(coreboot, cbmem_ids);
+
static struct coreboot_driver cbmem_entry_driver = {
.probe = cbmem_entry_probe,
.drv = {
@@ -121,7 +127,7 @@ static struct coreboot_driver cbmem_entry_driver = {
.owner = THIS_MODULE,
.dev_groups = dev_groups,
},
- .tag = LB_TAG_CBMEM_ENTRY,
+ .id_table = cbmem_ids,
};
module_coreboot_driver(cbmem_entry_driver);
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 2a4469bf1b81..d4b6e581a6c6 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -28,8 +28,17 @@ static int coreboot_bus_match(struct device *dev, struct device_driver *drv)
{
struct coreboot_device *device = CB_DEV(dev);
struct coreboot_driver *driver = CB_DRV(drv);
+ const struct coreboot_device_id *id;
- return device->entry.tag == driver->tag;
+ if (!driver->id_table)
+ return 0;
+
+ for (id = driver->id_table; id->tag; id++) {
+ if (device->entry.tag == id->tag)
+ return 1;
+ }
+
+ return 0;
}
static int coreboot_bus_probe(struct device *dev)
@@ -53,11 +62,20 @@ static void coreboot_bus_remove(struct device *dev)
driver->remove(device);
}
-static struct bus_type coreboot_bus_type = {
+static int coreboot_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+ struct coreboot_device *device = CB_DEV(dev);
+ u32 tag = device->entry.tag;
+
+ return add_uevent_var(env, "MODALIAS=coreboot:t%08X", tag);
+}
+
+static const struct bus_type coreboot_bus_type = {
.name = "coreboot",
.match = coreboot_bus_match,
.probe = coreboot_bus_probe,
.remove = coreboot_bus_remove,
+ .uevent = coreboot_bus_uevent,
};
static void coreboot_device_release(struct device *dev)
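Moving from a single tag to an id_table is what makes MODULE_DEVICE_TABLE() and the new MODALIAS uevent useful: the match walks a sentinel-terminated table, and the uevent string lets udev autoload the right module for a tag. A user-space sketch of both halves (the tag value is made up):

```c
#include <stdint.h>
#include <stdio.h>

struct coreboot_device_id { uint32_t tag; };

/* Mirrors coreboot_bus_match(): scan a zero-terminated tag table. */
static int bus_match(uint32_t dev_tag, const struct coreboot_device_id *table)
{
	if (!table)
		return 0;

	for (; table->tag; table++)
		if (dev_tag == table->tag)
			return 1;

	return 0;
}

int main(void)
{
	static const struct coreboot_device_id ids[] = {
		{ .tag = 0x17 },	/* hypothetical device tag */
		{ /* sentinel */ }
	};
	uint32_t tag = 0x17;

	if (bus_match(tag, ids))
		printf("MODALIAS=coreboot:t%08X\n", tag);	/* as in coreboot_bus_uevent() */
	return 0;
}
```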
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index d814dca33a08..86427989c57f 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -13,6 +13,7 @@
#define __COREBOOT_TABLE_H
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
/* Coreboot table header structure */
struct coreboot_table_header {
@@ -93,7 +94,7 @@ struct coreboot_driver {
int (*probe)(struct coreboot_device *);
void (*remove)(struct coreboot_device *);
struct device_driver drv;
- u32 tag;
+ const struct coreboot_device_id *id_table;
};
/* Register a driver that uses the data from a coreboot table. */
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index 5c84bbebfef8..07c458bf64ec 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -80,13 +80,19 @@ static void framebuffer_remove(struct coreboot_device *dev)
platform_device_unregister(pdev);
}
+static const struct coreboot_device_id framebuffer_ids[] = {
+ { .tag = CB_TAG_FRAMEBUFFER },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(coreboot, framebuffer_ids);
+
static struct coreboot_driver framebuffer_driver = {
.probe = framebuffer_probe,
.remove = framebuffer_remove,
.drv = {
.name = "framebuffer",
},
- .tag = CB_TAG_FRAMEBUFFER,
+ .id_table = framebuffer_ids,
};
module_coreboot_driver(framebuffer_driver);
diff --git a/drivers/firmware/google/memconsole-coreboot.c b/drivers/firmware/google/memconsole-coreboot.c
index 74b5286518ee..24c97a70aa80 100644
--- a/drivers/firmware/google/memconsole-coreboot.c
+++ b/drivers/firmware/google/memconsole-coreboot.c
@@ -96,13 +96,19 @@ static void memconsole_remove(struct coreboot_device *dev)
memconsole_exit();
}
+static const struct coreboot_device_id memconsole_ids[] = {
+ { .tag = CB_TAG_CBMEM_CONSOLE },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(coreboot, memconsole_ids);
+
static struct coreboot_driver memconsole_driver = {
.probe = memconsole_probe,
.remove = memconsole_remove,
.drv = {
.name = "memconsole",
},
- .tag = CB_TAG_CBMEM_CONSOLE,
+ .id_table = memconsole_ids,
};
module_coreboot_driver(memconsole_driver);
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index ee6e08c0592b..8e4216714b29 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -306,13 +306,19 @@ static void vpd_remove(struct coreboot_device *dev)
kobject_put(vpd_kobj);
}
+static const struct coreboot_device_id vpd_ids[] = {
+ { .tag = CB_TAG_VPD },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(coreboot, vpd_ids);
+
static struct coreboot_driver vpd_driver = {
.probe = vpd_probe,
.remove = vpd_remove,
.drv = {
.name = "vpd",
},
- .tag = CB_TAG_VPD,
+ .id_table = vpd_ids,
};
module_coreboot_driver(vpd_driver);
diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c
index 81f5f62e34fc..fbeeaee4ac85 100644
--- a/drivers/firmware/microchip/mpfs-auto-update.c
+++ b/drivers/firmware/microchip/mpfs-auto-update.c
@@ -167,7 +167,7 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader)
u32 *response_msg;
int ret;
- response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg),
+ response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
GFP_KERNEL);
if (!response_msg)
return -ENOMEM;
@@ -384,7 +384,8 @@ static int mpfs_auto_update_available(struct mpfs_auto_update_priv *priv)
u32 *response_msg;
int ret;
- response_msg = devm_kzalloc(priv->dev, AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg),
+ response_msg = devm_kzalloc(priv->dev,
+ AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg),
GFP_KERNEL);
if (!response_msg)
return -ENOMEM;
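Both hunks fix the same classic slip: sizeof(response_msg) measures the pointer (8 bytes on 64-bit), not the u32 element it points at, so the buffer came out pointer-sized-times-N instead of element-sized-times-N. A two-line demonstration:

```c
#include <stdint.h>
#include <stdio.h>

#define AUTO_UPDATE_FEATURE_RESP_SIZE 4	/* element count, as in the driver */

int main(void)
{
	uint32_t *response_msg = NULL;

	/* sizeof doesn't dereference, so the NULL pointer is never touched. */
	printf("wrong: %zu bytes\n", AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(response_msg));
	printf("right: %zu bytes\n", AUTO_UPDATE_FEATURE_RESP_SIZE * sizeof(*response_msg));
	return 0;
}
```

The sizeof(*ptr) form stays correct even if the element type changes later, which is why it is the preferred kernel idiom.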
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index bbcdd9fed3fb..4221fed70ad4 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -77,7 +77,7 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
if (!root_path_buf)
- goto out;
+ return NULL;
root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
root_path_buf_len);
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index e00c33310517..753e7be039e4 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -127,8 +127,6 @@ static int gen_74x164_probe(struct spi_device *spi)
if (IS_ERR(chip->gpiod_oe))
return PTR_ERR(chip->gpiod_oe);
- gpiod_set_value_cansleep(chip->gpiod_oe, 1);
-
spi_set_drvdata(spi, chip);
chip->gpio_chip.label = spi->modalias;
@@ -153,6 +151,8 @@ static int gen_74x164_probe(struct spi_device *spi)
goto exit_destroy;
}
+ gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+
ret = gpiochip_add_data(&chip->gpio_chip, chip);
if (!ret)
return 0;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index a13f3c18ccd4..8cfd3a89c018 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -99,7 +99,6 @@ struct mvebu_pwm {
u32 offset;
unsigned long clk_rate;
struct gpio_desc *gpiod;
- struct pwm_chip chip;
spinlock_t lock;
struct mvebu_gpio_chip *mvchip;
@@ -615,7 +614,7 @@ static const struct regmap_config mvebu_gpio_regmap_config = {
*/
static struct mvebu_pwm *to_mvebu_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct mvebu_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int mvebu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -789,6 +788,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct mvebu_pwm *mvpwm;
+ struct pwm_chip *chip;
void __iomem *base;
u32 offset;
u32 set;
@@ -813,9 +813,11 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
if (IS_ERR(mvchip->clk))
return PTR_ERR(mvchip->clk);
- mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL);
- if (!mvpwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, mvchip->chip.ngpio, sizeof(*mvpwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ mvpwm = to_mvebu_pwm(chip);
+
mvchip->mvpwm = mvpwm;
mvpwm->mvchip = mvchip;
mvpwm->offset = offset;
@@ -868,13 +870,11 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
return -EINVAL;
}
- mvpwm->chip.dev = dev;
- mvpwm->chip.ops = &mvebu_pwm_ops;
- mvpwm->chip.npwm = mvchip->chip.ngpio;
+ chip->ops = &mvebu_pwm_ops;
spin_lock_init(&mvpwm->lock);
- return devm_pwmchip_add(dev, &mvpwm->chip);
+ return devm_pwmchip_add(dev, chip);
}
#ifdef CONFIG_DEBUG_FS
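The conversion replaces an embedded struct pwm_chip plus container_of() with a single allocation that carries the chip and the driver's private data together, retrieved through pwmchip_get_drvdata(). A user-space sketch of that allocation layout (the real devm_pwmchip_alloc() also handles device lifetime and alignment, which this ignores):

```c
#include <stdio.h>
#include <stdlib.h>

struct pwm_chip { unsigned int npwm; /* driver-private data follows */ };

/* One allocation: chip header followed by priv bytes. */
static struct pwm_chip *pwmchip_alloc(unsigned int npwm, size_t priv)
{
	struct pwm_chip *chip = calloc(1, sizeof(*chip) + priv);

	if (chip)
		chip->npwm = npwm;
	return chip;
}

static void *chip_get_drvdata(struct pwm_chip *chip)
{
	return chip + 1;	/* private area sits right after the chip */
}

struct my_pwm { int offset; };

int main(void)
{
	struct pwm_chip *chip = pwmchip_alloc(4, sizeof(struct my_pwm));
	struct my_pwm *mp;

	if (!chip)
		return 1;

	mp = chip_get_drvdata(chip);
	mp->offset = 0x20;
	printf("npwm=%u offset=%#x\n", chip->npwm, mp->offset);
	free(chip);
	return 0;
}
```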
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 44c8f5743a24..75be4a3ca7f8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -968,11 +968,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiochip_irqchip_init_valid_mask(gc);
if (ret)
- goto err_remove_acpi_chip;
+ goto err_free_hogs;
ret = gpiochip_irqchip_init_hw(gc);
if (ret)
- goto err_remove_acpi_chip;
+ goto err_remove_irqchip_mask;
ret = gpiochip_add_irqchip(gc, lock_key, request_key);
if (ret)
@@ -997,23 +997,23 @@ err_remove_irqchip:
gpiochip_irqchip_remove(gc);
err_remove_irqchip_mask:
gpiochip_irqchip_free_valid_mask(gc);
-err_remove_acpi_chip:
+err_free_hogs:
+ gpiochip_free_hogs(gc);
acpi_gpiochip_remove(gc);
+ gpiochip_remove_pin_ranges(gc);
err_remove_of_chip:
- gpiochip_free_hogs(gc);
of_gpiochip_remove(gc);
err_free_gpiochip_mask:
- gpiochip_remove_pin_ranges(gc);
gpiochip_free_valid_mask(gc);
+err_remove_from_list:
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_del(&gdev->list);
+ spin_unlock_irqrestore(&gpio_lock, flags);
if (gdev->dev.release) {
/* release() has been registered by gpiochip_setup_dev() */
gpio_device_put(gdev);
goto err_print_message;
}
-err_remove_from_list:
- spin_lock_irqsave(&gpio_lock, flags);
- list_del(&gdev->list);
- spin_unlock_irqrestore(&gpio_lock, flags);
err_free_label:
kfree_const(gdev->label);
err_free_descs:
@@ -2042,6 +2042,11 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_free);
int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config)
{
+#ifdef CONFIG_PINCTRL
+ if (list_empty(&gc->gpiodev->pin_ranges))
+ return -ENOTSUPP;
+#endif
+
return pinctrl_gpio_set_config(gc, offset, config);
}
EXPORT_SYMBOL_GPL(gpiochip_generic_config);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 2520db0b776e..c7edba18a6f0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -199,7 +199,7 @@ config DRM_TTM
config DRM_TTM_KUNIT_TEST
tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
default n
- depends on DRM && KUNIT && MMU
+ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
select DRM_TTM
select DRM_EXPORT_FOR_TESTS if m
select DRM_KUNIT_TEST_HELPERS
@@ -207,7 +207,8 @@ config DRM_TTM_KUNIT_TEST
help
Enables unit tests for TTM, a GPU memory manager subsystem used
to manage memory buffers. This option is mostly useful for kernel
- developers.
+ developers. It depends on (UML || COMPILE_TEST) since no other driver
+ which uses TTM can be loaded while running the tests.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3d8a48f46b01..79827a6dcd7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -200,6 +200,7 @@ extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
extern int amdgpu_backlight;
+extern int amdgpu_damage_clips;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
@@ -1078,6 +1079,8 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
+ /* indicates whether the amdgpu suspend sequence has completed */
+ bool suspend_complete;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;
@@ -1547,9 +1550,11 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+void amdgpu_choose_low_power_state(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+static inline void amdgpu_choose_low_power_state(struct amdgpu_device *adev) { }
#endif
#if defined(CONFIG_DRM_AMD_DC)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 2deebece810e..7099ff9cf8c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1519,4 +1519,22 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
#endif /* CONFIG_AMD_PMC */
}
+/**
+ * amdgpu_choose_low_power_state
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Choose the target low power state for the GPU
+ */
+void amdgpu_choose_low_power_state(struct amdgpu_device *adev)
+{
+ if (adev->in_runpm)
+ return;
+
+ if (amdgpu_acpi_is_s0ix_active(adev))
+ adev->in_s0ix = true;
+ else if (amdgpu_acpi_is_s3_active(adev))
+ adev->in_s3 = true;
+}
+
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index fdde7488d0ed..94bdb5fa6ebc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4514,13 +4514,15 @@ int amdgpu_device_prepare(struct drm_device *dev)
struct amdgpu_device *adev = drm_to_adev(dev);
int i, r;
+ amdgpu_choose_low_power_state(adev);
+
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
/* Evict the majority of BOs before starting suspend sequence */
r = amdgpu_device_evict_resources(adev);
if (r)
- return r;
+ goto unprepare;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
@@ -4529,10 +4531,15 @@ int amdgpu_device_prepare(struct drm_device *dev)
continue;
r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
if (r)
- return r;
+ goto unprepare;
}
return 0;
+
+unprepare:
+ adev->in_s0ix = adev->in_s3 = false;
+
+ return r;
}
/**
@@ -4569,7 +4576,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
cancel_delayed_work_sync(&adev->delayed_init_work);
- flush_delayed_work(&adev->gfx.gfx_off_delay_work);
amdgpu_ras_suspend(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 971acf01bea6..586f4d03039d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -211,6 +211,7 @@ int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
int amdgpu_agp = -1; /* auto */
int amdgpu_wbrf = -1;
+int amdgpu_damage_clips = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -860,6 +861,18 @@ MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (defau
module_param_named(backlight, amdgpu_backlight, bint, 0444);
/**
+ * DOC: damageclips (int)
+ * Enable or disable damage clips support. If damage clips support is disabled,
+ * we will force full frame updates, irrespective of what user space sends to
+ * us.
+ *
+ * Defaults to -1 (where it is enabled unless a PSR-SU display is detected).
+ */
+MODULE_PARM_DESC(damageclips,
+ "Damage clips support (0 = disable, 1 = enable, -1 auto (default))");
+module_param_named(damageclips, amdgpu_damage_clips, int, 0444);
+
+/**
* DOC: tmz (int)
* Trusted Memory Zone (TMZ) is a method to protect data being written
* to or read from memory.
@@ -2476,6 +2489,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ adev->suspend_complete = false;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
@@ -2490,6 +2504,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ adev->suspend_complete = true;
if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index b9674c57c436..6ddc8e3360e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -723,8 +723,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ /* If going to s2idle, no need to wait */
+ if (adev->in_s0ix) {
+ if (!amdgpu_dpm_set_powergating_by_smu(adev,
+ AMD_IP_BLOCK_TYPE_GFX, true))
+ adev->gfx.gfx_off_state = true;
+ } else {
+ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
delay);
+ }
}
} else {
if (adev->gfx.gfx_off_req_count == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
index 468a67b302d4..ca5c86e5f7cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -362,7 +362,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
}
}
- if (copy_to_user((char *)buf, context->mem_context.shared_buf, shared_buf_len))
+ if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
ret = -EFAULT;
err_free_shared_buf:
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 69c500910746..3bc6943365a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3034,6 +3034,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
gfx_v9_0_cp_gfx_enable(adev, true);
+ /* For now, limit the quirk to the APU gfx9 series; it is already
+ * confirmed that the APU gfx10/gfx11 don't need such an update.
+ */
+ if (adev->flags & AMD_IS_APU &&
+ adev->in_s3 && !adev->suspend_complete) {
+ DRM_INFO(" Will skip the CSB packet resubmit\n");
+ return 0;
+ }
r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 40a00ea0009f..e67a62db9e12 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1947,14 +1947,6 @@ static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
- static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
- u32 vram_info;
-
- /* Only for dGPU, vendor informaton is reliable */
- if (!amdgpu_sriov_vf(adev) && !(adev->flags & AMD_IS_APU)) {
- vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
- adev->gmc.vram_vendor = vram_info & 0xF;
- }
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index bc38b90f8cf8..88ea58d5c4ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -674,14 +674,6 @@ static int jpeg_v4_0_set_powergating_state(void *handle,
return ret;
}
-static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int type,
@@ -765,7 +757,6 @@ static void jpeg_v4_0_set_dec_ring_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
- .set = jpeg_v4_0_set_interrupt_state,
.process = jpeg_v4_0_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 6ede85b28cc8..78b74daf4eeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -181,7 +181,6 @@ static int jpeg_v4_0_5_hw_fini(void *handle)
RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
- amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
return 0;
}
@@ -516,14 +515,6 @@ static int jpeg_v4_0_5_set_powergating_state(void *handle,
return ret;
}
-static int jpeg_v4_0_5_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- return 0;
-}
-
static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -603,7 +594,6 @@ static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
- .set = jpeg_v4_0_5_set_interrupt_state,
.process = jpeg_v4_0_5_process_interrupt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index e90f33780803..b4723d68eab0 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -431,6 +431,12 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
u32 inst_mask;
int i;
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset =
+ SOC15_REG_OFFSET(
+ NBIO, 0,
+ regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL)
+ << 2;
WREG32_SOC15(NBIO, 0, regXCC_DOORBELL_FENCE,
0xff & ~(adev->gfx.xcc_mask));
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 15033efec2ba..1c614451dead 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -574,11 +574,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
return AMD_RESET_METHOD_MODE1;
}
+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+ /* Reset on resume in the following suspend-abort cases.
+ * 1) For now this is limited to the APU side; dGPUs haven't been checked yet.
+ * 2) S3 suspend was aborted and the TOS has already launched.
+ */
+ if (adev->flags & AMD_IS_APU && adev->in_s3 &&
+ !adev->suspend_complete &&
+ sol_reg)
+ return true;
+
+ return false;
+}
+
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
- if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
- (adev->apu_flags & AMD_APU_IS_RAVEN2))
+ /* On the latest Raven, the GPU reset can be performed
+ * successfully. So now, temporarily enable it for the
+ * S3 suspend abort case.
+ */
+ if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+ (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
+ !soc15_need_reset_on_resume(adev))
return 0;
switch (soc15_asic_reset_method(adev)) {
@@ -1302,6 +1325,10 @@ static int soc15_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (soc15_need_reset_on_resume(adev)) {
+ dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
+ soc15_asic_reset(adev);
+ }
return soc15_common_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 48c6efcdeac9..4d7188912edf 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -50,13 +50,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs;
/* SOC21 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
- {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
};
static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index d722cbd31783..826bc4f6c8a7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -55,8 +55,8 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
if (has_wa_flag) {
- uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ?
- 0xffff : 0xffffffff;
+ uint32_t wa_mask =
+ (minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 0xffff : 0xffffffff;
m->compute_static_thread_mgmt_se0 = wa_mask;
m->compute_static_thread_mgmt_se1 = wa_mask;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 42d881809dc7..697b6d530d12 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -303,6 +303,15 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
update_cu_mask(mm, mqd, minfo, 0);
set_priority(m, q);
+ if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) {
+ if (minfo->update_flag & UPDATE_FLAG_IS_GWS)
+ m->compute_resource_limits |=
+ COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
+ else
+ m->compute_resource_limits &=
+ ~COMPUTE_RESOURCE_LIMITS__FORCE_SIMD_DIST_MASK;
+ }
+
q->is_active = QUEUE_IS_ACTIVE(*q);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 677281c0793e..80320b8603fc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -532,6 +532,7 @@ struct queue_properties {
enum mqd_update_flag {
UPDATE_FLAG_DBG_WA_ENABLE = 1,
UPDATE_FLAG_DBG_WA_DISABLE = 2,
+ UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */
};
struct mqd_update_info {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 43eff221eae5..4858112f9a53 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -95,6 +95,7 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws)
{
+ struct mqd_update_info minfo = {0};
struct kfd_node *dev = NULL;
struct process_queue_node *pqn;
struct kfd_process_device *pdd;
@@ -146,9 +147,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
}
pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;
+ minfo.update_flag = gws ? UPDATE_FLAG_IS_GWS : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
- pqn->q, NULL);
+ pqn->q, &minfo);
}
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e5f7c92eebcb..6ed2ec381aaa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1638,12 +1638,10 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
else
mode = UNKNOWN_MEMORY_PARTITION_MODE;
- if (pcache->cache_level == 2)
- pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc;
- else if (mode)
- pcache->cache_size = pcache_info[cache_type].cache_size / mode;
- else
- pcache->cache_size = pcache_info[cache_type].cache_size;
+ pcache->cache_size = pcache_info[cache_type].cache_size;
+ /* Partition mode only affects L3 cache size */
+ if (mode && pcache->cache_level == 3)
+ pcache->cache_size /= mode;
if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
pcache->cache_type |= HSA_CACHE_TYPE_DATA;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d292f290cd6e..1a9bbb04bd5e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1843,21 +1843,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_ERROR("amdgpu: fail to register dmub aux callback");
goto error;
}
- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
- goto error;
- }
- if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
- DRM_ERROR("amdgpu: fail to register dmub hpd callback");
- goto error;
- }
- }
-
- /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
- * It is expected that DMUB will resend any pending notifications at this point, for
- * example HPD from DPIA.
- */
- if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+ * It is expected that DMUB will resend any pending notifications at this point. Note
+ * that hpd and hpd_irq handler registration is deferred to register_hpd_handlers() to
+ * align with the legacy interface initialization sequence. Connection status will be
+ * proactively detected once in amdgpu_dm_initialize_drm_device.
+ */
dc_enable_dmub_outbox(adev->dm.dc);
/* DPIA trace goes to dmesg logs only if outbox is enabled */
@@ -1956,7 +1947,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
&adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr);
- if (adev->dm.hpd_rx_offload_wq) {
+ if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
if (adev->dm.hpd_rx_offload_wq[i].wq) {
destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
@@ -2287,6 +2278,7 @@ static int dm_sw_fini(void *handle)
if (adev->dm.dmub_srv) {
dmub_srv_destroy(adev->dm.dmub_srv);
+ kfree(adev->dm.dmub_srv);
adev->dm.dmub_srv = NULL;
}
@@ -3536,6 +3528,14 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ }
+
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
@@ -3564,10 +3564,6 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
handle_hpd_rx_irq,
(void *) aconnector);
}
-
- if (adev->dm.hpd_rx_offload_wq)
- adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
- aconnector;
}
}
@@ -4561,6 +4557,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
+ if (dm->hpd_rx_offload_wq)
+ dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
+ aconnector;
+
if (!dc_link_detect_connection_type(link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
@@ -5219,6 +5219,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
struct drm_plane_state *new_plane_state,
struct drm_crtc_state *crtc_state,
struct dc_flip_addrs *flip_addrs,
+ bool is_psr_su,
bool *dirty_regions_changed)
{
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
@@ -5243,6 +5244,10 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
clips = drm_plane_get_damage_clips(new_plane_state);
+ if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
+ is_psr_su)))
+ goto ffu;
+
if (!dm_crtc_state->mpo_requested) {
if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
goto ffu;
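The new check folds the module parameter and the panel type into one full-frame-update (FFU) decision: clips are honoured unless they are globally disabled (0), or left on auto (-1) while the sink is PSR-SU. A standalone rendering of the predicate:

```c
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the goto-ffu condition in fill_dc_dirty_rects(). */
static bool force_ffu(int damage_clips_param, bool is_psr_su, int num_clips)
{
	return num_clips &&
	       (!damage_clips_param || (damage_clips_param < 0 && is_psr_su));
}

int main(void)
{
	printf("auto, PSR-SU panel: %d\n", force_ffu(-1, true, 4));	/* 1: FFU */
	printf("auto, other panel:  %d\n", force_ffu(-1, false, 4));	/* 0: clips */
	printf("param disabled:     %d\n", force_ffu(0, false, 4));	/* 1: FFU */
	return 0;
}
```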
@@ -6194,7 +6199,9 @@ create_stream_for_sink(struct drm_connector *connector,
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode);
+ saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
drm_mode_copy(&mode, freesync_mode);
+ mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
} else {
decide_crtc_timing_for_drm_display_mode(
&mode, preferred_mode, scale);
@@ -6527,10 +6534,15 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
/*
* Note: drm_get_edid gets edid in the following order:
@@ -6538,7 +6550,7 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
* 2) firmware EDID if set via edid_firmware module parameter
* 3) regular DDC read.
*/
- edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+ edid = drm_get_edid(connector, ddc);
if (!edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
@@ -6579,12 +6591,18 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base);
+ struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
/*
* Note: drm_get_edid gets edid in the following order:
@@ -6592,7 +6610,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
* 2) firmware EDID if set via edid_firmware module parameter
* 3) regular DDC read.
*/
- edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+ edid = drm_get_edid(connector, ddc);
if (!edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
return;
@@ -8298,6 +8316,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
fill_dc_dirty_rects(plane, old_plane_state,
new_plane_state, new_crtc_state,
&bundle->flip_addrs[planes_count],
+ acrtc_state->stream->link->psr_settings.psr_version ==
+ DC_PSR_VERSION_SU_1,
&dirty_rects_changed);
/*
@@ -10731,11 +10751,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
- if (ret) {
- DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
- ret = -EINVAL;
- goto fail;
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+ if (ret) {
+ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
}
ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
@@ -11147,14 +11169,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (range->flags != 1)
continue;
- amdgpu_dm_connector->min_vfreq = range->min_vfreq;
- amdgpu_dm_connector->max_vfreq = range->max_vfreq;
- amdgpu_dm_connector->pixel_clock_mhz =
- range->pixel_clock_mhz * 10;
-
connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ connector->display_info.monitor_range.min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ connector->display_info.monitor_range.max_vfreq += 255;
+ }
+
+ amdgpu_dm_connector->min_vfreq =
+ connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq =
+ connector->display_info.monitor_range.max_vfreq;
+ amdgpu_dm_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
+
break;
}
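EDID 1.4 allows a range descriptor to carry offset flags that bias the stored vertical-rate bytes by 255, which is what the added block applies before copying the limits into the connector. A standalone demo of the arithmetic (the flag bit positions follow the DRM header definitions, assumed here):

```c
#include <stdint.h>
#include <stdio.h>

#define DRM_EDID_RANGE_OFFSET_MIN_VFREQ (1 << 0)	/* assumed bit layout */
#define DRM_EDID_RANGE_OFFSET_MAX_VFREQ (1 << 1)

int main(void)
{
	uint8_t min_vfreq = 1, max_vfreq = 119;	/* raw descriptor bytes */
	uint8_t flags = DRM_EDID_RANGE_OFFSET_MAX_VFREQ;
	unsigned int min_hz = min_vfreq, max_hz = max_vfreq;

	if (flags & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
		min_hz += 255;
	if (flags & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
		max_hz += 255;

	printf("vfreq range: %u-%u Hz\n", min_hz, max_hz);	/* 1-374 Hz */
	return 0;
}
```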
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 85b7f58a7f35..c27063305a13 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -67,6 +67,8 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
+ case drm_edid_encode_panel_id('B', 'O', 'E', 0x092A):
+ case drm_edid_encode_panel_id('L', 'G', 'D', 0x06D1):
DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
@@ -120,6 +122,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->edid_hdmi = connector->display_info.is_hdmi;
+ apply_edid_quirks(edid_buf, edid_caps);
+
sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
if (sad_count <= 0)
return result;
@@ -146,8 +150,6 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
else
edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
- apply_edid_quirks(edid_buf, edid_caps);
-
kfree(sads);
kfree(sadb);
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index f2dfa96f9ef5..39530b2ea495 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -94,7 +94,7 @@ static void calculate_bandwidth(
const uint32_t s_high = 7;
const uint32_t dmif_chunk_buff_margin = 1;
- uint32_t max_chunks_fbc_mode;
+ uint32_t max_chunks_fbc_mode = 0;
int32_t num_cursor_lines;
int32_t i, j, k;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 960c4b4f6ddf..05f392501c0a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1850,19 +1850,21 @@ static enum bp_result get_firmware_info_v3_2(
/* Vega12 */
smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
DATA_TABLES(smu_info));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
if (!smu_info_v3_2)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage);
+
info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
} else if (revision.minor == 3) {
/* Vega20 */
smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
DATA_TABLES(smu_info));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
if (!smu_info_v3_3)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage);
+
info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
}
@@ -2422,10 +2424,11 @@ static enum bp_result get_integrated_info_v11(
info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
DATA_TABLES(integratedsysteminfo));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
if (info_v11 == NULL)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage);
+
info->gpu_cap_info =
le32_to_cpu(info_v11->gpucapinfo);
/*
@@ -2637,11 +2640,12 @@ static enum bp_result get_integrated_info_v2_1(
info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1,
DATA_TABLES(integratedsysteminfo));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
if (info_v2_1 == NULL)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage);
+
info->gpu_cap_info =
le32_to_cpu(info_v2_1->gpucapinfo);
/*
@@ -2799,11 +2803,11 @@ static enum bp_result get_integrated_info_v2_2(
info_v2_2 = GET_IMAGE(struct atom_integrated_system_info_v2_2,
DATA_TABLES(integratedsysteminfo));
- DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
-
if (info_v2_2 == NULL)
return BP_RESULT_BADBIOSTABLE;
+ DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage);
+
info->gpu_cap_info =
le32_to_cpu(info_v2_2->gpucapinfo);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index a5489fe6875f..aa9fd1dc550a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta
int i;
for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) {
+ if (i >= VG_NUM_DCFCLK_DPM_LEVELS)
+ break;
if (clock_table->SocVoltage[i] == voltage)
return clock_table->DcfClocks[i];
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 14cec1c7b718..e64890259235 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -655,10 +655,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
+ uint32_t num_memps, num_fclk, num_dcfclk;
int i;
/* Determine min/max p-state values. */
- for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
+ num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS :
+ clock_table->NumMemPstatesEnabled;
+ for (i = 0; i < num_memps; i++) {
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
@@ -670,7 +673,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
min_dram_speed_mts = max_dram_speed_mts;
min_pstate = max_pstate;
- for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
+ for (i = 0; i < num_memps; i++) {
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
@@ -699,9 +702,13 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
/* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);
- max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled);
+ num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS :
+ clock_table->NumFclkLevelsEnabled;
+ max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
- for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+ clock_table->NumDcfClkLevelsEnabled;
+ for (i = 0; i < num_dcfclk; i++) {
int j;
/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
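Both clock-manager hunks above (find_dcfclk_for_voltage and dcn35_clk_mgr_helper_populate_bw_params) guard loops that index fixed-size tables with entry counts reported by firmware. A standalone sketch of the clamp, with a hypothetical table size:

#include <stdio.h>

#define NUM_LEVELS 4	/* capacity of the local table (hypothetical) */

static unsigned int clamp_count(unsigned int reported, unsigned int capacity)
{
	return reported > capacity ? capacity : reported;
}

int main(void)
{
	unsigned int clocks[NUM_LEVELS] = { 200, 400, 600, 800 };
	unsigned int reported = 7;	/* firmware claims more entries than we store */
	unsigned int n = clamp_count(reported, NUM_LEVELS);

	for (unsigned int i = 0; i < n; i++)	/* never reads past clocks[] */
		printf("level %u: %u MHz\n", i, clocks[i]);
	return 0;
}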
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index aa7c02ba948e..2c424e435962 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -3817,7 +3817,9 @@ static void commit_planes_for_stream(struct dc *dc,
* programming has completed (we turn on phantom OTG in order
* to complete the plane disable for phantom pipes).
*/
- dc->hwss.apply_ctx_to_hw(dc, context);
+
+ if (dc->hwss.disable_phantom_streams)
+ dc->hwss.disable_phantom_streams(dc, context);
}
if (update_type != UPDATE_TYPE_FAST)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 88c6436b28b6..180ac47868c2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -291,11 +291,14 @@ void dc_state_destruct(struct dc_state *state)
dc_stream_release(state->phantom_streams[i]);
state->phantom_streams[i] = NULL;
}
+ state->phantom_stream_count = 0;
for (i = 0; i < state->phantom_plane_count; i++) {
dc_plane_state_release(state->phantom_planes[i]);
state->phantom_planes[i] = NULL;
}
+ state->phantom_plane_count = 0;
+
state->stream_mask = 0;
memset(&state->res_ctx, 0, sizeof(state->res_ctx));
memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg));
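The dc_state_destruct() hunk releases the phantom arrays and now also zeroes the matching counts, so a later pass over the state cannot walk already-released entries. A sketch of that invariant in plain C (types here are illustrative, not the dc_state layout):

#include <stdlib.h>

struct state {
	void *items[8];
	int item_count;
};

static void state_destruct(struct state *s)
{
	for (int i = 0; i < s->item_count; i++) {
		free(s->items[i]);
		s->items[i] = NULL;
	}
	/* Keep the count consistent with the now-empty array. */
	s->item_count = 0;
}

int main(void)
{
	struct state s = { .items = { malloc(4) }, .item_count = 1 };

	state_destruct(&s);	/* safe to call twice once the count is zeroed */
	state_destruct(&s);
	return 0;
}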
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 2b79a0e5638e..363d522603a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -125,7 +125,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
unsigned int count,
union dmub_rb_cmd *cmd_list)
{
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dc_context *dc_ctx;
struct dmub_srv *dmub;
enum dmub_status status;
int i;
@@ -133,6 +133,7 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
return false;
+ dc_ctx = dc_dmub_srv->ctx;
dmub = dc_dmub_srv->dmub;
for (i = 0 ; i < count; i++) {
@@ -1161,7 +1162,7 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con
bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
- struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ struct dc_context *dc_ctx;
enum dmub_status status;
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
@@ -1170,6 +1171,8 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
return true;
+ dc_ctx = dc_dmub_srv->ctx;
+
if (wait) {
if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
do {
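Both dc_dmub_srv hunks, like the bios_parser2 hunks earlier, move a dereference below the NULL check that guards it: the pointer is declared up front but only loaded after validation. The pattern, as a standalone sketch:

#include <stdbool.h>
#include <stddef.h>

struct srv { int *ctx; };

static bool srv_ready(struct srv *srv)
{
	int *ctx;		/* declared, but not loaded yet */

	if (!srv || !srv->ctx)	/* validate first ... */
		return false;

	ctx = srv->ctx;		/* ... then dereference */
	return *ctx != 0;
}

int main(void)
{
	return srv_ready(NULL);	/* safe: no dereference on the NULL path */
}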
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
index e8570060d007..5bca67407c5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -290,4 +290,5 @@ void dce_panel_cntl_construct(
dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs;
dce_panel_cntl->base.ctx = init_data->ctx;
dce_panel_cntl->base.inst = init_data->inst;
+ dce_panel_cntl->base.pwrseq_inst = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index e43f77c11c00..5f97a868ada3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -56,16 +56,13 @@ static void dpp3_enable_cm_block(
static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
{
- enum dc_lut_mode mode;
+ enum dc_lut_mode mode = LUT_BYPASS;
uint32_t state_mode;
uint32_t lut_mode;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode);
- if (state_mode == 0)
- mode = LUT_BYPASS;
-
if (state_mode == 2) {//Programmable RAM LUT
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode);
if (lut_mode == 0)
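dpp30_get_gamcor_current(), like max_chunks_fbc_mode in dce_calcs.c above and wait_time_microsec in the DPIA training code later, now starts from an explicit default instead of leaving the local uninitialized when no case matches. A sketch of the idiom:

#include <stdio.h>

enum lut_mode { LUT_BYPASS, LUT_RAM_A, LUT_RAM_B };

static enum lut_mode get_mode(unsigned int state)
{
	enum lut_mode mode = LUT_BYPASS;	/* safe default for unknown states */

	if (state == 2)
		mode = LUT_RAM_A;
	else if (state == 3)
		mode = LUT_RAM_B;

	return mode;	/* well-defined even for state == 0 or garbage input */
}

int main(void)
{
	printf("%d\n", get_mode(7));
	return 0;
}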
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
index ad0df1a72a90..9e96a3ace207 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c
@@ -215,4 +215,5 @@ void dcn301_panel_cntl_construct(
dcn301_panel_cntl->base.funcs = &dcn301_link_panel_cntl_funcs;
dcn301_panel_cntl->base.ctx = init_data->ctx;
dcn301_panel_cntl->base.inst = init_data->inst;
+ dcn301_panel_cntl->base.pwrseq_inst = 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
index 03248422d6ff..281be20b1a10 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
@@ -154,8 +154,24 @@ void dcn31_panel_cntl_construct(
struct dcn31_panel_cntl *dcn31_panel_cntl,
const struct panel_cntl_init_data *init_data)
{
+ uint8_t pwrseq_inst = 0xF;
+
dcn31_panel_cntl->base.funcs = &dcn31_link_panel_cntl_funcs;
dcn31_panel_cntl->base.ctx = init_data->ctx;
dcn31_panel_cntl->base.inst = init_data->inst;
- dcn31_panel_cntl->base.pwrseq_inst = init_data->pwrseq_inst;
+
+ switch (init_data->eng_id) {
+ case ENGINE_ID_DIGA:
+ pwrseq_inst = 0;
+ break;
+ case ENGINE_ID_DIGB:
+ pwrseq_inst = 1;
+ break;
+ default:
+ DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", init_data->eng_id);
+ ASSERT(false);
+ break;
+ }
+
+ dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 6042a5a6a44f..59ade76ffb18 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -72,11 +72,11 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index dd781a20692e..a0a65e099104 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1288,7 +1288,7 @@ static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *contex
return updated;
}
-static bool should_allow_odm_power_optimization(struct dc *dc,
+static bool should_apply_odm_power_optimization(struct dc *dc,
struct dc_state *context, struct vba_vars_st *v, int *split,
bool *merge)
{
@@ -1392,9 +1392,12 @@ static void try_odm_power_optimization_and_revalidate(
{
int i;
unsigned int new_vlevel;
+ unsigned int cur_policy[MAX_PIPES];
- for (i = 0; i < pipe_cnt; i++)
+ for (i = 0; i < pipe_cnt; i++) {
+ cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy;
pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ }
new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
@@ -1403,6 +1406,9 @@ static void try_odm_power_optimization_and_revalidate(
memset(merge, 0, MAX_PIPES * sizeof(bool));
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge);
context->bw_ctx.dml.vba.VoltageLevel = *vlevel;
+ } else {
+ for (i = 0; i < pipe_cnt; i++)
+ pipes[i].pipe.dest.odm_combine_policy = cur_policy[i];
}
}
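try_odm_power_optimization_and_revalidate() now snapshots the per-pipe combine policy before forcing 2:1, and rolls it back if revalidation fails. The save/try/restore shape, sketched standalone:

#include <stdbool.h>
#include <string.h>

#define MAX_PIPES 6

static bool revalidate(const int *policy) { return policy[0] == 1; /* stand-in */ }

static bool try_policy(int *policy, int count, int candidate)
{
	int saved[MAX_PIPES];

	memcpy(saved, policy, count * sizeof(*policy));	/* snapshot */
	for (int i = 0; i < count; i++)
		policy[i] = candidate;			/* speculative change */

	if (revalidate(policy))
		return true;				/* keep the new policy */

	memcpy(policy, saved, count * sizeof(*policy));	/* roll back */
	return false;
}

int main(void)
{
	int policy[MAX_PIPES] = { 0 };

	return try_policy(policy, MAX_PIPES, 2);	/* fails, policy restored */
}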
@@ -1580,7 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
}
}
- if (should_allow_odm_power_optimization(dc, context, vba, split, merge))
+ if (should_apply_odm_power_optimization(dc, context, vba, split, merge))
try_odm_power_optimization_and_revalidate(
dc, context, pipes, split, merge, vlevel, *pipe_cnt);
@@ -2209,7 +2215,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int i;
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
- dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
+ if (!dc->config.enable_windowed_mpo_odm)
+ dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
/* repopulate_pipes = 1 means the pipes were either split or merged. In this case
* we have to re-calculate the DET allocation and run through DML once more to
@@ -2753,7 +2760,7 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
struct _vcs_dpi_voltage_scaling_st entry = {0};
struct clk_limit_table_entry max_clk_data = {0};
- unsigned int min_dcfclk_mhz = 399, min_fclk_mhz = 599;
+ unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
static const unsigned int num_dcfclk_stas = 5;
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 23a608274096..1ba6933d2b36 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -398,7 +398,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
/* Copy clocks tables entries, if available */
if (dml2->config.bbox_overrides.clks_table.num_states) {
p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
-
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {
p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
}
@@ -437,6 +436,14 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
dml2_policy_build_synthetic_soc_states(s, p);
+ if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
+ dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
+ // Override last out_state with data from last in_state
+ // This will ensure that out_state contains max fclk
+ memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
+ &p->in_states->state_array[p->in_states->num_states - 1],
+ sizeof(struct soc_state_bounding_box_st));
+ }
}
void dml2_translate_ip_params(const struct dc *in, struct ip_params_st *out)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 26307e599614..2a58a7687bdb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -76,6 +76,11 @@ static void map_hw_resources(struct dml2_context *dml2,
in_out_display_cfg->hw.DLGRefClkFreqMHz = 50;
}
for (j = 0; j < mode_support_info->DPPPerSurface[i]; j++) {
+ if (i >= __DML2_WRAPPER_MAX_STREAMS_PLANES__) {
+ dml_print("DML::%s: Index out of bounds: i=%d, __DML2_WRAPPER_MAX_STREAMS_PLANES__=%d\n",
+ __func__, i, __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ break;
+ }
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i];
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[num_pipes] = true;
dml2->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_id[num_pipes] = dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[i];
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 2352428bcea3..01493c49bd7a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1476,7 +1476,7 @@ static enum dc_status dce110_enable_stream_timing(
return DC_OK;
}
-static enum dc_status apply_single_controller_ctx_to_hw(
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc)
@@ -2302,7 +2302,7 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
continue;
- status = apply_single_controller_ctx_to_hw(
+ status = dce110_apply_single_controller_ctx_to_hw(
pipe_ctx,
context,
dc);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 08028a1779ae..ed3cc3648e8e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context);
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 4853ecac53f9..931ac8ed7069 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -2561,7 +2561,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
-static void dcn20_reset_back_end_for_pipe(
+void dcn20_reset_back_end_for_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index b94c85340abf..d950b3e54ec2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -84,6 +84,10 @@ enum dc_status dcn20_enable_stream_timing(
void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 8e88dcaf88f5..7252f5f781f0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -206,28 +206,32 @@ void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
{
struct abm *abm = pipe_ctx->stream_res.abm;
- uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
+ uint32_t otg_inst;
+
+ if (!abm || !tg || !panel_cntl)
+ return;
+
+ otg_inst = tg->inst;
if (dmcu) {
dce110_set_pipe(pipe_ctx);
return;
}
- if (abm && panel_cntl) {
- if (abm->funcs && abm->funcs->set_pipe_ex) {
- abm->funcs->set_pipe_ex(abm,
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm,
otg_inst,
SET_ABM_PIPE_NORMAL,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
- } else {
- dmub_abm_set_pipe(abm, otg_inst,
- SET_ABM_PIPE_NORMAL,
- panel_cntl->inst,
- panel_cntl->pwrseq_inst);
- }
+ } else {
+ dmub_abm_set_pipe(abm, otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
}
}
@@ -237,34 +241,35 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
{
struct dc_context *dc = pipe_ctx->stream->ctx;
struct abm *abm = pipe_ctx->stream_res.abm;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+ uint32_t otg_inst;
+
+ if (!abm || !tg || !panel_cntl)
+ return false;
+
+ otg_inst = tg->inst;
if (dc->dc->res_pool->dmcu) {
dce110_set_backlight_level(pipe_ctx, backlight_pwm_u16_16, frame_ramp);
return true;
}
- if (abm != NULL) {
- uint32_t otg_inst = pipe_ctx->stream_res.tg->inst;
-
- if (abm && panel_cntl) {
- if (abm->funcs && abm->funcs->set_pipe_ex) {
- abm->funcs->set_pipe_ex(abm,
- otg_inst,
- SET_ABM_PIPE_NORMAL,
- panel_cntl->inst,
- panel_cntl->pwrseq_inst);
- } else {
- dmub_abm_set_pipe(abm,
- otg_inst,
- SET_ABM_PIPE_NORMAL,
- panel_cntl->inst,
- panel_cntl->pwrseq_inst);
- }
- }
+ if (abm->funcs && abm->funcs->set_pipe_ex) {
+ abm->funcs->set_pipe_ex(abm,
+ otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
+ } else {
+ dmub_abm_set_pipe(abm,
+ otg_inst,
+ SET_ABM_PIPE_NORMAL,
+ panel_cntl->inst,
+ panel_cntl->pwrseq_inst);
}
- if (abm && abm->funcs && abm->funcs->set_backlight_level_pwm)
+ if (abm->funcs && abm->funcs->set_backlight_level_pwm)
abm->funcs->set_backlight_level_pwm(abm, backlight_pwm_u16_16,
frame_ramp, 0, panel_cntl->inst);
else
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 6c9299c7683d..aa36d7a56ca8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1474,9 +1474,44 @@ void dcn32_update_dsc_pg(struct dc *dc,
}
}
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) ||
+ (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ if (hws->funcs.reset_back_end_for_pipe)
+ hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+ }
+ }
+}
+
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
{
unsigned int i;
+ enum dc_status status = DC_OK;
+ struct dce_hwseq *hws = dc->hwseq;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1497,16 +1532,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
-
- if (new_pipe->stream && dc_state_get_pipe_subvp_type(context, new_pipe) == SUBVP_PHANTOM) {
- // If old context or new context has phantom pipes, apply
- // the phantom timings now. We can't change the phantom
- // pipe configuration safely without driver acquiring
- // the DMCUB lock first.
- dc->hwss.apply_ctx_to_hw(dc, context);
- break;
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream &&
+ pipe_ctx->stream->link->link_state_valid) {
+ continue;
}
+
+ if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ continue;
+
+ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
+ continue;
+
+ if (hws->funcs.apply_single_controller_ctx_to_hw)
+ status = hws->funcs.apply_single_controller_ctx_to_hw(
+ pipe_ctx,
+ context,
+ dc);
+
+ ASSERT(status == DC_OK);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (hws->funcs.resync_fifo_dccg_dio)
+ hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+#endif
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index cecf7f0f5671..069e20bc87c0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc,
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context);
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context);
+
void dcn32_init_blank(
struct dc *dc,
struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 427cfc8c24a4..e8ac94a005b8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config,
.enable_phantom_streams = dcn32_enable_phantom_streams,
+ .disable_phantom_streams = dcn32_disable_phantom_streams,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast,
@@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
.resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio,
.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
+ .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
};
void dcn32_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index a54399383318..64ca7c66509b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -379,6 +379,7 @@ struct hw_sequencer_funcs {
struct dc_cursor_attributes *cursor_attr);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
+ void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 6137cf09aa54..b3c62a82cb1c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -165,8 +165,15 @@ struct hwseq_private_funcs {
void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx);
void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc,
struct dc_state *context);
+ enum dc_status (*apply_single_controller_ctx_to_hw)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
#endif
+ void (*reset_back_end_for_pipe)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
index 5dcbaa2db964..e97d964a1791 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
@@ -57,7 +57,7 @@ struct panel_cntl_funcs {
struct panel_cntl_init_data {
struct dc_context *ctx;
uint32_t inst;
- uint32_t pwrseq_inst;
+ uint32_t eng_id;
};
struct panel_cntl {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index c958ef37b78a..77a60aa9f27b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -427,22 +427,18 @@ struct pipe_ctx *resource_get_primary_dpp_pipe(const struct pipe_ctx *dpp_pipe);
int resource_get_mpc_slice_index(const struct pipe_ctx *dpp_pipe);
/*
- * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice
- * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it
- * will have 4 pieces of slice.
- * return - 0 if pipe is not used for a plane with MPCC combine. otherwise
- * the number of MPC "cuts" for the plane.
+ * Get the number of MPC slices associated with the pipe.
+ * The function returns 0 if the pipe is not associated with an MPC combine
+ * pipe topology.
*/
-int resource_get_mpc_slice_count(const struct pipe_ctx *opp_head);
+int resource_get_mpc_slice_count(const struct pipe_ctx *pipe);
/*
- * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice
- * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it
- * will have 4 pieces of slice.
- * return - 0 if pipe is not used for ODM combine. otherwise
- * the number of ODM "cuts" for the timing.
+ * Get the number of ODM slices associated with the pipe.
+ * The function returns 0 if the pipe is not associated with an ODM combine
+ * pipe topology.
*/
-int resource_get_odm_slice_count(const struct pipe_ctx *otg_master);
+int resource_get_odm_slice_count(const struct pipe_ctx *pipe);
/* Get the ODM slice index counting from 0 from left most slice */
int resource_get_odm_slice_index(const struct pipe_ctx *opp_head);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 37d3027c32dc..cf22b8f28ba6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -370,30 +370,6 @@ static enum transmitter translate_encoder_to_transmitter(
}
}
-static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link)
-{
- uint8_t pwrseq_inst = 0xF;
- struct dc_context *dc_ctx = link->dc->ctx;
-
- DC_LOGGER_INIT(dc_ctx->logger);
-
- switch (link->eng_id) {
- case ENGINE_ID_DIGA:
- pwrseq_inst = 0;
- break;
- case ENGINE_ID_DIGB:
- pwrseq_inst = 1;
- break;
- default:
- DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id);
- ASSERT(false);
- break;
- }
-
- return pwrseq_inst;
-}
-
-
static void link_destruct(struct dc_link *link)
{
int i;
@@ -657,7 +633,7 @@ static bool construct_phy(struct dc_link *link,
link->link_id.id == CONNECTOR_ID_LVDS)) {
panel_cntl_init_data.ctx = dc_ctx;
panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count;
- panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link);
+ panel_cntl_init_data.eng_id = link->eng_id;
link->panel_cntl =
link->dc->res_pool->funcs->panel_cntl_create(
&panel_cntl_init_data);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index 8fe66c367850..5b0bc7f6a188 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -361,7 +361,7 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
struct dc_link *dpia_link[MAX_DPIA_NUM] = {0};
int num_dpias = 0;
- for (uint8_t i = 0; i < num_streams; ++i) {
+ for (unsigned int i = 0; i < num_streams; ++i) {
if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) {
/* new dpia sst stream, check whether it exceeds max dpia */
if (num_dpias >= MAX_DPIA_NUM)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 5a0b04518956..16a62e018712 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -517,6 +517,7 @@ enum link_training_result dp_check_link_loss_status(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
union lane_status lane_status;
+ union lane_align_status_updated dpcd_lane_status_updated;
uint8_t dpcd_buf[6] = {0};
uint32_t lane;
@@ -532,10 +533,12 @@ enum link_training_result dp_check_link_loss_status(
* check lanes status
*/
lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane);
+ dpcd_lane_status_updated.raw = dpcd_buf[4];
if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
!lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
+ !lane_status.bits.SYMBOL_LOCKED_0 ||
+ !dp_is_interlane_aligned(dpcd_lane_status_updated)) {
/* if one of the channel equalization, clock
* recovery or symbol lock is dropped
* consider it as (link has been
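The link-loss hunk now also folds the interlane-align bit (byte 4 of the DPCD status block) into the check, using the same raw/bitfield union idiom as the per-lane status. A sketch of that idiom (the field layout below is illustrative, not the DPCD spec):

#include <stdint.h>
#include <stdio.h>

union align_status {
	struct {
		unsigned interlane_align_done : 1;	/* illustrative layout */
		unsigned reserved : 7;
	} bits;
	uint8_t raw;
};

int main(void)
{
	uint8_t dpcd_buf[6] = { 0, 0, 0x77, 0x77, 0x01, 0 };
	union align_status st;

	st.raw = dpcd_buf[4];	/* load the raw byte once ... */
	printf("aligned: %u\n", st.bits.interlane_align_done);	/* ... then test bits */
	return 0;
}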
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index e8dda44b23cb..5d36bab0029c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -619,7 +619,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
uint32_t retries_eq = 0;
enum dc_status status;
enum dc_dp_training_pattern tr_pattern;
- uint32_t wait_time_microsec;
+ uint32_t wait_time_microsec = 0;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = {0};
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index 511ff6b5b985..7538b548c572 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -999,7 +999,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id
vpg = dcn301_vpg_create(ctx, vpg_inst);
afmt = dcn301_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt) {
+ if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) {
kfree(enc1);
kfree(vpg);
kfree(afmt);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index c4d71e7f18af..6f10052caeef 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1829,7 +1829,21 @@ int dcn32_populate_dml_pipes_from_context(
dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt);
DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
- pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ if (dc->config.enable_windowed_mpo_odm &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ switch (resource_get_odm_slice_count(pipe)) {
+ case 2:
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ break;
+ case 4:
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1;
+ break;
+ default:
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ }
+ } else {
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ }
pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 761ec9891875..5fdcda8f8602 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -780,8 +780,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.ignore_pg = true,
.psp_disabled_wa = true,
- .ips2_eval_delay_us = 200,
- .ips2_entry_delay_us = 400,
+ .ips2_eval_delay_us = 2000,
+ .ips2_entry_delay_us = 800,
.static_screen_wait_frames = 2,
};
@@ -2130,6 +2130,7 @@ static bool dcn35_resource_construct(
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
+ dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 087d57850304..39c5e1dfa275 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2558,6 +2558,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err, ret;
+ u32 pwm_mode;
int value;
if (amdgpu_in_reset(adev))
@@ -2569,13 +2570,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
if (err)
return err;
+ if (value == 0)
+ pwm_mode = AMD_FAN_CTRL_NONE;
+ else if (value == 1)
+ pwm_mode = AMD_FAN_CTRL_MANUAL;
+ else if (value == 2)
+ pwm_mode = AMD_FAN_CTRL_AUTO;
+ else
+ return -EINVAL;
+
ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}
- ret = amdgpu_dpm_set_fan_control_mode(adev, value);
+ ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
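The pwm1_enable hunk validates the raw sysfs integer and maps it onto the fan-control enum before calling into DPM, rather than passing the user value straight through. A sketch of the mapping, with stand-in enum values:

#include <errno.h>
#include <stdio.h>

enum fan_ctrl { FAN_CTRL_NONE, FAN_CTRL_MANUAL, FAN_CTRL_AUTO };

static int map_pwm1_enable(int value, enum fan_ctrl *mode)
{
	switch (value) {
	case 0: *mode = FAN_CTRL_NONE;   return 0;
	case 1: *mode = FAN_CTRL_MANUAL; return 0;
	case 2: *mode = FAN_CTRL_AUTO;   return 0;
	default: return -EINVAL;	/* reject before touching hardware */
	}
}

int main(void)
{
	enum fan_ctrl mode;

	if (map_pwm1_enable(3, &mode))
		fprintf(stderr, "invalid pwm1_enable value\n");
	return 0;
}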
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index df4f20293c16..eb4da3666e05 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev)
return 0;
}
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = si_thermal_enable_alert(adev, false);
+ if (ret)
+ return ret;
+ ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(adev, true);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static void si_dpm_disable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
static int si_dpm_late_init(void *handle)
{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ ret = si_set_temperature_range(adev);
+ if (ret)
+ return ret;
+#if 0 //TODO ?
+ si_dpm_powergate_uvd(adev, true);
+#endif
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 4cd43bbec910..bcad42534da4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1303,13 +1303,12 @@ static int arcturus_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
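This hunk (repeated for navi10, sienna_cichlid, smu_v13_0_0 and smu_v13_0_7 below) always reads the lower overdrive percentage from the powerplay table and only gates the upper one on od_enabled. The resulting limit computation, sketched:

#include <stdio.h>

static void get_od_limits(int od_enabled, int table_max, int table_min,
			  int *upper, int *lower)
{
	*upper = od_enabled ? table_max : 0;	/* no headroom when OD is off */
	*lower = table_min;	/* floor comes from the table either way */
}

int main(void)
{
	int up, lo;

	get_od_limits(0, 20, 90, &up, &lo);
	printf("upper=%d lower=%d\n", up, lo);
	return 0;
}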
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 8d1d29ffb0f1..ed189a3878eb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2357,13 +2357,12 @@ static int navi10_get_power_limit(struct smu_context *smu,
*default_power_limit = power_limit;
if (smu->od_enabled &&
- navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 21fc033528fa..e2ad2b972ab0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -640,13 +640,12 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 2ff6deedef95..da1f43999d09 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -451,7 +451,7 @@ static int vangogh_init_smc_tables(struct smu_context *smu)
#ifdef CONFIG_X86
/* AMD x86 APU only */
- smu->cpu_core_num = boot_cpu_data.x86_max_cores;
+ smu->cpu_core_num = topology_num_cores_per_package();
#else
smu->cpu_core_num = 4;
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index a9954ffc02c5..9b80f18ea6c3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2369,13 +2369,12 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 0ffdb58af74e..3dc7b60cb075 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2333,13 +2333,12 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled) {
+ if (smu->od_enabled)
od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- } else {
+ else
od_percent_upper = 0;
- od_percent_lower = 100;
- }
+
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 4894f7ee737b..6dae5ad74ff0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -229,8 +229,6 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
break;
case IP_VERSION(14, 0, 0):
- if ((smu->smc_fw_version < 0x5d3a00))
- dev_warn(smu->adev->dev, "The PMFW version(%x) is behind in this BIOS!\n", smu->smc_fw_version);
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
default:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 47fdbae4adfc..9310c4758e38 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -261,7 +261,10 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->MpipuclkFrequency;
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = metrics->GfxActivity / 100;
+ if (smu->smc_fw_version > 0x5d4600)
+ if (smu->smc_fw_version > 0x5d4600)
+ *value = metrics->GfxActivity;
+ else
+ *value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
*value = metrics->VcnActivity / 100;
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index bb55f697a181..6886db2d9e00 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -25,20 +25,18 @@ static void drm_aux_hpd_bridge_release(struct device *dev)
ida_free(&drm_aux_hpd_bridge_ida, adev->id);
of_node_put(adev->dev.platform_data);
+ of_node_put(adev->dev.of_node);
kfree(adev);
}
-static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
+static void drm_aux_hpd_bridge_free_adev(void *_adev)
{
- struct auxiliary_device *adev = _adev;
-
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
+ auxiliary_device_uninit(_adev);
}
/**
- * drm_dp_hpd_bridge_register - Create a simple HPD DisplayPort bridge
+ * devm_drm_dp_hpd_bridge_alloc - allocate an HPD DisplayPort bridge
* @parent: device instance providing this bridge
* @np: device node pointer corresponding to this bridge instance
*
@@ -46,11 +44,9 @@ static void drm_aux_hpd_bridge_unregister_adev(void *_adev)
* DRM_MODE_CONNECTOR_DisplayPort, which terminates the bridge chain and is
* able to send the HPD events.
*
- * Return: device instance that will handle created bridge or an error code
- * encoded into the pointer.
+ * Return: bridge auxiliary device pointer or an error pointer
*/
-struct device *drm_dp_hpd_bridge_register(struct device *parent,
- struct device_node *np)
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np)
{
struct auxiliary_device *adev;
int ret;
@@ -74,18 +70,62 @@ struct device *drm_dp_hpd_bridge_register(struct device *parent,
ret = auxiliary_device_init(adev);
if (ret) {
+ of_node_put(adev->dev.platform_data);
+ of_node_put(adev->dev.of_node);
ida_free(&drm_aux_hpd_bridge_ida, adev->id);
kfree(adev);
return ERR_PTR(ret);
}
- ret = auxiliary_device_add(adev);
- if (ret) {
- auxiliary_device_uninit(adev);
+ ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_free_adev, adev);
+ if (ret)
return ERR_PTR(ret);
- }
- ret = devm_add_action_or_reset(parent, drm_aux_hpd_bridge_unregister_adev, adev);
+ return adev;
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_alloc);
+
+static void drm_aux_hpd_bridge_del_adev(void *_adev)
+{
+ auxiliary_device_delete(_adev);
+}
+
+/**
+ * devm_drm_dp_hpd_bridge_add - register an HPD DisplayPort bridge
+ * @dev: struct device to tie registration lifetime to
+ * @adev: bridge auxiliary device to be registered
+ *
+ * Return: zero on success or a negative errno
+ */
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
+{
+ int ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, drm_aux_hpd_bridge_del_adev, adev);
+}
+EXPORT_SYMBOL_GPL(devm_drm_dp_hpd_bridge_add);
+
+/**
+ * drm_dp_hpd_bridge_register - allocate and register an HPD DisplayPort bridge
+ * @parent: device instance providing this bridge
+ * @np: device node pointer corresponding to this bridge instance
+ *
+ * Return: device instance that will handle created bridge or an error pointer
+ */
+struct device *drm_dp_hpd_bridge_register(struct device *parent, struct device_node *np)
+{
+ struct auxiliary_device *adev;
+ int ret;
+
+ adev = devm_drm_dp_hpd_bridge_alloc(parent, np);
+ if (IS_ERR(adev))
+ return ERR_CAST(adev);
+
+ ret = devm_drm_dp_hpd_bridge_add(parent, adev);
if (ret)
return ERR_PTR(ret);
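The aux-hpd-bridge rework splits construction into an alloc step and an add step, each arming its own teardown (uninit vs delete), so drm_dp_hpd_bridge_register() becomes a thin wrapper over the two. A sketch of the paired setup/teardown split, with hypothetical names in place of the auxiliary-device API:

#include <stdlib.h>

struct dev { int initialized, added; };

static struct dev *dev_alloc(void)	/* step 1: allocate + init */
{
	struct dev *d = calloc(1, sizeof(*d));

	if (d)
		d->initialized = 1;
	return d;
}

static void dev_free(struct dev *d)	/* undoes step 1 only */
{
	free(d);
}

static int dev_add(struct dev *d)	/* step 2: make it visible */
{
	d->added = 1;
	return 0;
}

static void dev_del(struct dev *d)	/* undoes step 2 only */
{
	d->added = 0;
}

int main(void)	/* the wrapper: both steps, teardown in reverse order */
{
	struct dev *d = dev_alloc();

	if (!d || dev_add(d)) {
		dev_free(d);
		return 1;
	}
	dev_del(d);
	dev_free(d);
	return 0;
}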
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 62cc3893dca5..7fbc307cc025 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -197,7 +197,7 @@ struct ti_sn65dsi86 {
DECLARE_BITMAP(gchip_output, SN_NUM_GPIOS);
#endif
#if defined(CONFIG_PWM)
- struct pwm_chip pchip;
+ struct pwm_chip *pchip;
bool pwm_enabled;
atomic_t pwm_pin_busy;
#endif
@@ -1374,7 +1374,7 @@ static void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata)
static struct ti_sn65dsi86 *pwm_chip_to_ti_sn_bridge(struct pwm_chip *chip)
{
- return container_of(chip, struct ti_sn65dsi86, pchip);
+ return pwmchip_get_drvdata(chip);
}
static int ti_sn_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -1415,7 +1415,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
int ret;
if (!pdata->pwm_enabled) {
- ret = pm_runtime_resume_and_get(chip->dev);
+ ret = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (ret < 0)
return ret;
}
@@ -1431,7 +1431,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
SN_GPIO_MUX_MASK << (2 * SN_PWM_GPIO_IDX),
SN_GPIO_MUX_SPECIAL << (2 * SN_PWM_GPIO_IDX));
if (ret) {
- dev_err(chip->dev, "failed to mux in PWM function\n");
+ dev_err(pwmchip_parent(chip), "failed to mux in PWM function\n");
goto out;
}
}
@@ -1507,7 +1507,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = regmap_write(pdata->regmap, SN_PWM_PRE_DIV_REG, pre_div);
if (ret) {
- dev_err(chip->dev, "failed to update PWM_PRE_DIV\n");
+ dev_err(pwmchip_parent(chip), "failed to update PWM_PRE_DIV\n");
goto out;
}
@@ -1519,7 +1519,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
FIELD_PREP(SN_PWM_INV_MASK, state->polarity == PWM_POLARITY_INVERSED);
ret = regmap_write(pdata->regmap, SN_PWM_EN_INV_REG, pwm_en_inv);
if (ret) {
- dev_err(chip->dev, "failed to update PWM_EN/PWM_INV\n");
+ dev_err(pwmchip_parent(chip), "failed to update PWM_EN/PWM_INV\n");
goto out;
}
@@ -1527,7 +1527,7 @@ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
out:
if (!pdata->pwm_enabled)
- pm_runtime_put_sync(chip->dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
return ret;
}
@@ -1585,24 +1585,28 @@ static const struct pwm_ops ti_sn_pwm_ops = {
static int ti_sn_pwm_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
+ struct pwm_chip *chip;
struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
- pdata->pchip.dev = &adev->dev;
- pdata->pchip.ops = &ti_sn_pwm_ops;
- pdata->pchip.npwm = 1;
- pdata->pchip.of_xlate = of_pwm_single_xlate;
- pdata->pchip.of_pwm_n_cells = 1;
+ pdata->pchip = chip = devm_pwmchip_alloc(&adev->dev, 1, 0);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ pwmchip_set_drvdata(chip, pdata);
+
+ chip->ops = &ti_sn_pwm_ops;
+ chip->of_xlate = of_pwm_single_xlate;
devm_pm_runtime_enable(&adev->dev);
- return pwmchip_add(&pdata->pchip);
+ return pwmchip_add(chip);
}
static void ti_sn_pwm_remove(struct auxiliary_device *adev)
{
struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
- pwmchip_remove(&pdata->pchip);
+ pwmchip_remove(pdata->pchip);
if (pdata->pwm_enabled)
pm_runtime_put_sync(&adev->dev);
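The ti-sn65dsi86 conversion stops embedding the pwm_chip in the driver struct and stores a back-pointer via the chip's driver data instead, so lookup changes from container_of() to an accessor. Both shapes, sketched with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct chip { void *drvdata; };
struct bridge_embed { int id; struct chip pchip; };	/* old: embedded */
struct bridge_ptr { int id; struct chip *pchip; };	/* new: allocated separately */

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct bridge_embed *from_chip_embed(struct chip *c)
{
	return container_of(c, struct bridge_embed, pchip);
}

static struct bridge_ptr *from_chip_drvdata(struct chip *c)
{
	return c->drvdata;	/* accessor replaces pointer arithmetic */
}

int main(void)
{
	struct bridge_embed be = { .id = 1 };
	struct chip c = { .drvdata = NULL };
	struct bridge_ptr bp = { .id = 2, .pchip = &c };

	c.drvdata = &bp;
	printf("%d %d\n", from_chip_embed(&be.pchip)->id,
	       from_chip_drvdata(bp.pchip)->id);
	return 0;
}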
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index f57e6d74fb0e..5ebdd6f8f36e 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm,
u64 start, u64 end,
unsigned int order)
{
+ u64 req_size = mm->chunk_size << order;
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
LIST_HEAD(dfs);
@@ -367,6 +368,15 @@ alloc_range_bias(struct drm_buddy *mm,
if (drm_buddy_block_is_allocated(block))
continue;
+ if (block_start < start || block_end > end) {
+ u64 adjusted_start = max(block_start, start);
+ u64 adjusted_end = min(block_end, end);
+
+ if (round_down(adjusted_end + 1, req_size) <=
+ round_up(adjusted_start, req_size))
+ continue;
+ }
+
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) {
/*
@@ -538,7 +548,13 @@ static int __alloc_range(struct drm_buddy *mm,
list_add(&block->left->tmp_link, dfs);
} while (1);
+ if (total_allocated < size) {
+ err = -ENOSPC;
+ goto err_free;
+ }
+
list_splice_tail(&allocated, blocks);
+
return 0;
err_undo:
@@ -755,8 +771,12 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
return -EINVAL;
/* Actual range allocation */
- if (start + size == end)
+ if (start + size == end) {
+ if (!IS_ALIGNED(start | end, min_block_size))
+ return -EINVAL;
+
return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
+ }
original_size = size;
original_min_size = min_block_size;
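
The new bias-range check above skips a partially overlapping block when no req_size-aligned chunk can fit inside the clamped window. Because both rounded values are multiples of req_size, "strictly greater" is exactly "at least one whole chunk apart". A self-contained sketch of that arithmetic (generic rounding macros; the kernel's round_up()/round_down() assume a power-of-two multiple):

#include <stdint.h>
#include <stdio.h>

#define rnd_up(x, y)	((((x) + (y) - 1) / (y)) * (y))
#define rnd_down(x, y)	(((x) / (y)) * (y))

/* 1 if an aligned req_size chunk fits in the inclusive range [s, e] */
static int chunk_fits(uint64_t s, uint64_t e, uint64_t req_size)
{
	return rnd_down(e + 1, req_size) > rnd_up(s, req_size);
}

int main(void)
{
	/* 8 KiB chunk in [0x0000, 0x2fff]: yes, [0x0000, 0x1fff] fits */
	printf("%d\n", chunk_fits(0x0000, 0x2fff, 0x2000));
	/* 8 KiB chunk in [0x1000, 0x2fff]: no aligned placement exists,
	 * so alloc_range_bias() now skips descending into such a block */
	printf("%d\n", chunk_fits(0x1000, 0x2fff, 0x2000));
	return 0;
}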
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index cb90e70d85e8..65f9f66933bb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -904,6 +904,7 @@ out:
connector_set = NULL;
fb = NULL;
mode = NULL;
+ num_connectors = 0;
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
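
DRM_MODESET_LOCK_ALL_END() can jump back to the matching _BEGIN() on contention backoff, so accumulators built inside the critical section must be re-zeroed on retry — that is all the one-line num_connectors reset does. A toy model of the failure mode (loop shape invented):

#include <stdio.h>

int main(void)
{
	int attempts = 0, num_connectors = 0;

retry:
	attempts++;
	num_connectors = 0;	/* without this reset, attempt 2 counts 6 */
	for (int i = 0; i < 3; i++)
		num_connectors++;
	if (attempts < 2)
		goto retry;	/* models the modeset-lock backoff path */

	printf("%d\n", num_connectors);	/* 3, not 6 */
	return 0;
}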
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index b67eafa55715..b7d42210fccc 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -260,8 +260,7 @@ static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
}
/**
- * drm_gem_vram_offset() - \
- Returns a GEM VRAM object's offset in video memory
+ * drm_gem_vram_offset() - Returns a GEM VRAM object's offset in video memory
* @gbo: the GEM VRAM object
*
* This function returns the buffer object's offset in the device's video
@@ -470,14 +469,15 @@ void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
EXPORT_SYMBOL(drm_gem_vram_vunmap);
/**
- * drm_gem_vram_fill_create_dumb() - \
- Helper for implementing &struct drm_driver.dumb_create
+ * drm_gem_vram_fill_create_dumb() - Helper for implementing
+ * &struct drm_driver.dumb_create
+ *
* @file: the DRM file
* @dev: the DRM device
* @pg_align: the buffer's alignment in multiples of the page size
* @pitch_align: the scanline's alignment in powers of 2
- * @args: the arguments as provided to \
- &struct drm_driver.dumb_create
+ * @args: the arguments as provided to
+ * &struct drm_driver.dumb_create
*
* This helper function fills &struct drm_mode_create_dumb, which is used
* by &struct drm_driver.dumb_create. Implementations of this interface
@@ -575,8 +575,7 @@ static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
*/
/**
- * drm_gem_vram_object_free() - \
- Implements &struct drm_gem_object_funcs.free
+ * drm_gem_vram_object_free() - Implements &struct drm_gem_object_funcs.free
* @gem: GEM object. Refers to &struct drm_gem_vram_object.gem
*/
static void drm_gem_vram_object_free(struct drm_gem_object *gem)
@@ -591,12 +590,11 @@ static void drm_gem_vram_object_free(struct drm_gem_object *gem)
*/
/**
- * drm_gem_vram_driver_dumb_create() - \
- Implements &struct drm_driver.dumb_create
+ * drm_gem_vram_driver_dumb_create() - Implements &struct drm_driver.dumb_create
* @file: the DRM file
* @dev: the DRM device
- * @args: the arguments as provided to \
- &struct drm_driver.dumb_create
+ * @args: the arguments as provided to
+ * &struct drm_driver.dumb_create
*
* This function requires the driver to use @drm_device.vram_mm for its
* instance of VRAM MM.
@@ -639,8 +637,8 @@ static void __drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
}
/**
- * drm_gem_vram_plane_helper_prepare_fb() - \
- * Implements &struct drm_plane_helper_funcs.prepare_fb
+ * drm_gem_vram_plane_helper_prepare_fb() - Implements &struct
+ * drm_plane_helper_funcs.prepare_fb
* @plane: a DRM plane
* @new_state: the plane's new state
*
@@ -690,8 +688,8 @@ err_drm_gem_vram_unpin:
EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb);
/**
- * drm_gem_vram_plane_helper_cleanup_fb() - \
- * Implements &struct drm_plane_helper_funcs.cleanup_fb
+ * drm_gem_vram_plane_helper_cleanup_fb() - Implements &struct
+ * drm_plane_helper_funcs.cleanup_fb
* @plane: a DRM plane
* @old_state: the plane's old state
*
@@ -717,8 +715,8 @@ EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
*/
/**
- * drm_gem_vram_simple_display_pipe_prepare_fb() - \
- * Implements &struct drm_simple_display_pipe_funcs.prepare_fb
+ * drm_gem_vram_simple_display_pipe_prepare_fb() - Implements &struct
+ * drm_simple_display_pipe_funcs.prepare_fb
* @pipe: a simple display pipe
* @new_state: the plane's new state
*
@@ -739,8 +737,8 @@ int drm_gem_vram_simple_display_pipe_prepare_fb(
EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb);
/**
- * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
- * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
+ * drm_gem_vram_simple_display_pipe_cleanup_fb() - Implements &struct
+ * drm_simple_display_pipe_funcs.cleanup_fb
* @pipe: a simple display pipe
* @old_state: the plane's old state
*
@@ -761,8 +759,7 @@ EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb);
*/
/**
- * drm_gem_vram_object_pin() - \
- Implements &struct drm_gem_object_funcs.pin
+ * drm_gem_vram_object_pin() - Implements &struct drm_gem_object_funcs.pin
* @gem: The GEM object to pin
*
* Returns:
@@ -785,8 +782,7 @@ static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
}
/**
- * drm_gem_vram_object_unpin() - \
- Implements &struct drm_gem_object_funcs.unpin
+ * drm_gem_vram_object_unpin() - Implements &struct drm_gem_object_funcs.unpin
* @gem: The GEM object to unpin
*/
static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 834a5e28abbe..7352bde299d5 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -820,7 +820,7 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
if (max_segment == 0)
max_segment = UINT_MAX;
err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT,
+ (unsigned long)nr_pages << PAGE_SHIFT,
max_segment, GFP_KERNEL);
if (err) {
kfree(sg);
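
nr_pages is an unsigned int in drm_prime_pages_to_sg(), so nr_pages << PAGE_SHIFT is computed in 32 bits and wraps for buffers of 4 GiB and larger; widening the operand first preserves the full byte count. A quick demonstration, assuming PAGE_SHIFT of 12 and an LP64 target:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int nr_pages = 0x140000;	/* 1.25M pages = 5 GiB */

	unsigned long truncated = nr_pages << PAGE_SHIFT;
	unsigned long widened = (unsigned long)nr_pages << PAGE_SHIFT;

	/* prints 0x40000000 vs 0x140000000 on LP64 targets */
	printf("0x%lx vs 0x%lx\n", truncated, widened);
	return 0;
}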
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 3f479483d7d8..23b4e9a3361d 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -760,9 +760,11 @@ static void output_poll_execute(struct work_struct *work)
changed = dev->mode_config.delayed_event;
dev->mode_config.delayed_event = false;
- if (!drm_kms_helper_poll && dev->mode_config.poll_running) {
- drm_kms_helper_disable_hpd(dev);
- dev->mode_config.poll_running = false;
+ if (!drm_kms_helper_poll) {
+ if (dev->mode_config.poll_running) {
+ drm_kms_helper_disable_hpd(dev);
+ dev->mode_config.poll_running = false;
+ }
goto out;
}
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 84101baeecc6..a6c19de46292 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1040,7 +1040,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint64_t *points;
uint32_t signaled_count, i;
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
lockdep_assert_none_held_once();
points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
@@ -1109,7 +1110,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
* fall through and try a 0 timeout wait!
*/
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
for (i = 0; i < count; ++i)
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
@@ -1416,10 +1418,21 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
/* This happens inside the syncobj lock */
fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+ if (!fence)
+ return;
+
ret = dma_fence_chain_find_seqno(&fence, entry->point);
- if (ret != 0 || !fence) {
+ if (ret != 0) {
+ /* The given seqno has not been submitted yet. */
dma_fence_put(fence);
return;
+ } else if (!fence) {
+ /* If dma_fence_chain_find_seqno returns 0 but sets the fence
+ * to NULL, it implies that the given seqno is signaled and a
+ * later seqno has already been submitted. Assign a stub fence
+ * so that the eventfd still gets signaled below.
+ */
+ fence = dma_fence_get_stub();
}
list_del_init(&entry->node);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index b5d6e3352071..3089029abba4 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -140,7 +140,7 @@ config DRM_I915_GVT_KVMGT
Note that this driver only supports newer devices from Broadwell on.
For further information and setup guide, you can visit:
- http://01.org/igvt-g.
+ https://github.com/intel/gvt-linux/wiki.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 47cd6bb04366..06900ff307b2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -246,7 +246,14 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
- return intel_port_to_phy(i915, dig_port->base.port);
+ /*
+ * FIXME should we care about the (VBT defined) dig_port->aux_ch
+ * relationship or should this be purely defined by the hardware layout?
+ * Currently if the port doesn't appear in the VBT, or if it's declared
+ * as HDMI-only and routed to a combo PHY, the encoder either won't be
+ * present at all or it will not have an aux_ch assigned.
+ */
+ return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE;
}
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
@@ -414,7 +421,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- if (DISPLAY_VER(dev_priv) < 12)
+ /* FIXME this is a mess */
+ if (phy != PHY_NONE)
intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
0, ICL_LANE_ENABLE_AUX);
@@ -437,7 +445,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0);
+ /* FIXME this is a mess */
+ if (phy != PHY_NONE)
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
+ ICL_LANE_ENABLE_AUX, 0);
intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 3fdd8a517983..ac7fe6281afe 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -609,6 +609,13 @@ struct intel_connector {
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
+ /*
+ * Optional hook called during init/resume to sync any state
+ * stored in the connector (eg. DSC state) wrt. the HW state.
+ */
+ void (*sync_state)(struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state);
+
/* Panel info for eDP and LVDS */
struct intel_panel panel;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f5ef95da5534..94d2a15d8444 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -2355,6 +2355,9 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
limits->min_rate = intel_dp_common_rate(intel_dp, 0);
limits->max_rate = intel_dp_max_link_rate(intel_dp);
+ /* FIXME 128b/132b SST support missing */
+ limits->max_rate = min(limits->max_rate, 810000);
+
limits->min_lane_count = 1;
limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
@@ -5696,6 +5699,9 @@ intel_dp_detect(struct drm_connector *connector,
goto out;
}
+ if (!intel_dp_is_edp(intel_dp))
+ intel_psr_init_dpcd(intel_dp);
+
intel_dp_detect_dsc_caps(intel_dp, intel_connector);
intel_dp_configure_mst(intel_dp);
@@ -5856,6 +5862,19 @@ intel_dp_connector_unregister(struct drm_connector *connector)
intel_connector_unregister(connector);
}
+void intel_dp_connector_sync_state(struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ if (crtc_state && crtc_state->dsc.compression_enable) {
+ drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
+ connector->dp.dsc_decompression_enabled = true;
+ } else {
+ connector->dp.dsc_decompression_enabled = false;
+ }
+}
+
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 05db46b111f2..375d0677cd8c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -45,6 +45,8 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
int intel_dp_min_bpp(enum intel_output_format output_format);
bool intel_dp_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
+void intel_dp_connector_sync_state(struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, int lane_count);
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 3a595cd433d4..8538d1ce2fcb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -330,23 +330,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
0, 0 },
};
-static struct drm_dp_aux *
-intel_dp_hdcp_get_aux(struct intel_connector *connector)
-{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
-
- if (intel_encoder_is_mst(connector->encoder))
- return &connector->port->aux;
- else
- return &dig_port->dp.aux;
-}
-
static int
intel_dp_hdcp2_read_rx_status(struct intel_connector *connector,
u8 *rx_status)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
ssize_t ret;
ret = drm_dp_dpcd_read(aux,
@@ -399,7 +389,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector,
const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
u8 msg_id = hdcp2_msg_data->msg_id;
int ret, timeout;
bool msg_ready = false;
@@ -454,8 +446,9 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_write, len;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
const struct hdcp2_dp_msg_data *hdcp2_msg_data;
- struct drm_dp_aux *aux;
hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
if (!hdcp2_msg_data)
@@ -463,8 +456,6 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
offset = hdcp2_msg_data->offset;
- aux = intel_dp_hdcp_get_aux(connector);
-
/* No msg_id in DP HDCP2.2 msgs */
bytes_to_write = size - 1;
byte++;
@@ -490,7 +481,8 @@ static
ssize_t get_receiver_id_list_rx_info(struct intel_connector *connector,
u32 *dev_cnt, u8 *byte)
{
- struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
ssize_t ret;
u8 *rx_info = byte;
@@ -515,8 +507,9 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_dp_aux *aux;
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
@@ -530,8 +523,6 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
return -EINVAL;
offset = hdcp2_msg_data->offset;
- aux = intel_dp_hdcp_get_aux(connector);
-
ret = intel_dp_hdcp2_wait_for_msg(connector, hdcp2_msg_data);
if (ret < 0)
return ret;
@@ -561,13 +552,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
/* Entire msg read timeout since initiate of msg read */
if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0) {
- if (intel_encoder_is_mst(connector->encoder))
- msg_end = ktime_add_ms(ktime_get_raw(),
- hdcp2_msg_data->msg_read_timeout *
- connector->port->parent->num_ports);
- else
- msg_end = ktime_add_ms(ktime_get_raw(),
- hdcp2_msg_data->msg_read_timeout);
+ msg_end = ktime_add_ms(ktime_get_raw(),
+ hdcp2_msg_data->msg_read_timeout);
}
ret = drm_dp_dpcd_read(aux, offset,
@@ -651,12 +637,11 @@ static
int intel_dp_hdcp2_capable(struct intel_connector *connector,
bool *capable)
{
- struct drm_dp_aux *aux;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
u8 rx_caps[3];
int ret;
- aux = intel_dp_hdcp_get_aux(connector);
-
*capable = false;
ret = drm_dp_dpcd_read(aux,
DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 8a9432335030..a01a59f57ae5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -1534,6 +1534,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
return NULL;
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ intel_connector->sync_state = intel_dp_connector_sync_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
drm_dp_mst_get_port_malloc(port);
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 94eece7f63be..caeca3a8442c 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -318,12 +318,6 @@ static void intel_modeset_update_connector_atomic_state(struct drm_i915_private
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
- if (crtc_state->dsc.compression_enable) {
- drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
- connector->dp.dsc_decompression_enabled = true;
- } else {
- connector->dp.dsc_decompression_enabled = false;
- }
conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
}
}
@@ -775,8 +769,9 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
+ struct intel_crtc_state *crtc_state = NULL;
+
if (connector->get_hw_state(connector)) {
- struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
connector->base.dpms = DRM_MODE_DPMS_ON;
@@ -802,6 +797,10 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
+
+ if (connector->sync_state)
+ connector->sync_state(connector, crtc_state);
+
drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id, connector->base.name,
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 57bbf3e3af92..4faaf4b3fc53 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -2776,9 +2776,6 @@ void intel_psr_init(struct intel_dp *intel_dp)
if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
return;
- if (!intel_dp_is_edp(intel_dp))
- intel_psr_init_dpcd(intel_dp);
-
/*
* HSW spec explicitly says PSR is tied to port A.
* BDW+ platforms have an instance of PSR registers per transcoder but
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index acc6b6804105..2915d7afe5cc 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1209,7 +1209,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_tv_format format;
u32 format_map;
- format_map = 1 << conn_state->tv.mode;
+ format_map = 1 << conn_state->tv.legacy_mode;
memset(&format, 0, sizeof(format));
memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
@@ -2298,7 +2298,7 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
* Read the list of supported input resolutions for the selected TV
* format.
*/
- format_map = 1 << conn_state->tv.mode;
+ format_map = 1 << conn_state->tv.legacy_mode;
memcpy(&tv_res, &format_map,
min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
@@ -2363,7 +2363,7 @@ intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
int i;
for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
- if (state->tv.mode == intel_sdvo_connector->tv_format_supported[i]) {
+ if (state->tv.legacy_mode == intel_sdvo_connector->tv_format_supported[i]) {
*val = i;
return 0;
@@ -2419,7 +2419,7 @@ intel_sdvo_connector_atomic_set_property(struct drm_connector *connector,
struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(state);
if (property == intel_sdvo_connector->tv_format) {
- state->tv.mode = intel_sdvo_connector->tv_format_supported[val];
+ state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[val];
if (state->crtc) {
struct drm_crtc_state *crtc_state =
@@ -3076,7 +3076,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
drm_property_add_enum(intel_sdvo_connector->tv_format, i,
tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
- intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0];
+ intel_sdvo_connector->base.base.state->tv.legacy_mode = intel_sdvo_connector->tv_format_supported[0];
drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index d4386cb3569e..992a725de751 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -949,7 +949,7 @@ intel_disable_tv(struct intel_atomic_state *state,
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
{
- int format = conn_state->tv.mode;
+ int format = conn_state->tv.legacy_mode;
return &tv_modes[format];
}
@@ -1704,7 +1704,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
break;
}
- connector->state->tv.mode = i;
+ connector->state->tv.legacy_mode = i;
}
static int
@@ -1859,7 +1859,7 @@ static int intel_tv_atomic_check(struct drm_connector *connector,
old_state = drm_atomic_get_old_connector_state(state, connector);
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
- if (old_state->tv.mode != new_state->tv.mode ||
+ if (old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
old_state->tv.margins.left != new_state->tv.margins.left ||
old_state->tv.margins.right != new_state->tv.margins.right ||
old_state->tv.margins.top != new_state->tv.margins.top ||
@@ -1896,7 +1896,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
conn_state->tv.margins.right = 46;
conn_state->tv.margins.bottom = 37;
- conn_state->tv.mode = 0;
+ conn_state->tv.legacy_mode = 0;
/* Create TV properties then attach current values */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
@@ -1910,7 +1910,7 @@ static void intel_tv_add_properties(struct drm_connector *connector)
drm_object_attach_property(&connector->base,
i915->drm.mode_config.legacy_tv_mode_property,
- conn_state->tv.mode);
+ conn_state->tv.legacy_mode);
drm_object_attach_property(&connector->base,
i915->drm.mode_config.tv_left_margin_property,
conn_state->tv.margins.left);
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
index 64f440fdc22b..8b21dc8e26d5 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -51,8 +51,8 @@
#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00)
#define _DSCA_PPS_0 0x6B200
#define _DSCC_PPS_0 0x6BA00
-#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + (pps) * 4)
-#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + (pps) * 4)
+#define DSCA_PPS(pps) _MMIO(_DSCA_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4)
+#define DSCC_PPS(pps) _MMIO(_DSCC_PPS_0 + ((pps) < 12 ? (pps) : (pps) + 12) * 4)
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
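
The corrected PPS macros encode that only dwords 0-11 are contiguous from the base register; from PPS 12 on, the fix skips a 12-dword hole (assumed here to hold unrelated registers). A standalone check of the offsets the new expression yields (leading underscore dropped from the macro name):

#include <stdio.h>

#define DSCA_PPS_0 0x6B200

static unsigned int dsca_pps(unsigned int pps)
{
	return DSCA_PPS_0 + (pps < 12 ? pps : pps + 12) * 4;
}

int main(void)
{
	printf("PPS11 -> 0x%x\n", dsca_pps(11));	/* 0x6b22c, contiguous */
	printf("PPS12 -> 0x%x\n", dsca_pps(12));	/* 0x6b260, past the hole */
	return 0;
}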
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 1d3ebdf4069b..c08b67593565 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -379,6 +379,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
GEM_WARN_ON(obj->userptr.page_ref);
+ if (!obj->userptr.notifier.mm)
+ return;
+
mmu_interval_notifier_remove(&obj->userptr.notifier);
obj->userptr.notifier.mm = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 90f6c1ece57d..efcb00472be2 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2849,8 +2849,7 @@ static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
for (i = start; i < end; i += 4) {
p = intel_gvt_find_mmio_info(gvt, i);
if (p) {
- WARN(1, "dup mmio definition offset %x\n",
- info->offset);
+ WARN(1, "dup mmio definition offset %x\n", i);
/* We return -EEXIST here to make GVT-g load fail.
* So duplicated MMIO can be found as soon as
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index e98b6d69a91a..9b6d87c8b583 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -41,7 +41,7 @@
* To virtualize GPU resources GVT-g driver depends on hypervisor technology
* e.g KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability
* and be virtualized within GVT-g device module. More architectural design
- * doc is available on https://01.org/group/2230/documentation-list.
+ * doc is available on https://github.com/intel/gvt-linux/wiki.
*/
static LIST_HEAD(intel_gvt_devices);
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
index 2990dd4d4a0d..e14ac0ab1314 100644
--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include <linux/jiffies.h>
+
//#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
@@ -12,7 +14,7 @@
#define REDUCED_TIMESLICE 5
#define REDUCED_PREEMPT 10
-#define WAIT_FOR_RESET_TIME 10000
+#define WAIT_FOR_RESET_TIME_MS 10000
struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
{
@@ -91,7 +93,7 @@ int intel_selftest_wait_for_rq(struct i915_request *rq)
{
long ret;
- ret = i915_request_wait(rq, 0, WAIT_FOR_RESET_TIME);
+ ret = i915_request_wait(rq, 0, msecs_to_jiffies(WAIT_FOR_RESET_TIME_MS));
if (ret < 0)
return ret;
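
i915_request_wait() takes its timeout in jiffies, so the bare 10000 meant 10000 ticks — 10 s at HZ=1000 but 100 s at HZ=100 — which is why the constant is renamed with an _MS suffix and converted explicitly. A rough userspace model of the conversion, with an assumed HZ:

#include <stdio.h>

#define HZ 250	/* build-time kernel config; 100, 250, 300, 1000 are common */

/* rounds up, like the kernel helper, so a short timeout never becomes 0 */
static unsigned long msecs_to_jiffies_model(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
	printf("%lu jiffies\n", msecs_to_jiffies_model(10000)); /* 2500 at HZ=250 */
	return 0;
}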
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index 3f73b211fa8e..3407450435e2 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -294,6 +294,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_CVBS]) {
meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
drm_bridge_remove(&meson_encoder_cvbs->bridge);
- drm_bridge_remove(meson_encoder_cvbs->next_bridge);
}
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index 3f93c70488ca..311b91630fbe 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -168,6 +168,5 @@ void meson_encoder_dsi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_DSI]) {
meson_encoder_dsi = priv->encoders[MESON_ENC_DSI];
drm_bridge_remove(&meson_encoder_dsi->bridge);
- drm_bridge_remove(meson_encoder_dsi->next_bridge);
}
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 25ea76558690..c4686568c9ca 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -474,6 +474,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_HDMI]) {
meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
drm_bridge_remove(&meson_encoder_hdmi->bridge);
- drm_bridge_remove(meson_encoder_hdmi->next_bridge);
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index c0bc924cd302..c9c55e2ea584 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1287,7 +1287,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.highest_bank_bit = 15;
if (adreno_is_a610(gpu)) {
- gpu->ubwc_config.highest_bank_bit = 14;
+ gpu->ubwc_config.highest_bank_bit = 13;
gpu->ubwc_config.min_acc_len = 1;
gpu->ubwc_config.ubwc_mode = 1;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 83380bc92a00..6a4b489d44e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -144,10 +144,6 @@ enum dpu_enc_rc_states {
* to track crtc in the disable() hook which is called
* _after_ encoder_mask is cleared.
* @connector: If a mode is set, cached pointer to the active connector
- * @crtc_kickoff_cb: Callback into CRTC that will flush & start
- * all CTL paths
- * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
- * @debugfs_root: Debug file system root file node
* @enc_lock: Lock around physical encoder
* create/destroy/enable/disable
* @frame_busy_mask: Bitmask tracking which phys_enc we are still
@@ -2072,7 +2068,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
}
/* reset the merge 3D HW block */
- if (phys_enc->hw_pp->merge_3d) {
+ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
BLEND_3D_NONE);
if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
@@ -2103,7 +2099,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_wb)
intf_cfg.wb = phys_enc->hw_wb->idx;
- if (phys_enc->hw_pp->merge_3d)
+ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
if (ctl->ops.reset_intf_cfg)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index b58a9c2ae326..724537ab776d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -29,7 +29,6 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
* @topology: selected topology for the display
- * @hw_res: Hardware resources required as reported by the encoders
*/
struct dpu_rm_requirements {
struct msm_display_topology topology;
@@ -204,6 +203,8 @@ static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
* _dpu_rm_get_lm_peer - get the id of a mixer which is a peer of the primary
* @rm: dpu resource manager handle
* @primary_idx: index of primary mixer in rm->mixer_blks[]
+ *
+ * Returns: lm peer mixer id on success or %-EINVAL on error
*/
static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
{
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 77a8d9366ed7..fb588fde298a 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -135,11 +135,6 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
tbd = dp_link_get_test_bits_depth(ctrl->link,
ctrl->panel->dp_mode.bpp);
- if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
- pr_debug("BIT_DEPTH not set. Configure default\n");
- tbd = DP_TEST_BIT_DEPTH_8;
- }
-
config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
/* Num of Lanes */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index d37d599aec27..4c72124ffb5d 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -329,10 +329,26 @@ static const struct component_ops dp_display_comp_ops = {
.unbind = dp_display_unbind,
};
+static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+ struct drm_connector *connector;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ connector = dp->dp_display.connector;
+ drm_helper_hpd_irq_event(connector->dev);
+}
+
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
- struct drm_bridge *bridge = dp->dp_display.bridge;
+ if ((hpd && dp->dp_display.link_ready) ||
+ (!hpd && !dp->dp_display.link_ready)) {
+ drm_dbg_dp(dp->drm_dev, "HPD already %s\n",
+ (hpd ? "on" : "off"));
+ return 0;
+ }
/* reset video pattern flag on disconnect */
if (!hpd) {
@@ -348,7 +364,7 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
drm_dbg_dp(dp->drm_dev, "type=%d hpd=%d\n",
dp->dp_display.connector_type, hpd);
- drm_bridge_hpd_notify(bridge, dp->dp_display.link_ready);
+ dp_display_send_hpd_event(&dp->dp_display);
return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 98427d45e9a7..49dfac1fd1ef 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -7,6 +7,7 @@
#include <drm/drm_print.h>
+#include "dp_reg.h"
#include "dp_link.h"
#include "dp_panel.h"
@@ -1082,7 +1083,7 @@ int dp_link_process_request(struct dp_link *dp_link)
int dp_link_get_colorimetry_config(struct dp_link *dp_link)
{
- u32 cc;
+ u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB;
struct dp_link_private *link;
if (!dp_link) {
@@ -1096,10 +1097,11 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
* Unless a video pattern CTS test is ongoing, use RGB_VESA
* Only RGB_VESA and RGB_CEA supported for now
*/
- if (dp_link_is_video_pattern_requested(link))
- cc = link->dp_link.test_video.test_dyn_range;
- else
- cc = DP_TEST_DYNAMIC_RANGE_VESA;
+ if (dp_link_is_video_pattern_requested(link)) {
+ if (link->dp_link.test_video.test_dyn_range &
+ DP_TEST_DYNAMIC_RANGE_CEA)
+ cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB;
+ }
return cc;
}
@@ -1179,6 +1181,9 @@ void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
{
u32 tbd;
+ struct dp_link_private *link;
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
/*
* A few simplistic rules and assumptions are made here:
@@ -1196,12 +1201,13 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
tbd = DP_TEST_BIT_DEPTH_10;
break;
default:
- tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+ drm_dbg_dp(link->drm_dev, "bpp=%d not supported, use bpc=8\n",
+ bpp);
+ tbd = DP_TEST_BIT_DEPTH_8;
break;
}
- if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
- tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+ tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
return tbd;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index ea85a691e72b..78785ed4b40c 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -143,6 +143,9 @@
#define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001)
#define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005)
+#define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB (0)
+#define DP_MISC0_COLORIMERY_CFG_CEA_RGB (0x04)
+
#define REG_DP_VALID_BOUNDARY (0x00000030)
#define REG_DP_VALID_BOUNDARY_2 (0x00000034)
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 5f68e31a3e4e..0915f3b68752 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -26,7 +26,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
void *vaddr;
- vaddr = msm_gem_get_vaddr(obj);
+ vaddr = msm_gem_get_vaddr_locked(obj);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
iosys_map_set_vaddr(map, vaddr);
@@ -36,7 +36,7 @@ int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
- msm_gem_put_vaddr(obj);
+ msm_gem_put_vaddr_locked(obj);
}
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 095390774f22..655002b21b0d 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -751,12 +751,14 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned long flags;
- pm_runtime_get_sync(&gpu->pdev->dev);
+ WARN_ON(!mutex_is_locked(&gpu->lock));
- mutex_lock(&gpu->lock);
+ pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_hw_init(gpu);
+ submit->seqno = submit->hw_fence->seqno;
+
update_sw_cntrs(gpu);
/*
@@ -781,11 +783,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
gpu->funcs->submit(gpu, submit);
gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
- hangcheck_timer_reset(gpu);
-
- mutex_unlock(&gpu->lock);
-
pm_runtime_put(&gpu->pdev->dev);
+ hangcheck_timer_reset(gpu);
}
/*
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 5cc8d358cc97..d5512037c38b 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -21,6 +21,8 @@ struct msm_iommu_pagetable {
struct msm_mmu base;
struct msm_mmu *parent;
struct io_pgtable_ops *pgtbl_ops;
+ const struct iommu_flush_ops *tlb;
+ struct device *iommu_dev;
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
@@ -201,11 +203,33 @@ static const struct msm_mmu_funcs pagetable_funcs = {
static void msm_iommu_tlb_flush_all(void *cookie)
{
+ struct msm_iommu_pagetable *pagetable = cookie;
+ struct adreno_smmu_priv *adreno_smmu;
+
+ if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+ return;
+
+ adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+ pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);
+
+ pm_runtime_put_autosuspend(pagetable->iommu_dev);
}
static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
+ struct msm_iommu_pagetable *pagetable = cookie;
+ struct adreno_smmu_priv *adreno_smmu;
+
+ if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+ return;
+
+ adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+ pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);
+
+ pm_runtime_put_autosuspend(pagetable->iommu_dev);
}
static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
@@ -213,7 +237,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
{
}
-static const struct iommu_flush_ops null_tlb_ops = {
+static const struct iommu_flush_ops tlb_ops = {
.tlb_flush_all = msm_iommu_tlb_flush_all,
.tlb_flush_walk = msm_iommu_tlb_flush_walk,
.tlb_add_page = msm_iommu_tlb_add_page,
@@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* The incoming cfg will have the TTBR1 quirk enabled */
ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
- ttbr0_cfg.tlb = &null_tlb_ops;
+ ttbr0_cfg.tlb = &tlb_ops;
pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
- &ttbr0_cfg, iommu->domain);
+ &ttbr0_cfg, pagetable);
if (!pagetable->pgtbl_ops) {
kfree(pagetable);
@@ -279,6 +303,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* Needed later for TLB flush */
pagetable->parent = parent;
+ pagetable->tlb = ttbr1_cfg->tlb;
+ pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 455b2e3a0cdd..35423d10aafa 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -562,6 +562,7 @@ static const struct msm_mdss_data sdm670_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.highest_bank_bit = 1,
+ .reg_bus_bw = 76800,
};
static const struct msm_mdss_data sdm845_data = {
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 4bc13f7d005a..9d6655f96f0c 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -21,8 +21,6 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
msm_fence_init(submit->hw_fence, fctx);
- submit->seqno = submit->hw_fence->seqno;
-
mutex_lock(&priv->lru.lock);
for (i = 0; i < submit->nr_bos; i++) {
@@ -35,8 +33,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
mutex_unlock(&priv->lru.lock);
+ /* TODO move submit path over to using a per-ring lock.. */
+ mutex_lock(&gpu->lock);
+
msm_gpu_submit(gpu, submit);
+ mutex_unlock(&gpu->lock);
+
return dma_fence_get(submit->hw_fence);
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 1e6aaf95ff7c..ceef470c9fbf 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -100,3 +100,11 @@ config DRM_NOUVEAU_SVM
help
Say Y here if you want to enable experimental support for
Shared Virtual Memory (SVM).
+
+config DRM_NOUVEAU_GSP_DEFAULT
+ bool "Use GSP firmware for Turing/Ampere (needs firmware installed)"
+ depends on DRM_NOUVEAU
+ default n
+ help
+ Say Y here if you want to use the GSP codepaths by default on
+ Turing and Ampere GPUs.
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 0d9fc741a719..932c9fd0b2d8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -11,6 +11,7 @@ struct nvkm_client {
u32 debug;
struct rb_root objroot;
+ spinlock_t obj_lock;
void *data;
int (*event)(u64 token, void *argv, u32 argc);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index d1437c08645f..6f5d376d8fcc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -9,7 +9,7 @@
#define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT)
struct nvkm_gsp_mem {
- u32 size;
+ size_t size;
void *data;
dma_addr_t addr;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index a04156ca8390..80f74ee0fc78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -128,12 +128,14 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
struct nouveau_abi16_ntfy *ntfy, *temp;
/* Cancel all jobs from the entity's queue. */
- drm_sched_entity_fini(&chan->sched.entity);
+ if (chan->sched)
+ drm_sched_entity_fini(&chan->sched->entity);
if (chan->chan)
nouveau_channel_idle(chan->chan);
- nouveau_sched_fini(&chan->sched);
+ if (chan->sched)
+ nouveau_sched_destroy(&chan->sched);
/* cleanup notifier state */
list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
@@ -197,6 +199,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
+ struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -261,6 +264,14 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = nouveau_exec_push_max_from_ib_max(ib_max);
break;
}
+ case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
+ getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
+ break;
+ case NOUVEAU_GETPARAM_VRAM_USED: {
+ struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
+ getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
+ break;
+ }
default:
NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
@@ -337,10 +348,16 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- ret = nouveau_sched_init(&chan->sched, drm, drm->sched_wq,
- chan->chan->dma.ib_max);
- if (ret)
- goto done;
+ /* If we're not using the VM_BIND uAPI, we don't need a scheduler.
+ *
+ * The client lock is already acquired by nouveau_abi16_get().
+ */
+ if (nouveau_cli_uvmm(cli)) {
+ ret = nouveau_sched_create(&chan->sched, drm, drm->sched_wq,
+ chan->chan->dma.ib_max);
+ if (ret)
+ goto done;
+ }
init->channel = chan->chan->chid;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 1f5e243c0c75..11c8c4a80079 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -26,7 +26,7 @@ struct nouveau_abi16_chan {
struct nouveau_bo *ntfy;
struct nouveau_vma *ntfy_vma;
struct nvkm_mm heap;
- struct nouveau_sched sched;
+ struct nouveau_sched *sched;
};
struct nouveau_abi16 {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 6f6c31a9937b..a947e1d5f309 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -201,7 +201,8 @@ nouveau_cli_fini(struct nouveau_cli *cli)
WARN_ON(!list_empty(&cli->worker));
usif_client_fini(cli);
- nouveau_sched_fini(&cli->sched);
+ if (cli->sched)
+ nouveau_sched_destroy(&cli->sched);
if (uvmm)
nouveau_uvmm_fini(uvmm);
nouveau_vmm_fini(&cli->svm);
@@ -311,7 +312,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
cli->mem = &mems[ret];
/* Don't pass in the (shared) sched_wq in order to let
- * nouveau_sched_init() create a dedicated one for VM_BIND jobs.
+ * nouveau_sched_create() create a dedicated one for VM_BIND jobs.
*
* This is required to ensure that for VM_BIND jobs free_job() work and
* run_job() work can always run concurrently and hence, free_job() work
@@ -320,7 +321,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
* locks which indirectly or directly are held for allocations
* elsewhere.
*/
- ret = nouveau_sched_init(&cli->sched, drm, NULL, 1);
+ ret = nouveau_sched_create(&cli->sched, drm, NULL, 1);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8a6d94c8b163..e239c6bf4afa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -98,7 +98,7 @@ struct nouveau_cli {
bool disabled;
} uvmm;
- struct nouveau_sched sched;
+ struct nouveau_sched *sched;
const struct nvif_mclass *mem;
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index bc5d71b79ab2..e65c0ef23bc7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -389,7 +389,7 @@ nouveau_exec_ioctl_exec(struct drm_device *dev,
if (ret)
goto out;
- args.sched = &chan16->sched;
+ args.sched = chan16->sched;
args.file_priv = file_priv;
args.chan = chan;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 49c2bcbef129..5a887d67dc0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -764,7 +764,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
return -ENOMEM;
if (unlikely(nouveau_cli_uvmm(cli)))
- return -ENOSYS;
+ return nouveau_abi16_put(abi16, -ENOSYS);
list_for_each_entry(temp, &abi16->channels, head) {
if (temp->chan->chid == req->channel) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index dd98f6910f9c..32fa2e273965 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -398,7 +398,7 @@ static const struct drm_sched_backend_ops nouveau_sched_ops = {
.free_job = nouveau_sched_free_job,
};
-int
+static int
nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
struct workqueue_struct *wq, u32 credit_limit)
{
@@ -453,7 +453,30 @@ fail_wq:
return ret;
}
-void
+int
+nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit)
+{
+ struct nouveau_sched *sched;
+ int ret;
+
+ sched = kzalloc(sizeof(*sched), GFP_KERNEL);
+ if (!sched)
+ return -ENOMEM;
+
+ ret = nouveau_sched_init(sched, drm, wq, credit_limit);
+ if (ret) {
+ kfree(sched);
+ return ret;
+ }
+
+ *psched = sched;
+
+ return 0;
+}
+
+
+static void
nouveau_sched_fini(struct nouveau_sched *sched)
{
struct drm_gpu_scheduler *drm_sched = &sched->base;
@@ -471,3 +494,14 @@ nouveau_sched_fini(struct nouveau_sched *sched)
if (sched->wq)
destroy_workqueue(sched->wq);
}
+
+void
+nouveau_sched_destroy(struct nouveau_sched **psched)
+{
+ struct nouveau_sched *sched = *psched;
+
+ nouveau_sched_fini(sched);
+ kfree(sched);
+
+ *psched = NULL;
+}
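
The move from an embedded nouveau_sched plus init/fini to heap-allocated create/destroy lets callers test a NULL pointer for "no scheduler", which the abi16 and cli paths above rely on. The generic shape of such a wrapper pair, all names invented:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct sched { unsigned int credits; };

static int sched_init(struct sched *s, unsigned int credits)
{
	if (!credits)
		return -EINVAL;
	s->credits = credits;
	return 0;
}

static void sched_fini(struct sched *s) { s->credits = 0; }

static int sched_create(struct sched **psched, unsigned int credits)
{
	struct sched *s = calloc(1, sizeof(*s));
	int ret;

	if (!s)
		return -ENOMEM;

	ret = sched_init(s, credits);
	if (ret) {
		free(s);	/* never leak the half-built object */
		return ret;
	}

	*psched = s;
	return 0;
}

static void sched_destroy(struct sched **psched)
{
	sched_fini(*psched);
	free(*psched);
	*psched = NULL;	/* callers can safely test for "no scheduler" */
}

int main(void)
{
	struct sched *s = NULL;

	if (sched_create(&s, 1) == 0) {
		printf("credits=%u\n", s->credits);
		sched_destroy(&s);
	}
	return 0;
}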
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index a6528f5981e6..e1f01a23e6f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -111,8 +111,8 @@ struct nouveau_sched {
} job;
};
-int nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
- struct workqueue_struct *wq, u32 credit_limit);
-void nouveau_sched_fini(struct nouveau_sched *sched);
+int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
+ struct workqueue_struct *wq, u32 credit_limit);
+void nouveau_sched_destroy(struct nouveau_sched **psched);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index cc03e0c22ff3..5e4565c5011a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -1011,7 +1011,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
if (ret)
return ret;
- buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
+ buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
if (!buffer->fault)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 4f223c972c6a..0a0a11dc9ec0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1740,7 +1740,7 @@ nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev,
if (ret)
return ret;
- args.sched = &cli->sched;
+ args.sched = cli->sched;
args.file_priv = file_priv;
ret = nouveau_uvmm_vm_bind(&args);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index ebdeb8eb9e77..c55662937ab2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -180,6 +180,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg, const char *dbg,
client->device = device;
client->debug = nvkm_dbgopt(dbg, "CLIENT");
client->objroot = RB_ROOT;
+ spin_lock_init(&client->obj_lock);
client->event = event;
INIT_LIST_HEAD(&client->umem);
spin_lock_init(&client->lock);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 7c554c14e884..aea3ba72027a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -30,8 +30,10 @@ nvkm_object_search(struct nvkm_client *client, u64 handle,
const struct nvkm_object_func *func)
{
struct nvkm_object *object;
+ unsigned long flags;
if (handle) {
+ spin_lock_irqsave(&client->obj_lock, flags);
struct rb_node *node = client->objroot.rb_node;
while (node) {
object = rb_entry(node, typeof(*object), node);
@@ -40,9 +42,12 @@ nvkm_object_search(struct nvkm_client *client, u64 handle,
else
if (handle > object->object)
node = node->rb_right;
- else
+ else {
+ spin_unlock_irqrestore(&client->obj_lock, flags);
goto done;
+ }
}
+ spin_unlock_irqrestore(&client->obj_lock, flags);
return ERR_PTR(-ENOENT);
} else {
object = &client->object;
@@ -57,30 +62,39 @@ done:
void
nvkm_object_remove(struct nvkm_object *object)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&object->client->obj_lock, flags);
if (!RB_EMPTY_NODE(&object->node))
rb_erase(&object->node, &object->client->objroot);
+ spin_unlock_irqrestore(&object->client->obj_lock, flags);
}
bool
nvkm_object_insert(struct nvkm_object *object)
{
- struct rb_node **ptr = &object->client->objroot.rb_node;
+ struct rb_node **ptr;
struct rb_node *parent = NULL;
+ unsigned long flags;
+ spin_lock_irqsave(&object->client->obj_lock, flags);
+ ptr = &object->client->objroot.rb_node;
while (*ptr) {
struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node);
parent = *ptr;
- if (object->object < this->object)
+ if (object->object < this->object) {
ptr = &parent->rb_left;
- else
- if (object->object > this->object)
+ } else if (object->object > this->object) {
ptr = &parent->rb_right;
- else
+ } else {
+ spin_unlock_irqrestore(&object->client->obj_lock, flags);
return false;
+ }
}
rb_link_node(&object->node, parent, ptr);
rb_insert_color(&object->node, &object->client->objroot);
+ spin_unlock_irqrestore(&object->client->obj_lock, flags);
return true;
}
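
The nvkm change serializes every objroot search, insert, and erase behind the new client->obj_lock, taking care to drop the lock on the early "found" and "duplicate" returns. The same discipline in portable C, with a pthread mutex over a flat table standing in for the rbtree:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long handles[8];
static int count;

/* returns index or -1; every exit path drops the lock, mirroring the
 * early spin_unlock_irqrestore() added on the "found" branch above */
static int search(unsigned long handle)
{
	pthread_mutex_lock(&obj_lock);
	for (int i = 0; i < count; i++) {
		if (handles[i] == handle) {
			pthread_mutex_unlock(&obj_lock);
			return i;
		}
	}
	pthread_mutex_unlock(&obj_lock);
	return -1;
}

static int insert(unsigned long handle)
{
	int ok = 0;

	pthread_mutex_lock(&obj_lock);
	/* duplicate scan and insertion share one critical section */
	for (int i = 0; i < count; i++)
		if (handles[i] == handle)
			goto out;
	if (count < 8) {
		handles[count++] = handle;
		ok = 1;
	}
out:
	pthread_mutex_unlock(&obj_lock);
	return ok;
}

int main(void)
{
	insert(0xbeef);
	printf("%d %d\n", insert(0xbeef), search(0xbeef)); /* 0 0 */
	return 0;
}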
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
index 4135690326f4..3a30bea30e36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
@@ -168,12 +168,11 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
rm->flush = r535_bar_flush;
ret = gf100_bar_new_(rm, device, type, inst, &bar);
- *pbar = bar;
if (ret) {
- if (!bar)
- kfree(rm);
+ kfree(rm);
return ret;
}
+ *pbar = bar;
bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
if (!bar->flushBAR2PhysMode)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 19188683c8fc..8c2bf1c16f2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
return (void *)fw;
}
+static void
+shadow_fw_release(void *fw)
+{
+ release_firmware(fw);
+}
+
static const struct nvbios_source
shadow_fw = {
.name = "firmware",
.init = shadow_fw_init,
- .fini = (void(*)(void *))release_firmware,
+ .fini = shadow_fw_release,
.read = shadow_fw_read,
.rw = false,
};
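
Calling release_firmware() through a cast to void (*)(void *) is undefined behaviour and trips kernels built with control-flow integrity, since the indirect call's type no longer matches the callee; a wrapper with the slot's exact signature is the portable fix. Userspace illustration with the firmware types stubbed out:

#include <stdio.h>

struct firmware { int id; };

static void release_firmware(const struct firmware *fw)
{
	printf("releasing fw %d\n", fw->id);
}

/* exact match for the .fini slot, so indirect calls stay well-typed */
static void shadow_fw_release(void *fw)
{
	release_firmware(fw);
}

int main(void)
{
	void (*fini)(void *) = shadow_fw_release;
	struct firmware fw = { .id = 535 };

	fini(&fw);
	return 0;
}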
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index 5e1fa176aac4..a73a5b589790 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -997,6 +997,32 @@ r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
return 0;
}
+static void
+nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
+{
+ if (mem->data) {
+ /*
+ * Poison the buffer to catch any unexpected access from
+ * GSP-RM if the buffer was prematurely freed.
+ */
+ memset(mem->data, 0xFF, mem->size);
+
+ dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
+ memset(mem, 0, sizeof(*mem));
+ }
+}
+
+static int
+nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
+{
+ mem->size = size;
+ mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
+ if (WARN_ON(!mem->data))
+ return -ENOMEM;
+
+ return 0;
+}
+
static int
r535_gsp_postinit(struct nvkm_gsp *gsp)
{
@@ -1024,6 +1050,11 @@ r535_gsp_postinit(struct nvkm_gsp *gsp)
nvkm_inth_allow(&gsp->subdev.inth);
nvkm_wr32(device, 0x110004, 0x00000040);
+
+ /* Release the DMA buffers that were needed only for boot and init */
+ nvkm_gsp_mem_dtor(gsp, &gsp->boot.fw);
+ nvkm_gsp_mem_dtor(gsp, &gsp->libos);
+
return ret;
}
@@ -1532,27 +1563,6 @@ r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
return 0;
}
-static void
-nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
-{
- if (mem->data) {
- dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
- mem->data = NULL;
- }
-}
-
-static int
-nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem)
-{
- mem->size = size;
- mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
- if (WARN_ON(!mem->data))
- return -ENOMEM;
-
- return 0;
-}
-
-
static int
r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
{
@@ -1938,20 +1948,20 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
* See kgspCreateRadix3_IMPL
*/
static int
-nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
+nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
struct nvkm_gsp_radix3 *rx3)
{
u64 addr;
for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {
u64 *ptes;
- int idx;
+ size_t bufsize;
+ int ret, idx;
- rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
- rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size,
- &rx3->mem[i].addr, GFP_KERNEL);
- if (WARN_ON(!rx3->mem[i].data))
- return -ENOMEM;
+ bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
+ ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]);
+ if (ret)
+ return ret;
ptes = rx3->mem[i].data;
if (i == 2) {
@@ -1991,7 +2001,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
if (ret)
return ret;
- ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3);
+ ret = nvkm_gsp_radix3_sg(gsp, &gsp->sr.sgt, len, &gsp->sr.radix3);
if (ret)
return ret;
@@ -2150,6 +2160,13 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
mutex_destroy(&gsp->cmdq.mutex);
r535_gsp_dtor_fws(gsp);
+
+ nvkm_gsp_mem_dtor(gsp, &gsp->rmargs);
+ nvkm_gsp_mem_dtor(gsp, &gsp->wpr_meta);
+ nvkm_gsp_mem_dtor(gsp, &gsp->shm.mem);
+ nvkm_gsp_mem_dtor(gsp, &gsp->loginit);
+ nvkm_gsp_mem_dtor(gsp, &gsp->logintr);
+ nvkm_gsp_mem_dtor(gsp, &gsp->logrm);
}
int
@@ -2194,7 +2211,7 @@ r535_gsp_oneinit(struct nvkm_gsp *gsp)
memcpy(gsp->sig.data, data, size);
/* Build radix3 page table for ELF image. */
- ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
+ ret = nvkm_gsp_radix3_sg(gsp, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
if (ret)
return ret;
@@ -2295,8 +2312,12 @@ r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
{
struct nvkm_subdev *subdev = &gsp->subdev;
int ret;
+ bool enable_gsp = fwif->enable;
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))
+#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
+ enable_gsp = true;
+#endif
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
return -EINVAL;
if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
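
The new nvkm_gsp_mem_ctor()/nvkm_gsp_mem_dtor() pair centralizes coherent-DMA buffer management: the destructor poisons the buffer with 0xFF before freeing, so any late access from GSP-RM shows up as an obvious pattern instead of silent corruption, and it zeroes the descriptor so a double free becomes a no-op. A sketch of the same poison-on-free idiom, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/string.h>

struct dma_desc {
	size_t size;
	void *cpu;
	dma_addr_t dma;
};

static int dma_desc_alloc(struct device *dev, size_t size, struct dma_desc *d)
{
	d->size = size;
	d->cpu = dma_alloc_coherent(dev, size, &d->dma, GFP_KERNEL);
	return d->cpu ? 0 : -ENOMEM;
}

static void dma_desc_free(struct device *dev, struct dma_desc *d)
{
	if (!d->cpu)
		return;

	/* Poison first: a late device read/write hits an obvious pattern. */
	memset(d->cpu, 0xFF, d->size);
	dma_free_coherent(dev, d->size, d->cpu, d->dma);
	memset(d, 0, sizeof(*d));	/* a second free becomes a no-op */
}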
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index c4c0f08e9202..4945a1e787eb 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1768,11 +1768,11 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
};
static const struct drm_display_mode starry_himax83102_j02_default_mode = {
- .clock = 162850,
+ .clock = 162680,
.hdisplay = 1200,
- .hsync_start = 1200 + 50,
- .hsync_end = 1200 + 50 + 20,
- .htotal = 1200 + 50 + 20 + 50,
+ .hsync_start = 1200 + 60,
+ .hsync_end = 1200 + 60 + 20,
+ .htotal = 1200 + 60 + 20 + 40,
.vdisplay = 1920,
.vsync_start = 1920 + 116,
.vsync_end = 1920 + 116 + 8,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 85b3b4871a1d..fdd768bbd487 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -1985,8 +1985,10 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
clock = vop2_set_intf_mux(vp, rkencoder->crtc_endpoint_id, polflags);
}
- if (!clock)
+ if (!clock) {
+ vop2_unlock(vop2);
return;
+ }
if (vcstate->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
!(vp_data->feature & VOP2_VP_FEATURE_OUTPUT_10BIT))
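
The early return above previously leaked the lock taken at the top of vop2_crtc_atomic_enable(). Funneling every exit through a single unlock label is the usual way to keep such paths safe; a minimal sketch, where setup_clock() and program_hw() are hypothetical stand-ins:

#include <linux/mutex.h>

struct ctx {
	struct mutex lock;
};

bool setup_clock(struct ctx *c);	/* hypothetical */
void program_hw(struct ctx *c);		/* hypothetical */

static void crtc_enable(struct ctx *c)
{
	mutex_lock(&c->lock);

	if (!setup_clock(c))
		goto out_unlock;	/* every early exit drops the lock */

	program_hw(c);

out_unlock:
	mutex_unlock(&c->lock);
}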
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 85f082396d42..d442b893275b 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1178,21 +1178,24 @@ static void drm_sched_run_job_work(struct work_struct *w)
struct drm_sched_entity *entity;
struct dma_fence *fence;
struct drm_sched_fence *s_fence;
- struct drm_sched_job *sched_job = NULL;
+ struct drm_sched_job *sched_job;
int r;
if (READ_ONCE(sched->pause_submit))
return;
/* Find entity with a ready job */
- while (!sched_job && (entity = drm_sched_select_entity(sched))) {
- sched_job = drm_sched_entity_pop_job(entity);
- if (!sched_job)
- complete_all(&entity->entity_idle);
- }
+ entity = drm_sched_select_entity(sched);
if (!entity)
return; /* No more work */
+ sched_job = drm_sched_entity_pop_job(entity);
+ if (!sched_job) {
+ complete_all(&entity->entity_idle);
+ drm_sched_run_job_queue(sched);
+ return;
+ }
+
s_fence = sched_job->s_fence;
atomic_add(sched_job->credits, &sched->credit_count);
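
Rather than looping over entities inside one work invocation, the worker now handles at most one job per pass: if the selected entity turns out to have no job, it marks the entity idle and re-queues itself via drm_sched_run_job_queue() so other entities get re-checked. A generic sketch of that self-rearming worker shape, all names hypothetical:

#include <linux/workqueue.h>

struct sched {
	struct workqueue_struct *wq;
	struct work_struct work;
	bool paused;
};

struct source;					/* hypothetical entity type */
struct item;					/* hypothetical job type */
struct source *select_source(struct sched *s);	/* hypothetical */
struct item *pop_item(struct source *src);	/* hypothetical */
void mark_idle(struct source *src);		/* hypothetical */
void submit(struct sched *s, struct item *it);	/* hypothetical */

static void run_one(struct work_struct *w)
{
	struct sched *s = container_of(w, struct sched, work);
	struct source *src;
	struct item *it;

	if (READ_ONCE(s->paused))
		return;

	src = select_source(s);
	if (!src)
		return;			/* nothing ready: stay idle */

	it = pop_item(src);
	if (!it) {
		/* Selection raced with the queue draining: rearm and retry. */
		mark_idle(src);
		queue_work(s->wq, &s->work);
		return;
	}

	submit(s, it);			/* exactly one job per invocation */
}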
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index a73cff7a3070..03d1c76aec2d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1243,9 +1243,26 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm_mode_config_reset(drm);
- err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
- if (err < 0)
- goto hub;
+ /*
+ * Only take over from a potential firmware framebuffer if any CRTCs
+ * have been registered. This must not be a fatal error because there
+ * are other accelerators that are exposed via this driver.
+ *
+ * Another case where this happens is on Tegra234 where the display
+ * hardware is no longer part of the host1x complex, so this driver
+ * will not expose any modesetting features.
+ */
+ if (drm->mode_config.num_crtc > 0) {
+ err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
+ if (err < 0)
+ goto hub;
+ } else {
+ /*
+ * Indicate to userspace that this doesn't expose any display
+ * capabilities.
+ */
+ drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+ }
err = drm_dev_register(drm, 0);
if (err < 0)
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index ea2af6bd9abe..e48863a44556 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -8,16 +8,308 @@
#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
+#include <linux/sizes.h>
#include <drm/drm_buddy.h>
#include "../lib/drm_random.h"
+static unsigned int random_seed;
+
static inline u64 get_size(int order, u64 chunk_size)
{
return (1 << order) * chunk_size;
}
+static void drm_test_buddy_alloc_range_bias(struct kunit *test)
+{
+ u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem;
+ DRM_RND_STATE(prng, random_seed);
+ unsigned int i, count, *order;
+ struct drm_buddy mm;
+ LIST_HEAD(allocated);
+
+ bias_size = SZ_1M;
+ ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
+ ps = max(SZ_4K, ps);
+ mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */
+
+ kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
+
+ count = mm_size / bias_size;
+ order = drm_random_order(count, &prng);
+ KUNIT_EXPECT_TRUE(test, order);
+
+ /*
+ * Idea is to split the address space into uniform bias ranges, and then
+ * in some random order allocate within each bias, using various
+ * patterns within. This should detect if allocations leak out from a
+ * given bias, for example.
+ */
+
+ for (i = 0; i < count; i++) {
+ LIST_HEAD(tmp);
+ u32 size;
+
+ bias_start = order[i] * bias_size;
+ bias_end = bias_start + bias_size;
+ bias_rem = bias_size;
+
+ /* internal round_up too big */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size + ps, bias_size,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size, bias_size);
+
+ /* size too big */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size + ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size + ps, ps);
+
+ /* bias range too small for size */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start + ps,
+ bias_end, bias_size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start + ps, bias_end, bias_size, ps);
+
+ /* bias misaligned */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start + ps,
+ bias_end - ps,
+ bias_size >> 1, bias_size >> 1,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);
+
+ /* single big page */
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_size, bias_size,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_size, bias_size);
+ drm_buddy_free_list(&mm, &tmp);
+
+ /* single page with internal round_up */
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, ps, bias_size,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, ps, bias_size);
+ drm_buddy_free_list(&mm, &tmp);
+
+ /* random size within */
+ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+ if (size)
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &tmp,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+
+ bias_rem -= size;
+ /* too big for current avail */
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, bias_rem + ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, bias_rem + ps, ps);
+
+ if (bias_rem) {
+ /* random fill of the remainder */
+ size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+ size = max(size, ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+ /*
+ * Intentionally allow some space to be left
+ * unallocated, and ideally not always on the bias
+ * boundaries.
+ */
+ drm_buddy_free_list(&mm, &tmp);
+ } else {
+ list_splice_tail(&tmp, &allocated);
+ }
+ }
+
+ kfree(order);
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+
+ /*
+ * Something more free-form. Idea is to pick a random starting bias
+ * range within the address space and then start filling it up. Also
+ * randomly grow the bias range in both directions as we go along. This
+ * should give us bias start/end which is not always uniform like above,
+ * and in some cases will require the allocator to jump over already
+ * allocated nodes in the middle of the address space.
+ */
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
+
+ bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
+ bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
+ bias_end = max(bias_end, bias_start + ps);
+ bias_rem = bias_end - bias_start;
+
+ do {
+ u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
+
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start,
+ bias_end, size, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
+ bias_start, bias_end, size, ps);
+ bias_rem -= size;
+
+ /*
+ * Try to randomly grow the bias range in both directions, or
+ * only one, or perhaps don't grow at all.
+ */
+ do {
+ u32 old_bias_start = bias_start;
+ u32 old_bias_end = bias_end;
+
+ if (bias_start)
+ bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
+ if (bias_end != mm_size)
+ bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);
+
+ bias_rem += old_bias_start - bias_start;
+ bias_rem += bias_end - old_bias_end;
+ } while (!bias_rem && (bias_start || bias_end != mm_size));
+ } while (bias_rem);
+
+ KUNIT_ASSERT_EQ(test, bias_start, 0);
+ KUNIT_ASSERT_EQ(test, bias_end, mm_size);
+ KUNIT_ASSERT_TRUE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
+ ps, ps,
+ &allocated,
+ DRM_BUDDY_RANGE_ALLOCATION),
+ "buddy_alloc passed with bias(%x-%x), size=%u\n",
+ bias_start, bias_end, ps);
+
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_contiguous(struct kunit *test)
+{
+ const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
+ unsigned long i, n_pages, total;
+ struct drm_buddy_block *block;
+ struct drm_buddy mm;
+ LIST_HEAD(left);
+ LIST_HEAD(middle);
+ LIST_HEAD(right);
+ LIST_HEAD(allocated);
+
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
+
+ /*
+ * Idea is to fragment the address space by alternating block
+ * allocations between three different lists; one for left, middle and
+ * right. We can then free a list to simulate fragmentation. In
+ * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
+ * including the try_harder path.
+ */
+
+ i = 0;
+ n_pages = mm_size / ps;
+ do {
+ struct list_head *list;
+ int slot = i % 3;
+
+ if (slot == 0)
+ list = &left;
+ else if (slot == 1)
+ list = &middle;
+ else
+ list = &right;
+ KUNIT_ASSERT_FALSE_MSG(test,
+ drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ ps, ps, list, 0),
+ "buddy_alloc hit an error size=%lu\n",
+ ps);
+ } while (++i < n_pages);
+
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%lu\n", 3 * ps);
+
+ drm_buddy_free_list(&mm, &middle);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%lu\n", 3 * ps);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 2 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%lu\n", 2 * ps);
+
+ drm_buddy_free_list(&mm, &right);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc didn't error size=%lu\n", 3 * ps);
+ /*
+ * At this point we should have enough contiguous space for 2 blocks,
+ * however they are never buddies (since we freed middle and right) so
+ * will require the try_harder logic to find them.
+ */
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 2 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", 2 * ps);
+
+ drm_buddy_free_list(&mm, &left);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
+ 3 * ps, ps, &allocated,
+ DRM_BUDDY_CONTIGUOUS_ALLOCATION),
+ "buddy_alloc hit an error size=%lu\n", 3 * ps);
+
+ total = 0;
+ list_for_each_entry(block, &allocated, link)
+ total += drm_buddy_block_size(&mm, block);
+
+ KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);
+
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+}
+
static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
u64 mm_size, size, start = 0;
@@ -275,16 +567,30 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
drm_buddy_fini(&mm);
}
+static int drm_buddy_suite_init(struct kunit_suite *suite)
+{
+ while (!random_seed)
+ random_seed = get_random_u32();
+
+ kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
+ random_seed);
+
+ return 0;
+}
+
static struct kunit_case drm_buddy_tests[] = {
KUNIT_CASE(drm_test_buddy_alloc_limit),
KUNIT_CASE(drm_test_buddy_alloc_optimistic),
KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
KUNIT_CASE(drm_test_buddy_alloc_pathological),
+ KUNIT_CASE(drm_test_buddy_alloc_contiguous),
+ KUNIT_CASE(drm_test_buddy_alloc_range_bias),
{}
};
static struct kunit_suite drm_buddy_test_suite = {
.name = "drm_buddy",
+ .suite_init = drm_buddy_suite_init,
.test_cases = drm_buddy_tests,
};
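
Both new tests are randomized, so the suite seeds one global PRNG in suite_init() and logs the seed; a failing run can then be replayed by forcing the same seed. A minimal KUnit sketch of that convention, assuming only the core KUnit and prandom APIs (all names hypothetical):

#include <kunit/test.h>
#include <linux/prandom.h>
#include <linux/random.h>

static unsigned int demo_seed;

static int demo_suite_init(struct kunit_suite *suite)
{
	while (!demo_seed)
		demo_seed = get_random_u32();	/* 0 means "not seeded yet" */

	kunit_info(suite, "random_seed=0x%x\n", demo_seed);
	return 0;
}

static void demo_randomized_case(struct kunit *test)
{
	struct rnd_state prng;

	prandom_seed_state(&prng, demo_seed);	/* reproducible per seed */
	KUNIT_EXPECT_NE(test, demo_seed, 0u);
	kunit_info(test, "first draw=%u\n", prandom_u32_state(&prng));
}

static struct kunit_case demo_cases[] = {
	KUNIT_CASE(demo_randomized_case),
	{}
};

static struct kunit_suite demo_suite = {
	.name = "randomized_demo",
	.suite_init = demo_suite_init,
	.test_cases = demo_cases,
};
kunit_test_suite(demo_suite);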
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 1eb0c304f960..f37c0d765865 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -157,7 +157,7 @@ static void drm_test_mm_init(struct kunit *test)
/* After creation, it should all be one massive hole */
if (!assert_one_hole(test, &mm, 0, size)) {
- KUNIT_FAIL(test, "");
+ KUNIT_FAIL(test, "mm not one hole on creation");
goto out;
}
@@ -171,14 +171,14 @@ static void drm_test_mm_init(struct kunit *test)
/* After filling the range entirely, there should be no holes */
if (!assert_no_holes(test, &mm)) {
- KUNIT_FAIL(test, "");
+ KUNIT_FAIL(test, "mm has holes when filled");
goto out;
}
/* And then after emptying it again, the massive hole should be back */
drm_mm_remove_node(&tmp);
if (!assert_one_hole(test, &mm, 0, size)) {
- KUNIT_FAIL(test, "");
+ KUNIT_FAIL(test, "mm does not have single hole after emptying");
goto out;
}
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index b62f420a9f96..112438d965ff 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -387,7 +387,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
enum ttm_caching caching,
pgoff_t start_page, pgoff_t end_page)
{
- struct page **pages = tt->pages;
+ struct page **pages = &tt->pages[start_page];
unsigned int order;
pgoff_t i, nr;
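
The one-line ttm_pool fix addresses a classic subrange bug: the function frees pages in [start_page, end_page) but the walking pointer previously started at tt->pages[0], so the wrong slots were freed. An illustration of the corrected shape, simplified and ignoring the order/caching logic of the real function:

#include <linux/mm.h>

/* Free pages in [start, end): the cursor must be based at the sub-range. */
static void free_range(struct page **all_pages, unsigned long start,
		       unsigned long end)
{
	struct page **pages = &all_pages[start];	/* not all_pages[0] */
	unsigned long i;

	for (i = start; i < end; i++)
		__free_page(*pages++);
}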
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
index 68d9f6116bdf..777c20ceabab 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
@@ -10,7 +10,7 @@
#include "xe_bo.h"
-#define i915_gem_object_is_shmem(obj) ((obj)->flags & XE_BO_CREATE_SYSTEM_BIT)
+#define i915_gem_object_is_shmem(obj) (0) /* We don't use shmem */
static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n)
{
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index a6523df0f1d3..c347e2c29f81 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -114,21 +114,21 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
region |
XE_BO_NEEDS_CPU_ACCESS);
if (IS_ERR(remote)) {
- KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %li\n",
- str, PTR_ERR(remote));
+ KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
+ str, remote);
return;
}
err = xe_bo_validate(remote, NULL, false);
if (err) {
- KUNIT_FAIL(test, "Failed to validate system bo for %s: %li\n",
+ KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n",
str, err);
goto out_unlock;
}
err = xe_bo_vmap(remote);
if (err) {
- KUNIT_FAIL(test, "Failed to vmap system bo for %s: %li\n",
+ KUNIT_FAIL(test, "Failed to vmap system bo for %s: %i\n",
str, err);
goto out_unlock;
}
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
index ef56bd517b28..421b819fd4ba 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.c
@@ -21,4 +21,5 @@ kunit_test_suite(xe_mocs_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_mocs kunit test");
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 0b0e262e2166..4d3b80ec906d 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -28,6 +28,14 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
+const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
+ [XE_PL_SYSTEM] = "system",
+ [XE_PL_TT] = "gtt",
+ [XE_PL_VRAM0] = "vram0",
+ [XE_PL_VRAM1] = "vram1",
+ [XE_PL_STOLEN] = "stolen"
+};
+
static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
@@ -713,8 +721,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
migrate = xe->tiles[0].migrate;
xe_assert(xe, migrate);
-
- trace_xe_bo_move(bo);
+ trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
xe_device_mem_access_get(xe);
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 9b1279aca127..8be42ac6cd07 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -243,6 +243,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);
extern struct ttm_device_funcs xe_ttm_funcs;
+extern const char *const xe_mem_type_to_name[];
int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 1f0b4b9ce84f..5176c27e4b6a 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -83,9 +83,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-static void device_kill_persistent_exec_queues(struct xe_device *xe,
- struct xe_file *xef);
-
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
struct xe_device *xe = to_xe_device(dev);
@@ -102,8 +99,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
mutex_unlock(&xef->exec_queue.lock);
xa_destroy(&xef->exec_queue.xa);
mutex_destroy(&xef->exec_queue.lock);
- device_kill_persistent_exec_queues(xe, xef);
-
mutex_lock(&xef->vm.lock);
xa_for_each(&xef->vm.xa, idx, vm)
xe_vm_close_and_put(vm);
@@ -255,9 +250,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xa_erase(&xe->usm.asid_to_vm, asid);
}
- drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock);
- INIT_LIST_HEAD(&xe->persistent_engines.list);
-
spin_lock_init(&xe->pinned.lock);
INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
INIT_LIST_HEAD(&xe->pinned.external_vram);
@@ -570,37 +562,6 @@ void xe_device_shutdown(struct xe_device *xe)
{
}
-void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
-{
- mutex_lock(&xe->persistent_engines.lock);
- list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
- mutex_unlock(&xe->persistent_engines.lock);
-}
-
-void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
- struct xe_exec_queue *q)
-{
- mutex_lock(&xe->persistent_engines.lock);
- if (!list_empty(&q->persistent.link))
- list_del(&q->persistent.link);
- mutex_unlock(&xe->persistent_engines.lock);
-}
-
-static void device_kill_persistent_exec_queues(struct xe_device *xe,
- struct xe_file *xef)
-{
- struct xe_exec_queue *q, *next;
-
- mutex_lock(&xe->persistent_engines.lock);
- list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
- persistent.link)
- if (q->persistent.xef == xef) {
- xe_exec_queue_kill(q);
- list_del_init(&q->persistent.link);
- }
- mutex_unlock(&xe->persistent_engines.lock);
-}
-
void xe_device_wmb(struct xe_device *xe)
{
struct xe_gt *gt = xe_root_mmio_gt(xe);
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 3da83b233206..08d8b72c7731 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -42,10 +42,6 @@ int xe_device_probe(struct xe_device *xe);
void xe_device_remove(struct xe_device *xe);
void xe_device_shutdown(struct xe_device *xe);
-void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
-void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
- struct xe_exec_queue *q);
-
void xe_device_wmb(struct xe_device *xe);
static inline struct xe_file *to_xe_file(const struct drm_file *file)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 5dc9127a2029..e8491979a6f2 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -341,14 +341,6 @@ struct xe_device {
struct mutex lock;
} usm;
- /** @persistent_engines: engines that are closed but still running */
- struct {
- /** @lock: protects persistent engines */
- struct mutex lock;
- /** @list: list of persistent engines */
- struct list_head list;
- } persistent_engines;
-
/** @pinned: pinned BO state */
struct {
/** @lock: protected pinned BO list state */
diff --git a/drivers/gpu/drm/xe/xe_display.c b/drivers/gpu/drm/xe/xe_display.c
index 74391d9b11ae..e4db069f0db3 100644
--- a/drivers/gpu/drm/xe/xe_display.c
+++ b/drivers/gpu/drm/xe/xe_display.c
@@ -134,8 +134,6 @@ static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
int xe_display_init_nommio(struct xe_device *xe)
{
- int err;
-
if (!xe->info.enable_display)
return 0;
@@ -145,10 +143,6 @@ int xe_display_init_nommio(struct xe_device *xe)
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(xe);
- err = intel_power_domains_init(xe);
- if (err)
- return err;
-
return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 82d1305e831f..6040e4d22b28 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -131,14 +131,6 @@ static void bo_meminfo(struct xe_bo *bo,
static void show_meminfo(struct drm_printer *p, struct drm_file *file)
{
- static const char *const mem_type_to_name[TTM_NUM_MEM_TYPES] = {
- [XE_PL_SYSTEM] = "system",
- [XE_PL_TT] = "gtt",
- [XE_PL_VRAM0] = "vram0",
- [XE_PL_VRAM1] = "vram1",
- [4 ... 6] = NULL,
- [XE_PL_STOLEN] = "stolen"
- };
struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
struct xe_file *xef = file->driver_priv;
struct ttm_device *bdev = &xef->xe->ttm;
@@ -171,7 +163,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
spin_unlock(&client->bos_lock);
for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
- if (!mem_type_to_name[mem_type])
+ if (!xe_mem_type_to_name[mem_type])
continue;
man = ttm_manager_type(bdev, mem_type);
@@ -182,7 +174,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
DRM_GEM_OBJECT_RESIDENT |
(mem_type != XE_PL_SYSTEM ? 0 :
DRM_GEM_OBJECT_PURGEABLE),
- mem_type_to_name[mem_type]);
+ xe_mem_type_to_name[mem_type]);
}
}
}
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index bcfc4127c7c5..49223026c89f 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -60,7 +60,6 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
q->fence_irq = &gt->fence_irq[hwe->class];
q->ring_ops = gt->ring_ops[hwe->class];
q->ops = gt->exec_queue_ops;
- INIT_LIST_HEAD(&q->persistent.link);
INIT_LIST_HEAD(&q->compute.link);
INIT_LIST_HEAD(&q->multi_gt_link);
@@ -310,102 +309,6 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
return q->ops->set_timeslice(q, value);
}
-static int exec_queue_set_preemption_timeout(struct xe_device *xe,
- struct xe_exec_queue *q, u64 value,
- bool create)
-{
- u32 min = 0, max = 0;
-
- xe_exec_queue_get_prop_minmax(q->hwe->eclass,
- XE_EXEC_QUEUE_PREEMPT_TIMEOUT, &min, &max);
-
- if (xe_exec_queue_enforce_schedule_limit() &&
- !xe_hw_engine_timeout_in_range(value, min, max))
- return -EINVAL;
-
- return q->ops->set_preempt_timeout(q, value);
-}
-
-static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm)))
- return -EINVAL;
-
- if (value)
- q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
- else
- q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
-
- return 0;
-}
-
-static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- u32 min = 0, max = 0;
-
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- xe_exec_queue_get_prop_minmax(q->hwe->eclass,
- XE_EXEC_QUEUE_JOB_TIMEOUT, &min, &max);
-
- if (xe_exec_queue_enforce_schedule_limit() &&
- !xe_hw_engine_timeout_in_range(value, min, max))
- return -EINVAL;
-
- return q->ops->set_job_timeout(q, value);
-}
-
-static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
- return -EINVAL;
-
- q->usm.acc_trigger = value;
-
- return 0;
-}
-
-static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
- return -EINVAL;
-
- q->usm.acc_notify = value;
-
- return 0;
-}
-
-static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
- u64 value, bool create)
-{
- if (XE_IOCTL_DBG(xe, !create))
- return -EINVAL;
-
- if (XE_IOCTL_DBG(xe, !xe->info.has_usm))
- return -EINVAL;
-
- if (value > DRM_XE_ACC_GRANULARITY_64M)
- return -EINVAL;
-
- q->usm.acc_granularity = value;
-
- return 0;
-}
-
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
u64 value, bool create);
@@ -413,12 +316,6 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
- [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
};
static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -437,10 +334,15 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
if (XE_IOCTL_DBG(xe, ext.property >=
ARRAY_SIZE(exec_queue_set_property_funcs)) ||
- XE_IOCTL_DBG(xe, ext.pad))
+ XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
+ ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
+ if (!exec_queue_set_property_funcs[idx])
+ return -EINVAL;
+
return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
}
@@ -704,9 +606,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
}
q = xe_exec_queue_create(xe, vm, logical_mask,
- args->width, hwe,
- xe_vm_in_lr_mode(vm) ? 0 :
- EXEC_QUEUE_FLAG_PERSISTENT);
+ args->width, hwe, 0);
up_read(&vm->lock);
xe_vm_put(vm);
if (IS_ERR(q))
@@ -728,8 +628,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
goto kill_exec_queue;
}
- q->persistent.xef = xef;
-
mutex_lock(&xef->exec_queue.lock);
err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->exec_queue.lock);
@@ -872,10 +770,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
- if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT))
- xe_exec_queue_kill(q);
- else
- xe_device_add_persistent_exec_queues(xe, q);
+ xe_exec_queue_kill(q);
trace_xe_exec_queue_close(q);
xe_exec_queue_put(q);
@@ -926,20 +821,24 @@ void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
* @q: The exec queue
* @vm: The VM the engine does a bind or exec for
*
- * Get last fence, does not take a ref
+ * Get last fence, takes a ref
*
* Returns: last fence if not signaled, dma fence stub if signaled
*/
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
struct xe_vm *vm)
{
+ struct dma_fence *fence;
+
xe_exec_queue_last_fence_lockdep_assert(q, vm);
if (q->last_fence &&
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
xe_exec_queue_last_fence_put(q, vm);
- return q->last_fence ? q->last_fence : dma_fence_get_stub();
+ fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
+ dma_fence_get(fence);
+ return fence;
}
/**
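
Besides deleting the removed properties, the ioctl path now whitelists PRIORITY and TIMESLICE explicitly, checks for holes in the table, and still clamps the index with array_index_nospec() so a mispredicted bounds check cannot speculatively index past the array. A compact sketch of that hardened dispatch-table idiom (the handlers are stand-ins):

#include <linux/kernel.h>
#include <linux/nospec.h>

typedef int (*prop_fn)(u64 value);

static int set_priority(u64 value) { return 0; }	/* stand-in */
static int set_timeslice(u64 value) { return 0; }	/* stand-in */

static const prop_fn prop_funcs[] = {
	[0] = set_priority,
	[1] = set_timeslice,
	/* removed properties would leave NULL holes here */
};

static int set_property(u32 property, u64 value)
{
	u32 idx;

	if (property >= ARRAY_SIZE(prop_funcs))
		return -EINVAL;

	/* Clamp under speculation: a mispredicted branch cannot index OOB. */
	idx = array_index_nospec(property, ARRAY_SIZE(prop_funcs));
	if (!prop_funcs[idx])
		return -EINVAL;

	return prop_funcs[idx](value);
}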
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 8d4b7feb8c30..36f4901d8d7e 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -105,16 +105,6 @@ struct xe_exec_queue {
struct xe_guc_exec_queue *guc;
};
- /**
- * @persistent: persistent exec queue state
- */
- struct {
- /** @xef: file which this exec queue belongs to */
- struct xe_file *xef;
- /** @link: link in list of persistent exec queues */
- struct list_head link;
- } persistent;
-
union {
/**
* @parallel: parallel submission state
@@ -160,16 +150,6 @@ struct xe_exec_queue {
spinlock_t lock;
} compute;
- /** @usm: unified shared memory state */
- struct {
- /** @acc_trigger: access counter trigger */
- u32 acc_trigger;
- /** @acc_notify: access counter notify */
- u32 acc_notify;
- /** @acc_granularity: access counter granularity */
- u32 acc_granularity;
- } usm;
-
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 96b5224eb478..acb4d9f38fd7 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -212,7 +212,7 @@ static void xe_execlist_port_wake_locked(struct xe_execlist_port *port,
static void xe_execlist_make_active(struct xe_execlist_exec_queue *exl)
{
struct xe_execlist_port *port = exl->port;
- enum xe_exec_queue_priority priority = exl->active_priority;
+ enum xe_exec_queue_priority priority = exl->q->sched_props.priority;
XE_WARN_ON(priority == XE_EXEC_QUEUE_PRIORITY_UNSET);
XE_WARN_ON(priority < 0);
@@ -378,8 +378,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
list_del(&exl->active_link);
spin_unlock_irqrestore(&exl->port->lock, flags);
- if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
- xe_device_remove_persistent_exec_queues(xe, q);
drm_sched_entity_fini(&exl->entity);
drm_sched_fini(&exl->sched);
kfree(exl);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 3af2adec1295..35474ddbaf97 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -437,7 +437,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
* USM has its own SA pool so it does not block behind user operations
*/
if (gt_to_xe(gt)->info.has_usm) {
- gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
+ struct xe_device *xe = gt_to_xe(gt);
+
+ gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
+ IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
if (IS_ERR(gt->usm.bb_pool)) {
err = PTR_ERR(gt->usm.bb_pool);
goto err_force_wake;
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 9358f7336889..9fcae65b6469 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -145,10 +145,10 @@ void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
}
if (xe_gt_is_media_type(gt)) {
- sprintf(gtidle->name, "gt%d-mc\n", gt->info.id);
+ sprintf(gtidle->name, "gt%d-mc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_mc6_residency;
} else {
- sprintf(gtidle->name, "gt%d-rc\n", gt->info.id);
+ sprintf(gtidle->name, "gt%d-rc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_rc6_residency;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 9c2fe1697d6e..73f08f1924df 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -335,7 +335,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
return -EPROTO;
asid = FIELD_GET(PFD_ASID, msg[1]);
- pf_queue = &gt->usm.pf_queue[asid % NUM_PF_QUEUE];
+ pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
spin_lock_irqsave(&pf_queue->lock, flags);
full = pf_queue_full(pf_queue);
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 7eef23a00d77..f4c485289dbe 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -247,6 +247,14 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
xe_gt_assert(gt, vma);
+ /* Execlists not supported */
+ if (gt_to_xe(gt)->info.force_execlist) {
+ if (fence)
+ __invalidation_fence_signal(fence);
+
+ return 0;
+ }
+
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
if (!xe->info.has_range_tlb_invalidation) {
@@ -317,6 +325,10 @@ int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
struct drm_printer p = drm_err_printer(__func__);
int ret;
+ /* Execlists not supported */
+ if (gt_to_xe(gt)->info.force_execlist)
+ return 0;
+
/*
* XXX: See above, this algorithm only works if seqno are always in
* order
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 54ffcfcdd41f..f22ae717b0b2 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1028,8 +1028,6 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
- if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
- xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 0ec5ad2539f1..b38319d2801e 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -682,8 +682,6 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
#define PVC_CTX_ASID (0x2e + 1)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
-#define ACC_GRANULARITY_S 20
-#define ACC_NOTIFY_S 16
int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
struct xe_exec_queue *q, struct xe_vm *vm, u32 ring_size)
@@ -754,13 +752,7 @@ int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_lrc_write_ctx_reg(lrc, CTX_RING_CTL,
RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
if (xe->info.has_asid && vm)
- xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID,
- (q->usm.acc_granularity <<
- ACC_GRANULARITY_S) | vm->usm.asid);
- if (xe->info.has_usm && vm)
- xe_lrc_write_ctx_reg(lrc, PVC_CTX_ACC_CTR_THOLD,
- (q->usm.acc_notify << ACC_NOTIFY_S) |
- q->usm.acc_trigger);
+ xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
lrc->desc = LRC_VALID;
lrc->desc |= LRC_LEGACY_64B_CONTEXT << LRC_ADDRESSING_MODE_SHIFT;
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 5c6c54624252..70480c305602 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -170,11 +170,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (!IS_DGFX(xe)) {
/* Write out batch too */
m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
- if (xe->info.has_usm) {
- batch = tile->primary_gt->usm.bb_pool->bo;
- m->usm_batch_base_ofs = m->batch_base_ofs;
- }
-
for (i = 0; i < batch->size;
i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
XE_PAGE_SIZE) {
@@ -185,6 +180,24 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
entry);
level++;
}
+ if (xe->info.has_usm) {
+ xe_tile_assert(tile, batch->size == SZ_1M);
+
+ batch = tile->primary_gt->usm.bb_pool->bo;
+ m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
+ xe_tile_assert(tile, batch->size == SZ_512K);
+
+ for (i = 0; i < batch->size;
+ i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
+ XE_PAGE_SIZE) {
+ entry = vm->pt_ops->pte_encode_bo(batch, i,
+ pat_index, 0);
+
+ xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
+ entry);
+ level++;
+ }
+ }
} else {
u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
@@ -1204,8 +1217,11 @@ static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
}
if (q) {
fence = xe_exec_queue_last_fence_get(q, vm);
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ dma_fence_put(fence);
return false;
+ }
+ dma_fence_put(fence);
}
return true;
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 5f6b53ea5528..02f7808f28ca 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -105,7 +105,7 @@ static void xe_resize_vram_bar(struct xe_device *xe)
pci_bus_for_each_resource(root, root_res, i) {
if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
- root_res->start > 0x100000000ull)
+ (u64)root_res->start > 0x100000000ul)
break;
}
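
On 32-bit kernels resource_size_t can be only 32 bits wide, which makes a comparison against 0x100000000 constant-false and triggers a compiler warning; the cast widens the left-hand side first. In isolation:

#include <linux/ioport.h>
#include <linux/types.h>

static bool res_above_4g(const struct resource *res)
{
	/*
	 * resource_size_t may be only 32 bits wide; widen explicitly, or the
	 * comparison is constant-false (and a warning) on 32-bit builds.
	 */
	return (u64)res->start > 0x100000000ull;
}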
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index e45b37c3f0c2..6653c045f3c9 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -20,8 +20,8 @@
struct xe_pt_dir {
struct xe_pt pt;
- /** @dir: Directory structure for the xe_pt_walk functionality */
- struct xe_ptw_dir dir;
+ /** @children: Array of page-table child nodes */
+ struct xe_ptw *children[XE_PDES];
};
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
@@ -44,7 +44,7 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
{
- return container_of(pt_dir->dir.entries[index], struct xe_pt, base);
+ return container_of(pt_dir->children[index], struct xe_pt, base);
}
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
@@ -65,6 +65,14 @@ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
XE_PTE_NULL;
}
+static void xe_pt_free(struct xe_pt *pt)
+{
+ if (pt->level)
+ kfree(as_xe_pt_dir(pt));
+ else
+ kfree(pt);
+}
+
/**
* xe_pt_create() - Create a page-table.
* @vm: The vm to create for.
@@ -85,15 +93,19 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
{
struct xe_pt *pt;
struct xe_bo *bo;
- size_t size;
int err;
- size = !level ? sizeof(struct xe_pt) : sizeof(struct xe_pt_dir) +
- XE_PDES * sizeof(struct xe_ptw *);
- pt = kzalloc(size, GFP_KERNEL);
+ if (level) {
+ struct xe_pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+
+ pt = (dir) ? &dir->pt : NULL;
+ } else {
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ }
if (!pt)
return ERR_PTR(-ENOMEM);
+ pt->level = level;
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
XE_BO_CREATE_VRAM_IF_DGFX(tile) |
@@ -106,8 +118,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
goto err_kfree;
}
pt->bo = bo;
- pt->level = level;
- pt->base.dir = level ? &as_xe_pt_dir(pt)->dir : NULL;
+ pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
if (vm->xef)
xe_drm_client_add_bo(vm->xef->client, pt->bo);
@@ -116,7 +127,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
return pt;
err_kfree:
- kfree(pt);
+ xe_pt_free(pt);
return ERR_PTR(err);
}
@@ -193,7 +204,7 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
deferred);
}
}
- kfree(pt);
+ xe_pt_free(pt);
}
/**
@@ -358,7 +369,7 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
struct iosys_map *map = &parent->bo->vmap;
if (unlikely(xe_child))
- parent->base.dir->entries[offset] = &xe_child->base;
+ parent->base.children[offset] = &xe_child->base;
xe_pt_write(xe_walk->vm->xe, map, offset, pte);
parent->num_live++;
@@ -488,10 +499,12 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
* this device *requires* 64K PTE size for VRAM, fail.
*/
if (level == 0 && !xe_parent->is_compact) {
- if (xe_pt_is_pte_ps64K(addr, next, xe_walk))
+ if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
+ xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
pte |= XE_PTE_PS64;
- else if (XE_WARN_ON(xe_walk->needs_64K))
+ } else if (XE_WARN_ON(xe_walk->needs_64K)) {
return -EINVAL;
+ }
}
ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
@@ -534,13 +547,16 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
*child = &xe_child->base;
/*
- * Prefer the compact pagetable layout for L0 if possible.
+ * Prefer the compact pagetable layout for L0 if possible. This is
+ * only possible if the VMA covers the entire 2MB region, as compact
+ * 64K and 4K pages cannot be mixed within a 2MB region.
* TODO: Suballocate the pt bo to avoid wasting a lot of
* memory.
*/
if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
covers && xe_pt_scan_64K(addr, next, xe_walk)) {
walk->shifts = xe_compact_pt_shifts;
+ xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
flags |= XE_PDE_64K;
xe_child->is_compact = true;
}
@@ -853,7 +869,7 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
xe_pt_destroy(xe_pt_entry(pt_dir, j_),
xe_vma_vm(vma)->flags, deferred);
- pt_dir->dir.entries[j_] = &newpte->base;
+ pt_dir->children[j_] = &newpte->base;
}
kfree(entries[i].pt_entries);
}
@@ -1507,7 +1523,7 @@ xe_pt_commit_unbind(struct xe_vma *vma,
xe_pt_destroy(xe_pt_entry(pt_dir, i),
xe_vma_vm(vma)->flags, deferred);
- pt_dir->dir.entries[i] = NULL;
+ pt_dir->children[i] = NULL;
}
}
}
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c
index 8f6c8d063f39..b8b3d2aea492 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.c
+++ b/drivers/gpu/drm/xe/xe_pt_walk.c
@@ -74,7 +74,7 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
u64 addr, u64 end, struct xe_pt_walk *walk)
{
pgoff_t offset = xe_pt_offset(addr, level, walk);
- struct xe_ptw **entries = parent->dir ? parent->dir->entries : NULL;
+ struct xe_ptw **entries = parent->children ? parent->children : NULL;
const struct xe_pt_walk_ops *ops = walk->ops;
enum page_walk_action action;
struct xe_ptw *child;
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h
index ec3d1e9efa6d..5ecc4d2f0f65 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.h
+++ b/drivers/gpu/drm/xe/xe_pt_walk.h
@@ -8,28 +8,15 @@
#include <linux/pagewalk.h>
#include <linux/types.h>
-struct xe_ptw_dir;
-
/**
* struct xe_ptw - base class for driver pagetable subclassing.
- * @dir: Pointer to an array of children if any.
+ * @children: Pointer to an array of children if any.
*
* Drivers could subclass this, and if it's a page-directory, typically
- * embed the xe_ptw_dir::entries array in the same allocation.
+ * embed an array of xe_ptw pointers.
*/
struct xe_ptw {
- struct xe_ptw_dir *dir;
-};
-
-/**
- * struct xe_ptw_dir - page directory structure
- * @entries: Array holding page directory children.
- *
- * It is the responsibility of the user to ensure @entries is
- * correctly sized.
- */
-struct xe_ptw_dir {
- struct xe_ptw *entries[0];
+ struct xe_ptw **children;
};
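
The restructure replaces the hand-sized xe_ptw_dir flexible array with a plain children pointer in the base struct; directory nodes embed a fixed-size array and point the base at it, so the allocation is simply sizeof(struct xe_pt_dir) and leaf nodes keep children == NULL. A condensed sketch of the layout, where NCHILD stands in for XE_PDES:

#include <linux/slab.h>

#define NCHILD 512			/* stand-in for XE_PDES */

struct ptw {
	struct ptw **children;		/* NULL for leaf page tables */
};

struct pt {
	struct ptw base;
	unsigned int level;
};

struct pt_dir {
	struct pt pt;
	struct ptw *children[NCHILD];	/* embedded, fixed-size array */
};

static struct pt *pt_alloc(unsigned int level)
{
	if (level) {
		struct pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL);

		if (!dir)
			return NULL;
		dir->pt.level = level;
		dir->pt.base.children = dir->children;
		return &dir->pt;
	}
	return kzalloc(sizeof(struct pt), GFP_KERNEL);	/* children stays NULL */
}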
/**
diff --git a/drivers/gpu/drm/xe/xe_range_fence.c b/drivers/gpu/drm/xe/xe_range_fence.c
index d35d9ec58e86..372378e89e98 100644
--- a/drivers/gpu/drm/xe/xe_range_fence.c
+++ b/drivers/gpu/drm/xe/xe_range_fence.c
@@ -151,6 +151,11 @@ xe_range_fence_tree_next(struct xe_range_fence *rfence, u64 start, u64 last)
return xe_range_fence_tree_iter_next(rfence, start, last);
}
+static void xe_range_fence_free(struct xe_range_fence *rfence)
+{
+ kfree(rfence);
+}
+
const struct xe_range_fence_ops xe_range_fence_kfree_ops = {
- .free = (void (*)(struct xe_range_fence *rfence)) kfree,
+ .free = xe_range_fence_free,
};
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 01106a1156ad..4e2ccad0e52f 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -274,7 +274,6 @@ int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
struct dma_fence *fence;
fence = xe_exec_queue_last_fence_get(job->q, vm);
- dma_fence_get(fence);
return drm_sched_job_add_dependency(&job->drm, fence);
}
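
With xe_exec_queue_last_fence_get() now returning its own reference, callers that previously added one, as here in xe_sched_job_last_fence_add_dep(), drop their dma_fence_get(), and callers that only peek at the fence must put it, as the xe_migrate hunk above does. A sketch of the getter contract (struct queue is a hypothetical stand-in):

#include <linux/dma-fence.h>

struct queue {
	struct dma_fence *last_fence;	/* owned reference or NULL */
};

/* The getter hands out its own reference; callers always put it. */
static struct dma_fence *queue_last_fence_get(struct queue *q)
{
	if (q->last_fence)
		return dma_fence_get(q->last_fence);

	return dma_fence_get_stub();	/* already returns a reference */
}

static bool queue_is_idle(struct queue *q)
{
	struct dma_fence *fence = queue_last_fence_get(q);
	bool idle = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);

	dma_fence_put(fence);		/* balance the getter's reference */
	return idle;
}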
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index e4c220cf9115..02c9577fe418 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -19,7 +19,7 @@
#include "xe_macros.h"
#include "xe_sched_job_types.h"
-struct user_fence {
+struct xe_user_fence {
struct xe_device *xe;
struct kref refcount;
struct dma_fence_cb cb;
@@ -27,31 +27,32 @@ struct user_fence {
struct mm_struct *mm;
u64 __user *addr;
u64 value;
+ int signalled;
};
static void user_fence_destroy(struct kref *kref)
{
- struct user_fence *ufence = container_of(kref, struct user_fence,
+ struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence,
refcount);
mmdrop(ufence->mm);
kfree(ufence);
}
-static void user_fence_get(struct user_fence *ufence)
+static void user_fence_get(struct xe_user_fence *ufence)
{
kref_get(&ufence->refcount);
}
-static void user_fence_put(struct user_fence *ufence)
+static void user_fence_put(struct xe_user_fence *ufence)
{
kref_put(&ufence->refcount, user_fence_destroy);
}
-static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
- u64 value)
+static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
+ u64 value)
{
- struct user_fence *ufence;
+ struct xe_user_fence *ufence;
ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
if (!ufence)
@@ -69,7 +70,7 @@ static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr,
static void user_fence_worker(struct work_struct *w)
{
- struct user_fence *ufence = container_of(w, struct user_fence, worker);
+ struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker);
if (mmget_not_zero(ufence->mm)) {
kthread_use_mm(ufence->mm);
@@ -80,10 +81,11 @@ static void user_fence_worker(struct work_struct *w)
}
wake_up_all(&ufence->xe->ufence_wq);
+ WRITE_ONCE(ufence->signalled, 1);
user_fence_put(ufence);
}
-static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
+static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence)
{
INIT_WORK(&ufence->worker, user_fence_worker);
queue_work(ufence->xe->ordered_wq, &ufence->worker);
@@ -92,7 +94,7 @@ static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence)
static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
- struct user_fence *ufence = container_of(cb, struct user_fence, cb);
+ struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb);
kick_ufence(ufence, fence);
}
@@ -307,7 +309,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
/* Easy case... */
if (!num_in_fence) {
fence = xe_exec_queue_last_fence_get(q, vm);
- dma_fence_get(fence);
return fence;
}
@@ -322,7 +323,6 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
}
}
fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
- dma_fence_get(fences[current_fence - 1]);
cf = dma_fence_array_create(num_in_fence, fences,
vm->composite_fence_ctx,
vm->composite_fence_seqno++,
@@ -342,3 +342,39 @@ err_out:
return ERR_PTR(-ENOMEM);
}
+
+/**
+ * xe_sync_ufence_get() - Get user fence from sync
+ * @sync: input sync
+ *
+ * Get a user fence reference from sync.
+ *
+ * Return: xe_user_fence pointer with reference
+ */
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync)
+{
+ user_fence_get(sync->ufence);
+
+ return sync->ufence;
+}
+
+/**
+ * xe_sync_ufence_put() - Put user fence reference
+ * @ufence: user fence reference
+ *
+ */
+void xe_sync_ufence_put(struct xe_user_fence *ufence)
+{
+ user_fence_put(ufence);
+}
+
+/**
+ * xe_sync_ufence_get_status() - Get user fence status
+ * @ufence: user fence
+ *
+ * Return: 1 if signalled, 0 if not signalled, <0 on error
+ */
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence)
+{
+ return READ_ONCE(ufence->signalled);
+}
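
The signalled flag is written once by the fence worker after the wakeup and read locklessly by xe_sync_ufence_get_status(); WRITE_ONCE/READ_ONCE keep the compiler from tearing or caching the access, while the kref get/put helpers let a VMA keep the fence alive beyond the sync entry's lifetime. A stripped-down sketch (struct ufence is a stand-in, not the xe type):

#include <linux/compiler.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct ufence {
	struct kref refcount;
	int signalled;
};

static void ufence_destroy(struct kref *kref)
{
	kfree(container_of(kref, struct ufence, refcount));
}

static struct ufence *ufence_get(struct ufence *f)
{
	kref_get(&f->refcount);
	return f;
}

static void ufence_put(struct ufence *f)
{
	kref_put(&f->refcount, ufence_destroy);
}

static void ufence_mark_signalled(struct ufence *f)
{
	WRITE_ONCE(f->signalled, 1);	/* single writer, after the wakeup */
}

static int ufence_status(struct ufence *f)
{
	return READ_ONCE(f->signalled);	/* lockless readers pair with above */
}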
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index f43cdcaca6c5..0fd0d51208e6 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -38,4 +38,8 @@ static inline bool xe_sync_is_ufence(struct xe_sync_entry *sync)
return !!sync->ufence;
}
+struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync);
+void xe_sync_ufence_put(struct xe_user_fence *ufence);
+int xe_sync_ufence_get_status(struct xe_user_fence *ufence);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sync_types.h b/drivers/gpu/drm/xe/xe_sync_types.h
index 852db5e7884f..30ac3f51993b 100644
--- a/drivers/gpu/drm/xe/xe_sync_types.h
+++ b/drivers/gpu/drm/xe/xe_sync_types.h
@@ -18,7 +18,7 @@ struct xe_sync_entry {
struct drm_syncobj *syncobj;
struct dma_fence *fence;
struct dma_fence_chain *chain_fence;
- struct user_fence *ufence;
+ struct xe_user_fence *ufence;
u64 addr;
u64 timeline_value;
u32 type;
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 044c20881de7..0650b2fa75ef 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -167,9 +167,10 @@ int xe_tile_init_noalloc(struct xe_tile *tile)
goto err_mem_access;
tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
- if (IS_ERR(tile->mem.kernel_bb_pool))
+ if (IS_ERR(tile->mem.kernel_bb_pool)) {
err = PTR_ERR(tile->mem.kernel_bb_pool);
-
+ goto err_mem_access;
+ }
xe_wa_apply_tile_workarounds(tile);
xe_tile_sysfs_init(tile);
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 95163c303f3e..4ddc55527f9a 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -12,6 +12,7 @@
#include <linux/tracepoint.h>
#include <linux/types.h>
+#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
@@ -26,16 +27,16 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(u64, fence)
+ __field(struct xe_gt_tlb_invalidation_fence *, fence)
__field(int, seqno)
),
TP_fast_assign(
- __entry->fence = (u64)fence;
+ __entry->fence = fence;
__entry->seqno = fence->seqno;
),
- TP_printk("fence=0x%016llx, seqno=%d",
+ TP_printk("fence=%p, seqno=%d",
__entry->fence, __entry->seqno)
);
@@ -82,16 +83,16 @@ DECLARE_EVENT_CLASS(xe_bo,
TP_STRUCT__entry(
__field(size_t, size)
__field(u32, flags)
- __field(u64, vm)
+ __field(struct xe_vm *, vm)
),
TP_fast_assign(
__entry->size = bo->size;
__entry->flags = bo->flags;
- __entry->vm = (unsigned long)bo->vm;
+ __entry->vm = bo->vm;
),
- TP_printk("size=%zu, flags=0x%02x, vm=0x%016llx",
+ TP_printk("size=%zu, flags=0x%02x, vm=%p",
__entry->size, __entry->flags, __entry->vm)
);
@@ -100,9 +101,31 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
TP_ARGS(bo)
);
-DEFINE_EVENT(xe_bo, xe_bo_move,
- TP_PROTO(struct xe_bo *bo),
- TP_ARGS(bo)
+TRACE_EVENT(xe_bo_move,
+ TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
+ bool move_lacks_source),
+ TP_ARGS(bo, new_placement, old_placement, move_lacks_source),
+ TP_STRUCT__entry(
+ __field(struct xe_bo *, bo)
+ __field(size_t, size)
+ __field(u32, new_placement)
+ __field(u32, old_placement)
+ __array(char, device_id, 12)
+ __field(bool, move_lacks_source)
+ ),
+
+ TP_fast_assign(
+ __entry->bo = bo;
+ __entry->size = bo->size;
+ __entry->new_placement = new_placement;
+ __entry->old_placement = old_placement;
+ strscpy(__entry->device_id, dev_name(xe_bo_device(__entry->bo)->drm.dev), 12);
+ __entry->move_lacks_source = move_lacks_source;
+ ),
+ TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
+ __entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
+ xe_mem_type_to_name[__entry->old_placement],
+ xe_mem_type_to_name[__entry->new_placement], __entry->device_id)
);
DECLARE_EVENT_CLASS(xe_exec_queue,
@@ -327,16 +350,16 @@ DECLARE_EVENT_CLASS(xe_hw_fence,
TP_STRUCT__entry(
__field(u64, ctx)
__field(u32, seqno)
- __field(u64, fence)
+ __field(struct xe_hw_fence *, fence)
),
TP_fast_assign(
__entry->ctx = fence->dma.context;
__entry->seqno = fence->dma.seqno;
- __entry->fence = (unsigned long)fence;
+ __entry->fence = fence;
),
- TP_printk("ctx=0x%016llx, fence=0x%016llx, seqno=%u",
+ TP_printk("ctx=0x%016llx, fence=%p, seqno=%u",
__entry->ctx, __entry->fence, __entry->seqno)
);
@@ -365,7 +388,7 @@ DECLARE_EVENT_CLASS(xe_vma,
TP_ARGS(vma),
TP_STRUCT__entry(
- __field(u64, vma)
+ __field(struct xe_vma *, vma)
__field(u32, asid)
__field(u64, start)
__field(u64, end)
@@ -373,14 +396,14 @@ DECLARE_EVENT_CLASS(xe_vma,
),
TP_fast_assign(
- __entry->vma = (unsigned long)vma;
+ __entry->vma = vma;
__entry->asid = xe_vma_vm(vma)->usm.asid;
__entry->start = xe_vma_start(vma);
__entry->end = xe_vma_end(vma) - 1;
__entry->ptr = xe_vma_userptr(vma);
),
- TP_printk("vma=0x%016llx, asid=0x%05x, start=0x%012llx, end=0x%012llx, ptr=0x%012llx,",
+ TP_printk("vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
__entry->vma, __entry->asid, __entry->start,
__entry->end, __entry->ptr)
)
@@ -465,16 +488,16 @@ DECLARE_EVENT_CLASS(xe_vm,
TP_ARGS(vm),
TP_STRUCT__entry(
- __field(u64, vm)
+ __field(struct xe_vm *, vm)
__field(u32, asid)
),
TP_fast_assign(
- __entry->vm = (unsigned long)vm;
+ __entry->vm = vm;
__entry->asid = vm->usm.asid;
),
- TP_printk("vm=0x%016llx, asid=0x%05x", __entry->vm,
+ TP_printk("vm=%p, asid=0x%05x", __entry->vm,
__entry->asid)
);
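
The conversions in this file all follow one pattern: tracepoint fields that used to smuggle kernel pointers through u64 casts now store the pointer type directly and print it with %p, which hashes the value on output unless the kernel runs with no_hash_pointers. A minimal sketch of that pattern, with hypothetical event and struct names and the usual TRACE_SYSTEM boilerplate omitted:

TRACE_EVENT(demo_fence_signal,
	    TP_PROTO(struct demo_fence *fence),
	    TP_ARGS(fence),
	    TP_STRUCT__entry(
		     __field(struct demo_fence *, fence)
		     __field(int, seqno)
		     ),
	    TP_fast_assign(
		   __entry->fence = fence;
		   __entry->seqno = fence->seqno;
		   ),
	    TP_printk("fence=%p, seqno=%d", __entry->fence, __entry->seqno)
);
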
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 30db264d34a3..3b21afe5b488 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -37,8 +37,6 @@
#include "generated/xe_wa_oob.h"
#include "xe_wa.h"
-#define TEST_VM_ASYNC_OPS_ERROR
-
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
return vm->gpuvm.r_obj;
@@ -114,11 +112,8 @@ retry:
num_pages - pinned,
read_only ? 0 : FOLL_WRITE,
&pages[pinned]);
- if (ret < 0) {
- if (in_kthread)
- ret = 0;
+ if (ret < 0)
break;
- }
pinned += ret;
ret = 0;
@@ -902,6 +897,11 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
struct xe_device *xe = vm->xe;
bool read_only = xe_vma_read_only(vma);
+ if (vma->ufence) {
+ xe_sync_ufence_put(vma->ufence);
+ vma->ufence = NULL;
+ }
+
if (xe_vma_is_userptr(vma)) {
struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
@@ -1000,9 +1000,16 @@ int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
int err;
XE_WARN_ON(!vm);
- err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
- if (!err && bo && !bo->vm)
- err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
+ if (num_shared)
+ err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
+ else
+ err = drm_exec_lock_obj(exec, xe_vm_obj(vm));
+ if (!err && bo && !bo->vm) {
+ if (num_shared)
+ err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
+ else
+ err = drm_exec_lock_obj(exec, &bo->ttm.base);
+ }
return err;
}
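
The hunk above encodes a rule worth stating once: drm_exec_prepare_obj() both locks a GEM object and reserves dma-fence slots, and must not be asked for zero slots, while drm_exec_lock_obj() only takes the lock. A sketch of the resulting shape (wrapper name hypothetical):

#include <drm/drm_exec.h>

/* Hypothetical wrapper: reserve fence slots only when the caller needs them. */
static int demo_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj,
			 unsigned int num_shared)
{
	if (num_shared)
		return drm_exec_prepare_obj(exec, obj, num_shared);
	return drm_exec_lock_obj(exec, obj);
}
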
@@ -1606,6 +1613,16 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
trace_xe_vma_unbind(vma);
+ if (vma->ufence) {
+ struct xe_user_fence * const f = vma->ufence;
+
+ if (!xe_sync_ufence_get_status(f))
+ return ERR_PTR(-EBUSY);
+
+ vma->ufence = NULL;
+ xe_sync_ufence_put(f);
+ }
+
if (number_tiles > 1) {
fences = kmalloc_array(number_tiles, sizeof(*fences),
GFP_KERNEL);
@@ -1739,6 +1756,21 @@ err_fences:
return ERR_PTR(err);
}
+static struct xe_user_fence *
+find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_syncs; i++) {
+ struct xe_sync_entry *e = &syncs[i];
+
+ if (xe_sync_is_ufence(e))
+ return xe_sync_ufence_get(e);
+ }
+
+ return NULL;
+}
+
static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
struct xe_exec_queue *q, struct xe_sync_entry *syncs,
u32 num_syncs, bool immediate, bool first_op,
@@ -1746,9 +1778,16 @@ static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma,
{
struct dma_fence *fence;
struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
+ struct xe_user_fence *ufence;
xe_vm_assert_held(vm);
+ ufence = find_ufence_get(syncs, num_syncs);
+ if (vma->ufence && ufence)
+ xe_sync_ufence_put(vma->ufence);
+
+ vma->ufence = ufence ?: vma->ufence;
+
if (immediate) {
fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op,
last_op);
@@ -1984,6 +2023,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
xe_exec_queue_last_fence_get(wait_exec_queue, vm);
xe_sync_entry_signal(&syncs[i], NULL, fence);
+ dma_fence_put(fence);
}
}
@@ -2064,7 +2104,6 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
struct drm_gpuva_ops *ops;
struct drm_gpuva_op *__op;
- struct xe_vma_op *op;
struct drm_gpuvm_bo *vm_bo;
int err;
@@ -2111,23 +2150,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
if (IS_ERR(ops))
return ops;
-#ifdef TEST_VM_ASYNC_OPS_ERROR
- if (operation & FORCE_ASYNC_OP_ERROR) {
- op = list_first_entry_or_null(&ops->list, struct xe_vma_op,
- base.entry);
- if (op)
- op->inject_error = true;
- }
-#endif
-
drm_gpuva_for_each_op(__op, ops) {
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
if (__op->op == DRM_GPUVA_OP_MAP) {
- op->map.immediate =
- flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
- op->map.read_only =
- flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
op->map.pat_index = pat_index;
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
@@ -2197,13 +2223,17 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
{
if (vma->gpuva.flags & XE_VMA_PTE_1G)
return SZ_1G;
- else if (vma->gpuva.flags & XE_VMA_PTE_2M)
+ else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
return SZ_2M;
+ else if (vma->gpuva.flags & XE_VMA_PTE_64K)
+ return SZ_64K;
+ else if (vma->gpuva.flags & XE_VMA_PTE_4K)
+ return SZ_4K;
- return SZ_4K;
+	return SZ_1G;	/* Uninitialized, use max size */
}
-static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
+static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
{
switch (size) {
case SZ_1G:
@@ -2212,9 +2242,13 @@ static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
case SZ_2M:
vma->gpuva.flags |= XE_VMA_PTE_2M;
break;
+ case SZ_64K:
+ vma->gpuva.flags |= XE_VMA_PTE_64K;
+ break;
+ case SZ_4K:
+ vma->gpuva.flags |= XE_VMA_PTE_4K;
+ break;
}
-
- return SZ_4K;
}
static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
@@ -2312,8 +2346,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
- flags |= op->map.read_only ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
flags |= op->map.is_null ?
VMA_CREATE_FLAG_IS_NULL : 0;
@@ -2444,7 +2476,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
op->syncs, op->num_syncs,
- op->map.immediate || !xe_vm_in_fault_mode(vm),
+ !xe_vm_in_fault_mode(vm),
op->flags & XE_VMA_OP_FIRST,
op->flags & XE_VMA_OP_LAST);
break;
@@ -2530,13 +2562,25 @@ retry_userptr:
}
drm_exec_fini(&exec);
- if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
+ if (err == -EAGAIN) {
lockdep_assert_held_write(&vm->lock);
- err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
- if (!err)
- goto retry_userptr;
- trace_xe_vma_fail(vma);
+ if (op->base.op == DRM_GPUVA_OP_REMAP) {
+ if (!op->remap.unmap_done)
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ else if (op->remap.prev)
+ vma = op->remap.prev;
+ else
+ vma = op->remap.next;
+ }
+
+ if (xe_vma_is_userptr(vma)) {
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+ if (!err)
+ goto retry_userptr;
+
+ trace_xe_vma_fail(vma);
+ }
}
return err;
@@ -2548,13 +2592,6 @@ static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
lockdep_assert_held_write(&vm->lock);
-#ifdef TEST_VM_ASYNC_OPS_ERROR
- if (op->inject_error) {
- op->inject_error = false;
- return -ENOMEM;
- }
-#endif
-
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
ret = __xe_vma_op_execute(vm, op->map.vma, op);
@@ -2669,7 +2706,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
{
int i;
- for (i = num_ops_list - 1; i; ++i) {
+ for (i = num_ops_list - 1; i >= 0; --i) {
struct drm_gpuva_ops *__ops = ops[i];
struct drm_gpuva_op *__op;
@@ -2714,21 +2751,11 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
return 0;
}
-#ifdef TEST_VM_ASYNC_OPS_ERROR
-#define SUPPORTED_FLAGS \
- (FORCE_ASYNC_OP_ERROR | DRM_XE_VM_BIND_FLAG_READONLY | \
- DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | 0xffff)
-#else
-#define SUPPORTED_FLAGS \
- (DRM_XE_VM_BIND_FLAG_READONLY | \
- DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL | \
- 0xffff)
-#endif
+#define SUPPORTED_FLAGS (DRM_XE_VM_BIND_FLAG_NULL | \
+ DRM_XE_VM_BIND_FLAG_DUMPABLE)
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
-#define MAX_BINDS 512 /* FIXME: Picking random upper limit */
-
static int vm_bind_ioctl_check_args(struct xe_device *xe,
struct drm_xe_vm_bind *args,
struct drm_xe_vm_bind_op **bind_ops)
@@ -2740,16 +2767,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->extensions) ||
- XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS))
+ if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
if (args->num_binds > 1) {
u64 __user *bind_user =
u64_to_user_ptr(args->vector_of_binds);
- *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) *
- args->num_binds, GFP_KERNEL);
+ *bind_ops = kvmalloc_array(args->num_binds,
+ sizeof(struct drm_xe_vm_bind_op),
+ GFP_KERNEL | __GFP_ACCOUNT);
if (!*bind_ops)
return -ENOMEM;
@@ -2839,7 +2866,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
free_bind_ops:
if (args->num_binds > 1)
- kfree(*bind_ops);
+ kvfree(*bind_ops);
return err;
}
@@ -2927,13 +2954,15 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
if (args->num_binds) {
- bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
+ bos = kvcalloc(args->num_binds, sizeof(*bos),
+ GFP_KERNEL | __GFP_ACCOUNT);
if (!bos) {
err = -ENOMEM;
goto release_vm_lock;
}
- ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
+ ops = kvcalloc(args->num_binds, sizeof(*ops),
+ GFP_KERNEL | __GFP_ACCOUNT);
if (!ops) {
err = -ENOMEM;
goto release_vm_lock;
@@ -3074,10 +3103,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; bos && i < args->num_binds; ++i)
xe_bo_put(bos[i]);
- kfree(bos);
- kfree(ops);
+ kvfree(bos);
+ kvfree(ops);
if (args->num_binds > 1)
- kfree(bind_ops);
+ kvfree(bind_ops);
return err;
@@ -3101,10 +3130,10 @@ put_exec_queue:
if (q)
xe_exec_queue_put(q);
free_objs:
- kfree(bos);
- kfree(ops);
+ kvfree(bos);
+ kvfree(ops);
if (args->num_binds > 1)
- kfree(bind_ops);
+ kvfree(bind_ops);
return err;
}
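
Taken together, the allocation changes in this ioctl path follow one rule: arrays sized from user-supplied bind counts go through kvmalloc_array()/kvcalloc() with __GFP_ACCOUNT, so large requests can fall back to vmalloc and get charged to the caller's memory cgroup, and kvfree() releases the buffer whichever allocator backed it. A sketch with hypothetical names:

#include <linux/mm.h>
#include <linux/slab.h>

struct demo_bind_op {	/* hypothetical per-bind record */
	u64 addr;
	u64 range;
};

static struct demo_bind_op *demo_alloc_bind_ops(u32 num_binds)
{
	/* Overflow-checked; falls back to vmalloc for large counts. */
	return kvmalloc_array(num_binds, sizeof(struct demo_bind_op),
			      GFP_KERNEL | __GFP_ACCOUNT);
}

static void demo_free_bind_ops(struct demo_bind_op *ops)
{
	kvfree(ops);	/* correct for both kmalloc and vmalloc backing */
}
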
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 1fec66ae2eb2..7300eea5394b 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -19,11 +19,9 @@
struct xe_bo;
struct xe_sync_entry;
+struct xe_user_fence;
struct xe_vm;
-#define TEST_VM_ASYNC_OPS_ERROR
-#define FORCE_ASYNC_OP_ERROR BIT(31)
-
#define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2)
@@ -32,6 +30,8 @@ struct xe_vm;
#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
+#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8)
+#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9)
/** struct xe_userptr - User pointer */
struct xe_userptr {
@@ -105,6 +105,12 @@ struct xe_vma {
* @pat_index: The pat index to use when encoding the PTEs for this vma.
*/
u16 pat_index;
+
+ /**
+ * @ufence: The user fence that was provided with MAP.
+ * Needs to be signalled before UNMAP can be processed.
+ */
+ struct xe_user_fence *ufence;
};
/**
@@ -289,10 +295,6 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
- /** @immediate: Immediate bind */
- bool immediate;
- /** @read_only: Read only */
- bool read_only;
/** @is_null: is NULL binding */
bool is_null;
/** @pat_index: The pat index to use for this operation. */
@@ -360,11 +362,6 @@ struct xe_vma_op {
/** @flags: operation flags */
enum xe_vma_op_flags flags;
-#ifdef TEST_VM_ASYNC_OPS_ERROR
- /** @inject_error: inject error to test async op error handling */
- bool inject_error;
-#endif
-
union {
/** @map: VMA map operation specific data */
struct xe_vma_op_map map;
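
The @ufence field documented above is managed by the xe_vm.c hunks earlier in this patch with a simple get/put discipline. Condensed into a sketch (all demo_* names hypothetical, standing in for the xe_sync_ufence_* helpers): a bind takes over the reference on the fence supplied with MAP, and an unbind refuses to proceed until that fence has signalled, then drops the reference.

#include <linux/errno.h>

struct demo_ufence;				/* refcounted user fence */
struct demo_vma { struct demo_ufence *ufence; };

int demo_ufence_signalled(struct demo_ufence *f);	/* stand-ins for the */
void demo_ufence_put(struct demo_ufence *f);		/* xe_sync helpers */

/* Bind: take over (or keep) the reference on the fence supplied with MAP. */
static void demo_bind_attach(struct demo_vma *vma, struct demo_ufence *ufence)
{
	if (vma->ufence && ufence)
		demo_ufence_put(vma->ufence);
	vma->ufence = ufence ?: vma->ufence;
}

/* Unbind: refuse until the MAP fence signalled, then drop the reference. */
static int demo_unbind_check(struct demo_vma *vma)
{
	struct demo_ufence *f = vma->ufence;

	if (f) {
		if (!demo_ufence_signalled(f))
			return -EBUSY;
		vma->ufence = NULL;
		demo_ufence_put(f);
	}
	return 0;
}
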
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 42fd504abbcd..89983d7d73ca 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -169,6 +169,7 @@ static const struct host1x_info host1x06_info = {
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
.reserve_vblank_syncpts = false,
+ .skip_reset_assert = true,
};
static const struct host1x_sid_entry tegra194_sid_table[] = {
@@ -680,13 +681,15 @@ static int __maybe_unused host1x_runtime_suspend(struct device *dev)
host1x_intr_stop(host);
host1x_syncpt_save(host);
- err = reset_control_bulk_assert(host->nresets, host->resets);
- if (err) {
- dev_err(dev, "failed to assert reset: %d\n", err);
- goto resume_host1x;
- }
+ if (!host->info->skip_reset_assert) {
+ err = reset_control_bulk_assert(host->nresets, host->resets);
+ if (err) {
+ dev_err(dev, "failed to assert reset: %d\n", err);
+ goto resume_host1x;
+ }
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000);
+ }
clk_disable_unprepare(host->clk);
reset_control_bulk_release(host->nresets, host->resets);
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index c8e302de7625..925a118db23f 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -116,6 +116,12 @@ struct host1x_info {
* the display driver disables VBLANK increments.
*/
bool reserve_vblank_syncpts;
+ /*
+ * On Tegra186, secure world applications may require access to
+	 * host1x during suspend/resume. To allow this, we need to keep
+	 * host1x out of reset.
+ */
+ bool skip_reset_assert;
};
struct host1x {
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index 470ae2c29c94..e630caf644e8 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -176,9 +176,9 @@ __bpf_kfunc_end_defs();
* The following set contains all functions we agree BPF programs
* can use.
*/
-BTF_SET8_START(hid_bpf_kfunc_ids)
+BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
-BTF_SET8_END(hid_bpf_kfunc_ids)
+BTF_KFUNCS_END(hid_bpf_kfunc_ids)
static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
.owner = THIS_MODULE,
@@ -487,12 +487,12 @@ static const struct btf_kfunc_id_set hid_bpf_fmodret_set = {
};
/* for syscall HID-BPF */
-BTF_SET8_START(hid_bpf_syscall_kfunc_ids)
+BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_attach_prog)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
-BTF_SET8_END(hid_bpf_syscall_kfunc_ids)
+BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)
static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
.owner = THIS_MODULE,
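
These hunks are a mechanical rename: BTF_SET8_START/END becomes BTF_KFUNCS_START/END for kfunc ID sets. A minimal registration sketch with a hypothetical kfunc (the macros and register_btf_kfunc_id_set() are the real kernel API; everything named demo_* is made up):

#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>

__bpf_kfunc_start_defs();

__bpf_kfunc int demo_add_one(int x)
{
	return x + 1;
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(demo_kfunc_ids)
BTF_ID_FLAGS(func, demo_add_one)
BTF_KFUNCS_END(demo_kfunc_ids)

static const struct btf_kfunc_id_set demo_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &demo_kfunc_ids,
};

/* from module init, e.g.:
 *	register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &demo_kfunc_set);
 */
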
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 6ef0c88e3e60..d2f3f234f29d 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -203,6 +203,8 @@ struct hidpp_device {
struct hidpp_scroll_counter vertical_wheel_counter;
u8 wireless_feature_index;
+
+ bool connected_once;
};
/* HID++ 1.0 error codes */
@@ -988,8 +990,13 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
hidpp->protocol_minor = response.rap.params[1];
print_version:
- hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
+ if (!hidpp->connected_once) {
+ hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+ hidpp->protocol_major, hidpp->protocol_minor);
+ hidpp->connected_once = true;
+	} else {
+		hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+			hidpp->protocol_major, hidpp->protocol_minor);
+	}
return 0;
}
@@ -4184,7 +4191,7 @@ static void hidpp_connect_event(struct work_struct *work)
/* Get device version to check if it is connected */
ret = hidpp_root_get_protocol_version(hidpp);
if (ret) {
- hid_info(hidpp->hid_dev, "Disconnected\n");
+ hid_dbg(hidpp->hid_dev, "Disconnected\n");
if (hidpp->battery.ps) {
hidpp->battery.online = false;
hidpp->battery.status = POWER_SUPPLY_STATUS_UNKNOWN;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index fd5b0637dad6..3e91e4d6ba6f 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -2153,6 +2153,10 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xcddc) },
+
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_SYNAPTICS, 0xce08) },
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index aa6cb033bb06..03d5601ce807 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -722,6 +722,8 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags);
list_for_each_entry(cl, &ishtp_dev->cl_list, link) {
cl->state = ISHTP_CL_DISCONNECTED;
+ if (warm_reset && cl->device->reference_count)
+ continue;
/*
* Wake any pending process. The waiter would check dev->state
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 82c907f01bd3..8a7f2f6a4f86 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -49,7 +49,9 @@ static void ishtp_read_list_flush(struct ishtp_cl *cl)
list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
list_del(&rb->list);
- ishtp_io_rb_free(rb);
+ spin_lock(&cl->free_list_spinlock);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock(&cl->free_list_spinlock);
}
spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index b613f11ed949..2bc45b24075c 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2087,7 +2087,7 @@ static int wacom_allocate_inputs(struct wacom *wacom)
return 0;
}
-static int wacom_register_inputs(struct wacom *wacom)
+static int wacom_setup_inputs(struct wacom *wacom)
{
struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
@@ -2106,10 +2106,6 @@ static int wacom_register_inputs(struct wacom *wacom)
input_free_device(pen_input_dev);
wacom_wac->pen_input = NULL;
pen_input_dev = NULL;
- } else {
- error = input_register_device(pen_input_dev);
- if (error)
- goto fail;
}
error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac);
@@ -2118,10 +2114,6 @@ static int wacom_register_inputs(struct wacom *wacom)
input_free_device(touch_input_dev);
wacom_wac->touch_input = NULL;
touch_input_dev = NULL;
- } else {
- error = input_register_device(touch_input_dev);
- if (error)
- goto fail;
}
error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
@@ -2130,7 +2122,34 @@ static int wacom_register_inputs(struct wacom *wacom)
input_free_device(pad_input_dev);
wacom_wac->pad_input = NULL;
pad_input_dev = NULL;
- } else {
+ }
+
+ return 0;
+}
+
+static int wacom_register_inputs(struct wacom *wacom)
+{
+ struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
+ struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+ int error = 0;
+
+ pen_input_dev = wacom_wac->pen_input;
+ touch_input_dev = wacom_wac->touch_input;
+ pad_input_dev = wacom_wac->pad_input;
+
+ if (pen_input_dev) {
+ error = input_register_device(pen_input_dev);
+ if (error)
+ goto fail;
+ }
+
+ if (touch_input_dev) {
+ error = input_register_device(touch_input_dev);
+ if (error)
+ goto fail;
+ }
+
+ if (pad_input_dev) {
error = input_register_device(pad_input_dev);
if (error)
goto fail;
@@ -2383,6 +2402,20 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (error)
goto fail;
+ error = wacom_setup_inputs(wacom);
+ if (error)
+ goto fail;
+
+ if (features->type == HID_GENERIC)
+ connect_mask |= HID_CONNECT_DRIVER;
+
+ /* Regular HID work starts now */
+ error = hid_hw_start(hdev, connect_mask);
+ if (error) {
+ hid_err(hdev, "hw start failed\n");
+ goto fail;
+ }
+
error = wacom_register_inputs(wacom);
if (error)
goto fail;
@@ -2397,16 +2430,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
goto fail;
}
- if (features->type == HID_GENERIC)
- connect_mask |= HID_CONNECT_DRIVER;
-
- /* Regular HID work starts now */
- error = hid_hw_start(hdev, connect_mask);
- if (error) {
- hid_err(hdev, "hw start failed\n");
- goto fail;
- }
-
if (!wireless) {
/* Note that if query fails it is not a hard failure */
wacom_query_tablet_data(wacom);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index da8a01fedd39..fbe10fbc5769 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2575,7 +2575,14 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
wacom_wac->hid_data.tipswitch);
input_report_key(input, wacom_wac->tool[0], sense);
if (wacom_wac->serial[0]) {
- input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
+ /*
+ * xf86-input-wacom does not accept a serial number
+ * of '0'. Report the low 32 bits if possible, but
+ * if they are zero, report the upper ones instead.
+ */
+ __u32 serial_lo = wacom_wac->serial[0] & 0xFFFFFFFFu;
+ __u32 serial_hi = wacom_wac->serial[0] >> 32;
+ input_event(input, EV_MSC, MSC_SERIAL, (int)(serial_lo ? serial_lo : serial_hi));
input_report_abs(input, ABS_MISC, sense ? id : 0);
}
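
The fallback above is plain bit arithmetic; a standalone illustration with a made-up serial number:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t serial = 0x1abc00000000ULL;	/* low 32 bits are zero */
	uint32_t lo = serial & 0xFFFFFFFFu;
	uint32_t hi = serial >> 32;

	/* report the low word unless it is zero, else the high word */
	printf("reported serial: 0x%x\n", lo ? lo : hi);	/* 0x1abc */
	return 0;
}
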
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 56f7e06c673e..adbf674355b2 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -322,125 +322,89 @@ static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;
- /* do we need a gpadl body msg */
pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
sizeof(struct vmbus_channel_gpadl_header) -
sizeof(struct gpa_range);
+ pfncount = umin(pagecount, pfnsize / sizeof(u64));
+
+ msgsize = sizeof(struct vmbus_channel_msginfo) +
+ sizeof(struct vmbus_channel_gpadl_header) +
+ sizeof(struct gpa_range) + pfncount * sizeof(u64);
+ msgheader = kzalloc(msgsize, GFP_KERNEL);
+ if (!msgheader)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&msgheader->submsglist);
+ msgheader->msgsize = msgsize;
+
+ gpadl_header = (struct vmbus_channel_gpadl_header *)
+ msgheader->msg;
+ gpadl_header->rangecount = 1;
+ gpadl_header->range_buflen = sizeof(struct gpa_range) +
+ pagecount * sizeof(u64);
+ gpadl_header->range[0].byte_offset = 0;
+ gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
+ for (i = 0; i < pfncount; i++)
+ gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
+ type, kbuffer, size, send_offset, i);
+ *msginfo = msgheader;
+
+ pfnsum = pfncount;
+ pfnleft = pagecount - pfncount;
+
+ /* how many pfns can we fit in a body message */
+ pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
+ sizeof(struct vmbus_channel_gpadl_body);
pfncount = pfnsize / sizeof(u64);
- if (pagecount > pfncount) {
- /* we need a gpadl body */
- /* fill in the header */
+ /*
+ * If pfnleft is zero, everything fits in the header and no body
+	 * messages are needed.
+ */
+ while (pfnleft) {
+ pfncurr = umin(pfncount, pfnleft);
msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_header) +
- sizeof(struct gpa_range) + pfncount * sizeof(u64);
- msgheader = kzalloc(msgsize, GFP_KERNEL);
- if (!msgheader)
- goto nomem;
-
- INIT_LIST_HEAD(&msgheader->submsglist);
- msgheader->msgsize = msgsize;
-
- gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->msg;
- gpadl_header->rangecount = 1;
- gpadl_header->range_buflen = sizeof(struct gpa_range) +
- pagecount * sizeof(u64);
- gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
- for (i = 0; i < pfncount; i++)
- gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
- type, kbuffer, size, send_offset, i);
- *msginfo = msgheader;
-
- pfnsum = pfncount;
- pfnleft = pagecount - pfncount;
-
- /* how many pfns can we fit */
- pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
- sizeof(struct vmbus_channel_gpadl_body);
- pfncount = pfnsize / sizeof(u64);
-
- /* fill in the body */
- while (pfnleft) {
- if (pfnleft > pfncount)
- pfncurr = pfncount;
- else
- pfncurr = pfnleft;
-
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_body) +
- pfncurr * sizeof(u64);
- msgbody = kzalloc(msgsize, GFP_KERNEL);
-
- if (!msgbody) {
- struct vmbus_channel_msginfo *pos = NULL;
- struct vmbus_channel_msginfo *tmp = NULL;
- /*
- * Free up all the allocated messages.
- */
- list_for_each_entry_safe(pos, tmp,
- &msgheader->submsglist,
- msglistentry) {
-
- list_del(&pos->msglistentry);
- kfree(pos);
- }
-
- goto nomem;
- }
-
- msgbody->msgsize = msgsize;
- gpadl_body =
- (struct vmbus_channel_gpadl_body *)msgbody->msg;
+ sizeof(struct vmbus_channel_gpadl_body) +
+ pfncurr * sizeof(u64);
+ msgbody = kzalloc(msgsize, GFP_KERNEL);
+ if (!msgbody) {
+ struct vmbus_channel_msginfo *pos = NULL;
+ struct vmbus_channel_msginfo *tmp = NULL;
/*
- * Gpadl is u32 and we are using a pointer which could
- * be 64-bit
- * This is governed by the guest/host protocol and
- * so the hypervisor guarantees that this is ok.
+ * Free up all the allocated messages.
*/
- for (i = 0; i < pfncurr; i++)
- gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
- kbuffer, size, send_offset, pfnsum + i);
-
- /* add to msg header */
- list_add_tail(&msgbody->msglistentry,
- &msgheader->submsglist);
- pfnsum += pfncurr;
- pfnleft -= pfncurr;
+ list_for_each_entry_safe(pos, tmp,
+ &msgheader->submsglist,
+ msglistentry) {
+
+ list_del(&pos->msglistentry);
+ kfree(pos);
+ }
+ kfree(msgheader);
+ return -ENOMEM;
}
- } else {
- /* everything fits in a header */
- msgsize = sizeof(struct vmbus_channel_msginfo) +
- sizeof(struct vmbus_channel_gpadl_header) +
- sizeof(struct gpa_range) + pagecount * sizeof(u64);
- msgheader = kzalloc(msgsize, GFP_KERNEL);
- if (msgheader == NULL)
- goto nomem;
-
- INIT_LIST_HEAD(&msgheader->submsglist);
- msgheader->msgsize = msgsize;
-
- gpadl_header = (struct vmbus_channel_gpadl_header *)
- msgheader->msg;
- gpadl_header->rangecount = 1;
- gpadl_header->range_buflen = sizeof(struct gpa_range) +
- pagecount * sizeof(u64);
- gpadl_header->range[0].byte_offset = 0;
- gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
- for (i = 0; i < pagecount; i++)
- gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
- type, kbuffer, size, send_offset, i);
-
- *msginfo = msgheader;
+
+ msgbody->msgsize = msgsize;
+ gpadl_body = (struct vmbus_channel_gpadl_body *)msgbody->msg;
+
+ /*
+ * Gpadl is u32 and we are using a pointer which could
+	 * be 64-bit.
+ * This is governed by the guest/host protocol and
+ * so the hypervisor guarantees that this is ok.
+ */
+ for (i = 0; i < pfncurr; i++)
+ gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
+ kbuffer, size, send_offset, pfnsum + i);
+
+ /* add to msg header */
+ list_add_tail(&msgbody->msglistentry, &msgheader->submsglist);
+ pfnsum += pfncurr;
+ pfnleft -= pfncurr;
}
return 0;
-nomem:
- kfree(msgheader);
- kfree(msgbody);
- return -ENOMEM;
}
/*
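
The restructured create_gpadl_header() is easier to follow with numbers: the header message carries as many PFNs as fit after the GPADL header and range structs, and the remainder spill into body messages. A standalone illustration of the split, with made-up struct sizes standing in for the real VMBus constants:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* All sizes below are made-up stand-ins for the VMBus constants. */
	const size_t max_msg = 240;			/* MAX_SIZE_CHANNEL_MESSAGE */
	const size_t hdr = 24, range = 16, body = 8;	/* struct sizes */
	const size_t pagecount = 64;			/* PFNs to describe */

	size_t hdr_pfns = (max_msg - hdr - range) / sizeof(uint64_t);
	size_t left = pagecount > hdr_pfns ? pagecount - hdr_pfns : 0;
	size_t body_pfns = (max_msg - body) / sizeof(uint64_t);
	size_t bodies = left ? (left + body_pfns - 1) / body_pfns : 0;

	/* prints: header carries 25 PFNs, 39 left -> 2 body message(s) */
	printf("header carries %zu PFNs, %zu left -> %zu body message(s)\n",
	       hdr_pfns, left, bodies);
	return 0;
}
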
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 42aec2c5606a..9c97c4065fe7 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -296,6 +296,11 @@ static struct {
spinlock_t lock;
} host_ts;
+static bool timesync_implicit;
+
+module_param(timesync_implicit, bool, 0644);
+MODULE_PARM_DESC(timesync_implicit, "If set, treat SAMPLE as SYNC when the clock is behind");
+
static inline u64 reftime_to_ns(u64 reftime)
{
return (reftime - WLTIMEDELTA) * 100;
@@ -345,6 +350,29 @@ static void hv_set_host_time(struct work_struct *work)
}
/*
+ * Due to a bug on Hyper-V hosts, the sync flag may not always be sent on resume.
+ * Force a sync if the guest is behind.
+ */
+static inline bool hv_implicit_sync(u64 host_time)
+{
+ struct timespec64 new_ts;
+ struct timespec64 threshold_ts;
+
+ new_ts = ns_to_timespec64(reftime_to_ns(host_time));
+ ktime_get_real_ts64(&threshold_ts);
+
+ threshold_ts.tv_sec += 5;
+
+ /*
+	 * True if the guest is behind the host by 5 or more seconds.
+ */
+ if (timespec64_compare(&new_ts, &threshold_ts) >= 0)
+ return true;
+
+ return false;
+}
+
+/*
* Synchronize time with host after reboot, restore, etc.
*
* ICTIMESYNCFLAG_SYNC flag bit indicates reboot, restore events of the VM.
@@ -384,7 +412,8 @@ static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
spin_unlock_irqrestore(&host_ts.lock, flags);
/* Schedule work to do do_settimeofday64() */
- if (adj_flags & ICTIMESYNCFLAG_SYNC)
+ if ((adj_flags & ICTIMESYNCFLAG_SYNC) ||
+ (timesync_implicit && hv_implicit_sync(host_ts.host_time)))
schedule_work(&adj_time_work);
}
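
The implicit-sync test is a simple threshold compare; a standalone illustration with made-up instants (whole seconds only, so the nanosecond tie-break done by timespec64_compare() never fires here):

#include <stdio.h>
#include <time.h>

int main(void)
{
	/* Made-up instants: the guest trails the host by 6 seconds. */
	struct timespec host_ts = { .tv_sec = 1000, .tv_nsec = 0 };
	struct timespec guest_now = { .tv_sec = 994, .tv_nsec = 0 };

	/* Same test as hv_implicit_sync(): host time >= guest time + 5s. */
	int sync = host_ts.tv_sec >= guest_now.tv_sec + 5;

	printf("force implicit sync: %s\n", sync ? "yes" : "no");	/* yes */
	return 0;
}
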
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b33d5abd9beb..7f7965f3d187 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -988,7 +988,7 @@ static const struct dev_pm_ops vmbus_pm = {
};
/* The one and only one */
-static struct bus_type hv_bus = {
+static const struct bus_type hv_bus = {
.name = "vmbus",
.match = vmbus_match,
.shutdown = vmbus_shutdown,
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index f6e1e55e8292..4acc1858d8ac 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -195,6 +195,8 @@ struct aspeed_pwm_tacho_data {
u8 fan_tach_ch_source[MAX_ASPEED_FAN_TACH_CHANNELS];
struct aspeed_cooling_device *cdev[8];
const struct attribute_group *groups[3];
+ /* protects access to shared ASPEED_PTCR_RESULT */
+ struct mutex tach_lock;
};
enum type { TYPEM, TYPEN, TYPEO };
@@ -529,6 +531,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
u8 fan_tach_ch_source, type, mode, both;
int ret;
+ mutex_lock(&priv->tach_lock);
+
regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0);
regmap_write(priv->regmap, ASPEED_PTCR_TRIGGER, 0x1 << fan_tach_ch);
@@ -546,6 +550,8 @@ static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv,
ASPEED_RPM_STATUS_SLEEP_USEC,
usec);
+ mutex_unlock(&priv->tach_lock);
+
/* return -ETIMEDOUT if we didn't get an answer. */
if (ret)
return ret;
@@ -915,6 +921,7 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ mutex_init(&priv->tach_lock);
priv->regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
&aspeed_pwm_tacho_regmap_config);
if (IS_ERR(priv->regmap))
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index ba82d1e79c13..b0991dde2e59 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
-#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */
+#define NUM_REAL_CORES 512 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
@@ -419,7 +419,7 @@ static ssize_t show_temp(struct device *dev,
}
static int create_core_attrs(struct temp_data *tdata, struct device *dev,
- int attr_no)
+ int index)
{
int i;
static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
@@ -431,13 +431,20 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
};
for (i = 0; i < tdata->attr_size; i++) {
+ /*
+ * We map the attr number to core id of the CPU
+ * The attr number is always core id + 2
+ * The Pkgtemp will always show up as temp1_*, if available
+ */
+ int attr_no = tdata->is_pkg_data ? 1 : tdata->cpu_core_id + 2;
+
snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH,
"temp%d_%s", attr_no, suffixes[i]);
sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
tdata->sd_attrs[i].dev_attr.attr.mode = 0444;
tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
- tdata->sd_attrs[i].index = attr_no;
+ tdata->sd_attrs[i].index = index;
tdata->attrs[i] = &tdata->sd_attrs[i].dev_attr.attr;
}
tdata->attr_group.attrs = tdata->attrs;
@@ -495,30 +502,25 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
struct platform_data *pdata = platform_get_drvdata(pdev);
struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 eax, edx;
- int err, index, attr_no;
+ int err, index;
if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
return 0;
/*
- * Find attr number for sysfs:
- * We map the attr number to core id of the CPU
- * The attr number is always core id + 2
- * The Pkgtemp will always show up as temp1_*, if available
+ * Get the index of tdata in pdata->core_data[]
+ * tdata for package: pdata->core_data[1]
+ * tdata for core: pdata->core_data[2] .. pdata->core_data[NUM_REAL_CORES + 1]
*/
if (pkg_flag) {
- attr_no = PKG_SYSFS_ATTR_NO;
+ index = PKG_SYSFS_ATTR_NO;
} else {
- index = ida_alloc(&pdata->ida, GFP_KERNEL);
+ index = ida_alloc_max(&pdata->ida, NUM_REAL_CORES - 1, GFP_KERNEL);
if (index < 0)
return index;
- pdata->cpu_map[index] = topology_core_id(cpu);
- attr_no = index + BASE_SYSFS_ATTR_NO;
- }
- if (attr_no > MAX_CORE_DATA - 1) {
- err = -ERANGE;
- goto ida_free;
+ pdata->cpu_map[index] = topology_core_id(cpu);
+ index += BASE_SYSFS_ATTR_NO;
}
tdata = init_temp_data(cpu, pkg_flag);
@@ -544,20 +546,20 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
if (get_ttarget(tdata, &pdev->dev) >= 0)
tdata->attr_size++;
- pdata->core_data[attr_no] = tdata;
+ pdata->core_data[index] = tdata;
/* Create sysfs interfaces */
- err = create_core_attrs(tdata, pdata->hwmon_dev, attr_no);
+ err = create_core_attrs(tdata, pdata->hwmon_dev, index);
if (err)
goto exit_free;
return 0;
exit_free:
- pdata->core_data[attr_no] = NULL;
+ pdata->core_data[index] = NULL;
kfree(tdata);
ida_free:
if (!pkg_flag)
- ida_free(&pdata->ida, index);
+ ida_free(&pdata->ida, index - BASE_SYSFS_ATTR_NO);
return err;
}
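
The rework separates two numberings the old code conflated: the sysfs attribute number, which stays keyed to the core id, and the core_data[] slot, now keyed to the IDA-allocated index. A standalone illustration with made-up values:

#include <stdio.h>

#define BASE_SYSFS_ATTR_NO 2	/* from the driver */

int main(void)
{
	int core_id = 5;	/* hypothetical: topology_core_id(cpu) */
	int ida_index = 0;	/* hypothetical: first ID from the IDA */

	/* sysfs name is keyed to the core id... */
	printf("sysfs attrs: temp%d_*\n", core_id + 2);	/* temp7_* */
	/* ...while the core_data[] slot is keyed to the IDA index. */
	printf("core_data slot: %d\n", ida_index + BASE_SYSFS_ATTR_NO);	/* 2 */
	return 0;
}
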
@@ -780,7 +782,7 @@ static int __init coretemp_init(void)
if (!x86_match_cpu(coretemp_ids))
return -ENODEV;
- max_zones = topology_max_packages() * topology_max_die_per_package();
+ max_zones = topology_max_packages() * topology_max_dies_per_package();
zone_devices = kcalloc(max_zones, sizeof(struct platform_device *),
GFP_KERNEL);
if (!zone_devices)
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 6307112c2c0c..9ed2c4b6734e 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -209,7 +209,7 @@ static ssize_t power1_average_show(struct device *dev,
* With the new x86 topology modelling, x86_max_cores is the
* compute unit number.
*/
- cu_num = boot_cpu_data.x86_max_cores;
+ cu_num = topology_num_cores_per_package();
ret = read_registers(data);
if (ret)
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index 8d2ef3145bca..9fbab8f02334 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -3512,6 +3512,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
const u16 *reg_temp_mon, *reg_temp_alternate, *reg_temp_crit;
const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
int num_reg_temp, num_reg_temp_mon, num_reg_tsi_temp;
+ int num_reg_temp_config;
struct device *hwmon_dev;
struct sensor_template_group tsi_temp_tg;
@@ -3594,6 +3595,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6106_REG_TEMP_CRIT;
reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
@@ -3669,6 +3671,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6106_REG_TEMP_OVER;
reg_temp_hyst = NCT6106_REG_TEMP_HYST;
reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6106_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6106_REG_TEMP_CRIT;
reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
@@ -3746,6 +3749,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6775_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6775_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6775_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6775_REG_TEMP_CRIT;
@@ -3821,6 +3825,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6775_REG_TEMP_OVER;
reg_temp_hyst = NCT6775_REG_TEMP_HYST;
reg_temp_config = NCT6776_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6776_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6776_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6776_REG_TEMP_CRIT;
@@ -3900,6 +3905,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6779_REG_TEMP_CRIT;
@@ -4034,6 +4040,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6779_REG_TEMP_OVER;
reg_temp_hyst = NCT6779_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6779_REG_TEMP_CRIT;
@@ -4123,6 +4130,7 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
reg_temp_over = NCT6798_REG_TEMP_OVER;
reg_temp_hyst = NCT6798_REG_TEMP_HYST;
reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ num_reg_temp_config = ARRAY_SIZE(NCT6779_REG_TEMP_CONFIG);
reg_temp_alternate = NCT6798_REG_TEMP_ALTERNATE;
reg_temp_crit = NCT6798_REG_TEMP_CRIT;
@@ -4204,7 +4212,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
= reg_temp_crit[src - 1];
if (reg_temp_crit_l && reg_temp_crit_l[i])
data->reg_temp[4][src - 1] = reg_temp_crit_l[i];
- data->reg_temp_config[src - 1] = reg_temp_config[i];
+ if (i < num_reg_temp_config)
+ data->reg_temp_config[src - 1] = reg_temp_config[i];
data->temp_src[src - 1] = src;
continue;
}
@@ -4217,7 +4226,8 @@ int nct6775_probe(struct device *dev, struct nct6775_data *data,
data->reg_temp[0][s] = reg_temp[i];
data->reg_temp[1][s] = reg_temp_over[i];
data->reg_temp[2][s] = reg_temp_hyst[i];
- data->reg_temp_config[s] = reg_temp_config[i];
+ if (i < num_reg_temp_config)
+ data->reg_temp_config[s] = reg_temp_config[i];
if (reg_temp_crit_h && reg_temp_crit_h[i])
data->reg_temp[3][s] = reg_temp_crit_h[i];
else if (reg_temp_crit[src - 1])
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 3757b9391e60..aa0ee8ecd6f2 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -90,10 +90,8 @@ obj-$(CONFIG_I2C_NPCM) += i2c-npcm7xx.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
obj-$(CONFIG_I2C_OMAP) += i2c-omap.o
obj-$(CONFIG_I2C_OWL) += i2c-owl.o
-i2c-pasemi-objs := i2c-pasemi-core.o i2c-pasemi-pci.o
-obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
-i2c-apple-objs := i2c-pasemi-core.o i2c-pasemi-platform.o
-obj-$(CONFIG_I2C_APPLE) += i2c-apple.o
+obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi-core.o i2c-pasemi-pci.o
+obj-$(CONFIG_I2C_APPLE) += i2c-pasemi-core.o i2c-pasemi-platform.o
obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 5511fd46a65e..ce8c4846b7fa 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -445,6 +445,7 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
irq_status);
irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
+ irq_handled = irq_status;
bus->cmd_err = ret;
bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
goto out_complete;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 3932e8d96a17..274e987e4cfa 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -498,11 +498,10 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
/* Set block buffer mode */
outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
- inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
-
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
outb_p(len, SMBHSTDAT0(priv));
+ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
for (i = 0; i < len; i++)
outb_p(data->block[i+1], SMBBLKDAT(priv));
}
@@ -520,6 +519,7 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
}
data->block[0] = len;
+ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
for (i = 0; i < len; i++)
data->block[i + 1] = inb_p(SMBBLKDAT(priv));
}
@@ -1416,7 +1416,6 @@ static void i801_add_mux(struct i801_priv *priv)
lookup->table[i] = GPIO_LOOKUP(mux_config->gpio_chip,
mux_config->gpios[i], "mux", 0);
gpiod_add_lookup_table(lookup);
- priv->lookup = lookup;
/*
* Register the mux device, we use PLATFORM_DEVID_NONE here
@@ -1430,7 +1429,10 @@ static void i801_add_mux(struct i801_priv *priv)
sizeof(struct i2c_mux_gpio_platform_data));
if (IS_ERR(priv->mux_pdev)) {
gpiod_remove_lookup_table(lookup);
+ devm_kfree(dev, lookup);
dev_err(dev, "Failed to register i2c-mux-gpio device\n");
+ } else {
+ priv->lookup = lookup;
}
}
@@ -1742,9 +1744,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
i801_enable_host_notify(&priv->adapter);
- i801_probe_optional_slaves(priv);
/* We ignore errors - multiplexing is optional */
i801_add_mux(priv);
+ i801_probe_optional_slaves(priv);
pci_set_drvdata(dev, priv);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 88a053987403..60e813137f84 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -803,6 +803,11 @@ static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx,
ctl &= ~I2CR_MTX;
imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+
+ /* flag the last byte as processed */
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_READ_PROCESSED, &value);
+
i2c_imx_slave_finish_op(i2c_imx);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c
index 7d54a9f34c74..bd8becbdeeb2 100644
--- a/drivers/i2c/busses/i2c-pasemi-core.c
+++ b/drivers/i2c/busses/i2c-pasemi-core.c
@@ -369,6 +369,7 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus)
return 0;
}
+EXPORT_SYMBOL_GPL(pasemi_i2c_common_probe);
irqreturn_t pasemi_irq_handler(int irq, void *dev_id)
{
@@ -378,3 +379,8 @@ irqreturn_t pasemi_irq_handler(int irq, void *dev_id)
complete(&smbus->irq_completion);
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(pasemi_irq_handler);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
+MODULE_DESCRIPTION("PA Semi PWRficient SMBus driver");
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 0d2e7171e3a6..da94df466e83 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -613,20 +613,20 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
peripheral.addr = msgs[i].addr;
+ ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
+ &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
+ if (ret)
+ goto err;
+
if (msgs[i].flags & I2C_M_RD) {
ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
&rx_addr, &rx_buf, I2C_READ, gi2c->rx_c);
if (ret)
goto err;
- }
-
- ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
- &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
- if (ret)
- goto err;
- if (msgs[i].flags & I2C_M_RD)
dma_async_issue_pending(gi2c->rx_c);
+ }
+
dma_async_issue_pending(gi2c->tx_c);
timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index ec2a8da134e5..198afee5233c 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -378,11 +378,15 @@ static int wmt_i2c_probe(struct platform_device *pdev)
err = i2c_add_adapter(adap);
if (err)
- return err;
+ goto err_disable_clk;
platform_set_drvdata(pdev, i2c_dev);
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(i2c_dev->clk);
+ return err;
}
static void wmt_i2c_remove(struct platform_device *pdev)
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 91adcac875a4..c9d7afe489e8 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -219,10 +219,12 @@ config BMA400
config BMA400_I2C
tristate
+ select REGMAP_I2C
depends on BMA400
config BMA400_SPI
tristate
+ select REGMAP_SPI
depends on BMA400
config BMC150_ACCEL
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index 90b7ae6d42b7..484fe2e9fb17 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -1429,9 +1429,11 @@ static int adxl367_verify_devid(struct adxl367_state *st)
unsigned int val;
int ret;
- ret = regmap_read_poll_timeout(st->regmap, ADXL367_REG_DEVID, val,
- val == ADXL367_DEVID_AD, 1000, 10000);
+ ret = regmap_read(st->regmap, ADXL367_REG_DEVID, &val);
if (ret)
+ return dev_err_probe(st->dev, ret, "Failed to read dev id\n");
+
+ if (val != ADXL367_DEVID_AD)
return dev_err_probe(st->dev, -ENODEV,
"Invalid dev id 0x%02X, expected 0x%02X\n",
val, ADXL367_DEVID_AD);
@@ -1510,6 +1512,8 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops,
if (ret)
return ret;
+ fsleep(15000);
+
ret = adxl367_verify_devid(st);
if (ret)
return ret;
diff --git a/drivers/iio/accel/adxl367_i2c.c b/drivers/iio/accel/adxl367_i2c.c
index b595fe94f3a3..62c74bdc0d77 100644
--- a/drivers/iio/accel/adxl367_i2c.c
+++ b/drivers/iio/accel/adxl367_i2c.c
@@ -11,7 +11,7 @@
#include "adxl367.h"
-#define ADXL367_I2C_FIFO_DATA 0x42
+#define ADXL367_I2C_FIFO_DATA 0x18
struct adxl367_i2c_state {
struct regmap *regmap;
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index feb86fe6c422..62490424b6ae 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -1821,7 +1821,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
{
struct device *dev = &st->spi->dev;
struct device_node *of_node = dev_of_node(dev);
- struct clk_init_data init;
+ struct clk_init_data init = {};
const char *clk_name;
int ret;
@@ -1891,10 +1891,14 @@ static int ad4130_setup(struct iio_dev *indio_dev)
return ret;
/*
- * Configure all GPIOs for output. If configured, the interrupt function
- * of P2 takes priority over the GPIO out function.
+ * Configure unused GPIOs for output. If configured, the interrupt
+ * function of P2 takes priority over the GPIO out function.
*/
- val = AD4130_IO_CONTROL_GPIO_CTRL_MASK;
+ val = 0;
+ for (i = 0; i < AD4130_MAX_GPIOS; i++)
+ if (st->pins_fn[i + AD4130_AIN2_P1] == AD4130_PIN_FN_NONE)
+ val |= FIELD_PREP(AD4130_IO_CONTROL_GPIO_CTRL_MASK, BIT(i));
+
val |= FIELD_PREP(AD4130_IO_CONTROL_INT_PIN_SEL_MASK, st->int_pin_sel);
ret = regmap_write(st->regmap, AD4130_IO_CONTROL_REG, val);
diff --git a/drivers/iio/adc/ad7091r8.c b/drivers/iio/adc/ad7091r8.c
index 57700f124803..700564305057 100644
--- a/drivers/iio/adc/ad7091r8.c
+++ b/drivers/iio/adc/ad7091r8.c
@@ -195,7 +195,7 @@ static int ad7091r8_gpio_setup(struct ad7091r_state *st)
st->reset_gpio = devm_gpiod_get_optional(st->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(st->reset_gpio))
- return dev_err_probe(st->dev, PTR_ERR(st->convst_gpio),
+ return dev_err_probe(st->dev, PTR_ERR(st->reset_gpio),
"Error on requesting reset GPIO\n");
if (st->reset_gpio) {
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 2de5494e7c22..b15b7a3b66d5 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -48,6 +48,18 @@ config HDC2010
To compile this driver as a module, choose M here: the module
will be called hdc2010.
+config HDC3020
+ tristate "TI HDC3020 relative humidity and temperature sensor"
+ depends on I2C
+ select CRC8
+ help
+ Say yes here to build support for the Texas Instruments
+ HDC3020, HDC3021 and HDC3022 relative humidity and temperature
+ sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hdc3020.
+
config HID_SENSOR_HUMIDITY
tristate "HID Environmental humidity sensor"
depends on HID_SENSOR_HUB
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index f19ff3de97c5..5fbeef299f61 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_HDC100X) += hdc100x.o
obj-$(CONFIG_HDC2010) += hdc2010.o
+obj-$(CONFIG_HDC3020) += hdc3020.o
obj-$(CONFIG_HID_SENSOR_HUMIDITY) += hid-sensor-humidity.o
hts221-y := hts221_core.o \
diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c
index 4e3311170725..ed70415512f6 100644
--- a/drivers/iio/humidity/hdc3020.c
+++ b/drivers/iio/humidity/hdc3020.c
@@ -322,7 +322,7 @@ static int hdc3020_read_raw(struct iio_dev *indio_dev,
if (chan->type != IIO_TEMP)
return -EINVAL;
- *val = 16852;
+ *val = -16852;
return IIO_VAL_INT;
default:
diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig
index 83e53acfbe88..c7f5866a177d 100644
--- a/drivers/iio/imu/bno055/Kconfig
+++ b/drivers/iio/imu/bno055/Kconfig
@@ -8,6 +8,7 @@ config BOSCH_BNO055
config BOSCH_BNO055_SERIAL
tristate "Bosch BNO055 attached via UART"
depends on SERIAL_DEV_BUS
+ select REGMAP
select BOSCH_BNO055
help
Enable this to support Bosch BNO055 IMUs attached via UART.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 66d4ba088e70..d4f9b5d8d28d 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -109,6 +109,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
/* compute and process only all complete datum */
nb = fifo_count / bytes_per_datum;
fifo_count = nb * bytes_per_datum;
+ if (nb == 0)
+ goto end_session;
/* Each FIFO data contains all sensors, so same number for FIFO and sensor data */
fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
inv_sensors_timestamp_interrupt(&st->timestamp, fifo_period, nb, nb, pf->timestamp);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 676704f9151f..e6e6e94452a3 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -111,6 +111,7 @@ int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable)
if (enable) {
/* reset timestamping */
inv_sensors_timestamp_reset(&st->timestamp);
+ inv_sensors_timestamp_apply_odr(&st->timestamp, 0, 0, 0);
/* reset FIFO */
d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST;
ret = regmap_write(st->map, st->reg->user_ctrl, d);
@@ -184,6 +185,10 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (result)
goto error_power_off;
} else {
+ st->chip_config.gyro_fifo_enable = 0;
+ st->chip_config.accl_fifo_enable = 0;
+ st->chip_config.temp_fifo_enable = 0;
+ st->chip_config.magn_fifo_enable = 0;
result = inv_mpu6050_prepare_fifo(st, false);
if (result)
goto error_power_off;
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 9a85752124dd..173dc00762a1 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1584,10 +1584,13 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
ret = iio_device_register_sysfs_group(indio_dev,
&iio_dev_opaque->chan_attr_group);
if (ret)
- goto error_clear_attrs;
+ goto error_free_chan_attrs;
return 0;
+error_free_chan_attrs:
+ kfree(iio_dev_opaque->chan_attr_group.attrs);
+ iio_dev_opaque->chan_attr_group.attrs = NULL;
error_clear_attrs:
iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 5cd27f04b45e..b6c4bef2a7bb 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -226,6 +226,7 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
case HID_USAGE_SENSOR_TIME_TIMESTAMP:
als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
*(s64 *)raw_data);
+ ret = 0;
break;
default:
break;
diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c
index 69938204456f..42b70cd42b39 100644
--- a/drivers/iio/magnetometer/rm3100-core.c
+++ b/drivers/iio/magnetometer/rm3100-core.c
@@ -530,6 +530,7 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
struct rm3100_data *data;
unsigned int tmp;
int ret;
+ int samp_rate_index;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
@@ -586,9 +587,14 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq)
ret = regmap_read(regmap, RM3100_REG_TMRC, &tmp);
if (ret < 0)
return ret;
+
+ samp_rate_index = tmp - RM3100_TMRC_OFFSET;
+ if (samp_rate_index < 0 || samp_rate_index >= RM3100_SAMP_NUM) {
+ dev_err(dev, "The value read from RM3100_REG_TMRC is invalid!\n");
+ return -EINVAL;
+ }
/* Initializing max wait time, which is double conversion time. */
- data->conversion_time = rm3100_samp_rates[tmp - RM3100_TMRC_OFFSET][2]
- * 2;
+ data->conversion_time = rm3100_samp_rates[samp_rate_index][2] * 2;
/* Cycle count values may not be what we want. */
if ((tmp - RM3100_TMRC_OFFSET) == 0)
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 433d6fac83c4..a444d4b2978b 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -4,6 +4,7 @@
*
* Inspired by the older BMP085 driver drivers/misc/bmp085-spi.c
*/
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/err.h>
@@ -35,6 +36,34 @@ static int bmp280_regmap_spi_read(void *context, const void *reg,
return spi_write_then_read(spi, reg, reg_size, val, val_size);
}
+static int bmp380_regmap_spi_read(void *context, const void *reg,
+ size_t reg_size, void *val, size_t val_size)
+{
+ struct spi_device *spi = to_spi_device(context);
+ u8 rx_buf[4];
+ ssize_t status;
+
+ /*
+ * Maximum number of consecutive bytes read for a temperature or
+ * pressure measurement is 3.
+ */
+ if (val_size > 3)
+ return -EINVAL;
+
+ /*
+	 * According to the BMP3xx datasheets, for a basic SPI read operation,
+ * the first byte needs to be dropped and the rest are the requested
+ * data.
+ */
+ status = spi_write_then_read(spi, reg, 1, rx_buf, val_size + 1);
+ if (status)
+ return status;
+
+ memcpy(val, rx_buf + 1, val_size);
+
+ return 0;
+}
+
static struct regmap_bus bmp280_regmap_bus = {
.write = bmp280_regmap_spi_write,
.read = bmp280_regmap_spi_read,
@@ -42,10 +71,19 @@ static struct regmap_bus bmp280_regmap_bus = {
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
+static struct regmap_bus bmp380_regmap_bus = {
+ .write = bmp280_regmap_spi_write,
+ .read = bmp380_regmap_spi_read,
+ .read_flag_mask = BIT(7),
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
static int bmp280_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
const struct bmp280_chip_info *chip_info;
+ struct regmap_bus *bmp_regmap_bus;
struct regmap *regmap;
int ret;
@@ -58,8 +96,18 @@ static int bmp280_spi_probe(struct spi_device *spi)
chip_info = spi_get_device_match_data(spi);
+ switch (chip_info->chip_id[0]) {
+ case BMP380_CHIP_ID:
+ case BMP390_CHIP_ID:
+ bmp_regmap_bus = &bmp380_regmap_bus;
+ break;
+ default:
+ bmp_regmap_bus = &bmp280_regmap_bus;
+ break;
+ }
+
regmap = devm_regmap_init(&spi->dev,
- &bmp280_regmap_bus,
+ bmp_regmap_bus,
&spi->dev,
chip_info->regmap_config);
if (IS_ERR(regmap)) {
@@ -87,6 +135,7 @@ static const struct of_device_id bmp280_of_spi_match[] = {
MODULE_DEVICE_TABLE(of, bmp280_of_spi_match);
static const struct spi_device_id bmp280_spi_id[] = {
+ { "bmp085", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp180", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp181", (kernel_ulong_t)&bmp180_chip_info },
{ "bmp280", (kernel_ulong_t)&bmp280_chip_info },
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
index 28c8269ba65d..0bba4c5a8d40 100644
--- a/drivers/iio/pressure/dlhl60d.c
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -250,18 +250,17 @@ static irqreturn_t dlh_trigger_handler(int irq, void *private)
struct dlh_state *st = iio_priv(indio_dev);
int ret;
unsigned int chn, i = 0;
- __be32 tmp_buf[2];
+ __be32 tmp_buf[2] = { };
ret = dlh_start_capture_and_read(st);
if (ret)
goto out;
for_each_set_bit(chn, indio_dev->active_scan_mask,
- indio_dev->masklength) {
- memcpy(tmp_buf + i,
+ indio_dev->masklength) {
+ memcpy(&tmp_buf[i++],
&st->rx_buf[1] + chn * DLH_NUM_DATA_BYTES,
DLH_NUM_DATA_BYTES);
- i++;
}
iio_push_to_buffers(indio_dev, tmp_buf);
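
The one-line initializer matters because the loop may fill only some slots; the rest would otherwise reach the IIO buffer as stack garbage. A standalone sketch of the pattern (sizes and names illustrative):

#include <stdint.h>
#include <string.h>

/*
 * Zero-initialize the scratch buffer so any slots the copy loop skips
 * carry zeros rather than stack garbage — the point of "= { }" above.
 */
void fill_scan(uint32_t out[2], const uint8_t *rx, unsigned long mask)
{
        uint32_t tmp[2] = { 0 };        /* like __be32 tmp_buf[2] = { }; */
        unsigned int chn, i = 0;

        for (chn = 0; chn < 2; chn++) {
                if (!(mask & (1UL << chn)))
                        continue;
                memcpy(&tmp[i++], rx + 1 + chn * 3, 3); /* 3 data bytes */
        }
        memcpy(out, tmp, sizeof(tmp));
}
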
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 824349659d69..ce9c5bae83bf 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -401,6 +401,10 @@ static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
struct bnxt_re_fence_data *fence = &pd->fence;
struct ib_mr *ib_mr = &fence->mr->ib_mr;
struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+ struct bnxt_re_dev *rdev = pd->rdev;
+
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
memset(wqe, 0, sizeof(*wqe));
wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
@@ -455,6 +459,9 @@ static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
struct device *dev = &rdev->en_dev->pdev->dev;
struct bnxt_re_mr *mr = fence->mr;
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
if (fence->mw) {
bnxt_re_dealloc_mw(fence->mw);
fence->mw = NULL;
@@ -486,6 +493,9 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
struct ib_mw *mw;
int rc;
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
DMA_BIDIRECTIONAL);
rc = dma_mapping_error(dev, dma_addr);
@@ -1817,7 +1827,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
switch (srq_attr_mask) {
case IB_SRQ_MAX_WR:
/* SRQ resize is not supported */
- break;
+ return -EINVAL;
case IB_SRQ_LIMIT:
/* Change the SRQ threshold */
if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
@@ -1832,13 +1842,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
/* On success, update the shadow */
srq->srq_limit = srq_attr->srq_limit;
/* No need to Build and send response back to udata */
- break;
+ return 0;
default:
ibdev_err(&rdev->ibdev,
"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
return -EINVAL;
}
- return 0;
}
int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
@@ -2556,11 +2565,6 @@ static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
- /* Need unconditional fence for local invalidate
- * opcode to work as expected.
- */
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
if (wr->send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
if (wr->send_flags & IB_SEND_SOLICITED)
@@ -2583,12 +2587,6 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
wqe->frmr.levels = qplib_frpl->hwq.level;
wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
- /* Need unconditional fence for reg_mr
- * opcode to function as expected.
- */
-
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
if (wr->wr.send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
@@ -2719,6 +2717,18 @@ bad:
return rc;
}
+static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
+{
+ /* Need unconditional fence for non-wire memory opcodes
+ * to work as expected.
+ */
+ if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+}
+
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
{
@@ -2798,8 +2808,11 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
rc = -EINVAL;
goto bad;
}
- if (!rc)
+ if (!rc) {
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_re_legacy_set_uc_fence(&wqe);
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ }
bad:
if (rc) {
ibdev_err(&qp->rdev->ibdev,
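
The fence logic moves from the individual WQE builders into one helper applied only on pre-P5 chips. A compilable sketch of that shape, with invented types standing in for the driver's:

#include <stdbool.h>

enum wqe_type { WQE_LOCAL_INV, WQE_FAST_REG_MR, WQE_REG_MR,
                WQE_BIND_MW, WQE_SEND };
#define FLAG_UC_FENCE (1u << 0)

struct wqe { enum wqe_type type; unsigned int flags; };

/* Set the unconditional fence only for non-wire memory opcodes. */
void legacy_set_uc_fence(struct wqe *wqe)
{
        switch (wqe->type) {
        case WQE_LOCAL_INV:
        case WQE_FAST_REG_MR:
        case WQE_REG_MR:
        case WQE_BIND_MW:
                wqe->flags |= FLAG_UC_FENCE;
                break;
        default:
                break;
        }
}

/* Call site mirrors the patch: apply only on older ("legacy") chips. */
void post_one(struct wqe *wqe, bool chip_is_gen_p5_p7)
{
        if (!chip_is_gen_p5_p7)
                legacy_set_uc_fence(wqe);
        /* ...queue the WQE... */
}
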
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index f022c922fae5..54b4d2f3a5d8 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -280,9 +280,6 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
{
-
- if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
- return;
rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
bnxt_re_set_resource_limits(rdev);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index c98e04fe2ddd..439d0c7c5d0c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -744,7 +744,8 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
- srq->threshold = le16_to_cpu(sb->srq_limit);
+ if (!rc)
+ srq->threshold = le16_to_cpu(sb->srq_limit);
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
sbuf.sb, sbuf.dma_addr);
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 68c621ff59d0..5a91cbda4aee 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -2086,7 +2086,7 @@ int init_credit_return(struct hfi1_devdata *dd)
"Unable to allocate credit return DMA range for NUMA %d\n",
i);
ret = -ENOMEM;
- goto done;
+ goto free_cr_base;
}
}
set_dev_node(&dd->pcidev->dev, dd->node);
@@ -2094,6 +2094,10 @@ int init_credit_return(struct hfi1_devdata *dd)
ret = 0;
done:
return ret;
+
+free_cr_base:
+ free_credit_return(dd);
+ goto done;
}
void free_credit_return(struct hfi1_devdata *dd)
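
The fix converts an early return that leaked already-allocated per-NUMA buffers into the usual goto-unwind shape, keeping the patch's somewhat unusual "goto done" after the cleanup label. A userspace sketch (allocation and names illustrative):

#include <stdlib.h>

/* On partial failure, jump to a label that frees what was already
 * allocated instead of returning with half-initialized state. */
int init_pools(int n, void **pools)
{
        int i, ret = 0;

        for (i = 0; i < n; i++) {
                pools[i] = malloc(4096);
                if (!pools[i]) {
                        ret = -1;
                        goto free_pools;        /* unwind, don't leak */
                }
        }
done:
        return ret;

free_pools:
        while (i--)
                free(pools[i]);
        goto done;
}
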
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 6e5ac2023328..b67d23b1f286 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -3158,7 +3158,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
int rval = 0;
- if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
+ if ((unlikely(tx->num_desc == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
__sdma_txclean(dd, tx);
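
The sdma fix is a plain off-by-one: extend only when the descriptor array is actually full. Sketched as a predicate (illustrative, not the driver code):

/* Extend the descriptor array only when count == limit,
 * not one entry early. */
static inline int need_extend(unsigned int num_desc, unsigned int limit)
{
        return num_desc == limit;       /* was: num_desc + 1 == limit */
}
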
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index 8fb752f2eda2..2cb4b96db721 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -346,6 +346,7 @@ enum irdma_cqp_op_type {
#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
#define IRDMA_AE_RESET_SENT 0x0601
#define IRDMA_AE_TERMINATE_SENT 0x0602
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index bd4b2b896444..ad50b77282f8 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -387,6 +387,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
@@ -570,6 +571,13 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
irq_update_affinity_hint(msix_vec->irq, NULL);
free_irq(msix_vec->irq, dev_id);
+ if (rf == dev_id) {
+ tasklet_kill(&rf->dpc_tasklet);
+ } else {
+ struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
+
+ tasklet_kill(&iwceq->dpc_tasklet);
+ }
}
/**
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index b5eb8d421988..0b046c061742 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -839,7 +839,9 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
- init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
+ init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
+ init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
+ init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta)
return -EINVAL;
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
@@ -2184,9 +2186,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
info.cq_base_pa = iwcq->kmem.pa;
}
- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
- info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
- (u32)IRDMA_MAX_CQ_READ_THRESH);
+ info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+ (u32)IRDMA_MAX_CQ_READ_THRESH);
if (irdma_sc_cq_init(cq, &info)) {
ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index f87531318feb..a78a067e3ce7 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -458,6 +458,12 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev));
for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
+ if ((i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID ||
+ i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP))
+ if (!MLX5_CAP_GEN(mdev, roce) ||
+ !MLX5_CAP_ROCE(mdev, roce_cc_general))
+ continue;
+
dbg_cc_params->params[i].offset = i;
dbg_cc_params->params[i].dev = dev;
dbg_cc_params->params[i].port_num = port_num;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 869369cb5b5f..253fea374a72 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2949,7 +2949,7 @@ DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
UVERBS_IDR_ANY_OBJECT,
- UVERBS_ACCESS_WRITE,
+ UVERBS_ACCESS_READ,
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(
MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index df1d1b0a3ef7..9947feb7fb8a 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -78,7 +78,7 @@ static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
*/
copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
left);
- memcpy(eseg->inline_hdr.start, pdata, copysz);
+ memcpy(eseg->inline_hdr.data, pdata, copysz);
stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
sizeof(eseg->inline_hdr.start) + copysz, 16);
*size += stride / 16;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 7887a6786ed4..f118ce0a9a61 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1879,8 +1879,17 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
/* RQ - read access only (0) */
rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
ureq.rq_len, true, 0, alloc_and_init);
- if (rc)
+ if (rc) {
+ ib_umem_release(qp->usq.umem);
+ qp->usq.umem = NULL;
+ if (rdma_protocol_roce(&dev->ibdev, 1)) {
+ qedr_free_pbl(dev, &qp->usq.pbl_info,
+ qp->usq.pbl_tbl);
+ } else {
+ kfree(qp->usq.pbl_tbl);
+ }
return rc;
+ }
}
memset(&in_params, 0, sizeof(in_params));
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7a5be705d718..6f2a688fccbf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1272,10 +1272,10 @@ static int ipoib_get_iflink(const struct net_device *dev)
/* parent interface */
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
- return dev->ifindex;
+ return READ_ONCE(dev->ifindex);
/* child/vlan interface */
- return priv->parent->ifindex;
+ return READ_ONCE(priv->parent->ifindex);
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
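
READ_ONCE() forces a single, untorn load of a field that can change under the reader. A rough userspace analogue using a relaxed C11 atomic load (struct and field invented for illustration):

#include <stdatomic.h>

struct netdev { _Atomic int ifindex; };

int get_iflink(struct netdev *dev)
{
        /* One explicit load; the compiler may not tear or repeat it. */
        return atomic_load_explicit(&dev->ifindex, memory_order_relaxed);
}
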
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 58f70cfec45a..040234c01be4 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -79,12 +79,16 @@ module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
"Shared receive queue (SRQ) size.");
+static int srpt_set_u64_x(const char *buffer, const struct kernel_param *kp)
+{
+ return kstrtou64(buffer, 16, (u64 *)kp->arg);
+}
static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}
-module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
- 0444);
+module_param_call(srpt_service_guid, srpt_set_u64_x, srpt_get_u64_x,
+ &srpt_service_guid, 0444);
MODULE_PARM_DESC(srpt_service_guid,
"Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
@@ -210,10 +214,12 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
/**
* srpt_qp_event - QP event callback function
* @event: Description of the event that occurred.
- * @ch: SRPT RDMA channel.
+ * @ptr: SRPT RDMA channel.
*/
-static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+static void srpt_qp_event(struct ib_event *event, void *ptr)
{
+ struct srpt_rdma_ch *ch = ptr;
+
pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
event->event, ch, ch->sess_name, ch->qp->qp_num,
get_ch_state_name(ch->state));
@@ -1807,8 +1813,7 @@ retry:
ch->cq_size = ch->rq_size + sq_size;
qp_init->qp_context = (void *)ch;
- qp_init->event_handler
- = (void(*)(struct ib_event *, void*))srpt_qp_event;
+ qp_init->event_handler = srpt_qp_event;
qp_init->send_cq = ch->cq;
qp_init->recv_cq = ch->cq;
qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
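
The patch adds a "set" hook so the parameter accepts hex input symmetrically with its hex "get". A userspace sketch of the parse step that kstrtou64(buffer, 16, ...) performs (names illustrative):

#include <stdint.h>
#include <stdlib.h>
#include <errno.h>

/* Parse a hex string into a u64, rejecting empty or malformed input. */
int set_u64_x(const char *buffer, uint64_t *out)
{
        char *end;

        errno = 0;
        uint64_t v = strtoull(buffer, &end, 16);
        if (errno || end == buffer)
                return -EINVAL;
        *out = v;
        return 0;
}
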
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 7c4b2a5cc1b5..14c828adebf7 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -130,7 +130,12 @@ static const struct xpad_device {
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
{ 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
{ 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
+ { 0x03f0, 0x038D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wired */
+ { 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */
{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
+ { 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE },
+ { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, /* v2 */
+ { 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -463,6 +468,7 @@ static const struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* Xbox USB-IF not-approved class */
XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 controller */
XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
+ XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index ba00ecfbd343..b41fd1240f43 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -315,12 +315,10 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
error = devm_gpio_request_one(dev, button->gpio,
flags, button->desc ? : DRV_NAME);
- if (error) {
- dev_err(dev,
- "unable to claim gpio %u, err=%d\n",
- button->gpio, error);
- return error;
- }
+ if (error)
+ return dev_err_probe(dev, error,
+ "unable to claim gpio %u\n",
+ button->gpio);
bdata->gpiod = gpio_to_desc(button->gpio);
if (!bdata->gpiod) {
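
dev_err_probe() folds the error log and the return into one statement and handles -EPROBE_DEFER quietly. A userspace analogue of that log-and-propagate shape (everything here is invented for illustration):

#include <stdio.h>

/* Analogue of dev_err_probe(): log once, return the error unchanged. */
int err_probe(int error, const char *what)
{
        fprintf(stderr, "probe: %s failed, err=%d\n", what, error);
        return error;
}

int claim(int ok)
{
        int error = ok ? 0 : -19;       /* -ENODEV, illustrative */

        if (error)
                return err_probe(error, "claim gpio");
        return 0;
}
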
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 953992b458e9..ca150618d32f 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -19,7 +19,6 @@
* Copyright (C) 2006 Nicolas Boichat (nicolas@boichat.ch)
*/
-#include "linux/usb.h"
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
@@ -194,8 +193,6 @@ enum tp_type {
/* list of device capability bits */
#define HAS_INTEGRATED_BUTTON 1
-/* maximum number of supported endpoints (currently trackpad and button) */
-#define MAX_ENDPOINTS 2
/* trackpad finger data block size */
#define FSIZE_TYPE1 (14 * sizeof(__le16))
@@ -894,18 +891,6 @@ static int bcm5974_resume(struct usb_interface *iface)
return error;
}
-static bool bcm5974_check_endpoints(struct usb_interface *iface,
- const struct bcm5974_config *cfg)
-{
- u8 ep_addr[MAX_ENDPOINTS + 1] = {0};
-
- ep_addr[0] = cfg->tp_ep;
- if (cfg->tp_type == TYPE1)
- ep_addr[1] = cfg->bt_ep;
-
- return usb_check_int_endpoints(iface, ep_addr);
-}
-
static int bcm5974_probe(struct usb_interface *iface,
const struct usb_device_id *id)
{
@@ -918,11 +903,6 @@ static int bcm5974_probe(struct usb_interface *iface,
/* find the product index */
cfg = bcm5974_get_config(udev);
- if (!bcm5974_check_endpoints(iface, cfg)) {
- dev_err(&iface->dev, "Unexpected non-int endpoint\n");
- return -ENODEV;
- }
-
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL);
input_dev = input_allocate_device();
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 258d5fe3d395..42eaebb3bf5c 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -978,12 +978,12 @@ static int rmi_driver_remove(struct device *dev)
rmi_disable_irq(rmi_dev, false);
- irq_domain_remove(data->irqdomain);
- data->irqdomain = NULL;
-
rmi_f34_remove_sysfs(rmi_dev);
rmi_free_function_list(rmi_dev);
+ irq_domain_remove(data->irqdomain);
+ data->irqdomain = NULL;
+
return 0;
}
diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
index 20331e119beb..03d626776ba1 100644
--- a/drivers/interconnect/qcom/sc8180x.c
+++ b/drivers/interconnect/qcom/sc8180x.c
@@ -1372,6 +1372,7 @@ static struct qcom_icc_bcm bcm_mm0 = {
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
+ .keepalive = true,
.num_nodes = 1,
.nodes = { &slv_qns_cdsp_mem_noc }
};
diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c
index 629faa4c9aae..fc22cecf650f 100644
--- a/drivers/interconnect/qcom/sm8550.c
+++ b/drivers/interconnect/qcom/sm8550.c
@@ -2223,6 +2223,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8550",
.of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
},
};
diff --git a/drivers/interconnect/qcom/sm8650.c b/drivers/interconnect/qcom/sm8650.c
index b83de54577b6..b962e6c233ef 100644
--- a/drivers/interconnect/qcom/sm8650.c
+++ b/drivers/interconnect/qcom/sm8650.c
@@ -1160,7 +1160,7 @@ static struct qcom_icc_node qns_gemnoc_sf = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
- .enable_mask = BIT(3),
+ .enable_mask = BIT(0),
.num_nodes = 1,
.nodes = { &ebi },
};
diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
index d19501d913b3..cbaf4f9c41be 100644
--- a/drivers/interconnect/qcom/x1e80100.c
+++ b/drivers/interconnect/qcom/x1e80100.c
@@ -1586,6 +1586,7 @@ static struct qcom_icc_node qns_pcie_south_gem_noc_pcie = {
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
+ .enable_mask = BIT(3),
.num_nodes = 1,
.nodes = { &ebi },
};
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 9a29d742617e..0af39bbbe3a3 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -163,6 +163,9 @@ config IOMMU_SVA
select IOMMU_MM_DATA
bool
+config IOMMU_IOPF
+ bool
+
config FSL_PAMU
bool "Freescale IOMMU support"
depends on PCI
@@ -179,7 +182,7 @@ config FSL_PAMU
config MSM_IOMMU
bool "MSM IOMMU Support"
depends on ARM
- depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
+ depends on ARCH_QCOM || COMPILE_TEST
select IOMMU_API
select IOMMU_IO_PGTABLE_ARMV7S
help
@@ -196,7 +199,7 @@ source "drivers/iommu/iommufd/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
- select DMAR_TABLE
+ select DMAR_TABLE if INTEL_IOMMU
help
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode in the CPU's which support x2APIC enhancements or
@@ -398,6 +401,7 @@ config ARM_SMMU_V3_SVA
bool "Shared Virtual Addressing support for the ARM SMMUv3"
depends on ARM_SMMU_V3
select IOMMU_SVA
+ select IOMMU_IOPF
select MMU_NOTIFIER
help
Support for sharing process address spaces with devices using the
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 95ad9dbfbda0..542760d963ec 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
-obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o io-pgfault.o
+obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o
+obj-$(CONFIG_IOMMU_IOPF) += io-pgfault.o
obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
obj-$(CONFIG_APPLE_DART) += apple-dart.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 8b3601f285fd..f482aab420f7 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -39,20 +39,16 @@ extern enum io_pgtable_fmt amd_iommu_pgtable;
extern int amd_iommu_gpt_level;
bool amd_iommu_v2_supported(void);
-struct amd_iommu *get_amd_iommu(unsigned int idx);
-u8 amd_iommu_pc_get_max_banks(unsigned int idx);
-bool amd_iommu_pc_supported(void);
-u8 amd_iommu_pc_get_max_counters(unsigned int idx);
-int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
- u8 fxn, u64 *value);
-int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
- u8 fxn, u64 *value);
/* Device capabilities */
int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
-int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
+/* GCR3 setup */
+int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, unsigned long gcr3);
+int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);
+
/*
* This function flushes all internal caches of
* the IOMMU used by this driver.
@@ -63,10 +59,10 @@ void amd_iommu_domain_update(struct protection_domain *domain);
void amd_iommu_domain_flush_complete(struct protection_domain *domain);
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
u64 address, size_t size);
-int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
- unsigned long cr3);
-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid);
+void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, u64 address, size_t size);
+void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
+ ioasid_t pasid);
#ifdef CONFIG_IRQ_REMAP
int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
@@ -77,10 +73,6 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
}
#endif
-#define PPR_SUCCESS 0x0
-#define PPR_INVALID 0x1
-#define PPR_FAILURE 0xf
-
int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
int status, int tag);
@@ -150,6 +142,21 @@ static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
return page ? page_address(page) : NULL;
}
+/*
+ * This must be called after device probe completes. During probe
+ * use rlookup_amd_iommu() to get the iommu.
+ */
+static inline struct amd_iommu *get_amd_iommu_from_dev(struct device *dev)
+{
+ return iommu_get_iommu_dev(dev, struct amd_iommu, iommu);
+}
+
+/* This must be called after device probe completes. */
+static inline struct amd_iommu *get_amd_iommu_from_dev_data(struct iommu_dev_data *dev_data)
+{
+ return iommu_get_iommu_dev(dev_data->dev, struct amd_iommu, iommu);
+}
+
bool translation_pre_enabled(struct amd_iommu *iommu);
bool amd_iommu_is_attach_deferred(struct device *dev);
int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
@@ -164,5 +171,4 @@ void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
u64 *root, int mode);
struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
-extern bool amd_iommu_snp_en;
#endif
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 809d74faa1a5..d1fed5fc219b 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -453,15 +453,6 @@
#define MAX_DOMAIN_ID 65536
-/* Protection domain flags */
-#define PD_DMA_OPS_MASK BIT(0) /* domain used for dma_ops */
-#define PD_DEFAULT_MASK BIT(1) /* domain is a default dma_ops
- domain for an IOMMU */
-#define PD_PASSTHROUGH_MASK BIT(2) /* domain has no page
- translation */
-#define PD_IOMMUV2_MASK BIT(3) /* domain has gcr3 table */
-#define PD_GIOV_MASK BIT(4) /* domain enable GIOV support */
-
/* Timeout stuff */
#define LOOP_TIMEOUT 100000
#define MMIO_STATUS_TIMEOUT 2000000
@@ -513,14 +504,6 @@ extern struct kmem_cache *amd_iommu_irq_cache;
#define for_each_iommu_safe(iommu, next) \
list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
-#define APERTURE_RANGE_SHIFT 27 /* 128 MB */
-#define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT)
-#define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT)
-#define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */
-#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
-#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
-
-
struct amd_iommu;
struct iommu_domain;
struct irq_domain;
@@ -541,6 +524,13 @@ struct amd_irte_ops;
#define io_pgtable_cfg_to_data(x) \
container_of((x), struct amd_io_pgtable, pgtbl_cfg)
+struct gcr3_tbl_info {
+ u64 *gcr3_tbl; /* Guest CR3 table */
+ int glx; /* Number of levels for GCR3 table */
+ u32 pasid_cnt; /* Track attached PASIDs */
+ u16 domid; /* Per device domain ID */
+};
+
struct amd_io_pgtable {
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable iop;
@@ -549,6 +539,11 @@ struct amd_io_pgtable {
u64 *pgd; /* v2 pgtable pgd pointer */
};
+enum protection_domain_mode {
+ PD_MODE_V1 = 1,
+ PD_MODE_V2,
+};
+
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
@@ -560,10 +555,8 @@ struct protection_domain {
struct amd_io_pgtable iop;
spinlock_t lock; /* mostly used to lock the page table*/
u16 id; /* the domain id written to the device table */
- int glx; /* Number of levels for GCR3 table */
int nid; /* Node ID */
- u64 *gcr3_tbl; /* Guest CR3 table */
- unsigned long flags; /* flags to find out type of domain */
+ enum protection_domain_mode pd_mode; /* Track page table type */
bool dirty_tracking; /* dirty tracking is enabled in the domain */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
@@ -816,6 +809,7 @@ struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
struct llist_node dev_data_list; /* For global dev_data_list */
struct protection_domain *domain; /* Domain the device is bound to */
+ struct gcr3_tbl_info gcr3_info; /* Per-device GCR3 table */
struct device *dev;
u16 devid; /* PCI Device ID */
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index c83bd0c2a1c9..e7a44929f0da 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -30,6 +30,7 @@
#include <asm/io_apic.h>
#include <asm/irq_remapping.h>
#include <asm/set_memory.h>
+#include <asm/sev.h>
#include <linux/crash_dump.h>
@@ -2068,6 +2069,9 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
/* Prevent binding other PCI device drivers to IOMMU devices */
iommu->dev->match_driver = false;
+ /* ACPI _PRT won't have an IRQ for IOMMU */
+ iommu->dev->irq_managed = 1;
+
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
&iommu->cap);
@@ -2769,6 +2773,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
+ iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
@@ -2825,6 +2830,7 @@ static void early_enable_iommus(void)
iommu_disable_irtcachedis(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
+ iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
@@ -2838,10 +2844,8 @@ static void enable_iommus_v2(void)
{
struct amd_iommu *iommu;
- for_each_iommu(iommu) {
+ for_each_iommu(iommu)
iommu_enable_ppr_log(iommu);
- iommu_enable_gt(iommu);
- }
}
static void enable_iommus_vapic(void)
@@ -3221,6 +3225,36 @@ out:
return true;
}
+static void iommu_snp_enable(void)
+{
+#ifdef CONFIG_KVM_AMD_SEV
+ if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+ return;
+ /*
+ * SNP support requires that the IOMMU is enabled and not
+ * configured in passthrough mode.
+ */
+ if (no_iommu || iommu_default_passthrough()) {
+ pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
+ return;
+ }
+
+ amd_iommu_snp_en = check_feature(FEATURE_SNP);
+ if (!amd_iommu_snp_en) {
+ pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
+ return;
+ }
+
+ pr_info("IOMMU SNP support enabled.\n");
+
+ /* Enforce IOMMU v1 pagetable when SNP is enabled. */
+ if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+ pr_warn("Forcing use of AMD IOMMU v1 page table due to SNP.\n");
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ }
+#endif
+}
+
/****************************************************************************
*
* AMD IOMMU Initialization State Machine
@@ -3256,6 +3290,7 @@ static int __init state_next(void)
break;
case IOMMU_ENABLED:
register_syscore_ops(&amd_iommu_syscore_ops);
+ iommu_snp_enable();
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
break;
@@ -3694,13 +3729,11 @@ u8 amd_iommu_pc_get_max_banks(unsigned int idx)
return 0;
}
-EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
bool amd_iommu_pc_supported(void)
{
return amd_iommu_pc_present;
}
-EXPORT_SYMBOL(amd_iommu_pc_supported);
u8 amd_iommu_pc_get_max_counters(unsigned int idx)
{
@@ -3711,7 +3744,6 @@ u8 amd_iommu_pc_get_max_counters(unsigned int idx)
return 0;
}
-EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
u8 fxn, u64 *value, bool is_write)
@@ -3767,40 +3799,85 @@ int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64
return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
}
-#ifdef CONFIG_AMD_MEM_ENCRYPT
-int amd_iommu_snp_enable(void)
+#ifdef CONFIG_KVM_AMD_SEV
+static int iommu_page_make_shared(void *page)
{
- /*
- * The SNP support requires that IOMMU must be enabled, and is
- * not configured in the passthrough mode.
- */
- if (no_iommu || iommu_default_passthrough()) {
- pr_err("SNP: IOMMU is disabled or configured in passthrough mode, SNP cannot be supported");
- return -EINVAL;
+ unsigned long paddr, pfn;
+
+ paddr = iommu_virt_to_phys(page);
+ /* Cbit maybe set in the paddr */
+ pfn = __sme_clr(paddr) >> PAGE_SHIFT;
+
+ if (!(pfn % PTRS_PER_PMD)) {
+ int ret, level;
+ bool assigned;
+
+ ret = snp_lookup_rmpentry(pfn, &assigned, &level);
+ if (ret) {
+ pr_warn("IOMMU PFN %lx RMP lookup failed, ret %d\n", pfn, ret);
+ return ret;
+ }
+
+ if (!assigned) {
+ pr_warn("IOMMU PFN %lx not assigned in RMP table\n", pfn);
+ return -EINVAL;
+ }
+
+ if (level > PG_LEVEL_4K) {
+ ret = psmash(pfn);
+ if (!ret)
+ goto done;
+
+ pr_warn("PSMASH failed for IOMMU PFN %lx huge RMP entry, ret: %d, level: %d\n",
+ pfn, ret, level);
+ return ret;
+ }
}
- /*
- * Prevent enabling SNP after IOMMU_ENABLED state because this process
- * affect how IOMMU driver sets up data structures and configures
- * IOMMU hardware.
- */
- if (init_state > IOMMU_ENABLED) {
- pr_err("SNP: Too late to enable SNP for IOMMU.\n");
- return -EINVAL;
+done:
+ return rmp_make_shared(pfn, PG_LEVEL_4K);
+}
+
+static int iommu_make_shared(void *va, size_t size)
+{
+ void *page;
+ int ret;
+
+ if (!va)
+ return 0;
+
+ for (page = va; page < (va + size); page += PAGE_SIZE) {
+ ret = iommu_page_make_shared(page);
+ if (ret)
+ return ret;
}
- amd_iommu_snp_en = check_feature(FEATURE_SNP);
+ return 0;
+}
+
+int amd_iommu_snp_disable(void)
+{
+ struct amd_iommu *iommu;
+ int ret;
+
if (!amd_iommu_snp_en)
- return -EINVAL;
+ return 0;
- pr_info("SNP enabled\n");
+ for_each_iommu(iommu) {
+ ret = iommu_make_shared(iommu->evt_buf, EVT_BUFFER_SIZE);
+ if (ret)
+ return ret;
- /* Enforce IOMMU v1 pagetable when SNP is enabled. */
- if (amd_iommu_pgtable != AMD_IOMMU_V1) {
- pr_warn("Force to using AMD IOMMU v1 page table due to SNP\n");
- amd_iommu_pgtable = AMD_IOMMU_V1;
+ ret = iommu_make_shared(iommu->ppr_log, PPR_LOG_SIZE);
+ if (ret)
+ return ret;
+
+ ret = iommu_make_shared((void *)iommu->cmd_sem, PAGE_SIZE);
+ if (ret)
+ return ret;
}
return 0;
}
+EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);
#endif
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
index 6d69ba60744f..93489d2db4e8 100644
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ b/drivers/iommu/amd/io_pgtable_v2.c
@@ -350,38 +350,26 @@ static const struct iommu_flush_ops v2_flush_ops = {
static void v2_free_pgtable(struct io_pgtable *iop)
{
- struct protection_domain *pdom;
struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
- pdom = container_of(pgtable, struct protection_domain, iop);
- if (!(pdom->flags & PD_IOMMUV2_MASK))
+ if (!pgtable || !pgtable->pgd)
return;
- /* Clear gcr3 entry */
- amd_iommu_domain_clear_gcr3(&pdom->domain, 0);
-
- /* Make changes visible to IOMMUs */
- amd_iommu_domain_update(pdom);
-
/* Free page table */
free_pgtable(pgtable->pgd, get_pgtable_level());
+ pgtable->pgd = NULL;
}
static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
struct protection_domain *pdom = (struct protection_domain *)cookie;
- int ret;
int ias = IOMMU_IN_ADDR_BIT_SIZE;
pgtable->pgd = alloc_pgtable_page(pdom->nid, GFP_ATOMIC);
if (!pgtable->pgd)
return NULL;
- ret = amd_iommu_domain_set_gcr3(&pdom->domain, 0, iommu_virt_to_phys(pgtable->pgd));
- if (ret)
- goto err_free_pgd;
-
if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
ias = 57;
@@ -395,11 +383,6 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
cfg->tlb = &v2_flush_ops;
return &pgtable->iop;
-
-err_free_pgd:
- free_pgtable_page(pgtable->pgd);
-
- return NULL;
}
struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 4283dd8191f0..d35c1b8c8e65 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -45,10 +45,6 @@
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
-/* IO virtual address start page frame number */
-#define IOVA_START_PFN (1)
-#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
-
/* Reserved IOVA ranges */
#define MSI_RANGE_START (0xfee00000)
#define MSI_RANGE_END (0xfeefffff)
@@ -79,6 +75,9 @@ struct kmem_cache *amd_iommu_irq_cache;
static void detach_device(struct device *dev);
+static void set_dte_entry(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data);
+
/****************************************************************************
*
* Helper functions
@@ -87,7 +86,7 @@ static void detach_device(struct device *dev);
static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{
- return (pdom && (pdom->flags & PD_IOMMUV2_MASK));
+ return (pdom && (pdom->pd_mode == PD_MODE_V2));
}
static inline int get_acpihid_device_id(struct device *dev,
@@ -1388,14 +1387,9 @@ void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
size_t size, ioasid_t pasid, bool gn)
{
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
struct iommu_cmd cmd;
- int qdep;
-
- qdep = dev_data->ats_qdep;
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return -EINVAL;
+ int qdep = dev_data->ats_qdep;
build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
size, pasid, gn);
@@ -1415,16 +1409,12 @@ static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
*/
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
struct pci_dev *pdev = NULL;
struct amd_iommu_pci_seg *pci_seg;
u16 alias;
int ret;
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return -EINVAL;
-
if (dev_is_pci(dev_data->dev))
pdev = to_pci_dev(dev_data->dev);
@@ -1453,27 +1443,37 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
return ret;
}
-/*
- * TLB invalidation function which is called from the mapping functions.
- * It invalidates a single PTE if the range to flush is within a single
- * page. Otherwise it flushes the whole TLB of the IOMMU.
- */
-static void __domain_flush_pages(struct protection_domain *domain,
+static int domain_flush_pages_v2(struct protection_domain *pdom,
u64 address, size_t size)
{
struct iommu_dev_data *dev_data;
struct iommu_cmd cmd;
- int ret = 0, i;
- ioasid_t pasid = IOMMU_NO_PASID;
- bool gn = false;
+ int ret = 0;
- if (pdom_is_v2_pgtbl_mode(domain))
- gn = true;
+ list_for_each_entry(dev_data, &pdom->dev_list, list) {
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
+ u16 domid = dev_data->gcr3_info.domid;
- build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, gn);
+ build_inv_iommu_pages(&cmd, address, size,
+ domid, IOMMU_NO_PASID, true);
+
+ ret |= iommu_queue_command(iommu, &cmd);
+ }
+
+ return ret;
+}
+
+static int domain_flush_pages_v1(struct protection_domain *pdom,
+ u64 address, size_t size)
+{
+ struct iommu_cmd cmd;
+ int ret = 0, i;
+
+ build_inv_iommu_pages(&cmd, address, size,
+ pdom->id, IOMMU_NO_PASID, false);
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (!domain->dev_iommu[i])
+ if (!pdom->dev_iommu[i])
continue;
/*
@@ -1483,6 +1483,28 @@ static void __domain_flush_pages(struct protection_domain *domain,
ret |= iommu_queue_command(amd_iommus[i], &cmd);
}
+ return ret;
+}
+
+/*
+ * TLB invalidation function which is called from the mapping functions.
+ * It flushes range of PTEs of the domain.
+ */
+static void __domain_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size)
+{
+ struct iommu_dev_data *dev_data;
+ int ret = 0;
+ ioasid_t pasid = IOMMU_NO_PASID;
+ bool gn = false;
+
+ if (pdom_is_v2_pgtbl_mode(domain)) {
+ gn = true;
+ ret = domain_flush_pages_v2(domain, address, size);
+ } else {
+ ret = domain_flush_pages_v1(domain, address, size);
+ }
+
list_for_each_entry(dev_data, &domain->dev_list, list) {
if (!dev_data->ats_enabled)
@@ -1551,6 +1573,29 @@ static void amd_iommu_domain_flush_all(struct protection_domain *domain)
CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
+void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, u64 address, size_t size)
+{
+ struct iommu_cmd cmd;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
+
+ build_inv_iommu_pages(&cmd, address, size,
+ dev_data->gcr3_info.domid, pasid, true);
+ iommu_queue_command(iommu, &cmd);
+
+ if (dev_data->ats_enabled)
+ device_flush_iotlb(dev_data, address, size, pasid, true);
+
+ iommu_completion_wait(iommu);
+}
+
+void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
+ ioasid_t pasid)
+{
+ amd_iommu_dev_flush_pasid_pages(dev_data, 0,
+ CMD_INV_IOMMU_ALL_PAGES_ADDRESS, pasid);
+}
+
void amd_iommu_domain_flush_complete(struct protection_domain *domain)
{
int i;
@@ -1592,6 +1637,49 @@ static void domain_flush_devices(struct protection_domain *domain)
device_flush_dte(dev_data);
}
+static void update_device_table(struct protection_domain *domain)
+{
+ struct iommu_dev_data *dev_data;
+
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+ set_dte_entry(iommu, dev_data);
+ clone_aliases(iommu, dev_data->dev);
+ }
+}
+
+void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
+{
+ update_device_table(domain);
+ domain_flush_devices(domain);
+}
+
+void amd_iommu_domain_update(struct protection_domain *domain)
+{
+ /* Update device table */
+ amd_iommu_update_and_flush_device_table(domain);
+
+ /* Flush domain TLB(s) and wait for completion */
+ amd_iommu_domain_flush_all(domain);
+}
+
+int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
+ int status, int tag)
+{
+ struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
+ struct iommu_cmd cmd;
+
+ dev_data = dev_iommu_priv_get(&pdev->dev);
+ iommu = get_amd_iommu_from_dev(&pdev->dev);
+
+ build_complete_ppr(&cmd, dev_data->devid, pasid, status,
+ tag, dev_data->pri_tlp);
+
+ return iommu_queue_command(iommu, &cmd);
+}
+
/****************************************************************************
*
* The next functions belong to the domain allocation. A domain is
@@ -1656,16 +1744,22 @@ static void free_gcr3_tbl_level2(u64 *tbl)
}
}
-static void free_gcr3_table(struct protection_domain *domain)
+static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
{
- if (domain->glx == 2)
- free_gcr3_tbl_level2(domain->gcr3_tbl);
- else if (domain->glx == 1)
- free_gcr3_tbl_level1(domain->gcr3_tbl);
+ if (gcr3_info->glx == 2)
+ free_gcr3_tbl_level2(gcr3_info->gcr3_tbl);
+ else if (gcr3_info->glx == 1)
+ free_gcr3_tbl_level1(gcr3_info->gcr3_tbl);
else
- BUG_ON(domain->glx != 0);
+ WARN_ON_ONCE(gcr3_info->glx != 0);
- free_page((unsigned long)domain->gcr3_tbl);
+ gcr3_info->glx = 0;
+
+ /* Free per device domain ID */
+ domain_id_free(gcr3_info->domid);
+
+ free_page((unsigned long)gcr3_info->gcr3_tbl);
+ gcr3_info->gcr3_tbl = NULL;
}
/*
@@ -1684,33 +1778,133 @@ static int get_gcr3_levels(int pasids)
return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
}
-/* Note: This function expects iommu_domain->lock to be held prior calling the function. */
-static int setup_gcr3_table(struct protection_domain *domain, int pasids)
+static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
+ struct amd_iommu *iommu, int pasids)
{
int levels = get_gcr3_levels(pasids);
+ int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
if (levels > amd_iommu_max_glx_val)
return -EINVAL;
- domain->gcr3_tbl = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
- if (domain->gcr3_tbl == NULL)
+ if (gcr3_info->gcr3_tbl)
+ return -EBUSY;
+
+ /* Allocate per device domain ID */
+ gcr3_info->domid = domain_id_alloc();
+
+ gcr3_info->gcr3_tbl = alloc_pgtable_page(nid, GFP_ATOMIC);
+ if (gcr3_info->gcr3_tbl == NULL) {
+ domain_id_free(gcr3_info->domid);
return -ENOMEM;
+ }
- domain->glx = levels;
- domain->flags |= PD_IOMMUV2_MASK;
+ gcr3_info->glx = levels;
- amd_iommu_domain_update(domain);
+ return 0;
+}
+static u64 *__get_gcr3_pte(struct gcr3_tbl_info *gcr3_info,
+ ioasid_t pasid, bool alloc)
+{
+ int index;
+ u64 *pte;
+ u64 *root = gcr3_info->gcr3_tbl;
+ int level = gcr3_info->glx;
+
+ while (true) {
+
+ index = (pasid >> (9 * level)) & 0x1ff;
+ pte = &root[index];
+
+ if (level == 0)
+ break;
+
+ if (!(*pte & GCR3_VALID)) {
+ if (!alloc)
+ return NULL;
+
+ root = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (root == NULL)
+ return NULL;
+
+ *pte = iommu_virt_to_phys(root) | GCR3_VALID;
+ }
+
+ root = iommu_phys_to_virt(*pte & PAGE_MASK);
+
+ level -= 1;
+ }
+
+ return pte;
+}
+
+static int update_gcr3(struct iommu_dev_data *dev_data,
+ ioasid_t pasid, unsigned long gcr3, bool set)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ u64 *pte;
+
+ pte = __get_gcr3_pte(gcr3_info, pasid, true);
+ if (pte == NULL)
+ return -ENOMEM;
+
+ if (set)
+ *pte = (gcr3 & PAGE_MASK) | GCR3_VALID;
+ else
+ *pte = 0;
+
+ amd_iommu_dev_flush_pasid_all(dev_data, pasid);
return 0;
}
-static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
- struct protection_domain *domain, bool ats, bool ppr)
+int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid,
+ unsigned long gcr3)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ int ret;
+
+ iommu_group_mutex_assert(dev_data->dev);
+
+ ret = update_gcr3(dev_data, pasid, gcr3, true);
+ if (ret)
+ return ret;
+
+ gcr3_info->pasid_cnt++;
+ return ret;
+}
+
+int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
+{
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+ int ret;
+
+ iommu_group_mutex_assert(dev_data->dev);
+
+ ret = update_gcr3(dev_data, pasid, 0, false);
+ if (ret)
+ return ret;
+
+ gcr3_info->pasid_cnt--;
+ return ret;
+}
+
+static void set_dte_entry(struct amd_iommu *iommu,
+ struct iommu_dev_data *dev_data)
{
u64 pte_root = 0;
u64 flags = 0;
u32 old_domid;
+ u16 devid = dev_data->devid;
+ u16 domid;
+ struct protection_domain *domain = dev_data->domain;
struct dev_table_entry *dev_table = get_dev_table(iommu);
+ struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+
+ if (gcr3_info && gcr3_info->gcr3_tbl)
+ domid = dev_data->gcr3_info.domid;
+ else
+ domid = domain->id;
if (domain->iop.mode != PAGE_MODE_NONE)
pte_root = iommu_virt_to_phys(domain->iop.root);
@@ -1724,23 +1918,23 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
* When SNP is enabled, Only set TV bit when IOMMU
* page translation is in use.
*/
- if (!amd_iommu_snp_en || (domain->id != 0))
+ if (!amd_iommu_snp_en || (domid != 0))
pte_root |= DTE_FLAG_TV;
flags = dev_table[devid].data[1];
- if (ats)
+ if (dev_data->ats_enabled)
flags |= DTE_FLAG_IOTLB;
- if (ppr)
+ if (dev_data->ppr)
pte_root |= 1ULL << DEV_ENTRY_PPR;
if (domain->dirty_tracking)
pte_root |= DTE_FLAG_HAD;
- if (domain->flags & PD_IOMMUV2_MASK) {
- u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
- u64 glx = domain->glx;
+ if (gcr3_info && gcr3_info->gcr3_tbl) {
+ u64 gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);
+ u64 glx = gcr3_info->glx;
u64 tmp;
pte_root |= DTE_FLAG_GV;
@@ -1768,12 +1962,13 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
((u64)GUEST_PGTABLE_5_LEVEL << DTE_GPT_LEVEL_SHIFT);
}
- if (domain->flags & PD_GIOV_MASK)
+ /* GIOV is supported with V2 page table mode only */
+ if (pdom_is_v2_pgtbl_mode(domain))
pte_root |= DTE_FLAG_GIOV;
}
flags &= ~DEV_DOMID_MASK;
- flags |= domain->id;
+ flags |= domid;
old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
dev_table[devid].data[1] = flags;
@@ -1804,16 +1999,11 @@ static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
amd_iommu_apply_erratum_63(iommu, devid);
}
-static void do_attach(struct iommu_dev_data *dev_data,
- struct protection_domain *domain)
+static int do_attach(struct iommu_dev_data *dev_data,
+ struct protection_domain *domain)
{
- struct amd_iommu *iommu;
- bool ats;
-
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return;
- ats = dev_data->ats_enabled;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+ int ret = 0;
/* Update data structures */
dev_data->domain = domain;
@@ -1827,22 +2017,40 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_iommu[iommu->index] += 1;
domain->dev_cnt += 1;
+ /* Init GCR3 table and update device table */
+ if (domain->pd_mode == PD_MODE_V2) {
+ /* By default, setup GCR3 table to support single PASID */
+ ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, 1);
+ if (ret)
+ return ret;
+
+ ret = update_gcr3(dev_data, 0,
+ iommu_virt_to_phys(domain->iop.pgd), true);
+ if (ret) {
+ free_gcr3_table(&dev_data->gcr3_info);
+ return ret;
+ }
+ }
+
/* Update device table */
- set_dte_entry(iommu, dev_data->devid, domain,
- ats, dev_data->ppr);
+ set_dte_entry(iommu, dev_data);
clone_aliases(iommu, dev_data->dev);
device_flush_dte(dev_data);
+
+ return ret;
}
static void do_detach(struct iommu_dev_data *dev_data)
{
struct protection_domain *domain = dev_data->domain;
- struct amd_iommu *iommu;
+ struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- return;
+ /* Clear GCR3 table */
+ if (domain->pd_mode == PD_MODE_V2) {
+ update_gcr3(dev_data, 0, 0, false);
+ free_gcr3_table(&dev_data->gcr3_info);
+ }
/* Update data structures */
dev_data->domain = NULL;
@@ -1886,7 +2094,7 @@ static int attach_device(struct device *dev,
if (dev_is_pci(dev))
pdev_enable_caps(to_pci_dev(dev));
- do_attach(dev_data, domain);
+ ret = do_attach(dev_data, domain);
out:
spin_unlock(&dev_data->lock);
@@ -1954,8 +2162,7 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
ret = iommu_init_device(iommu, dev);
if (ret) {
- if (ret != -ENOTSUPP)
- dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
+ dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
iommu_dev = ERR_PTR(ret);
iommu_ignore_device(iommu, dev);
} else {
@@ -2000,42 +2207,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)
/*****************************************************************************
*
- * The next functions belong to the dma_ops mapping/unmapping code.
- *
- *****************************************************************************/
-
-static void update_device_table(struct protection_domain *domain)
-{
- struct iommu_dev_data *dev_data;
-
- list_for_each_entry(dev_data, &domain->dev_list, list) {
- struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
-
- if (!iommu)
- continue;
- set_dte_entry(iommu, dev_data->devid, domain,
- dev_data->ats_enabled, dev_data->ppr);
- clone_aliases(iommu, dev_data->dev);
- }
-}
-
-void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
-{
- update_device_table(domain);
- domain_flush_devices(domain);
-}
-
-void amd_iommu_domain_update(struct protection_domain *domain)
-{
- /* Update device table */
- amd_iommu_update_and_flush_device_table(domain);
-
- /* Flush domain TLB(s) and wait for completion */
- amd_iommu_domain_flush_all(domain);
-}
-
-/*****************************************************************************
- *
* The following functions belong to the exported interface of AMD IOMMU
*
* This interface allows access to lower level functions of the IOMMU
@@ -2070,9 +2241,6 @@ static void protection_domain_free(struct protection_domain *domain)
if (domain->iop.pgtbl_cfg.tlb)
free_io_pgtable_ops(&domain->iop.iop.ops);
- if (domain->flags & PD_IOMMUV2_MASK)
- free_gcr3_table(domain);
-
if (domain->iop.root)
free_page((unsigned long)domain->iop.root);
@@ -2094,19 +2262,16 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
return -ENOMEM;
}
+ domain->pd_mode = PD_MODE_V1;
amd_iommu_domain_set_pgtable(domain, pt_root, mode);
return 0;
}
-static int protection_domain_init_v2(struct protection_domain *domain)
+static int protection_domain_init_v2(struct protection_domain *pdom)
{
- domain->flags |= PD_GIOV_MASK;
-
- domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
-
- if (setup_gcr3_table(domain, 1))
- return -ENOMEM;
+ pdom->pd_mode = PD_MODE_V2;
+ pdom->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
return 0;
}
@@ -2194,11 +2359,8 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
struct protection_domain *domain;
struct amd_iommu *iommu = NULL;
- if (dev) {
- iommu = rlookup_amd_iommu(dev);
- if (!iommu)
- return ERR_PTR(-ENODEV);
- }
+ if (dev)
+ iommu = get_amd_iommu_from_dev(dev);
/*
* Since DTE[Mode]=0 is prohibited on SNP-enabled system,
@@ -2279,7 +2441,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
struct protection_domain *domain = to_pdomain(dom);
- struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
int ret;
/*
@@ -2337,7 +2499,7 @@ static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
int prot = 0;
int ret = -EINVAL;
- if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ if ((domain->pd_mode == PD_MODE_V1) &&
(domain->iop.mode == PAGE_MODE_NONE))
return -EINVAL;
@@ -2383,7 +2545,7 @@ static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova
struct io_pgtable_ops *ops = &domain->iop.iop.ops;
size_t r;
- if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+ if ((domain->pd_mode == PD_MODE_V1) &&
(domain->iop.mode == PAGE_MODE_NONE))
return 0;
@@ -2418,7 +2580,7 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
case IOMMU_CAP_DEFERRED_FLUSH:
return true;
case IOMMU_CAP_DIRTY_TRACKING: {
- struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+ struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
return amd_iommu_hd_support(iommu);
}
@@ -2447,9 +2609,7 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
}
list_for_each_entry(dev_data, &pdomain->dev_list, list) {
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- continue;
+ iommu = get_amd_iommu_from_dev_data(dev_data);
dev_table = get_dev_table(iommu);
pte_root = dev_table[dev_data->devid].data[0];
@@ -2509,9 +2669,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
return;
devid = PCI_SBDF_TO_DEVID(sbdf);
- iommu = rlookup_amd_iommu(dev);
- if (!iommu)
- return;
+ iommu = get_amd_iommu_from_dev(dev);
pci_seg = iommu->pci_seg;
list_for_each_entry(entry, &pci_seg->unity_map, list) {
@@ -2645,216 +2803,6 @@ const struct iommu_ops amd_iommu_ops = {
}
};
-static int __flush_pasid(struct protection_domain *domain, u32 pasid,
- u64 address, size_t size)
-{
- struct iommu_dev_data *dev_data;
- struct iommu_cmd cmd;
- int i, ret;
-
- if (!(domain->flags & PD_IOMMUV2_MASK))
- return -EINVAL;
-
- build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
-
- /*
- * IOMMU TLB needs to be flushed before Device TLB to
- * prevent device TLB refill from IOMMU TLB
- */
- for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
- if (domain->dev_iommu[i] == 0)
- continue;
-
- ret = iommu_queue_command(amd_iommus[i], &cmd);
- if (ret != 0)
- goto out;
- }
-
- /* Wait until IOMMU TLB flushes are complete */
- amd_iommu_domain_flush_complete(domain);
-
- /* Now flush device TLBs */
- list_for_each_entry(dev_data, &domain->dev_list, list) {
- struct amd_iommu *iommu;
- int qdep;
-
- /*
- There might be non-IOMMUv2 capable devices in an IOMMUv2
- * domain.
- */
- if (!dev_data->ats_enabled)
- continue;
-
- qdep = dev_data->ats_qdep;
- iommu = rlookup_amd_iommu(dev_data->dev);
- if (!iommu)
- continue;
- build_inv_iotlb_pages(&cmd, dev_data->devid, qdep,
- address, size, pasid, true);
-
- ret = iommu_queue_command(iommu, &cmd);
- if (ret != 0)
- goto out;
- }
-
- /* Wait until all device TLBs are flushed */
- amd_iommu_domain_flush_complete(domain);
-
- ret = 0;
-
-out:
-
- return ret;
-}
-
-static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
- u64 address)
-{
- return __flush_pasid(domain, pasid, address, PAGE_SIZE);
-}
-
-int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
- u64 address)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __amd_iommu_flush_page(domain, pasid, address);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
-{
- return __flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
-}
-
-int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __amd_iommu_flush_tlb(domain, pasid);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
-{
- int index;
- u64 *pte;
-
- while (true) {
-
- index = (pasid >> (9 * level)) & 0x1ff;
- pte = &root[index];
-
- if (level == 0)
- break;
-
- if (!(*pte & GCR3_VALID)) {
- if (!alloc)
- return NULL;
-
- root = (void *)get_zeroed_page(GFP_ATOMIC);
- if (root == NULL)
- return NULL;
-
- *pte = iommu_virt_to_phys(root) | GCR3_VALID;
- }
-
- root = iommu_phys_to_virt(*pte & PAGE_MASK);
-
- level -= 1;
- }
-
- return pte;
-}
-
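For reference, the GCR3 table walked by the removed __get_gcr3_pte() above is a radix tree in which each level consumes a 9-bit slice of the PASID, exactly like a page-table walk. A minimal user-space sketch of just the index arithmetic, assuming a 3-level table (gcr3_indices() is a made-up name, not a kernel helper):

    #include <stdio.h>

    /* Print the per-level table indices a GCR3-style walk would use:
     * level N consumes bits [9N+8:9N] of the PASID. */
    static void gcr3_indices(unsigned int pasid, int levels)
    {
            for (int level = levels - 1; level >= 0; level--)
                    printf("level %d -> index 0x%x\n",
                           level, (pasid >> (9 * level)) & 0x1ff);
    }

    int main(void)
    {
            gcr3_indices(0x12345, 3);       /* prints 0x0, 0x91, 0x145 */
            return 0;
    }
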
-static int __set_gcr3(struct protection_domain *domain, u32 pasid,
- unsigned long cr3)
-{
- u64 *pte;
-
- if (domain->iop.mode != PAGE_MODE_NONE)
- return -EINVAL;
-
- pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
- if (pte == NULL)
- return -ENOMEM;
-
- *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
-
- return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
-{
- u64 *pte;
-
- if (domain->iop.mode != PAGE_MODE_NONE)
- return -EINVAL;
-
- pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
- if (pte == NULL)
- return 0;
-
- *pte = 0;
-
- return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
- unsigned long cr3)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __set_gcr3(domain, pasid, cr3);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
-{
- struct protection_domain *domain = to_pdomain(dom);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&domain->lock, flags);
- ret = __clear_gcr3(domain, pasid);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- return ret;
-}
-
-int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
- int status, int tag)
-{
- struct iommu_dev_data *dev_data;
- struct amd_iommu *iommu;
- struct iommu_cmd cmd;
-
- dev_data = dev_iommu_priv_get(&pdev->dev);
- iommu = rlookup_amd_iommu(&pdev->dev);
- if (!iommu)
- return -ENODEV;
-
- build_complete_ppr(&cmd, dev_data->devid, pasid, status,
- tag, dev_data->pri_tlp);
-
- return iommu_queue_command(iommu, &cmd);
-}
-
#ifdef CONFIG_IRQ_REMAP
/*****************************************************************************
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index ef3ee95706da..eb1e62cd499a 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -779,7 +779,8 @@ static void apple_dart_domain_free(struct iommu_domain *domain)
kfree(dart_domain);
}
-static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int apple_dart_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 05722121f00e..2cd433a9c8a0 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -10,7 +10,6 @@
#include <linux/slab.h>
#include "arm-smmu-v3.h"
-#include "../../iommu-sva.h"
#include "../../io-pgtable-arm.h"
struct arm_smmu_mmu_notifier {
@@ -292,10 +291,8 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
struct mm_struct *mm)
{
int ret;
- unsigned long flags;
struct arm_smmu_ctx_desc *cd;
struct arm_smmu_mmu_notifier *smmu_mn;
- struct arm_smmu_master *master;
list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
if (smmu_mn->mn.mm == mm) {
@@ -325,28 +322,9 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
goto err_free_cd;
}
- spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_for_each_entry(master, &smmu_domain->devices, domain_head) {
- ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
- cd);
- if (ret) {
- list_for_each_entry_from_reverse(
- master, &smmu_domain->devices, domain_head)
- arm_smmu_write_ctx_desc(
- master, mm_get_enqcmd_pasid(mm), NULL);
- break;
- }
- }
- spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- if (ret)
- goto err_put_notifier;
-
list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
return smmu_mn;
-err_put_notifier:
- /* Frees smmu_mn */
- mmu_notifier_put(&smmu_mn->mn);
err_free_cd:
arm_smmu_free_shared_cd(cd);
return ERR_PTR(ret);
@@ -363,9 +341,6 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
list_del(&smmu_mn->list);
- arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
- NULL);
-
/*
* If we went through clear(), we've already invalidated, and no
* new TLB entry can have been formed.
@@ -381,13 +356,20 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
arm_smmu_free_shared_cd(cd);
}
-static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
+ struct mm_struct *mm)
{
int ret;
struct arm_smmu_bond *bond;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_domain *smmu_domain;
+
+ if (!(domain->type & __IOMMU_DOMAIN_PAGING))
+ return -ENODEV;
+ smmu_domain = to_smmu_domain(domain);
+ if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
+ return -ENODEV;
if (!master || !master->sva_enabled)
return -ENODEV;
@@ -404,9 +386,15 @@ static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
goto err_free_bond;
}
+ ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
+ if (ret)
+ goto err_put_notifier;
+
list_add(&bond->list, &master->bonds);
return 0;
+err_put_notifier:
+ arm_smmu_mmu_notifier_put(bond->smmu_mn);
err_free_bond:
kfree(bond);
return ret;
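The new `domain->type & __IOMMU_DOMAIN_PAGING` test above works because iommu domain types are composed by OR-ing capability flags, so a single AND answers "is this any kind of paging domain?". A standalone sketch of the encoding idea, using toy flag values rather than the kernel's actual __IOMMU_DOMAIN_* constants:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy flag values; the kernel's constants differ. */
    #define TOY_DOMAIN_PAGING       (1 << 0)
    #define TOY_DOMAIN_DMA_API      (1 << 1)

    #define TOY_TYPE_UNMANAGED      TOY_DOMAIN_PAGING
    #define TOY_TYPE_DMA            (TOY_DOMAIN_PAGING | TOY_DOMAIN_DMA_API)
    #define TOY_TYPE_IDENTITY       (1 << 2)

    /* All paging-capable types carry the PAGING bit, so one test
     * covers both UNMANAGED and DMA domains. */
    static bool is_paging(unsigned int type)
    {
            return type & TOY_DOMAIN_PAGING;
    }

    int main(void)
    {
            printf("dma=%d identity=%d\n",
                   is_paging(TOY_TYPE_DMA), is_paging(TOY_TYPE_IDENTITY));
            return 0;
    }
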
@@ -487,7 +475,6 @@ bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
{
- int ret;
struct device *dev = master->dev;
/*
@@ -500,16 +487,7 @@ static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
if (!master->iopf_enabled)
return -EINVAL;
- ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);
- if (ret)
- return ret;
-
- ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
- if (ret) {
- iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
- return ret;
- }
- return 0;
+ return iopf_queue_add_device(master->smmu->evtq.iopf, dev);
}
static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
@@ -519,7 +497,6 @@ static void arm_smmu_master_sva_disable_iopf(struct arm_smmu_master *master)
if (!master->iopf_enabled)
return;
- iommu_unregister_device_fault_handler(dev);
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
}
@@ -568,6 +545,9 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
mutex_lock(&sva_lock);
+
+ arm_smmu_write_ctx_desc(master, id, NULL);
+
list_for_each_entry(t, &master->bonds, list) {
if (t->mm == mm) {
bond = t;
@@ -590,7 +570,7 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
struct mm_struct *mm = domain->mm;
mutex_lock(&sva_lock);
- ret = __arm_smmu_sva_bind(dev, mm);
+ ret = __arm_smmu_sva_bind(dev, id, mm);
mutex_unlock(&sva_lock);
return ret;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 0ffb1cf17e0b..5ed036225e69 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -29,7 +29,6 @@
#include "arm-smmu-v3.h"
#include "../../dma-iommu.h"
-#include "../../iommu-sva.h"
static bool disable_bypass = true;
module_param(disable_bypass, bool, 0444);
@@ -48,6 +47,9 @@ enum arm_smmu_msi_index {
ARM_SMMU_MAX_MSIS,
};
+static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu,
+ ioasid_t sid);
+
static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
[EVTQ_MSI_INDEX] = {
ARM_SMMU_EVTQ_IRQ_CFG0,
@@ -86,6 +88,9 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
+static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu);
+
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -921,31 +926,29 @@ static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
}
-static int arm_smmu_page_response(struct device *dev,
- struct iommu_fault_event *unused,
- struct iommu_page_response *resp)
+static void arm_smmu_page_response(struct device *dev, struct iopf_fault *unused,
+ struct iommu_page_response *resp)
{
struct arm_smmu_cmdq_ent cmd = {0};
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
int sid = master->streams[0].id;
- if (master->stall_enabled) {
- cmd.opcode = CMDQ_OP_RESUME;
- cmd.resume.sid = sid;
- cmd.resume.stag = resp->grpid;
- switch (resp->code) {
- case IOMMU_PAGE_RESP_INVALID:
- case IOMMU_PAGE_RESP_FAILURE:
- cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
- break;
- case IOMMU_PAGE_RESP_SUCCESS:
- cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
- break;
- default:
- return -EINVAL;
- }
- } else {
- return -ENODEV;
+ if (WARN_ON(!master->stall_enabled))
+ return;
+
+ cmd.opcode = CMDQ_OP_RESUME;
+ cmd.resume.sid = sid;
+ cmd.resume.stag = resp->grpid;
+ switch (resp->code) {
+ case IOMMU_PAGE_RESP_INVALID:
+ case IOMMU_PAGE_RESP_FAILURE:
+ cmd.resume.resp = CMDQ_RESUME_0_RESP_ABORT;
+ break;
+ case IOMMU_PAGE_RESP_SUCCESS:
+ cmd.resume.resp = CMDQ_RESUME_0_RESP_RETRY;
+ break;
+ default:
+ break;
}
arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
@@ -955,8 +958,6 @@ static int arm_smmu_page_response(struct device *dev,
* terminated... at some point in the future. PRI_RESP is fire and
* forget.
*/
-
- return 0;
}
/* Context descriptor manipulation functions */
@@ -971,6 +972,199 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
+/*
+ * Based on the value of ent, report which bits of the STE the HW will access. It
+ * would be nice if this was complete according to the spec, but minimally it
+ * has to capture the bits this driver uses.
+ */
+static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
+ struct arm_smmu_ste *used_bits)
+{
+ unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent->data[0]));
+
+ used_bits->data[0] = cpu_to_le64(STRTAB_STE_0_V);
+ if (!(ent->data[0] & cpu_to_le64(STRTAB_STE_0_V)))
+ return;
+
+ used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_CFG);
+
+ /* S1 translates */
+ if (cfg & BIT(0)) {
+ used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
+ STRTAB_STE_0_S1CTXPTR_MASK |
+ STRTAB_STE_0_S1CDMAX);
+ used_bits->data[1] |=
+ cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
+ STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
+ STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
+ STRTAB_STE_1_EATS);
+ used_bits->data[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
+ }
+
+ /* S2 translates */
+ if (cfg & BIT(1)) {
+ used_bits->data[1] |=
+ cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_SHCFG);
+ used_bits->data[2] |=
+ cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
+ STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
+ STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R);
+ used_bits->data[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
+ }
+
+ if (cfg == STRTAB_STE_0_CFG_BYPASS)
+ used_bits->data[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
+}
+
+/*
+ * Figure out if we can do a hitless update of entry to become target. Returns a
+ * bit mask where 1 indicates that qword needs to be set disruptively.
+ * unused_update is an intermediate value of entry that has unused bits set to
+ * their new values.
+ */
+static u8 arm_smmu_entry_qword_diff(const struct arm_smmu_ste *entry,
+ const struct arm_smmu_ste *target,
+ struct arm_smmu_ste *unused_update)
+{
+ struct arm_smmu_ste target_used = {};
+ struct arm_smmu_ste cur_used = {};
+ u8 used_qword_diff = 0;
+ unsigned int i;
+
+ arm_smmu_get_ste_used(entry, &cur_used);
+ arm_smmu_get_ste_used(target, &target_used);
+
+ for (i = 0; i != ARRAY_SIZE(target_used.data); i++) {
+ /*
+ * Check that masks are up to date; the make functions are not
+ * allowed to set a bit to 1 if the used function doesn't say it
+ * is used.
+ */
+ WARN_ON_ONCE(target->data[i] & ~target_used.data[i]);
+
+ /* Bits can change because they are not currently being used */
+ unused_update->data[i] = (entry->data[i] & cur_used.data[i]) |
+ (target->data[i] & ~cur_used.data[i]);
+ /*
+ * Each bit indicates that a used bit in a qword needs to be
+ * changed after unused_update is applied.
+ */
+ if ((unused_update->data[i] & target_used.data[i]) !=
+ target->data[i])
+ used_qword_diff |= 1 << i;
+ }
+ return used_qword_diff;
+}
+
+static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
+ struct arm_smmu_ste *entry,
+ const struct arm_smmu_ste *target, unsigned int start,
+ unsigned int len)
+{
+ bool changed = false;
+ unsigned int i;
+
+ for (i = start; len != 0; len--, i++) {
+ if (entry->data[i] != target->data[i]) {
+ WRITE_ONCE(entry->data[i], target->data[i]);
+ changed = true;
+ }
+ }
+
+ if (changed)
+ arm_smmu_sync_ste_for_sid(smmu, sid);
+ return changed;
+}
+
+/*
+ * Update the STE/CD to the target configuration. The transition from the
+ * current entry to the target entry takes place over multiple steps that
+ * attempts to make the transition hitless if possible. This function takes care
+ * not to create a situation where the HW can perceive a corrupted entry. HW is
+ * only required to have a 64 bit atomicity with stores from the CPU, while
+ * entries are many 64 bit values big.
+ *
+ * The difference between the current value and the target value is analyzed to
+ * determine which of three updates are required - disruptive, hitless or no
+ * change.
+ *
+ * In the most general disruptive case we can make any update in three steps:
+ * - Disrupting the entry (V=0)
+ * - Fill now unused qwords, except qword 0 which contains V
+ * - Make qword 0 have the final value and valid (V=1) with a single 64
+ * bit store
+ *
+ * However this disrupts the HW while it is happening. There are several
+ * interesting cases where a STE/CD can be updated without disturbing the HW
+ * because only a small number of bits are changing (S1DSS, CONFIG, etc) or
+ * because the used bits don't intersect. We can detect this by calculating how
+ * many 64 bit values need update after adjusting the unused bits and skip the
+ * V=0 process. This relies on the IGNORED behavior described in the
+ * specification.
+ */
+static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
+ struct arm_smmu_ste *entry,
+ const struct arm_smmu_ste *target)
+{
+ unsigned int num_entry_qwords = ARRAY_SIZE(target->data);
+ struct arm_smmu_device *smmu = master->smmu;
+ struct arm_smmu_ste unused_update;
+ u8 used_qword_diff;
+
+ used_qword_diff =
+ arm_smmu_entry_qword_diff(entry, target, &unused_update);
+ if (hweight8(used_qword_diff) == 1) {
+ /*
+ * Only one qword needs its used bits to be changed. This is a
+ * hitless update, update all bits the current STE is ignoring
+ * to their new values, then update a single "critical qword" to
+ * change the STE and finally 0 out any bits that are now unused
+ * in the target configuration.
+ */
+ unsigned int critical_qword_index = ffs(used_qword_diff) - 1;
+
+ /*
+ * Skip writing unused bits in the critical qword since we'll be
+ * writing it in the next step anyways. This can save a sync
+ * when the only change is in that qword.
+ */
+ unused_update.data[critical_qword_index] =
+ entry->data[critical_qword_index];
+ entry_set(smmu, sid, entry, &unused_update, 0, num_entry_qwords);
+ entry_set(smmu, sid, entry, target, critical_qword_index, 1);
+ entry_set(smmu, sid, entry, target, 0, num_entry_qwords);
+ } else if (used_qword_diff) {
+ /*
+ * At least two qwords need their inuse bits to be changed. This
+ * requires a breaking update, zero the V bit, write all qwords
+ * but 0, then set qword 0
+ */
+ unused_update.data[0] = entry->data[0] & (~STRTAB_STE_0_V);
+ entry_set(smmu, sid, entry, &unused_update, 0, 1);
+ entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1);
+ entry_set(smmu, sid, entry, target, 0, 1);
+ } else {
+ /*
+ * No inuse bit changed. Sanity check that all unused bits are 0
+ * in the entry. The target was already sanity checked by
+ * arm_smmu_entry_qword_diff().
+ */
+ WARN_ON_ONCE(
+ entry_set(smmu, sid, entry, target, 0, num_entry_qwords));
+ }
+
+ /* It's likely that we'll want to use the new STE soon */
+ if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
+ struct arm_smmu_cmdq_ent
+ prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
+ .prefetch = {
+ .sid = sid,
+ } };
+
+ arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ }
+}
+
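To make the three-way decision above concrete, here is a self-contained sketch of the qword-diff rule on a toy four-qword entry. toy_get_used() is a stand-in for arm_smmu_get_ste_used(); the real used-bit computation depends on the STE layout:

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_QWORDS 4

    /* Stand-in for arm_smmu_get_ste_used(): pretend qword 0 is always
     * used and qword 1 is used only while bit 0 of qword 0 is set. */
    static void toy_get_used(const uint64_t *ent, uint64_t *used)
    {
            used[0] = ~0ULL;
            used[1] = (ent[0] & 1) ? ~0ULL : 0;
            used[2] = 0;
            used[3] = 0;
    }

    /* Same rule as arm_smmu_entry_qword_diff(): a qword is "disruptive"
     * when bits used by both the current and target configuration must
     * change. */
    static unsigned int qword_diff(const uint64_t *cur, const uint64_t *tgt)
    {
            uint64_t cur_used[NUM_QWORDS], tgt_used[NUM_QWORDS];
            unsigned int diff = 0;

            toy_get_used(cur, cur_used);
            toy_get_used(tgt, tgt_used);
            for (int i = 0; i < NUM_QWORDS; i++) {
                    /* Flip unused bits to their target values first ... */
                    uint64_t unused_update = (cur[i] & cur_used[i]) |
                                             (tgt[i] & ~cur_used[i]);

                    /* ... then see whether any used bit still differs. */
                    if ((unused_update & tgt_used[i]) != tgt[i])
                            diff |= 1u << i;
            }
            return diff;
    }

    int main(void)
    {
            uint64_t cur[NUM_QWORDS] = { 0x1, 0xabcd, 0, 0 };
            uint64_t tgt[NUM_QWORDS] = { 0x1, 0xdcba, 0, 0 };
            unsigned int d = qword_diff(cur, tgt);

            /* Exactly one bit set => the hitless single-qword path
             * applies; two or more => the breaking V=0 sequence. */
            printf("diff=0x%x qwords=%d\n", d, __builtin_popcount(d));
            return 0;
    }

With one differing qword the driver can flip all currently-ignored bits, then rewrite the single critical qword; with more it must drop V first, as the three branches of arm_smmu_write_ste() above show.
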
static void arm_smmu_sync_cd(struct arm_smmu_master *master,
int ssid, bool leaf)
{
@@ -1251,158 +1445,131 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
-static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
- struct arm_smmu_ste *dst)
+static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
{
- /*
- * This is hideously complicated, but we only really care about
- * three cases at the moment:
- *
- * 1. Invalid (all zero) -> bypass/fault (init)
- * 2. Bypass/fault -> translation/bypass (attach)
- * 3. Translation/bypass -> bypass/fault (detach)
- *
- * Given that we can't update the STE atomically and the SMMU
- * doesn't read the thing in a defined order, that leaves us
- * with the following maintenance requirements:
- *
- * 1. Update Config, return (init time STEs aren't live)
- * 2. Write everything apart from dword 0, sync, write dword 0, sync
- * 3. Update Config, sync
- */
- u64 val = le64_to_cpu(dst->data[0]);
- bool ste_live = false;
- struct arm_smmu_device *smmu = master->smmu;
- struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
- struct arm_smmu_s2_cfg *s2_cfg = NULL;
- struct arm_smmu_domain *smmu_domain = master->domain;
- struct arm_smmu_cmdq_ent prefetch_cmd = {
- .opcode = CMDQ_OP_PREFETCH_CFG,
- .prefetch = {
- .sid = sid,
- },
- };
-
- if (smmu_domain) {
- switch (smmu_domain->stage) {
- case ARM_SMMU_DOMAIN_S1:
- cd_table = &master->cd_table;
- break;
- case ARM_SMMU_DOMAIN_S2:
- s2_cfg = &smmu_domain->s2_cfg;
- break;
- default:
- break;
- }
- }
-
- if (val & STRTAB_STE_0_V) {
- switch (FIELD_GET(STRTAB_STE_0_CFG, val)) {
- case STRTAB_STE_0_CFG_BYPASS:
- break;
- case STRTAB_STE_0_CFG_S1_TRANS:
- case STRTAB_STE_0_CFG_S2_TRANS:
- ste_live = true;
- break;
- case STRTAB_STE_0_CFG_ABORT:
- BUG_ON(!disable_bypass);
- break;
- default:
- BUG(); /* STE corruption */
- }
- }
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
+}
- /* Nuke the existing STE_0 value, as we're going to rewrite it */
- val = STRTAB_STE_0_V;
+static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target)
+{
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));
+ target->data[1] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
+}
- /* Bypass/fault */
- if (!smmu_domain || !(cd_table || s2_cfg)) {
- if (!smmu_domain && disable_bypass)
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
- else
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
+static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master)
+{
+ struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
+ struct arm_smmu_device *smmu = master->smmu;
- dst->data[0] = cpu_to_le64(val);
- dst->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
- STRTAB_STE_1_SHCFG_INCOMING));
- dst->data[2] = 0; /* Nuke the VMID */
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
+ FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) |
+ (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+ FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax));
+
+ target->data[1] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
+ FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+ FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
+ FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
+ ((smmu->features & ARM_SMMU_FEAT_STALLS &&
+ !master->stall_enabled) ?
+ STRTAB_STE_1_S1STALLD :
+ 0) |
+ FIELD_PREP(STRTAB_STE_1_EATS,
+ master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+
+ if (smmu->features & ARM_SMMU_FEAT_E2H) {
/*
- * The SMMU can perform negative caching, so we must sync
- * the STE regardless of whether the old value was live.
+ * To support BTM the streamworld needs to match the
+ * configuration of the CPU so that the ASID broadcasts are
+ * properly matched. This means either S/NS-EL2-E2H (hypervisor)
+ * or NS-EL1 (guest). Since an SVA domain can be installed in a
+ * PASID this should always use a BTM compatible configuration
+ * if the HW supports it.
*/
- if (smmu)
- arm_smmu_sync_ste_for_sid(smmu, sid);
- return;
- }
-
- if (cd_table) {
- u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
- STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
-
- BUG_ON(ste_live);
- dst->data[1] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
- FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
- FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
- FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
- FIELD_PREP(STRTAB_STE_1_STRW, strw));
-
- if (smmu->features & ARM_SMMU_FEAT_STALLS &&
- !master->stall_enabled)
- dst->data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
+ target->data[1] |= cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_EL2));
+ } else {
+ target->data[1] |= cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_STRW, STRTAB_STE_1_STRW_NSEL1));
- val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
- FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
- FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax) |
- FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt);
+ /*
+ * VMID 0 is reserved for stage-2 bypass EL1 STEs, see
+ * arm_smmu_domain_alloc_id()
+ */
+ target->data[2] =
+ cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
}
+}
- if (s2_cfg) {
- BUG_ON(ste_live);
- dst->data[2] = cpu_to_le64(
- FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
- FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
+static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
+ struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
+{
+ struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
+ const struct io_pgtable_cfg *pgtbl_cfg =
+ &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
+ typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr =
+ &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+ u64 vtcr_val;
+
+ memset(target, 0, sizeof(*target));
+ target->data[0] = cpu_to_le64(
+ STRTAB_STE_0_V |
+ FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS));
+
+ target->data[1] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_1_EATS,
+ master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0) |
+ FIELD_PREP(STRTAB_STE_1_SHCFG,
+ STRTAB_STE_1_SHCFG_INCOMING));
+
+ vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
+ FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
+ target->data[2] = cpu_to_le64(
+ FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
+ FIELD_PREP(STRTAB_STE_2_VTCR, vtcr_val) |
+ STRTAB_STE_2_S2AA64 |
#ifdef __BIG_ENDIAN
- STRTAB_STE_2_S2ENDI |
+ STRTAB_STE_2_S2ENDI |
#endif
- STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
- STRTAB_STE_2_S2R);
+ STRTAB_STE_2_S2PTW |
+ STRTAB_STE_2_S2R);
- dst->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
-
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
- }
-
- if (master->ats_enabled)
- dst->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
- STRTAB_STE_1_EATS_TRANS));
-
- arm_smmu_sync_ste_for_sid(smmu, sid);
- /* See comment in arm_smmu_write_ctx_desc() */
- WRITE_ONCE(dst->data[0], cpu_to_le64(val));
- arm_smmu_sync_ste_for_sid(smmu, sid);
-
- /* It's likely that we'll want to use the new STE soon */
- if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
- arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+ target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
+ STRTAB_STE_3_S2TTB_MASK);
}
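The VTCR packing above is plain field composition. A standalone equivalent of the FIELD_PREP() idiom for one field, assuming a contiguous mask (the TOY_* masks are illustrative, not the real VTCR layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-in for FIELD_PREP(): shift the value up to the
     * mask's lowest set bit, then clamp to the mask. */
    #define TOY_FIELD_PREP(mask, val) \
            (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

    #define TOY_VTCR_TSZ    0x3fULL         /* bits 5:0, illustrative */
    #define TOY_VTCR_SL0    0xc0ULL         /* bits 7:6, illustrative */

    int main(void)
    {
            uint64_t vtcr = TOY_FIELD_PREP(TOY_VTCR_TSZ, 16) |
                            TOY_FIELD_PREP(TOY_VTCR_SL0, 1);

            printf("vtcr=0x%llx\n", (unsigned long long)vtcr); /* 0x50 */
            return 0;
    }
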
-static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
- unsigned int nent, bool force)
+/*
+ * This can safely directly manipulate the STE memory without a sync sequence
+ * because the STE table has not been installed in the SMMU yet.
+ */
+static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
+ unsigned int nent)
{
unsigned int i;
- u64 val = STRTAB_STE_0_V;
-
- if (disable_bypass && !force)
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
- else
- val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
- strtab->data[0] = cpu_to_le64(val);
- strtab->data[1] = cpu_to_le64(FIELD_PREP(
- STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
- strtab->data[2] = 0;
+ if (disable_bypass)
+ arm_smmu_make_abort_ste(strtab);
+ else
+ arm_smmu_make_bypass_ste(strtab);
strtab++;
}
}
@@ -1430,7 +1597,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
return -ENOMEM;
}
- arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
+ arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
arm_smmu_write_strtab_l1_desc(strtab, desc);
return 0;
}
@@ -1460,27 +1627,19 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
/* IRQ and event handlers */
static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
{
- int ret;
- u32 reason;
+ int ret = 0;
u32 perm = 0;
struct arm_smmu_master *master;
bool ssid_valid = evt[0] & EVTQ_0_SSV;
u32 sid = FIELD_GET(EVTQ_0_SID, evt[0]);
- struct iommu_fault_event fault_evt = { };
+ struct iopf_fault fault_evt = { };
struct iommu_fault *flt = &fault_evt.fault;
switch (FIELD_GET(EVTQ_0_ID, evt[0])) {
case EVT_ID_TRANSLATION_FAULT:
- reason = IOMMU_FAULT_REASON_PTE_FETCH;
- break;
case EVT_ID_ADDR_SIZE_FAULT:
- reason = IOMMU_FAULT_REASON_OOR_ADDRESS;
- break;
case EVT_ID_ACCESS_FAULT:
- reason = IOMMU_FAULT_REASON_ACCESS;
- break;
case EVT_ID_PERMISSION_FAULT:
- reason = IOMMU_FAULT_REASON_PERMISSION;
break;
default:
return -EOPNOTSUPP;
@@ -1490,6 +1649,9 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
if (evt[1] & EVTQ_1_S2)
return -EFAULT;
+ if (!(evt[1] & EVTQ_1_STALL))
+ return -EOPNOTSUPP;
+
if (evt[1] & EVTQ_1_RnW)
perm |= IOMMU_FAULT_PERM_READ;
else
@@ -1501,32 +1663,17 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
if (evt[1] & EVTQ_1_PnU)
perm |= IOMMU_FAULT_PERM_PRIV;
- if (evt[1] & EVTQ_1_STALL) {
- flt->type = IOMMU_FAULT_PAGE_REQ;
- flt->prm = (struct iommu_fault_page_request) {
- .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
- .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
- .perm = perm,
- .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
- };
-
- if (ssid_valid) {
- flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
- flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
- }
- } else {
- flt->type = IOMMU_FAULT_DMA_UNRECOV;
- flt->event = (struct iommu_fault_unrecoverable) {
- .reason = reason,
- .flags = IOMMU_FAULT_UNRECOV_ADDR_VALID,
- .perm = perm,
- .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
- };
+ flt->type = IOMMU_FAULT_PAGE_REQ;
+ flt->prm = (struct iommu_fault_page_request) {
+ .flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
+ .grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
+ .perm = perm,
+ .addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+ };
- if (ssid_valid) {
- flt->event.flags |= IOMMU_FAULT_UNRECOV_PASID_VALID;
- flt->event.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
- }
+ if (ssid_valid) {
+ flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+ flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
}
mutex_lock(&smmu->streams_mutex);
@@ -1536,17 +1683,7 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
goto out_unlock;
}
- ret = iommu_report_device_fault(master->dev, &fault_evt);
- if (ret && flt->type == IOMMU_FAULT_PAGE_REQ) {
- /* Nobody cared, abort the access */
- struct iommu_page_response resp = {
- .pasid = flt->prm.pasid,
- .grpid = flt->prm.grpid,
- .code = IOMMU_PAGE_RESP_FAILURE,
- };
- arm_smmu_page_response(master->dev, &fault_evt, &resp);
- }
-
+ iommu_report_device_fault(master->dev, &fault_evt);
out_unlock:
mutex_unlock(&smmu->streams_mutex);
return ret;
@@ -2025,15 +2162,15 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
- struct arm_smmu_domain *smmu_domain;
if (type == IOMMU_DOMAIN_SVA)
return arm_smmu_sva_domain_alloc();
+ return ERR_PTR(-EOPNOTSUPP);
+}
- if (type != IOMMU_DOMAIN_UNMANAGED &&
- type != IOMMU_DOMAIN_DMA &&
- type != IOMMU_DOMAIN_IDENTITY)
- return NULL;
+static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+{
+ struct arm_smmu_domain *smmu_domain;
/*
* Allocate the domain and initialise some of its data structures.
@@ -2042,13 +2179,23 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
*/
smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
if (!smmu_domain)
- return NULL;
+ return ERR_PTR(-ENOMEM);
mutex_init(&smmu_domain->init_mutex);
INIT_LIST_HEAD(&smmu_domain->devices);
spin_lock_init(&smmu_domain->devices_lock);
INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
+ if (dev) {
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+ int ret;
+
+ ret = arm_smmu_domain_finalise(smmu_domain, master->smmu);
+ if (ret) {
+ kfree(smmu_domain);
+ return ERR_PTR(ret);
+ }
+ }
return &smmu_domain->domain;
}
@@ -2074,12 +2221,12 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
kfree(smmu_domain);
}
-static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
+static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
+ struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int ret;
u32 asid;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
@@ -2111,13 +2258,12 @@ out_unlock:
return ret;
}
-static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
+static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
+ struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg)
{
int vmid;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
- typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
/* Reserve VMID 0 for stage-2 bypass STEs */
vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
@@ -2125,35 +2271,21 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
if (vmid < 0)
return vmid;
- vtcr = &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
cfg->vmid = (u16)vmid;
- cfg->vttbr = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
- cfg->vtcr = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2IR0, vtcr->irgn) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2OR0, vtcr->orgn) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2SH0, vtcr->sh) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2TG, vtcr->tg) |
- FIELD_PREP(STRTAB_STE_2_VTCR_S2PS, vtcr->ps);
return 0;
}
-static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_device *smmu)
{
int ret;
unsigned long ias, oas;
enum io_pgtable_fmt fmt;
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable_ops *pgtbl_ops;
- int (*finalise_stage_fn)(struct arm_smmu_domain *,
- struct io_pgtable_cfg *);
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- struct arm_smmu_device *smmu = smmu_domain->smmu;
-
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
- smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
- return 0;
- }
+ int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
+ struct arm_smmu_domain *smmu_domain,
+ struct io_pgtable_cfg *pgtbl_cfg);
/* Restrict the stage to what we can actually support */
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
@@ -2192,17 +2324,18 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
if (!pgtbl_ops)
return -ENOMEM;
- domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
- domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
- domain->geometry.force_aperture = true;
+ smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
+ smmu_domain->domain.geometry.force_aperture = true;
- ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+ ret = finalise_stage_fn(smmu, smmu_domain, &pgtbl_cfg);
if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
return ret;
}
smmu_domain->pgtbl_ops = pgtbl_ops;
+ smmu_domain->smmu = smmu;
return 0;
}
@@ -2225,7 +2358,8 @@ arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
}
}
-static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
+static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master,
+ const struct arm_smmu_ste *target)
{
int i, j;
struct arm_smmu_device *smmu = master->smmu;
@@ -2242,7 +2376,7 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
if (j < i)
continue;
- arm_smmu_write_strtab_ent(master, sid, step);
+ arm_smmu_write_ste(master, sid, step, target);
}
}
@@ -2261,12 +2395,12 @@ static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
}
-static void arm_smmu_enable_ats(struct arm_smmu_master *master)
+static void arm_smmu_enable_ats(struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
{
size_t stu;
struct pci_dev *pdev;
struct arm_smmu_device *smmu = master->smmu;
- struct arm_smmu_domain *smmu_domain = master->domain;
/* Don't enable ATS at the endpoint if it's not enabled in the STE */
if (!master->ats_enabled)
@@ -2282,10 +2416,9 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
}
-static void arm_smmu_disable_ats(struct arm_smmu_master *master)
+static void arm_smmu_disable_ats(struct arm_smmu_master *master,
+ struct arm_smmu_domain *smmu_domain)
{
- struct arm_smmu_domain *smmu_domain = master->domain;
-
if (!master->ats_enabled)
return;
@@ -2348,35 +2481,28 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
static void arm_smmu_detach_dev(struct arm_smmu_master *master)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(master->dev);
+ struct arm_smmu_domain *smmu_domain;
unsigned long flags;
- struct arm_smmu_domain *smmu_domain = master->domain;
- if (!smmu_domain)
+ if (!domain || !(domain->type & __IOMMU_DOMAIN_PAGING))
return;
- arm_smmu_disable_ats(master);
+ smmu_domain = to_smmu_domain(domain);
+ arm_smmu_disable_ats(master, smmu_domain);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_del(&master->domain_head);
+ list_del_init(&master->domain_head);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- master->domain = NULL;
master->ats_enabled = false;
- arm_smmu_install_ste_for_dev(master);
- /*
- * Clearing the CD entry isn't strictly required to detach the domain
- * since the table is uninstalled anyway, but it helps avoid confusion
- * in the call to arm_smmu_write_ctx_desc on the next attach (which
- * expects the entry to be empty).
- */
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && master->cd_table.cdtab)
- arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
unsigned long flags;
+ struct arm_smmu_ste target;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2398,15 +2524,10 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -EBUSY;
}
- arm_smmu_detach_dev(master);
-
mutex_lock(&smmu_domain->init_mutex);
if (!smmu_domain->smmu) {
- smmu_domain->smmu = smmu;
- ret = arm_smmu_domain_finalise(domain);
- if (ret)
- smmu_domain->smmu = NULL;
+ ret = arm_smmu_domain_finalise(smmu_domain, smmu);
} else if (smmu_domain->smmu != smmu)
ret = -EINVAL;
@@ -2414,57 +2535,140 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (ret)
return ret;
- master->domain = smmu_domain;
-
/*
- * The SMMU does not support enabling ATS with bypass. When the STE is
- * in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests and
- * Translated transactions are denied as though ATS is disabled for the
- * stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
- * F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
+ * Prevent arm_smmu_share_asid() from trying to change the ASID
+ * of either the old or new domain while we are working on it.
+ * This allows the STE and the smmu_domain->devices list to
+ * be inconsistent during this routine.
*/
- if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
- master->ats_enabled = arm_smmu_ats_supported(master);
+ mutex_lock(&arm_smmu_asid_lock);
+
+ arm_smmu_detach_dev(master);
+
+ master->ats_enabled = arm_smmu_ats_supported(master);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_add(&master->domain_head, &smmu_domain->devices);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
if (!master->cd_table.cdtab) {
ret = arm_smmu_alloc_cd_tables(master);
- if (ret) {
- master->domain = NULL;
+ if (ret)
+ goto out_list_del;
+ } else {
+ /*
+ * arm_smmu_write_ctx_desc() relies on the entry being
+ * invalid to work, clear any existing entry.
+ */
+ ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
+ NULL);
+ if (ret)
goto out_list_del;
- }
}
- /*
- * Prevent SVA from concurrently modifying the CD or writing to
- * the CD entry
- */
- mutex_lock(&arm_smmu_asid_lock);
ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
- mutex_unlock(&arm_smmu_asid_lock);
- if (ret) {
- master->domain = NULL;
+ if (ret)
goto out_list_del;
- }
- }
- arm_smmu_install_ste_for_dev(master);
+ arm_smmu_make_cdtable_ste(&target, master);
+ arm_smmu_install_ste_for_dev(master, &target);
+ break;
+ case ARM_SMMU_DOMAIN_S2:
+ arm_smmu_make_s2_domain_ste(&target, master, smmu_domain);
+ arm_smmu_install_ste_for_dev(master, &target);
+ if (master->cd_table.cdtab)
+ arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
+ NULL);
+ break;
+ }
- arm_smmu_enable_ats(master);
- return 0;
+ arm_smmu_enable_ats(master, smmu_domain);
+ goto out_unlock;
out_list_del:
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_del(&master->domain_head);
+ list_del_init(&master->domain_head);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+out_unlock:
+ mutex_unlock(&arm_smmu_asid_lock);
return ret;
}
+static int arm_smmu_attach_dev_ste(struct device *dev,
+ struct arm_smmu_ste *ste)
+{
+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+
+ if (arm_smmu_master_sva_enabled(master))
+ return -EBUSY;
+
+ /*
+ * Do not allow any ASID to be changed while we are working on the STE,
+ * otherwise we could miss invalidations.
+ */
+ mutex_lock(&arm_smmu_asid_lock);
+
+ /*
+ * The SMMU does not support enabling ATS with bypass/abort. When the
+ * STE is in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests
+ * and Translated transactions are denied as though ATS is disabled for
+ * the stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
+ * F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
+ */
+ arm_smmu_detach_dev(master);
+
+ arm_smmu_install_ste_for_dev(master, ste);
+ mutex_unlock(&arm_smmu_asid_lock);
+
+ /*
+ * This has to be done after removing the master from the
+ * arm_smmu_domain->devices to avoid races updating the same context
+ * descriptor from arm_smmu_share_asid().
+ */
+ if (master->cd_table.cdtab)
+ arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
+ return 0;
+}
+
+static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_make_bypass_ste(&ste);
+ return arm_smmu_attach_dev_ste(dev, &ste);
+}
+
+static const struct iommu_domain_ops arm_smmu_identity_ops = {
+ .attach_dev = arm_smmu_attach_dev_identity,
+};
+
+static struct iommu_domain arm_smmu_identity_domain = {
+ .type = IOMMU_DOMAIN_IDENTITY,
+ .ops = &arm_smmu_identity_ops,
+};
+
+static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct arm_smmu_ste ste;
+
+ arm_smmu_make_abort_ste(&ste);
+ return arm_smmu_attach_dev_ste(dev, &ste);
+}
+
+static const struct iommu_domain_ops arm_smmu_blocked_ops = {
+ .attach_dev = arm_smmu_attach_dev_blocked,
+};
+
+static struct iommu_domain arm_smmu_blocked_domain = {
+ .type = IOMMU_DOMAIN_BLOCKED,
+ .ops = &arm_smmu_blocked_ops,
+};
+
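The two static domains above follow a common pattern: one file-scope struct iommu_domain per global behaviour, with an ops table that implements only .attach_dev, published to the core through the identity_domain/blocked_domain hooks added further down. A skeleton of the same pattern for a hypothetical driver (my_* names are placeholders; this sketch only compiles in a kernel context):

    #include <linux/iommu.h>

    /* Hypothetical single-purpose domain: no allocation, no free path,
     * shared by every device that attaches to it. */
    static int my_attach_dev_identity(struct iommu_domain *domain,
                                      struct device *dev)
    {
            /* Program the HW so DMA from dev passes through untranslated. */
            return 0;
    }

    static const struct iommu_domain_ops my_identity_ops = {
            .attach_dev = my_attach_dev_identity,
    };

    static struct iommu_domain my_identity_domain = {
            .type = IOMMU_DOMAIN_IDENTITY,
            .ops = &my_identity_ops,
    };

    /* The core then selects the singleton via the ops hook: */
    static struct iommu_ops my_iommu_ops = {
            .identity_domain = &my_identity_domain,
            /* ... the usual probe/attach hooks ... */
    };
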
static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -2658,6 +2862,7 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
master->dev = dev;
master->smmu = smmu;
INIT_LIST_HEAD(&master->bonds);
+ INIT_LIST_HEAD(&master->domain_head);
dev_iommu_priv_set(dev, master);
ret = arm_smmu_insert_master(smmu, master);
@@ -2699,7 +2904,13 @@ static void arm_smmu_release_device(struct device *dev)
if (WARN_ON(arm_smmu_master_sva_enabled(master)))
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
- arm_smmu_detach_dev(master);
+
+ /* Put the STE back to what arm_smmu_init_strtab() sets */
+ if (disable_bypass && !dev->iommu->require_direct)
+ arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
+ else
+ arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev);
+
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
if (master->cd_table.cdtab)
@@ -2739,7 +2950,8 @@ static int arm_smmu_enable_nesting(struct iommu_domain *domain)
return ret;
}
-static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int arm_smmu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
return iommu_fwspec_add_ids(dev, args->args, 1);
}
@@ -2844,8 +3056,11 @@ static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
}
static struct iommu_ops arm_smmu_ops = {
+ .identity_domain = &arm_smmu_identity_domain,
+ .blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
+ .domain_alloc_paging = arm_smmu_domain_alloc_paging,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
@@ -3049,7 +3264,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
cfg->strtab_base_cfg = reg;
- arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
+ arm_smmu_init_initial_stes(strtab, cfg->num_l1_ents);
return 0;
}
@@ -3125,7 +3340,8 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
static void arm_smmu_free_msis(void *data)
{
struct device *dev = data;
- platform_msi_domain_free_irqs(dev);
+
+ platform_device_msi_free_irqs_all(dev);
}
static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
@@ -3166,7 +3382,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
}
/* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
- ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
if (ret) {
dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n");
return;
@@ -3760,7 +3976,6 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
list_for_each_entry(e, &rmr_list, list) {
- struct arm_smmu_ste *step;
struct iommu_iort_rmr_data *rmr;
int ret, i;
@@ -3773,8 +3988,12 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
continue;
}
- step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
- arm_smmu_init_bypass_stes(step, 1, true);
+ /*
+ * STE table is not programmed to HW, see
+ * arm_smmu_init_initial_stes()
+ */
+ arm_smmu_make_bypass_ste(
+ arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));
}
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 65fb388d5173..23baf117e7e4 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -609,8 +609,6 @@ struct arm_smmu_ctx_desc_cfg {
struct arm_smmu_s2_cfg {
u16 vmid;
- u64 vttbr;
- u64 vtcr;
};
struct arm_smmu_strtab_cfg {
@@ -697,7 +695,6 @@ struct arm_smmu_stream {
struct arm_smmu_master {
struct arm_smmu_device *smmu;
struct device *dev;
- struct arm_smmu_domain *domain;
struct list_head domain_head;
struct arm_smmu_stream *streams;
/* Locked by the iommu core using the group mutex */
@@ -715,7 +712,6 @@ struct arm_smmu_master {
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
- ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 8b04ece00420..5c7cfc51b57c 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -260,6 +260,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sm6375-mdss" },
{ .compatible = "qcom,sm8150-mdss" },
{ .compatible = "qcom,sm8250-mdss" },
+ { .compatible = "qcom,x1e80100-mdss" },
{ }
};
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 68b6bc5e7c71..c572d877b0e1 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -859,10 +859,14 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
arm_smmu_rpm_put(smmu);
}
-static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
+ if (type != IOMMU_DOMAIN_UNMANAGED) {
+ if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
+ return NULL;
+ }
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -875,15 +879,6 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->cb_lock);
- if (dev) {
- struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
-
- if (arm_smmu_init_domain_context(smmu_domain, cfg->smmu, dev)) {
- kfree(smmu_domain);
- return NULL;
- }
- }
-
return &smmu_domain->domain;
}
@@ -1551,7 +1546,8 @@ static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
return ret;
}
-static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int arm_smmu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
u32 mask, fwid = 0;
@@ -1600,7 +1596,7 @@ static struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
- .domain_alloc_paging = arm_smmu_domain_alloc_paging,
+ .domain_alloc = arm_smmu_domain_alloc,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 17a1c163fef6..e079bb7a993e 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -546,7 +546,8 @@ static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
return &qcom_iommu->iommu;
}
-static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int qcom_iommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct qcom_iommu_dev *qcom_iommu;
struct platform_device *iommu_pdev;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 50ccc4f1ef81..b58f5a3311c3 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -859,6 +859,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
iommu_deferred_attach(dev, domain))
return DMA_MAPPING_ERROR;
+ /* If anyone ever wants this we'd need support in the IOVA allocator */
+ if (dev_WARN_ONCE(dev, dma_get_min_align_mask(dev) > iova_mask(iovad),
+ "Unsupported alignment constraint\n"))
+ return DMA_MAPPING_ERROR;
+
size = iova_align(iovad, size + iova_off);
iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 2c6e9094f1e9..d98c9161948a 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1431,7 +1431,7 @@ static void exynos_iommu_release_device(struct device *dev)
}
static int exynos_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *spec)
+ const struct of_phandle_args *spec)
{
struct platform_device *sysmmu = of_find_device_by_node(spec->np);
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 012cd2541a68..6cf9f48e7d8c 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -51,6 +51,7 @@ config INTEL_IOMMU_SVM
depends on X86_64
select MMU_NOTIFIER
select IOMMU_SVA
+ select IOMMU_IOPF
help
Shared Virtual Memory (SVM) provides a facility for devices
to access DMA resources through process address space by
@@ -64,17 +65,6 @@ config INTEL_IOMMU_DEFAULT_ON
one is found. If this option is not selected, DMAR support can
be enabled by passing intel_iommu=on to the kernel.
-config INTEL_IOMMU_BROKEN_GFX_WA
- bool "Workaround broken graphics drivers (going away soon)"
- depends on BROKEN && X86
- help
- Current Graphics drivers tend to use physical address
- for DMA and avoid using DMA APIs. Setting this config
- option permits the IOMMU driver to set a unity map for
- all the OS-visible memory. Hence the driver can continue
- to use physical addresses for DMA, at least until this
- option is removed in the 2.6.32 kernel.
-
config INTEL_IOMMU_FLOPPY_WA
def_bool y
depends on X86
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index 5dabf081a779..5402b699a122 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -5,5 +5,7 @@ obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
+ifdef CONFIG_INTEL_IOMMU
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
+endif
obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 23cb80d62a9a..36d7427b1202 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1095,7 +1095,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->agaw = agaw;
iommu->msagaw = msagaw;
iommu->segment = drhd->segment;
-
+ iommu->device_rbtree = RB_ROOT;
+ spin_lock_init(&iommu->device_rbtree_lock);
+ mutex_init(&iommu->iopf_lock);
iommu->node = NUMA_NO_NODE;
ver = readl(iommu->reg + DMAR_VER_REG);
@@ -1271,6 +1273,8 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
{
u32 fault;
int head, tail;
+ struct device *dev;
+ u64 iqe_err, ite_sid;
struct q_inval *qi = iommu->qi;
int shift = qi_shift(iommu);
@@ -1315,6 +1319,13 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
tail = readl(iommu->reg + DMAR_IQT_REG);
tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
+ /*
+ * SID field is valid only when the ITE field is Set in FSTS_REG
+ * see Intel VT-d spec r4.1, section 11.4.9.9
+ */
+ iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
+ ite_sid = DMAR_IQER_REG_ITESID(iqe_err);
+
writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
pr_info("Invalidation Time-out Error (ITE) cleared\n");
@@ -1324,6 +1335,19 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
head = (head - 2 + QI_LENGTH) % QI_LENGTH;
} while (head != tail);
+ /*
+ * If the device was released or isn't present, there is no need to
+ * retry the ATS invalidate request anymore.
+ *
+ * An ite_sid of 0 means an old VT-d device that does not report one;
+ * see Intel VT-d spec r4.1, section 11.4.9.9
+ */
+ if (ite_sid) {
+ dev = device_rbtree_find(iommu, ite_sid);
+ if (!dev || !dev_is_pci(dev) ||
+ !pci_device_is_present(to_pci_dev(dev)))
+ return -ETIMEDOUT;
+ }
if (qi->desc_status[wait_index] == QI_ABORT)
return -EAGAIN;
}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 6fb5f6fceea1..50eb9aed47cc 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -27,7 +27,6 @@
#include "iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
-#include "../iommu-sva.h"
#include "pasid.h"
#include "cap_audit.h"
#include "perfmon.h"
@@ -97,6 +96,81 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
+static int device_rid_cmp_key(const void *key, const struct rb_node *node)
+{
+ struct device_domain_info *info =
+ rb_entry(node, struct device_domain_info, node);
+ const u16 *rid_lhs = key;
+
+ if (*rid_lhs < PCI_DEVID(info->bus, info->devfn))
+ return -1;
+
+ if (*rid_lhs > PCI_DEVID(info->bus, info->devfn))
+ return 1;
+
+ return 0;
+}
+
+static int device_rid_cmp(struct rb_node *lhs, const struct rb_node *rhs)
+{
+ struct device_domain_info *info =
+ rb_entry(lhs, struct device_domain_info, node);
+ u16 key = PCI_DEVID(info->bus, info->devfn);
+
+ return device_rid_cmp_key(&key, rhs);
+}
+
+/*
+ * Looks up an IOMMU-probed device using its source ID.
+ *
+ * Returns the pointer to the device if there is a match. Otherwise,
+ * returns NULL.
+ *
+ * Note that this helper doesn't guarantee that the device won't be
+ * released by the iommu subsystem after being returned. The caller
+ * should use its own synchronization mechanism to avoid the device
+ * being released during its use if that is a possibility.
+ */
+struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid)
+{
+ struct device_domain_info *info = NULL;
+ struct rb_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
+ node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key);
+ if (node)
+ info = rb_entry(node, struct device_domain_info, node);
+ spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
+
+ return info ? info->dev : NULL;
+}
+
+static int device_rbtree_insert(struct intel_iommu *iommu,
+ struct device_domain_info *info)
+{
+ struct rb_node *curr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
+ curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp);
+ spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
+ if (WARN_ON(curr))
+ return -EEXIST;
+
+ return 0;
+}
+
+static void device_rbtree_remove(struct device_domain_info *info)
+{
+ struct intel_iommu *iommu = info->iommu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
+ rb_erase(&info->node, &iommu->device_rbtree);
+ spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
+}
+
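A sketch of how a caller can use the new lookup, modelled on the qi_check_fault() hunk earlier in this patch (my_ats_retry_worthwhile() is an illustrative name, not part of the patch; kernel-context only):

    /* Translate a faulting source ID back to its struct device and
     * check that the PCI device is still present before retrying an
     * ATS invalidation. */
    static bool my_ats_retry_worthwhile(struct intel_iommu *iommu, u16 sid)
    {
            struct device *dev = device_rbtree_find(iommu, sid);

            /* NULL: the device was never probed or has been released. */
            if (!dev || !dev_is_pci(dev))
                    return false;

            return pci_device_is_present(to_pci_dev(dev));
    }
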
/*
* This domain is a static identity mapping domain.
* 1. This domain creates a static 1:1 mapping to all usable memory.
@@ -396,8 +470,6 @@ static int domain_update_device_node(struct dmar_domain *domain)
return nid;
}
-static void domain_update_iotlb(struct dmar_domain *domain);
-
/* Return the super pagesize bitmap if supported. */
static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
{
@@ -1218,7 +1290,7 @@ domain_lookup_dev_info(struct dmar_domain *domain,
return NULL;
}
-static void domain_update_iotlb(struct dmar_domain *domain)
+void domain_update_iotlb(struct dmar_domain *domain)
{
struct dev_pasid_info *dev_pasid;
struct device_domain_info *info;
@@ -1368,6 +1440,46 @@ static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
spin_unlock_irqrestore(&domain->lock, flags);
}
+static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+ unsigned long pfn, unsigned int pages,
+ int ih)
+{
+ unsigned int aligned_pages = __roundup_pow_of_two(pages);
+ unsigned long bitmask = aligned_pages - 1;
+ unsigned int mask = ilog2(aligned_pages);
+ u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
+
+ /*
+ * PSI masks the low order bits of the base address. If the
+ * address isn't aligned to the mask, then compute a mask value
+ * needed to ensure the target range is flushed.
+ */
+ if (unlikely(bitmask & pfn)) {
+ unsigned long end_pfn = pfn + pages - 1, shared_bits;
+
+ /*
+ * Since end_pfn <= pfn + bitmask, the only way bits
+ * higher than bitmask can differ in pfn and end_pfn is
+ * by carrying. This means after masking out bitmask,
+ * high bits starting with the first set bit in
+ * shared_bits are all equal in both pfn and end_pfn.
+ */
+ shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
+ mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
+ }
+
+ /*
+ * Fall back to a domain-selective flush if there is no PSI
+ * support or the size is too big.
+ */
+ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
+ DMA_TLB_PSI_FLUSH);
+}
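The mask arithmetic above is easier to see on a concrete range. Below is a self-contained userspace sketch (assumed 64-bit longs; __builtin_ctzl stands in for the kernel's ilog2()/__ffs()) that reproduces the computation for the misaligned range [3, 4]:

#include <stdio.h>

static unsigned long round_up_pow2(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* Model of __iommu_flush_iotlb_psi()'s address-mask computation. */
static unsigned int psi_mask(unsigned long pfn, unsigned int pages)
{
	unsigned long aligned_pages = round_up_pow2(pages);
	unsigned long bitmask = aligned_pages - 1;
	unsigned int mask = __builtin_ctzl(aligned_pages);

	if (bitmask & pfn) {
		unsigned long end_pfn = pfn + pages - 1;
		unsigned long shared_bits = ~(pfn ^ end_pfn) & ~bitmask;

		mask = shared_bits ? __builtin_ctzl(shared_bits) : 64;
	}
	return mask;
}

int main(void)
{
	/*
	 * pfn 3, 2 pages: end_pfn = 4, pfn ^ end_pfn = 7, so the lowest
	 * shared bit is bit 3. A 2^3 = 8 page flush based at pfn 0
	 * covers [3, 4]. Prints "mask = 3".
	 */
	printf("mask = %u\n", psi_mask(3, 2));
	return 0;
}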
+
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
struct dmar_domain *domain,
unsigned long pfn, unsigned int pages,
@@ -1384,42 +1496,10 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
if (ih)
ih = 1 << 6;
- if (domain->use_first_level) {
+ if (domain->use_first_level)
domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
- } else {
- unsigned long bitmask = aligned_pages - 1;
-
- /*
- * PSI masks the low order bits of the base address. If the
- * address isn't aligned to the mask, then compute a mask value
- * needed to ensure the target range is flushed.
- */
- if (unlikely(bitmask & pfn)) {
- unsigned long end_pfn = pfn + pages - 1, shared_bits;
-
- /*
- * Since end_pfn <= pfn + bitmask, the only way bits
- * higher than bitmask can differ in pfn and end_pfn is
- * by carrying. This means after masking out bitmask,
- * high bits starting with the first set bit in
- * shared_bits are all equal in both pfn and end_pfn.
- */
- shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
- mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
- }
-
- /*
- * Fallback to domain selective flush if no PSI support or
- * the size is too big.
- */
- if (!cap_pgsel_inv(iommu->cap) ||
- mask > cap_max_amask_val(iommu->cap))
- iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH);
- else
- iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
- DMA_TLB_PSI_FLUSH);
- }
+ else
+ __iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
/*
* In caching mode, changes of pages from non-present to present require
@@ -1443,6 +1523,46 @@ static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *
iommu_flush_write_buffer(iommu);
}
+/*
+ * Flush the relevant caches in nested translation if the domain
+ * also serves as a parent
+ */
+static void parent_domain_flush(struct dmar_domain *domain,
+ unsigned long pfn,
+ unsigned long pages, int ih)
+{
+ struct dmar_domain *s1_domain;
+
+ spin_lock(&domain->s1_lock);
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ struct device_domain_info *device_info;
+ struct iommu_domain_info *info;
+ unsigned long flags;
+ unsigned long i;
+
+ xa_for_each(&s1_domain->iommu_array, i, info)
+ __iommu_flush_iotlb_psi(info->iommu, info->did,
+ pfn, pages, ih);
+
+ if (!s1_domain->has_iotlb_device)
+ continue;
+
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ list_for_each_entry(device_info, &s1_domain->devices, link)
+ /*
+ * The address translation cache on the device side caches
+ * the result of nested translation. There is no easy way to
+ * identify the exact set of nested translations affected by
+ * a change in S2, so just flush the entire device cache.
+ */
+ __iommu_flush_dev_iotlb(device_info, 0,
+ MAX_AGAW_PFN_WIDTH);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ }
+ spin_unlock(&domain->s1_lock);
+}
+
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
@@ -1462,6 +1582,9 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
if (!cap_caching_mode(iommu->cap))
iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
}
+
+ if (dmar_domain->nested_parent)
+ parent_domain_flush(dmar_domain, 0, -1, 0);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1727,34 +1850,17 @@ static void domain_exit(struct dmar_domain *domain)
kfree(domain);
}
-/*
- * Get the PASID directory size for scalable mode context entry.
- * Value of X in the PDTS field of a scalable mode context entry
- * indicates PASID directory with 2^(X + 7) entries.
- */
-static unsigned long context_get_sm_pds(struct pasid_table *table)
-{
- unsigned long pds, max_pde;
-
- max_pde = table->max_pasid >> PASID_PDE_SHIFT;
- pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
- if (pds < 7)
- return 0;
-
- return pds - 7;
-}
-
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
- struct pasid_table *table,
u8 bus, u8 devfn)
{
struct device_domain_info *info =
domain_lookup_dev_info(domain, iommu, bus, devfn);
u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL;
+ struct dma_pte *pgd = domain->pgd;
struct context_entry *context;
- int ret;
+ int agaw, ret;
if (hw_pass_through && domain_type_is_si(domain))
translation = CONTEXT_TT_PASS_THROUGH;
@@ -1797,65 +1903,37 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
}
context_clear_entry(context);
+ context_set_domain_id(context, did);
- if (sm_supported(iommu)) {
- unsigned long pds;
-
- /* Setup the PASID DIR pointer: */
- pds = context_get_sm_pds(table);
- context->lo = (u64)virt_to_phys(table->table) |
- context_pdts(pds);
-
- /* Setup the RID_PASID field: */
- context_set_sm_rid2pasid(context, IOMMU_NO_PASID);
-
+ if (translation != CONTEXT_TT_PASS_THROUGH) {
/*
- * Setup the Device-TLB enable bit and Page request
- * Enable bit:
+ * Skip top levels of page tables for an iommu whose agaw is
+ * smaller than the default. Unnecessary for PT mode.
*/
- if (info && info->ats_supported)
- context_set_sm_dte(context);
- if (info && info->pri_supported)
- context_set_sm_pre(context);
- if (info && info->pasid_supported)
- context_set_pasid(context);
- } else {
- struct dma_pte *pgd = domain->pgd;
- int agaw;
-
- context_set_domain_id(context, did);
-
- if (translation != CONTEXT_TT_PASS_THROUGH) {
- /*
- * Skip top levels of page tables for iommu which has
- * less agaw than default. Unnecessary for PT mode.
- */
- for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
- ret = -ENOMEM;
- pgd = phys_to_virt(dma_pte_addr(pgd));
- if (!dma_pte_present(pgd))
- goto out_unlock;
- }
-
- if (info && info->ats_supported)
- translation = CONTEXT_TT_DEV_IOTLB;
- else
- translation = CONTEXT_TT_MULTI_LEVEL;
-
- context_set_address_root(context, virt_to_phys(pgd));
- context_set_address_width(context, agaw);
- } else {
- /*
- * In pass through mode, AW must be programmed to
- * indicate the largest AGAW value supported by
- * hardware. And ASR is ignored by hardware.
- */
- context_set_address_width(context, iommu->msagaw);
+ for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
+ ret = -ENOMEM;
+ pgd = phys_to_virt(dma_pte_addr(pgd));
+ if (!dma_pte_present(pgd))
+ goto out_unlock;
}
- context_set_translation_type(context, translation);
+ if (info && info->ats_supported)
+ translation = CONTEXT_TT_DEV_IOTLB;
+ else
+ translation = CONTEXT_TT_MULTI_LEVEL;
+
+ context_set_address_root(context, virt_to_phys(pgd));
+ context_set_address_width(context, agaw);
+ } else {
+ /*
+ * In pass-through mode, AW must be programmed to indicate
+ * the largest AGAW value supported by hardware; ASR is
+ * ignored by hardware.
+ */
+ context_set_address_width(context, iommu->msagaw);
}
+ context_set_translation_type(context, translation);
context_set_fault_enable(context);
context_set_present(context);
if (!ecap_coherent(iommu->ecap))
@@ -1885,43 +1963,29 @@ out_unlock:
return ret;
}
-struct domain_context_mapping_data {
- struct dmar_domain *domain;
- struct intel_iommu *iommu;
- struct pasid_table *table;
-};
-
static int domain_context_mapping_cb(struct pci_dev *pdev,
u16 alias, void *opaque)
{
- struct domain_context_mapping_data *data = opaque;
+ struct device_domain_info *info = dev_iommu_priv_get(&pdev->dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct dmar_domain *domain = opaque;
- return domain_context_mapping_one(data->domain, data->iommu,
- data->table, PCI_BUS_NUM(alias),
- alias & 0xff);
+ return domain_context_mapping_one(domain, iommu,
+ PCI_BUS_NUM(alias), alias & 0xff);
}
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct domain_context_mapping_data data;
struct intel_iommu *iommu = info->iommu;
u8 bus = info->bus, devfn = info->devfn;
- struct pasid_table *table;
-
- table = intel_pasid_get_table(dev);
if (!dev_is_pci(dev))
- return domain_context_mapping_one(domain, iommu, table,
- bus, devfn);
-
- data.domain = domain;
- data.iommu = iommu;
- data.table = table;
+ return domain_context_mapping_one(domain, iommu, bus, devfn);
return pci_for_each_dma_alias(to_pci_dev(dev),
- &domain_context_mapping_cb, &data);
+ domain_context_mapping_cb, domain);
}
/* Returns a number of VTD pages, but aligned to MM page size */
@@ -1985,6 +2049,9 @@ static void switch_to_super_page(struct dmar_domain *domain,
iommu_flush_iotlb_psi(info->iommu, domain,
start_pfn, lvl_pages,
0, 0);
+ if (domain->nested_parent)
+ parent_domain_flush(domain, start_pfn,
+ lvl_pages, 0);
}
pte++;
@@ -2108,9 +2175,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
struct context_entry *context;
u16 did_old;
- if (!iommu)
- return;
-
spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (!context) {
@@ -2118,14 +2182,7 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
return;
}
- if (sm_supported(iommu)) {
- if (hw_pass_through && domain_type_is_si(info->domain))
- did_old = FLPT_DEFAULT_DID;
- else
- did_old = domain_id_iommu(info->domain, iommu);
- } else {
- did_old = context_domain_id(context);
- }
+ did_old = context_domain_id(context);
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
@@ -2136,9 +2193,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
- if (sm_supported(iommu))
- qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
-
iommu->flush.flush_iotlb(iommu,
did_old,
0,
@@ -2278,28 +2332,19 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
list_add(&info->link, &domain->devices);
spin_unlock_irqrestore(&domain->lock, flags);
- /* PASID table is mandatory for a PCI device in scalable mode. */
- if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
- /* Setup the PASID entry for requests without PASID: */
- if (hw_pass_through && domain_type_is_si(domain))
- ret = intel_pasid_setup_pass_through(iommu,
- dev, IOMMU_NO_PASID);
- else if (domain->use_first_level)
- ret = domain_setup_first_level(iommu, domain, dev,
- IOMMU_NO_PASID);
- else
- ret = intel_pasid_setup_second_level(iommu, domain,
- dev, IOMMU_NO_PASID);
- if (ret) {
- dev_err(dev, "Setup RID2PASID failed\n");
- device_block_translation(dev);
- return ret;
- }
- }
+ if (dev_is_real_dma_subdevice(dev))
+ return 0;
+
+ if (!sm_supported(iommu))
+ ret = domain_context_mapping(domain, dev);
+ else if (hw_pass_through && domain_type_is_si(domain))
+ ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
+ else if (domain->use_first_level)
+ ret = domain_setup_first_level(iommu, domain, dev, IOMMU_NO_PASID);
+ else
+ ret = intel_pasid_setup_second_level(iommu, domain, dev, IOMMU_NO_PASID);
- ret = domain_context_mapping(domain, dev);
if (ret) {
- dev_err(dev, "Domain context map failed\n");
device_block_translation(dev);
return ret;
}
@@ -2660,10 +2705,6 @@ static int __init init_dmars(void)
iommu_set_root_entry(iommu);
}
-#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
- dmar_map_gfx = 0;
-#endif
-
if (!dmar_map_gfx)
iommu_identity_mapping |= IDENTMAP_GFX;
@@ -3747,30 +3788,6 @@ static void domain_context_clear(struct device_domain_info *info)
&domain_context_clear_one_cb, info);
}
-static void dmar_remove_one_dev_info(struct device *dev)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct dmar_domain *domain = info->domain;
- struct intel_iommu *iommu = info->iommu;
- unsigned long flags;
-
- if (!dev_is_real_dma_subdevice(info->dev)) {
- if (dev_is_pci(info->dev) && sm_supported(iommu))
- intel_pasid_tear_down_entry(iommu, info->dev,
- IOMMU_NO_PASID, false);
-
- iommu_disable_pci_caps(info);
- domain_context_clear(info);
- }
-
- spin_lock_irqsave(&domain->lock, flags);
- list_del(&info->link);
- spin_unlock_irqrestore(&domain->lock, flags);
-
- domain_detach_iommu(domain, iommu);
- info->domain = NULL;
-}
-
/*
* Clear the page table pointer in context or pasid table entries so that
* all DMA requests without PASID from the device are blocked. If the page
@@ -3883,6 +3900,7 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
struct intel_iommu *iommu = info->iommu;
+ struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
/* Must be NESTING domain */
@@ -3908,11 +3926,16 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
if (!domain)
return ERR_PTR(-ENOMEM);
- if (nested_parent)
- to_dmar_domain(domain)->nested_parent = true;
+ dmar_domain = to_dmar_domain(domain);
+
+ if (nested_parent) {
+ dmar_domain->nested_parent = true;
+ INIT_LIST_HEAD(&dmar_domain->s1_domains);
+ spin_lock_init(&dmar_domain->s1_lock);
+ }
if (dirty_tracking) {
- if (to_dmar_domain(domain)->use_first_level) {
+ if (dmar_domain->use_first_level) {
iommu_domain_free(domain);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -3924,8 +3947,12 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+ WARN_ON(dmar_domain->nested_parent &&
+ !list_empty(&dmar_domain->s1_domains));
if (domain != &si_domain->domain)
- domain_exit(to_dmar_domain(domain));
+ domain_exit(dmar_domain);
}
int prepare_domain_attach_device(struct iommu_domain *domain,
@@ -3965,6 +3992,10 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
dmar_domain->agaw--;
}
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
+ context_copied(iommu, info->bus, info->devfn))
+ return intel_pasid_setup_sm_context(dev);
+
return 0;
}
@@ -4107,6 +4138,9 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
start_pfn, nrpages,
list_empty(&gather->freelist), 0);
+ if (dmar_domain->nested_parent)
+ parent_domain_flush(dmar_domain, start_pfn, nrpages,
+ list_empty(&gather->freelist));
put_pages_list(&gather->freelist);
}
@@ -4265,26 +4299,50 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
dev_iommu_priv_set(dev, info);
+ ret = device_rbtree_insert(iommu, info);
+ if (ret)
+ goto free;
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
- kfree(info);
- return ERR_PTR(ret);
+ goto clear_rbtree;
+ }
+
+ if (!context_copied(iommu, info->bus, info->devfn)) {
+ ret = intel_pasid_setup_sm_context(dev);
+ if (ret)
+ goto free_table;
}
}
intel_iommu_debugfs_create_dev(info);
return &iommu->iommu;
+free_table:
+ intel_pasid_free_table(dev);
+clear_rbtree:
+ device_rbtree_remove(info);
+free:
+ kfree(info);
+
+ return ERR_PTR(ret);
}
static void intel_iommu_release_device(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+
+ mutex_lock(&iommu->iopf_lock);
+ device_rbtree_remove(info);
+ mutex_unlock(&iommu->iopf_lock);
+
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
+ !context_copied(iommu, info->bus, info->devfn))
+ intel_pasid_teardown_sm_context(dev);
- dmar_remove_one_dev_info(dev);
intel_pasid_free_table(dev);
intel_iommu_debugfs_remove_dev(info);
kfree(info);
@@ -4427,23 +4485,15 @@ static int intel_iommu_enable_iopf(struct device *dev)
if (ret)
return ret;
- ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
- if (ret)
- goto iopf_remove_device;
-
ret = pci_enable_pri(pdev, PRQ_DEPTH);
- if (ret)
- goto iopf_unregister_handler;
+ if (ret) {
+ iopf_queue_remove_device(iommu->iopf_queue, dev);
+ return ret;
+ }
+
info->pri_enabled = 1;
return 0;
-
-iopf_unregister_handler:
- iommu_unregister_device_fault_handler(dev);
-iopf_remove_device:
- iopf_queue_remove_device(iommu->iopf_queue, dev);
-
- return ret;
}
static int intel_iommu_disable_iopf(struct device *dev)
@@ -4464,14 +4514,7 @@ static int intel_iommu_disable_iopf(struct device *dev)
*/
pci_disable_pri(to_pci_dev(dev));
info->pri_enabled = 0;
-
- /*
- * With PRI disabled and outstanding PRQs drained, unregistering
- * fault handler and removing device from iopf queue should never
- * fail.
- */
- WARN_ON(iommu_unregister_device_fault_handler(dev));
- WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev));
+ iopf_queue_remove_device(iommu->iopf_queue, dev);
return 0;
}
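The reworked enable/disable pair keeps a simple invariant: the device is on the iopf queue whenever PRI is enabled. A hedged sketch of the enable side (mirroring the hunk above; example_enable is hypothetical):

static int example_enable(struct intel_iommu *iommu, struct pci_dev *pdev)
{
	/* Register with the iopf queue before any PRQ can arrive... */
	int ret = iopf_queue_add_device(iommu->iopf_queue, &pdev->dev);

	if (ret)
		return ret;

	/* ...then turn PRI on; unwind the registration on failure. */
	ret = pci_enable_pri(pdev, PRQ_DEPTH);
	if (ret)
		iopf_queue_remove_device(iommu->iopf_queue, &pdev->dev);

	return ret;
}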
@@ -4664,21 +4707,70 @@ static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
return vtd;
}
+/*
+ * Set dirty tracking for the device list of a domain. The caller must
+ * hold the domain->lock when calling it.
+ */
+static int device_set_dirty_tracking(struct list_head *devices, bool enable)
+{
+ struct device_domain_info *info;
+ int ret = 0;
+
+ list_for_each_entry(info, devices, link) {
+ ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev,
+ IOMMU_NO_PASID, enable);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int parent_domain_set_dirty_tracking(struct dmar_domain *domain,
+ bool enable)
+{
+ struct dmar_domain *s1_domain;
+ unsigned long flags;
+ int ret;
+
+ spin_lock(&domain->s1_lock);
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ ret = device_set_dirty_tracking(&s1_domain->devices, enable);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ if (ret)
+ goto err_unwind;
+ }
+ spin_unlock(&domain->s1_lock);
+ return 0;
+
+err_unwind:
+ list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
+ spin_lock_irqsave(&s1_domain->lock, flags);
+ device_set_dirty_tracking(&s1_domain->devices,
+ domain->dirty_tracking);
+ spin_unlock_irqrestore(&s1_domain->lock, flags);
+ }
+ spin_unlock(&domain->s1_lock);
+ return ret;
+}
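For orientation, a hedged caller-side sketch of how a toggle reaches these helpers through the generic dirty ops (example_set_dirty is hypothetical; iommufd is the in-tree consumer):

static int example_set_dirty(struct iommu_domain *domain, bool enable)
{
	/*
	 * For the Intel driver this lands in
	 * intel_iommu_set_dirty_tracking() below, which walks the
	 * domain's device list and, for a nested parent, every
	 * attached stage-1 domain -- unwinding all of them if any
	 * single device fails.
	 */
	if (!domain->dirty_ops)
		return -EOPNOTSUPP;

	return domain->dirty_ops->set_dirty_tracking(domain, enable);
}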
+
static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain,
bool enable)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct device_domain_info *info;
int ret;
spin_lock(&dmar_domain->lock);
if (dmar_domain->dirty_tracking == enable)
goto out_unlock;
- list_for_each_entry(info, &dmar_domain->devices, link) {
- ret = intel_pasid_setup_dirty_tracking(info->iommu,
- info->domain, info->dev,
- IOMMU_NO_PASID, enable);
+ ret = device_set_dirty_tracking(&dmar_domain->devices, enable);
+ if (ret)
+ goto err_unwind;
+
+ if (dmar_domain->nested_parent) {
+ ret = parent_domain_set_dirty_tracking(dmar_domain, enable);
if (ret)
goto err_unwind;
}
@@ -4690,10 +4782,8 @@ out_unlock:
return 0;
err_unwind:
- list_for_each_entry(info, &dmar_domain->devices, link)
- intel_pasid_setup_dirty_tracking(info->iommu, dmar_domain,
- info->dev, IOMMU_NO_PASID,
- dmar_domain->dirty_tracking);
+ device_set_dirty_tracking(&dmar_domain->devices,
+ dmar_domain->dirty_tracking);
spin_unlock(&dmar_domain->lock);
return ret;
}
@@ -4743,6 +4833,7 @@ static const struct iommu_dirty_ops intel_dirty_ops = {
const struct iommu_ops intel_iommu_ops = {
.blocked_domain = &blocking_domain,
+ .release_domain = &blocking_domain,
.capable = intel_iommu_capable,
.hw_info = intel_iommu_hw_info,
.domain_alloc = intel_iommu_domain_alloc,
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index d02f916d8e59..404d2476a877 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -627,6 +627,10 @@ struct dmar_domain {
int agaw;
/* maximum mapped address */
u64 max_addr;
+ /* Protect the s1_domains list */
+ spinlock_t s1_lock;
+ /* Track s1_domains nested on this domain */
+ struct list_head s1_domains;
};
/* Nested user domain */
@@ -637,6 +641,8 @@ struct dmar_domain {
unsigned long s1_pgtbl;
/* page table attributes */
struct iommu_hwpt_vtd_s1 s1_cfg;
+ /* link to parent domain siblings */
+ struct list_head s2_link;
};
};
@@ -713,9 +719,16 @@ struct intel_iommu {
#endif
struct iopf_queue *iopf_queue;
unsigned char iopfq_name[16];
+ /* Synchronization between fault report and iommu device release. */
+ struct mutex iopf_lock;
struct q_inval *qi; /* Queued invalidation info */
u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/
+ /* rb tree for all probed devices */
+ struct rb_root device_rbtree;
+ /* protect the device_rbtree */
+ spinlock_t device_rbtree_lock;
+
#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
struct irq_domain *ir_domain;
@@ -749,6 +762,8 @@ struct device_domain_info {
struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
struct pasid_table *pasid_table; /* pasid table */
+ /* device tracking node (lookup by PCI RID) */
+ struct rb_node node;
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
struct dentry *debugfs_dentry; /* pointer to device directory dentry */
#endif
@@ -1060,6 +1075,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
*/
#define QI_OPT_WAIT_DRAIN BIT(0)
+void domain_update_iotlb(struct dmar_domain *domain);
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
void device_block_translation(struct device *dev);
@@ -1074,13 +1090,14 @@ void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
const struct iommu_user_data *user_data);
+struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid);
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
int intel_svm_enable_prq(struct intel_iommu *iommu);
int intel_svm_finish_prq(struct intel_iommu *iommu);
-int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
- struct iommu_page_response *msg);
+void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
+ struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(void);
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
void intel_drain_pasid_prq(struct device *dev, u32 pasid);
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index f26c7f1c46cc..a7d68f3d518a 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -65,12 +65,20 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
list_add(&info->link, &dmar_domain->devices);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ domain_update_iotlb(dmar_domain);
+
return 0;
}
static void intel_nested_domain_free(struct iommu_domain *domain)
{
- kfree(to_dmar_domain(domain));
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct dmar_domain *s2_domain = dmar_domain->s2_domain;
+
+ spin_lock(&s2_domain->s1_lock);
+ list_del(&dmar_domain->s2_link);
+ spin_unlock(&s2_domain->s1_lock);
+ kfree(dmar_domain);
}
static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
@@ -95,7 +103,7 @@ static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
}
static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
- unsigned long npages, bool ih)
+ u64 npages, bool ih)
{
struct iommu_domain_info *info;
unsigned int mask;
@@ -201,5 +209,9 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
spin_lock_init(&domain->lock);
xa_init(&domain->iommu_array);
+ spin_lock(&s2_domain->s1_lock);
+ list_add(&domain->s2_link, &s2_domain->s1_domains);
+ spin_unlock(&s2_domain->s1_lock);
+
return &domain->domain;
}
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 3239cefa4c33..11f0b856d74c 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -214,6 +214,9 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
if (!info || !info->ats_enabled)
return;
+ if (pci_dev_is_disconnected(to_pci_dev(dev)))
+ return;
+
sid = info->bus << 8 | info->devfn;
qdep = info->ats_qdep;
pfsid = info->pfsid;
@@ -428,7 +431,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
* Set up dirty tracking on a second only or nested translation type.
*/
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid,
bool enabled)
{
@@ -445,7 +447,7 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
return -ENODEV;
}
- did = domain_id_iommu(domain, iommu);
+ did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
if (pgtt != PASID_ENTRY_PGTT_SL_ONLY &&
pgtt != PASID_ENTRY_PGTT_NESTED) {
@@ -658,6 +660,8 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, s2_domain->agaw);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ if (s2_domain->dirty_tracking)
+ pasid_set_ssade(pte);
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
pasid_set_present(pte);
spin_unlock(&iommu->lock);
@@ -666,3 +670,205 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
return 0;
}
+
+/*
+ * Interfaces to setup or teardown a pasid table to the scalable-mode
+ * context table entry:
+ */
+
+static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct context_entry *context;
+
+ spin_lock(&iommu->lock);
+ context = iommu_context_addr(iommu, bus, devfn, false);
+ if (!context) {
+ spin_unlock(&iommu->lock);
+ return;
+ }
+
+ context_clear_entry(context);
+ __iommu_flush_cache(iommu, context, sizeof(*context));
+ spin_unlock(&iommu->lock);
+
+ /*
+ * Cache invalidation for changes to a scalable-mode context table
+ * entry.
+ *
+ * Section 6.5.3.3 of the VT-d spec:
+ * - Device-selective context-cache invalidation;
+ * - Domain-selective PASID-cache invalidation to affected domains
+ * (can be skipped if all PASID entries were not-present);
+ * - Domain-selective IOTLB invalidation to affected domains;
+ * - Global Device-TLB invalidation to affected functions.
+ *
+ * The iommu has been parked in the blocking state. All domains have
+ * been detached from the device or PASID. The PASID and IOTLB caches
+ * have been invalidated during the domain detach path.
+ */
+ iommu->flush.flush_context(iommu, 0, PCI_DEVID(bus, devfn),
+ DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
+ devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
+}
+
+static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct device *dev = data;
+
+ if (dev == &pdev->dev)
+ device_pasid_table_teardown(dev, PCI_BUS_NUM(alias), alias & 0xff);
+
+ return 0;
+}
+
+void intel_pasid_teardown_sm_context(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ if (!dev_is_pci(dev)) {
+ device_pasid_table_teardown(dev, info->bus, info->devfn);
+ return;
+ }
+
+ pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_teardown, dev);
+}
+
+/*
+ * Get the PASID directory size for scalable mode context entry.
+ * Value of X in the PDTS field of a scalable mode context entry
+ * indicates PASID directory with 2^(X + 7) entries.
+ */
+static unsigned long context_get_sm_pds(struct pasid_table *table)
+{
+ unsigned long pds, max_pde;
+
+ max_pde = table->max_pasid >> PASID_PDE_SHIFT;
+ pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
+ if (pds < 7)
+ return 0;
+
+ return pds - 7;
+}
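A worked instance of the PDTS encoding, since the helper is easy to misread (find_first_bit() is correct here because max_pasid is a power of two):

/*
 * Worked example (assumed values): table->max_pasid = 1 << 20 and
 * PASID_PDE_SHIFT = 6, i.e. 64 PASID-table entries per directory
 * entry.
 *
 *   max_pde = (1 << 20) >> 6 = 1 << 14
 *   pds     = find_first_bit(&max_pde, MAX_NR_PASID_BITS) = 14
 *   return    14 - 7 = 7
 *
 * PDTS = 7 encodes a PASID directory of 2^(7 + 7) = 2^14 entries,
 * exactly enough for 2^14 * 64 = 2^20 PASIDs.
 */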
+
+static int context_entry_set_pasid_table(struct context_entry *context,
+ struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct pasid_table *table = info->pasid_table;
+ struct intel_iommu *iommu = info->iommu;
+ unsigned long pds;
+
+ context_clear_entry(context);
+
+ pds = context_get_sm_pds(table);
+ context->lo = (u64)virt_to_phys(table->table) | context_pdts(pds);
+ context_set_sm_rid2pasid(context, IOMMU_NO_PASID);
+
+ if (info->ats_supported)
+ context_set_sm_dte(context);
+ if (info->pri_supported)
+ context_set_sm_pre(context);
+ if (info->pasid_supported)
+ context_set_pasid(context);
+
+ context_set_fault_enable(context);
+ context_set_present(context);
+ __iommu_flush_cache(iommu, context, sizeof(*context));
+
+ return 0;
+}
+
+static int device_pasid_table_setup(struct device *dev, u8 bus, u8 devfn)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
+ struct context_entry *context;
+
+ spin_lock(&iommu->lock);
+ context = iommu_context_addr(iommu, bus, devfn, true);
+ if (!context) {
+ spin_unlock(&iommu->lock);
+ return -ENOMEM;
+ }
+
+ if (context_present(context) && !context_copied(iommu, bus, devfn)) {
+ spin_unlock(&iommu->lock);
+ return 0;
+ }
+
+ if (context_copied(iommu, bus, devfn)) {
+ context_clear_entry(context);
+ __iommu_flush_cache(iommu, context, sizeof(*context));
+
+ /*
+ * For kdump cases, old valid entries may be cached due to
+ * the in-flight DMA and copied pgtable, but there is no
+ * unmapping behaviour for them, thus we need explicit cache
+ * flushes for all affected domain IDs and PASIDs used in
+ * the copied PASID table. Given that we have no idea about
+ * which domain IDs and PASIDs were used in the copied tables,
+ * upgrade them to global PASID and IOTLB cache invalidation.
+ */
+ iommu->flush.flush_context(iommu, 0,
+ PCI_DEVID(bus, devfn),
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
+
+ /*
+ * At this point, the device is supposed to have finished its
+ * reset at the driver probe stage, so no in-flight DMA will
+ * exist and nothing further needs to be done here.
+ */
+ clear_context_copied(iommu, bus, devfn);
+ }
+
+ context_entry_set_pasid_table(context, dev);
+ spin_unlock(&iommu->lock);
+
+ /*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entries we don't need to flush the caches. If it does
+ * cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+ if (cap_caching_mode(iommu->cap)) {
+ iommu->flush.flush_context(iommu, 0,
+ PCI_DEVID(bus, devfn),
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+ }
+
+ return 0;
+}
+
+static int pci_pasid_table_setup(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct device *dev = data;
+
+ if (dev != &pdev->dev)
+ return 0;
+
+ return device_pasid_table_setup(dev, PCI_BUS_NUM(alias), alias & 0xff);
+}
+
+/*
+ * Set the device's PASID table to its context table entry.
+ *
+ * The PASID table is set to the context entries of both device itself
+ * and its alias requester ID for DMA.
+ */
+int intel_pasid_setup_sm_context(struct device *dev)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+ if (!dev_is_pci(dev))
+ return device_pasid_table_setup(dev, info->bus, info->devfn);
+
+ return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup, dev);
+}
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 8d40d4c66e31..da9978fef7ac 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -307,7 +307,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid);
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid,
bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
@@ -319,4 +318,6 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
bool fault_ignore);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
struct device *dev, u32 pasid);
+int intel_pasid_setup_sm_context(struct device *dev);
+void intel_pasid_teardown_sm_context(struct device *dev);
#endif /* __INTEL_PASID_H */
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index 94ee70ac38e3..adc4de6bbd88 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -33,7 +33,7 @@ int dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type)
spin_lock_irqsave(&latency_lock, flags);
if (!iommu->perf_statistic) {
- iommu->perf_statistic = kzalloc(sizeof(*lstat) * DMAR_LATENCY_NUM,
+ iommu->perf_statistic = kcalloc(DMAR_LATENCY_NUM, sizeof(*lstat),
GFP_ATOMIC);
if (!iommu->perf_statistic) {
ret = -ENOMEM;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 40edd282903f..c1bed89b1026 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -22,7 +22,6 @@
#include "iommu.h"
#include "pasid.h"
#include "perf.h"
-#include "../iommu-sva.h"
#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
@@ -315,10 +314,11 @@ out:
return 0;
}
-static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
- struct iommu_domain *domain, ioasid_t pasid)
+static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct intel_iommu *iommu = info->iommu;
struct mm_struct *mm = domain->mm;
struct intel_svm_dev *sdev;
struct intel_svm *svm;
@@ -360,7 +360,6 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
sdev->iommu = iommu;
sdev->did = FLPT_DEFAULT_DID;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
- init_rcu_head(&sdev->rcu);
if (info->ats_enabled) {
sdev->qdep = info->ats_qdep;
if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
@@ -408,13 +407,6 @@ void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
if (svm->notifier.ops)
mmu_notifier_unregister(&svm->notifier, mm);
pasid_private_remove(svm->pasid);
- /*
- * We mandate that no page faults may be outstanding
- * for the PASID when intel_svm_unbind_mm() is called.
- * If that is not obeyed, subtle errors will happen.
- * Let's make them less subtle...
- */
- memset(svm, 0x6b, sizeof(*svm));
kfree(svm);
}
}
@@ -562,16 +554,12 @@ static int prq_to_iommu_prot(struct page_req_dsc *req)
return prot;
}
-static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
- struct page_req_dsc *desc)
+static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
+ struct page_req_dsc *desc)
{
- struct iommu_fault_event event;
-
- if (!dev || !dev_is_pci(dev))
- return -ENODEV;
+ struct iopf_fault event = { };
/* Fill in event data for device specific processing */
- memset(&event, 0, sizeof(struct iommu_fault_event));
event.fault.type = IOMMU_FAULT_PAGE_REQ;
event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
event.fault.prm.pasid = desc->pasid;
@@ -603,7 +591,7 @@ static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
}
- return iommu_report_device_fault(dev, &event);
+ iommu_report_device_fault(dev, &event);
}
static void handle_bad_prq_event(struct intel_iommu *iommu,
@@ -650,7 +638,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
struct intel_iommu *iommu = d;
struct page_req_dsc *req;
int head, tail, handled;
- struct pci_dev *pdev;
+ struct device *dev;
u64 address;
/*
@@ -696,23 +684,22 @@ bad_req:
if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
goto prq_advance;
- pdev = pci_get_domain_bus_and_slot(iommu->segment,
- PCI_BUS_NUM(req->rid),
- req->rid & 0xff);
/*
* If prq is to be handled outside iommu driver via receiver of
* the fault notifiers, we skip the page response here.
*/
- if (!pdev)
+ mutex_lock(&iommu->iopf_lock);
+ dev = device_rbtree_find(iommu, req->rid);
+ if (!dev) {
+ mutex_unlock(&iommu->iopf_lock);
goto bad_req;
+ }
- if (intel_svm_prq_report(iommu, &pdev->dev, req))
- handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
- else
- trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
- req->priv_data[0], req->priv_data[1],
- iommu->prq_seq_number++);
- pci_dev_put(pdev);
+ intel_svm_prq_report(iommu, dev, req);
+ trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
+ req->priv_data[0], req->priv_data[1],
+ iommu->prq_seq_number++);
+ mutex_unlock(&iommu->iopf_lock);
prq_advance:
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
@@ -742,9 +729,8 @@ prq_advance:
return IRQ_RETVAL(handled);
}
-int intel_svm_page_response(struct device *dev,
- struct iommu_fault_event *evt,
- struct iommu_page_response *msg)
+void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
+ struct iommu_page_response *msg)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
@@ -753,7 +739,6 @@ int intel_svm_page_response(struct device *dev,
bool private_present;
bool pasid_present;
bool last_page;
- int ret = 0;
u16 sid;
prm = &evt->fault.prm;
@@ -762,16 +747,6 @@ int intel_svm_page_response(struct device *dev,
private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
- if (!pasid_present) {
- ret = -EINVAL;
- goto out;
- }
-
- if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
- ret = -EINVAL;
- goto out;
- }
-
/*
* Per VT-d spec. v3.0 ch7.7, system software must respond
* with page group response if private data is present (PDP)
@@ -800,17 +775,6 @@ int intel_svm_page_response(struct device *dev,
qi_submit_sync(iommu, &desc, 1, 0);
}
-out:
- return ret;
-}
-
-static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
- struct device *dev, ioasid_t pasid)
-{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu = info->iommu;
-
- return intel_svm_bind_mm(iommu, dev, domain, pasid);
}
static void intel_svm_domain_free(struct iommu_domain *domain)
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index e5b8b9110c13..06d78fcc79fd 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -11,101 +11,140 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include "iommu-sva.h"
+#include "iommu-priv.h"
-/**
- * struct iopf_queue - IO Page Fault queue
- * @wq: the fault workqueue
- * @devices: devices attached to this queue
- * @lock: protects the device list
+/*
+ * Return the fault parameter of a device if it exists. Otherwise, return NULL.
+ * On a successful return, the caller holds a reference to this parameter and
+ * should put it after use by calling iopf_put_dev_fault_param().
*/
-struct iopf_queue {
- struct workqueue_struct *wq;
- struct list_head devices;
- struct mutex lock;
-};
+static struct iommu_fault_param *iopf_get_dev_fault_param(struct device *dev)
+{
+ struct dev_iommu *param = dev->iommu;
+ struct iommu_fault_param *fault_param;
-/**
- * struct iopf_device_param - IO Page Fault data attached to a device
- * @dev: the device that owns this param
- * @queue: IOPF queue
- * @queue_list: index into queue->devices
- * @partial: faults that are part of a Page Request Group for which the last
- * request hasn't been submitted yet.
- */
-struct iopf_device_param {
- struct device *dev;
- struct iopf_queue *queue;
- struct list_head queue_list;
- struct list_head partial;
-};
-
-struct iopf_fault {
- struct iommu_fault fault;
- struct list_head list;
-};
-
-struct iopf_group {
- struct iopf_fault last_fault;
- struct list_head faults;
- struct work_struct work;
- struct device *dev;
-};
-
-static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
- enum iommu_page_response_code status)
+ rcu_read_lock();
+ fault_param = rcu_dereference(param->fault_param);
+ if (fault_param && !refcount_inc_not_zero(&fault_param->users))
+ fault_param = NULL;
+ rcu_read_unlock();
+
+ return fault_param;
+}
+
+/* Caller must hold a reference of the fault parameter. */
+static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param)
{
- struct iommu_page_response resp = {
- .version = IOMMU_PAGE_RESP_VERSION_1,
- .pasid = iopf->fault.prm.pasid,
- .grpid = iopf->fault.prm.grpid,
- .code = status,
- };
+ if (refcount_dec_and_test(&fault_param->users))
+ kfree_rcu(fault_param, rcu);
+}
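These two helpers implement a conventional RCU-plus-refcount lookup; a hedged usage sketch (example_deliver is hypothetical, iommu_report_device_fault() below is the real consumer):

static void example_deliver(struct device *dev)
{
	struct iommu_fault_param *fault_param;

	/* Lookup takes a reference under rcu_read_lock(). */
	fault_param = iopf_get_dev_fault_param(dev);
	if (!fault_param)
		return;	/* device was never added to an iopf queue */

	/* ... record or report a fault against fault_param ... */

	/* Drop the reference; the final put frees via kfree_rcu(). */
	iopf_put_dev_fault_param(fault_param);
}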
- if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
- (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
- resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
+static void __iopf_free_group(struct iopf_group *group)
+{
+ struct iopf_fault *iopf, *next;
- return iommu_page_response(dev, &resp);
+ list_for_each_entry_safe(iopf, next, &group->faults, list) {
+ if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
+ kfree(iopf);
+ }
+
+ /* Pair with iommu_report_device_fault(). */
+ iopf_put_dev_fault_param(group->fault_param);
}
-static void iopf_handler(struct work_struct *work)
+void iopf_free_group(struct iopf_group *group)
+{
+ __iopf_free_group(group);
+ kfree(group);
+}
+EXPORT_SYMBOL_GPL(iopf_free_group);
+
+static struct iommu_domain *get_domain_for_iopf(struct device *dev,
+ struct iommu_fault *fault)
{
- struct iopf_group *group;
struct iommu_domain *domain;
- struct iopf_fault *iopf, *next;
- enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
- group = container_of(work, struct iopf_group, work);
- domain = iommu_get_domain_for_dev_pasid(group->dev,
- group->last_fault.fault.prm.pasid, 0);
- if (!domain || !domain->iopf_handler)
- status = IOMMU_PAGE_RESP_INVALID;
+ if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
+ domain = iommu_get_domain_for_dev_pasid(dev, fault->prm.pasid, 0);
+ if (IS_ERR(domain))
+ domain = NULL;
+ } else {
+ domain = iommu_get_domain_for_dev(dev);
+ }
- list_for_each_entry_safe(iopf, next, &group->faults, list) {
+ if (!domain || !domain->iopf_handler) {
+ dev_warn_ratelimited(dev,
+ "iopf (pasid %d) without domain attached or handler installed\n",
+ fault->prm.pasid);
+
+ return NULL;
+ }
+
+ return domain;
+}
+
+/* Non-last request of a group. Postpone until the last one. */
+static int report_partial_fault(struct iommu_fault_param *fault_param,
+ struct iommu_fault *fault)
+{
+ struct iopf_fault *iopf;
+
+ iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
+ if (!iopf)
+ return -ENOMEM;
+
+ iopf->fault = *fault;
+
+ mutex_lock(&fault_param->lock);
+ list_add(&iopf->list, &fault_param->partial);
+ mutex_unlock(&fault_param->lock);
+
+ return 0;
+}
+
+static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
+ struct iopf_fault *evt,
+ struct iopf_group *abort_group)
+{
+ struct iopf_fault *iopf, *next;
+ struct iopf_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group) {
/*
- * For the moment, errors are sticky: don't handle subsequent
- * faults in the group if there is an error.
+ * We always need to construct the group as we need it to abort
+ * the request at the driver if it can't be handled.
*/
- if (status == IOMMU_PAGE_RESP_SUCCESS)
- status = domain->iopf_handler(&iopf->fault,
- domain->fault_data);
+ group = abort_group;
+ }
- if (!(iopf->fault.prm.flags &
- IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
- kfree(iopf);
+ group->fault_param = iopf_param;
+ group->last_fault.fault = evt->fault;
+ INIT_LIST_HEAD(&group->faults);
+ INIT_LIST_HEAD(&group->pending_node);
+ list_add(&group->last_fault.list, &group->faults);
+
+ /* See if we have partial faults for this group */
+ mutex_lock(&iopf_param->lock);
+ list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
+ if (iopf->fault.prm.grpid == evt->fault.prm.grpid)
+ /* Insert *before* the last fault */
+ list_move(&iopf->list, &group->faults);
}
+ list_add(&group->pending_node, &iopf_param->faults);
+ mutex_unlock(&iopf_param->lock);
- iopf_complete_group(group->dev, &group->last_fault, status);
- kfree(group);
+ return group;
}
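To make the grouping above concrete, a worked trace for a three-request group (grpid G) arriving on one device:

/*
 * Sketch only; assumes in-order delivery.
 *
 *   PRQ 1 (grpid G, !LAST) -> report_partial_fault(): queued on
 *                             fault_param->partial
 *   PRQ 2 (grpid G, !LAST) -> report_partial_fault(): queued
 *   PRQ 3 (grpid G,  LAST) -> iopf_group_alloc(): both partial
 *                             faults are list_move()'d into
 *                             group->faults ahead of last_fault,
 *                             and the group is linked on
 *                             fault_param->faults for response
 *                             tracking.
 *
 * If the kzalloc() fails, the caller's stack-allocated abort_group
 * is used instead, so the request can still be aborted with a
 * response to the hardware.
 */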
/**
- * iommu_queue_iopf - IO Page Fault handler
- * @fault: fault event
- * @cookie: struct device, passed to iommu_register_device_fault_handler.
+ * iommu_report_device_fault() - Report fault event to device driver
+ * @dev: the device
+ * @evt: fault event data
*
- * Add a fault to the device workqueue, to be handled by mm.
+ * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
+ * handler. If the fault cannot be delivered to a handler, ops->page_response()
+ * is called to complete evt if required.
*
* This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard
* them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
@@ -137,83 +176,57 @@ static void iopf_handler(struct work_struct *work)
* freed after the device has stopped generating page faults (or the iommu
* hardware has been set to block the page faults) and the pending page faults
* have been flushed.
- *
- * Return: 0 on success and <0 on error.
*/
-int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
+void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
- int ret;
+ struct iommu_fault *fault = &evt->fault;
+ struct iommu_fault_param *iopf_param;
+ struct iopf_group abort_group = {};
struct iopf_group *group;
- struct iopf_fault *iopf, *next;
- struct iopf_device_param *iopf_param;
- struct device *dev = cookie;
- struct dev_iommu *param = dev->iommu;
-
- lockdep_assert_held(&param->lock);
-
- if (fault->type != IOMMU_FAULT_PAGE_REQ)
- /* Not a recoverable page fault */
- return -EOPNOTSUPP;
-
- /*
- * As long as we're holding param->lock, the queue can't be unlinked
- * from the device and therefore cannot disappear.
- */
- iopf_param = param->iopf_param;
- if (!iopf_param)
- return -ENODEV;
+ iopf_param = iopf_get_dev_fault_param(dev);
+ if (WARN_ON(!iopf_param))
+ return;
if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
- iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
- if (!iopf)
- return -ENOMEM;
-
- iopf->fault = *fault;
-
- /* Non-last request of a group. Postpone until the last one */
- list_add(&iopf->list, &iopf_param->partial);
-
- return 0;
+ report_partial_fault(iopf_param, fault);
+ iopf_put_dev_fault_param(iopf_param);
+ /* A request that is not the last does not need to be ack'd */
}
- group = kzalloc(sizeof(*group), GFP_KERNEL);
- if (!group) {
- /*
- * The caller will send a response to the hardware. But we do
- * need to clean up before leaving, otherwise partial faults
- * will be stuck.
- */
- ret = -ENOMEM;
- goto cleanup_partial;
- }
+ /*
+ * This is the last page fault of a group. Allocate an iopf group and
+ * pass it to the domain's page fault handler. The group holds a
+ * reference to the fault parameter, which is released on the response
+ * or error path of this function. On the error path, the request is
+ * aborted with a failure response, and the partial faults are cleaned
+ * up so they don't get stuck.
+ */
+ group = iopf_group_alloc(iopf_param, evt, &abort_group);
+ if (group == &abort_group)
+ goto err_abort;
- group->dev = dev;
- group->last_fault.fault = *fault;
- INIT_LIST_HEAD(&group->faults);
- list_add(&group->last_fault.list, &group->faults);
- INIT_WORK(&group->work, iopf_handler);
+ group->domain = get_domain_for_iopf(dev, fault);
+ if (!group->domain)
+ goto err_abort;
- /* See if we have partial faults for this group */
- list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
- if (iopf->fault.prm.grpid == fault->prm.grpid)
- /* Insert *before* the last fault */
- list_move(&iopf->list, &group->faults);
- }
+ /*
+ * On success iopf_handler must call iopf_group_response() and
+ * iopf_free_group().
+ */
+ if (group->domain->iopf_handler(group))
+ goto err_abort;
- queue_work(iopf_param->queue->wq, &group->work);
- return 0;
+ return;
-cleanup_partial:
- list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
- if (iopf->fault.prm.grpid == fault->prm.grpid) {
- list_del(&iopf->list);
- kfree(iopf);
- }
- }
- return ret;
+err_abort:
+ iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
+ if (group == &abort_group)
+ __iopf_free_group(group);
+ else
+ iopf_free_group(group);
}
-EXPORT_SYMBOL_GPL(iommu_queue_iopf);
+EXPORT_SYMBOL_GPL(iommu_report_device_fault);
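The ownership contract noted above — a handler that returns 0 must eventually complete the group — can be sketched as follows (hypothetical handler and workqueue, modeled on the SVA path later in this patch):

static void example_iopf_work(struct work_struct *work)
{
	struct iopf_group *group = container_of(work, struct iopf_group, work);

	/* ... resolve each fault on group->faults ... */

	/* Completion: respond to the hardware, then free the group. */
	iopf_group_response(group, IOMMU_PAGE_RESP_SUCCESS);
	iopf_free_group(group);
}

static int example_iopf_handler(struct iopf_group *group)
{
	INIT_WORK(&group->work, example_iopf_work);
	queue_work(example_wq, &group->work);	/* example_wq: hypothetical */

	/* Returning 0 transfers group ownership to the work item. */
	return 0;
}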
/**
* iopf_queue_flush_dev - Ensure that all queued faults have been processed
@@ -229,26 +242,52 @@ EXPORT_SYMBOL_GPL(iommu_queue_iopf);
*/
int iopf_queue_flush_dev(struct device *dev)
{
- int ret = 0;
- struct iopf_device_param *iopf_param;
- struct dev_iommu *param = dev->iommu;
+ struct iommu_fault_param *iopf_param;
- if (!param)
+ /*
+ * It's a driver bug to be here after iopf_queue_remove_device().
+ * Therefore, it's safe to dereference the fault parameter without
+ * holding the lock.
+ */
+ iopf_param = rcu_dereference_check(dev->iommu->fault_param, true);
+ if (WARN_ON(!iopf_param))
return -ENODEV;
- mutex_lock(&param->lock);
- iopf_param = param->iopf_param;
- if (iopf_param)
- flush_workqueue(iopf_param->queue->wq);
- else
- ret = -ENODEV;
- mutex_unlock(&param->lock);
+ flush_workqueue(iopf_param->queue->wq);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
/**
+ * iopf_group_response - Respond a group of page faults
+ * @group: the group of faults with the same group id
+ * @status: the response code
+ */
+void iopf_group_response(struct iopf_group *group,
+ enum iommu_page_response_code status)
+{
+ struct iommu_fault_param *fault_param = group->fault_param;
+ struct iopf_fault *iopf = &group->last_fault;
+ struct device *dev = group->fault_param->dev;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+ struct iommu_page_response resp = {
+ .pasid = iopf->fault.prm.pasid,
+ .grpid = iopf->fault.prm.grpid,
+ .code = status,
+ };
+
+ /* Only send response if there is a fault report pending */
+ mutex_lock(&fault_param->lock);
+ if (!list_empty(&group->pending_node)) {
+ ops->page_response(dev, &group->last_fault, &resp);
+ list_del_init(&group->pending_node);
+ }
+ mutex_unlock(&fault_param->lock);
+}
+EXPORT_SYMBOL_GPL(iopf_group_response);
+
+/**
 * iopf_queue_discard_partial - Remove all pending partial faults
* @queue: the queue whose partial faults need to be discarded
*
@@ -261,18 +300,20 @@ EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
int iopf_queue_discard_partial(struct iopf_queue *queue)
{
struct iopf_fault *iopf, *next;
- struct iopf_device_param *iopf_param;
+ struct iommu_fault_param *iopf_param;
if (!queue)
return -EINVAL;
mutex_lock(&queue->lock);
list_for_each_entry(iopf_param, &queue->devices, queue_list) {
+ mutex_lock(&iopf_param->lock);
list_for_each_entry_safe(iopf, next, &iopf_param->partial,
list) {
list_del(&iopf->list);
kfree(iopf);
}
+ mutex_unlock(&iopf_param->lock);
}
mutex_unlock(&queue->lock);
return 0;
@@ -288,34 +329,42 @@ EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);
*/
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
- int ret = -EBUSY;
- struct iopf_device_param *iopf_param;
+ int ret = 0;
struct dev_iommu *param = dev->iommu;
+ struct iommu_fault_param *fault_param;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (!param)
+ if (!ops->page_response)
return -ENODEV;
- iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
- if (!iopf_param)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&iopf_param->partial);
- iopf_param->queue = queue;
- iopf_param->dev = dev;
-
mutex_lock(&queue->lock);
mutex_lock(&param->lock);
- if (!param->iopf_param) {
- list_add(&iopf_param->queue_list, &queue->devices);
- param->iopf_param = iopf_param;
- ret = 0;
+ if (rcu_dereference_check(param->fault_param,
+ lockdep_is_held(&param->lock))) {
+ ret = -EBUSY;
+ goto done_unlock;
+ }
+
+ fault_param = kzalloc(sizeof(*fault_param), GFP_KERNEL);
+ if (!fault_param) {
+ ret = -ENOMEM;
+ goto done_unlock;
}
+
+ mutex_init(&fault_param->lock);
+ INIT_LIST_HEAD(&fault_param->faults);
+ INIT_LIST_HEAD(&fault_param->partial);
+ fault_param->dev = dev;
+ refcount_set(&fault_param->users, 1);
+ list_add(&fault_param->queue_list, &queue->devices);
+ fault_param->queue = queue;
+
+ rcu_assign_pointer(param->fault_param, fault_param);
+
+done_unlock:
mutex_unlock(&param->lock);
mutex_unlock(&queue->lock);
- if (ret)
- kfree(iopf_param);
-
return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_add_device);
@@ -325,40 +374,66 @@ EXPORT_SYMBOL_GPL(iopf_queue_add_device);
* @queue: IOPF queue
* @dev: device to remove
*
- * Caller makes sure that no more faults are reported for this device.
+ * Remove a device from an iopf_queue. It's recommended to follow these
+ * steps when removing a device:
*
- * Return: 0 on success and <0 on error.
+ * - Disable new PRI reception: Turn off PRI generation in the IOMMU hardware
+ * and flush any hardware page request queues. This should be done before
+ * calling into this helper.
+ * - Acknowledge all outstanding PRQs to the device: Respond to all outstanding
+ * page requests with IOMMU_PAGE_RESP_INVALID, indicating the device should
+ * not retry. This helper function handles this.
+ * - Disable PRI on the device: After calling this helper, the caller could
+ * then disable PRI on the device.
+ *
+ * Calling iopf_queue_remove_device() essentially disassociates the device
+ * from the queue.
+ * The fault_param might still exist, but iommu_page_response() will do
+ * nothing. The device fault parameter reference count has been properly
+ * passed from iommu_report_device_fault() to the fault handling work, and
+ * will eventually be released after iommu_page_response().
*/
-int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
+void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
- int ret = -EINVAL;
- struct iopf_fault *iopf, *next;
- struct iopf_device_param *iopf_param;
+ struct iopf_fault *partial_iopf;
+ struct iopf_fault *next;
+ struct iopf_group *group, *temp;
struct dev_iommu *param = dev->iommu;
-
- if (!param || !queue)
- return -EINVAL;
+ struct iommu_fault_param *fault_param;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
mutex_lock(&queue->lock);
mutex_lock(&param->lock);
- iopf_param = param->iopf_param;
- if (iopf_param && iopf_param->queue == queue) {
- list_del(&iopf_param->queue_list);
- param->iopf_param = NULL;
- ret = 0;
+ fault_param = rcu_dereference_check(param->fault_param,
+ lockdep_is_held(&param->lock));
+
+ if (WARN_ON(!fault_param || fault_param->queue != queue))
+ goto unlock;
+
+ mutex_lock(&fault_param->lock);
+ list_for_each_entry_safe(partial_iopf, next, &fault_param->partial, list)
+ kfree(partial_iopf);
+
+ list_for_each_entry_safe(group, temp, &fault_param->faults, pending_node) {
+ struct iopf_fault *iopf = &group->last_fault;
+ struct iommu_page_response resp = {
+ .pasid = iopf->fault.prm.pasid,
+ .grpid = iopf->fault.prm.grpid,
+ .code = IOMMU_PAGE_RESP_INVALID
+ };
+
+ ops->page_response(dev, iopf, &resp);
+ list_del_init(&group->pending_node);
}
- mutex_unlock(&param->lock);
- mutex_unlock(&queue->lock);
- if (ret)
- return ret;
+ mutex_unlock(&fault_param->lock);
- /* Just in case some faults are still stuck */
- list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
- kfree(iopf);
+ list_del(&fault_param->queue_list);
- kfree(iopf_param);
-
- return 0;
+ /* dec the ref owned by iopf_queue_add_device() */
+ rcu_assign_pointer(param->fault_param, NULL);
+ iopf_put_dev_fault_param(fault_param);
+unlock:
+ mutex_unlock(&param->lock);
+ mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
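A hedged sketch of the caller-side ordering the kernel-doc above prescribes (example_teardown is hypothetical; step 1 is driver specific):

static void example_teardown(struct iopf_queue *queue, struct pci_dev *pdev)
{
	/* 1. Quiesce: stop PRI generation in the IOMMU hardware and
	 *    drain its page request queue (driver specific). */

	/* 2. Respond IOMMU_PAGE_RESP_INVALID to anything outstanding
	 *    and disassociate the device from the queue. */
	iopf_queue_remove_device(queue, &pdev->dev);

	/* 3. Only now disable PRI on the device itself. */
	pci_disable_pri(pdev);
}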
@@ -404,7 +479,7 @@ EXPORT_SYMBOL_GPL(iopf_queue_alloc);
*/
void iopf_queue_free(struct iopf_queue *queue)
{
- struct iopf_device_param *iopf_param, *next;
+ struct iommu_fault_param *iopf_param, *next;
if (!queue)
return;
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
index 2024a2313348..5f731d994803 100644
--- a/drivers/iommu/iommu-priv.h
+++ b/drivers/iommu/iommu-priv.h
@@ -21,10 +21,11 @@ int iommu_group_replace_domain(struct iommu_group *group,
struct iommu_domain *new_domain);
int iommu_device_register_bus(struct iommu_device *iommu,
- const struct iommu_ops *ops, struct bus_type *bus,
+ const struct iommu_ops *ops,
+ const struct bus_type *bus,
struct notifier_block *nb);
void iommu_device_unregister_bus(struct iommu_device *iommu,
- struct bus_type *bus,
+ const struct bus_type *bus,
struct notifier_block *nb);
#endif /* __LINUX_IOMMU_PRIV_H */
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index c3fc9201d0be..640acc804e8c 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -7,7 +7,7 @@
#include <linux/sched/mm.h>
#include <linux/iommu.h>
-#include "iommu-sva.h"
+#include "iommu-priv.h"
static DEFINE_MUTEX(iommu_sva_lock);
@@ -41,6 +41,7 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
}
iommu_mm->pasid = pasid;
INIT_LIST_HEAD(&iommu_mm->sva_domains);
+ INIT_LIST_HEAD(&iommu_mm->sva_handles);
/*
* Make sure the write to mm->iommu_mm is not reordered in front of
* initialization to iommu_mm fields. If it does, readers may see a
@@ -82,6 +83,14 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
goto out_unlock;
}
+ list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
+ if (handle->dev == dev) {
+ refcount_inc(&handle->users);
+ mutex_unlock(&iommu_sva_lock);
+ return handle;
+ }
+ }
+
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle) {
ret = -ENOMEM;
@@ -111,6 +120,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
list_add(&domain->next, &mm->iommu_mm->sva_domains);
out:
+ refcount_set(&handle->users, 1);
+ list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
mutex_unlock(&iommu_sva_lock);
handle->dev = dev;
handle->domain = domain;
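
With the handle list in place, repeated binds of the same device/mm pair now share one refcounted handle. A hypothetical caller sketch (error handling omitted; in real code h1 may be an ERR_PTR):

	static void demo_shared_bind(struct device *dev, struct mm_struct *mm)
	{
		struct iommu_sva *h1 = iommu_sva_bind_device(dev, mm);
		struct iommu_sva *h2 = iommu_sva_bind_device(dev, mm); /* h2 == h1 */

		iommu_sva_unbind_device(h2);	/* users drops to 1, handle stays */
		iommu_sva_unbind_device(h1);	/* final put: PASID detached, handle freed */
	}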
@@ -141,6 +152,12 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
struct device *dev = handle->dev;
mutex_lock(&iommu_sva_lock);
+ if (!refcount_dec_and_test(&handle->users)) {
+ mutex_unlock(&iommu_sva_lock);
+ return;
+ }
+ list_del(&handle->handle_item);
+
iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
if (--domain->users == 0) {
list_del(&domain->next);
@@ -159,15 +176,25 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle)
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
+void mm_pasid_drop(struct mm_struct *mm)
+{
+ struct iommu_mm_data *iommu_mm = mm->iommu_mm;
+
+ if (!iommu_mm)
+ return;
+
+ iommu_free_global_pasid(iommu_mm->pasid);
+ kfree(iommu_mm);
+}
+
/*
* I/O page fault handler for SVA
*/
-enum iommu_page_response_code
-iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
+static enum iommu_page_response_code
+iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
vm_fault_t ret;
struct vm_area_struct *vma;
- struct mm_struct *mm = data;
unsigned int access_flags = 0;
unsigned int fault_flags = FAULT_FLAG_REMOTE;
struct iommu_fault_page_request *prm = &fault->prm;
@@ -217,13 +244,54 @@ out_put_mm:
return status;
}
-void mm_pasid_drop(struct mm_struct *mm)
+static void iommu_sva_handle_iopf(struct work_struct *work)
{
- struct iommu_mm_data *iommu_mm = mm->iommu_mm;
+ struct iopf_fault *iopf;
+ struct iopf_group *group;
+ enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
+
+ group = container_of(work, struct iopf_group, work);
+ list_for_each_entry(iopf, &group->faults, list) {
+ /*
+ * For the moment, errors are sticky: don't handle subsequent
+ * faults in the group if there is an error.
+ */
+ if (status != IOMMU_PAGE_RESP_SUCCESS)
+ break;
+
+ status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
+ }
- if (!iommu_mm)
- return;
+ iopf_group_response(group, status);
+ iopf_free_group(group);
+}
- iommu_free_global_pasid(iommu_mm->pasid);
- kfree(iommu_mm);
+static int iommu_sva_iopf_handler(struct iopf_group *group)
+{
+ struct iommu_fault_param *fault_param = group->fault_param;
+
+ INIT_WORK(&group->work, iommu_sva_handle_iopf);
+ if (!queue_work(fault_param->queue->wq, &group->work))
+ return -EBUSY;
+
+ return 0;
+}
+
+struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
+ struct mm_struct *mm)
+{
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+ struct iommu_domain *domain;
+
+ domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
+ if (!domain)
+ return NULL;
+
+ domain->type = IOMMU_DOMAIN_SVA;
+ mmgrab(mm);
+ domain->mm = mm;
+ domain->owner = ops;
+ domain->iopf_handler = iommu_sva_iopf_handler;
+
+ return domain;
}
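
A condensed view of how the bind path above consumes this helper, assuming the three-argument iommu_attach_device_pasid() of this kernel generation; error paths are trimmed:

	static struct iommu_domain *demo_sva_attach(struct device *dev,
						    struct mm_struct *mm,
						    ioasid_t pasid)
	{
		struct iommu_domain *domain;

		domain = iommu_sva_domain_alloc(dev, mm);	/* mmgrab()s mm */
		if (!domain)
			return NULL;

		if (iommu_attach_device_pasid(domain, dev, pasid)) {
			iommu_domain_free(domain);	/* mmdrop() on the SVA path */
			return NULL;
		}
		return domain;
	}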
diff --git a/drivers/iommu/iommu-sva.h b/drivers/iommu/iommu-sva.h
deleted file mode 100644
index 54946b5a7caf..000000000000
--- a/drivers/iommu/iommu-sva.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * SVA library for IOMMU drivers
- */
-#ifndef _IOMMU_SVA_H
-#define _IOMMU_SVA_H
-
-#include <linux/mm_types.h>
-
-/* I/O Page fault */
-struct device;
-struct iommu_fault;
-struct iopf_queue;
-
-#ifdef CONFIG_IOMMU_SVA
-int iommu_queue_iopf(struct iommu_fault *fault, void *cookie);
-
-int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
-int iopf_queue_remove_device(struct iopf_queue *queue,
- struct device *dev);
-int iopf_queue_flush_dev(struct device *dev);
-struct iopf_queue *iopf_queue_alloc(const char *name);
-void iopf_queue_free(struct iopf_queue *queue);
-int iopf_queue_discard_partial(struct iopf_queue *queue);
-enum iommu_page_response_code
-iommu_sva_handle_iopf(struct iommu_fault *fault, void *data);
-
-#else /* CONFIG_IOMMU_SVA */
-static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
-{
- return -ENODEV;
-}
-
-static inline int iopf_queue_add_device(struct iopf_queue *queue,
- struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int iopf_queue_remove_device(struct iopf_queue *queue,
- struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int iopf_queue_flush_dev(struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline struct iopf_queue *iopf_queue_alloc(const char *name)
-{
- return NULL;
-}
-
-static inline void iopf_queue_free(struct iopf_queue *queue)
-{
-}
-
-static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
-{
- return -ENODEV;
-}
-
-static inline enum iommu_page_response_code
-iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
-{
- return IOMMU_PAGE_RESP_INVALID;
-}
-#endif /* CONFIG_IOMMU_SVA */
-#endif /* _IOMMU_SVA_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d14413916f93..098869007c69 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -36,8 +36,6 @@
#include "dma-iommu.h"
#include "iommu-priv.h"
-#include "iommu-sva.h"
-
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static DEFINE_IDA(iommu_global_pasid_ida);
@@ -291,7 +289,7 @@ EXPORT_SYMBOL_GPL(iommu_device_unregister);
#if IS_ENABLED(CONFIG_IOMMUFD_TEST)
void iommu_device_unregister_bus(struct iommu_device *iommu,
- struct bus_type *bus,
+ const struct bus_type *bus,
struct notifier_block *nb)
{
bus_unregister_notifier(bus, nb);
@@ -305,7 +303,8 @@ EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);
* some memory to hold a notifier_block.
*/
int iommu_device_register_bus(struct iommu_device *iommu,
- const struct iommu_ops *ops, struct bus_type *bus,
+ const struct iommu_ops *ops,
+ const struct bus_type *bus,
struct notifier_block *nb)
{
int err;
@@ -463,13 +462,24 @@ static void iommu_deinit_device(struct device *dev)
/*
* release_device() must stop using any attached domain on the device.
- * If there are still other devices in the group they are not effected
+ * If there are still other devices in the group, they are not affected
* by this callback.
*
- * The IOMMU driver must set the device to either an identity or
- * blocking translation and stop using any domain pointer, as it is
- * going to be freed.
+ * If the iommu driver provides release_domain, the core code ensures
+ * that domain is attached prior to calling release_device. Drivers can
+ * use this to enforce a translation on the idle iommu. Typically, the
+ * global static blocked_domain is a good choice.
+ *
+ * Otherwise, the iommu driver must set the device to either an identity
+ * or a blocking translation in release_device() and stop using any
+ * domain pointer, as it is going to be freed.
+ *
+ * Regardless, if a delayed attach never occurred, the release path
+ * should still avoid touching any hardware configuration.
*/
+ if (!dev->iommu->attach_deferred && ops->release_domain)
+ ops->release_domain->ops->attach_dev(ops->release_domain, dev);
+
if (ops->release_device)
ops->release_device(dev);
@@ -1248,6 +1258,25 @@ void iommu_group_remove_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
+#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
+/**
+ * iommu_group_mutex_assert - Check device group mutex lock
+ * @dev: the device that has group param set
+ *
+ * This function is called by an iommu driver to check whether it holds
+ * the group mutex lock for the given device.
+ *
+ * Note that this function must be called after the device's group param
+ * is set.
+ */
+void iommu_group_mutex_assert(struct device *dev)
+{
+ struct iommu_group *group = dev->iommu_group;
+
+ lockdep_assert_held(&group->mutex);
+}
+EXPORT_SYMBOL_GPL(iommu_group_mutex_assert);
+#endif
+
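A driver-side usage sketch; the function name below is hypothetical, and with lockdep disabled the call is expected to compile down to a stub:

	static void my_driver_update_dev_state(struct device *dev)
	{
		/* Splats under lockdep if the caller forgot group->mutex. */
		iommu_group_mutex_assert(dev);

		/* ... safe to touch per-group device state here ... */
	}
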
static struct device *iommu_group_first_dev(struct iommu_group *group)
{
lockdep_assert_held(&group->mutex);
@@ -1331,217 +1360,6 @@ void iommu_group_put(struct iommu_group *group)
EXPORT_SYMBOL_GPL(iommu_group_put);
/**
- * iommu_register_device_fault_handler() - Register a device fault handler
- * @dev: the device
- * @handler: the fault handler
- * @data: private data passed as argument to the handler
- *
- * When an IOMMU fault event is received, this handler gets called with the
- * fault event and data as argument. The handler should return 0 on success. If
- * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
- * complete the fault by calling iommu_page_response() with one of the following
- * response code:
- * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
- * - IOMMU_PAGE_RESP_INVALID: terminate the fault
- * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
- * page faults if possible.
- *
- * Return 0 if the fault handler was installed successfully, or an error.
- */
-int iommu_register_device_fault_handler(struct device *dev,
- iommu_dev_fault_handler_t handler,
- void *data)
-{
- struct dev_iommu *param = dev->iommu;
- int ret = 0;
-
- if (!param)
- return -EINVAL;
-
- mutex_lock(&param->lock);
- /* Only allow one fault handler registered for each device */
- if (param->fault_param) {
- ret = -EBUSY;
- goto done_unlock;
- }
-
- get_device(dev);
- param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
- if (!param->fault_param) {
- put_device(dev);
- ret = -ENOMEM;
- goto done_unlock;
- }
- param->fault_param->handler = handler;
- param->fault_param->data = data;
- mutex_init(&param->fault_param->lock);
- INIT_LIST_HEAD(&param->fault_param->faults);
-
-done_unlock:
- mutex_unlock(&param->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
-
-/**
- * iommu_unregister_device_fault_handler() - Unregister the device fault handler
- * @dev: the device
- *
- * Remove the device fault handler installed with
- * iommu_register_device_fault_handler().
- *
- * Return 0 on success, or an error.
- */
-int iommu_unregister_device_fault_handler(struct device *dev)
-{
- struct dev_iommu *param = dev->iommu;
- int ret = 0;
-
- if (!param)
- return -EINVAL;
-
- mutex_lock(&param->lock);
-
- if (!param->fault_param)
- goto unlock;
-
- /* we cannot unregister handler if there are pending faults */
- if (!list_empty(&param->fault_param->faults)) {
- ret = -EBUSY;
- goto unlock;
- }
-
- kfree(param->fault_param);
- param->fault_param = NULL;
- put_device(dev);
-unlock:
- mutex_unlock(&param->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
-
-/**
- * iommu_report_device_fault() - Report fault event to device driver
- * @dev: the device
- * @evt: fault event data
- *
- * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
- * handler. When this function fails and the fault is recoverable, it is the
- * caller's responsibility to complete the fault.
- *
- * Return 0 on success, or an error.
- */
-int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
-{
- struct dev_iommu *param = dev->iommu;
- struct iommu_fault_event *evt_pending = NULL;
- struct iommu_fault_param *fparam;
- int ret = 0;
-
- if (!param || !evt)
- return -EINVAL;
-
- /* we only report device fault if there is a handler registered */
- mutex_lock(&param->lock);
- fparam = param->fault_param;
- if (!fparam || !fparam->handler) {
- ret = -EINVAL;
- goto done_unlock;
- }
-
- if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
- (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
- evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
- GFP_KERNEL);
- if (!evt_pending) {
- ret = -ENOMEM;
- goto done_unlock;
- }
- mutex_lock(&fparam->lock);
- list_add_tail(&evt_pending->list, &fparam->faults);
- mutex_unlock(&fparam->lock);
- }
-
- ret = fparam->handler(&evt->fault, fparam->data);
- if (ret && evt_pending) {
- mutex_lock(&fparam->lock);
- list_del(&evt_pending->list);
- mutex_unlock(&fparam->lock);
- kfree(evt_pending);
- }
-done_unlock:
- mutex_unlock(&param->lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_report_device_fault);
-
-int iommu_page_response(struct device *dev,
- struct iommu_page_response *msg)
-{
- bool needs_pasid;
- int ret = -EINVAL;
- struct iommu_fault_event *evt;
- struct iommu_fault_page_request *prm;
- struct dev_iommu *param = dev->iommu;
- const struct iommu_ops *ops = dev_iommu_ops(dev);
- bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
-
- if (!ops->page_response)
- return -ENODEV;
-
- if (!param || !param->fault_param)
- return -EINVAL;
-
- if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
- msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
- return -EINVAL;
-
- /* Only send response if there is a fault report pending */
- mutex_lock(&param->fault_param->lock);
- if (list_empty(&param->fault_param->faults)) {
- dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
- goto done_unlock;
- }
- /*
- * Check if we have a matching page request pending to respond,
- * otherwise return -EINVAL
- */
- list_for_each_entry(evt, &param->fault_param->faults, list) {
- prm = &evt->fault.prm;
- if (prm->grpid != msg->grpid)
- continue;
-
- /*
- * If the PASID is required, the corresponding request is
- * matched using the group ID, the PASID valid bit and the PASID
- * value. Otherwise only the group ID matches request and
- * response.
- */
- needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
- if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
- continue;
-
- if (!needs_pasid && has_pasid) {
- /* No big deal, just clear it. */
- msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
- msg->pasid = 0;
- }
-
- ret = ops->page_response(dev, evt, msg);
- list_del(&evt->list);
- kfree(evt);
- break;
- }
-
-done_unlock:
- mutex_unlock(&param->fault_param->lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_page_response);
-
-/**
* iommu_group_id - Return ID for a group
* @group: the group to ID
*
@@ -2986,7 +2804,7 @@ bool iommu_default_passthrough(void)
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);
-const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
+const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
{
const struct iommu_ops *ops = NULL;
struct iommu_device *iommu;
@@ -3037,7 +2855,7 @@ void iommu_fwspec_free(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);
-int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
+int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
int i, new_num;
@@ -3623,26 +3441,6 @@ struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);
-struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
- struct mm_struct *mm)
-{
- const struct iommu_ops *ops = dev_iommu_ops(dev);
- struct iommu_domain *domain;
-
- domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
- if (!domain)
- return NULL;
-
- domain->type = IOMMU_DOMAIN_SVA;
- mmgrab(mm);
- domain->mm = mm;
- domain->owner = ops;
- domain->iopf_handler = iommu_sva_handle_iopf;
- domain->fault_data = mm;
-
- return domain;
-}
-
ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
int ret;
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index 3f3f1fa1a0a9..33d142f8057d 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -263,7 +263,8 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd)
if (cmd->__reserved)
return -EOPNOTSUPP;
- if (cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len)
+ if ((cmd->data_type == IOMMU_HWPT_DATA_NONE && cmd->data_len) ||
+ (cmd->data_type != IOMMU_HWPT_DATA_NONE && !cmd->data_len))
return -EINVAL;
idev = iommufd_get_device(ucmd, cmd->dev_id);
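
The tightened check makes the two user-supplied fields consistent in both directions; illustratively:

	data_type              data_len   outcome
	IOMMU_HWPT_DATA_NONE   0          accepted
	IOMMU_HWPT_DATA_NONE   > 0        -EINVAL (pre-existing check)
	any other type         0          -EINVAL (newly rejected)
	any other type         > 0        accepted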
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 504ac1b01b2d..05fd9d3abf1b 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -1330,20 +1330,23 @@ out_unlock:
int iopt_add_access(struct io_pagetable *iopt, struct iommufd_access *access)
{
+ u32 new_id;
int rc;
down_write(&iopt->domains_rwsem);
down_write(&iopt->iova_rwsem);
- rc = xa_alloc(&iopt->access_list, &access->iopt_access_list_id, access,
- xa_limit_16b, GFP_KERNEL_ACCOUNT);
+ rc = xa_alloc(&iopt->access_list, &new_id, access, xa_limit_16b,
+ GFP_KERNEL_ACCOUNT);
+
if (rc)
goto out_unlock;
rc = iopt_calculate_iova_alignment(iopt);
if (rc) {
- xa_erase(&iopt->access_list, access->iopt_access_list_id);
+ xa_erase(&iopt->access_list, new_id);
goto out_unlock;
}
+ access->iopt_access_list_id = new_id;
out_unlock:
up_write(&iopt->iova_rwsem);
diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index 482d4059f5db..e854d3f67205 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -45,6 +45,7 @@ enum {
enum {
MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0,
+ MOCK_FLAGS_DEVICE_HUGE_IOVA = 1 << 1,
};
enum {
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index 0a92c9eeaf7f..db8c46bee155 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -100,7 +100,7 @@ struct iova_bitmap {
struct iova_bitmap_map mapped;
/* userspace address of the bitmap */
- u64 __user *bitmap;
+ u8 __user *bitmap;
/* u64 index that @mapped points to */
unsigned long mapped_base_index;
@@ -113,6 +113,9 @@ struct iova_bitmap {
/* length of the IOVA range for the whole bitmap */
size_t length;
+
+ /* length of the IOVA range set ahead the pinned pages */
+ unsigned long set_ahead_length;
};
/*
@@ -162,7 +165,7 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
{
struct iova_bitmap_map *mapped = &bitmap->mapped;
unsigned long npages;
- u64 __user *addr;
+ u8 __user *addr;
long ret;
/*
@@ -176,17 +179,18 @@ static int iova_bitmap_get(struct iova_bitmap *bitmap)
sizeof(*bitmap->bitmap), PAGE_SIZE);
/*
- * We always cap at max number of 'struct page' a base page can fit.
- * This is, for example, on x86 means 2M of bitmap data max.
- */
- npages = min(npages, PAGE_SIZE / sizeof(struct page *));
-
- /*
* Bitmap address to be pinned is calculated via pointer arithmetic
* with bitmap u64 word index.
*/
addr = bitmap->bitmap + bitmap->mapped_base_index;
+ /*
+ * We always cap at the max number of 'struct page' pointers a base
+ * page can fit; on x86, for example, that means 2M of bitmap data max.
+ */
+ npages = min(npages + !!offset_in_page(addr),
+ PAGE_SIZE / sizeof(struct page *));
+
ret = pin_user_pages_fast((unsigned long)addr, npages,
FOLL_WRITE, mapped->pages);
if (ret <= 0)
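
A worked example of why the extra page can be needed (illustrative numbers, 4 KiB pages):

	offset_in_page(addr) = 0xff8              /* addr not page-aligned */
	a u64 at addr spans 0xff8..0xfff plus 0x000..0x007 of the next page
	=> npages += !!offset_in_page(addr) == 1
	=> then capped at PAGE_SIZE / sizeof(struct page *) = 512 pages,
	   i.e. the 2M of bitmap data mentioned in the comment above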
@@ -247,7 +251,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
mapped = &bitmap->mapped;
mapped->pgshift = __ffs(page_size);
- bitmap->bitmap = data;
+ bitmap->bitmap = (u8 __user *)data;
bitmap->mapped_total_index =
iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
bitmap->iova = iova;
@@ -304,7 +308,7 @@ static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap)
remaining = bitmap->mapped_total_index - bitmap->mapped_base_index;
remaining = min_t(unsigned long, remaining,
- bytes / sizeof(*bitmap->bitmap));
+ DIV_ROUND_UP(bytes, sizeof(*bitmap->bitmap)));
return remaining;
}
@@ -341,6 +345,32 @@ static bool iova_bitmap_done(struct iova_bitmap *bitmap)
return bitmap->mapped_base_index >= bitmap->mapped_total_index;
}
+static int iova_bitmap_set_ahead(struct iova_bitmap *bitmap,
+ size_t set_ahead_length)
+{
+ int ret = 0;
+
+ while (set_ahead_length > 0 && !iova_bitmap_done(bitmap)) {
+ unsigned long length = iova_bitmap_mapped_length(bitmap);
+ unsigned long iova = iova_bitmap_mapped_iova(bitmap);
+
+ ret = iova_bitmap_get(bitmap);
+ if (ret)
+ break;
+
+ length = min(length, set_ahead_length);
+ iova_bitmap_set(bitmap, iova, length);
+
+ set_ahead_length -= length;
+ bitmap->mapped_base_index +=
+ iova_bitmap_offset_to_index(bitmap, length - 1) + 1;
+ iova_bitmap_put(bitmap);
+ }
+
+ bitmap->set_ahead_length = 0;
+ return ret;
+}
+
/*
* Advances to the next range, releases the current pinned
* pages and pins the next set of bitmap pages.
@@ -357,6 +387,15 @@ static int iova_bitmap_advance(struct iova_bitmap *bitmap)
if (iova_bitmap_done(bitmap))
return 0;
+ /* Iterate, set and skip any bits requested for next iteration */
+ if (bitmap->set_ahead_length) {
+ int ret;
+
+ ret = iova_bitmap_set_ahead(bitmap, bitmap->set_ahead_length);
+ if (ret)
+ return ret;
+ }
+
/* When advancing the index we pin the next set of bitmap pages */
return iova_bitmap_get(bitmap);
}
@@ -409,6 +448,7 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
unsigned long last_bit = (((iova + length - 1) - mapped->iova) >>
mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE;
+ unsigned long last_page_idx = mapped->npages - 1;
do {
unsigned int page_idx = cur_bit / BITS_PER_PAGE;
@@ -417,10 +457,18 @@ void iova_bitmap_set(struct iova_bitmap *bitmap,
last_bit - cur_bit + 1);
void *kaddr;
+ if (unlikely(page_idx > last_page_idx))
+ break;
+
kaddr = kmap_local_page(mapped->pages[page_idx]);
bitmap_set(kaddr, offset, nbits);
kunmap_local(kaddr);
cur_bit += nbits;
} while (cur_bit <= last_bit);
+
+ if (unlikely(cur_bit <= last_bit)) {
+ bitmap->set_ahead_length =
+ ((last_bit - cur_bit + 1) << bitmap->mapped.pgshift);
+ }
}
EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD);
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index d9e9920c7eba..7a2199470f31 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -36,11 +36,12 @@ static struct mock_bus_type iommufd_mock_bus_type = {
},
};
-static atomic_t mock_dev_num;
+static DEFINE_IDA(mock_dev_ida);
enum {
MOCK_DIRTY_TRACK = 1,
MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
+ MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
/*
* Like a real page table alignment requires the low bits of the address
@@ -53,6 +54,7 @@ enum {
MOCK_PFN_START_IOVA = _MOCK_PFN_START,
MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
+ MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
/*
@@ -61,8 +63,8 @@ enum {
* In syzkaller mode the 64 bit IOVA is converted into an nth area and offset
* value. This has a much smaller randomization space and syzkaller can hit it.
*/
-static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
- u64 *iova)
+static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
+ u64 *iova)
{
struct syz_layout {
__u32 nth_area;
@@ -86,6 +88,21 @@ static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
return 0;
}
+static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
+ u64 *iova)
+{
+ unsigned long ret;
+
+ mutex_lock(&access->ioas_lock);
+ if (!access->ioas) {
+ mutex_unlock(&access->ioas_lock);
+ return 0;
+ }
+ ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
+ mutex_unlock(&access->ioas_lock);
+ return ret;
+}
+
void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
unsigned int ioas_id, u64 *iova, u32 *flags)
{
@@ -98,7 +115,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
if (IS_ERR(ioas))
return;
- *iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
+ *iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
iommufd_put_object(ucmd->ictx, &ioas->obj);
}
@@ -121,6 +138,7 @@ enum selftest_obj_type {
struct mock_dev {
struct device dev;
unsigned long flags;
+ int id;
};
struct selftest_obj {
@@ -191,6 +209,34 @@ static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
return 0;
}
+static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
+ unsigned long iova, size_t page_size,
+ unsigned long flags)
+{
+ unsigned long cur, end = iova + page_size - 1;
+ bool dirty = false;
+ void *ent, *old;
+
+ for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
+ ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
+ if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
+ continue;
+
+ dirty = true;
+ /* Clear dirty */
+ if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
+ unsigned long val;
+
+ val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
+ old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
+ xa_mk_value(val), GFP_KERNEL);
+ WARN_ON_ONCE(ent != old);
+ }
+ }
+
+ return dirty;
+}
+
static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
unsigned long iova, size_t size,
unsigned long flags,
@@ -198,31 +244,31 @@ static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
{
struct mock_iommu_domain *mock =
container_of(domain, struct mock_iommu_domain, domain);
- unsigned long i, max = size / MOCK_IO_PAGE_SIZE;
- void *ent, *old;
+ unsigned long end = iova + size;
+ void *ent;
if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
return -EINVAL;
- for (i = 0; i < max; i++) {
- unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE;
+ do {
+ unsigned long pgsize = MOCK_IO_PAGE_SIZE;
+ unsigned long head;
- ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
- if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) {
- /* Clear dirty */
- if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
- unsigned long val;
-
- val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
- old = xa_store(&mock->pfns,
- cur / MOCK_IO_PAGE_SIZE,
- xa_mk_value(val), GFP_KERNEL);
- WARN_ON_ONCE(ent != old);
- }
- iommu_dirty_bitmap_record(dirty, cur,
- MOCK_IO_PAGE_SIZE);
+ ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
+ if (!ent) {
+ iova += pgsize;
+ continue;
}
- }
+
+ if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
+ pgsize = MOCK_HUGE_PAGE_SIZE;
+ head = iova & ~(pgsize - 1);
+
+ /* Clear dirty */
+ if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
+ iommu_dirty_bitmap_record(dirty, head, pgsize);
+ iova = head + pgsize;
+ } while (iova < end);
return 0;
}
@@ -234,6 +280,7 @@ const struct iommu_dirty_ops dirty_ops = {
static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
{
+ struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
struct mock_iommu_domain *mock;
mock = kzalloc(sizeof(*mock), GFP_KERNEL);
@@ -242,6 +289,8 @@ static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
+ if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
+ mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
mock->domain.ops = mock_ops.default_domain_ops;
mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
xa_init(&mock->pfns);
@@ -287,7 +336,7 @@ mock_domain_alloc_user(struct device *dev, u32 flags,
return ERR_PTR(-EOPNOTSUPP);
if (user_data || (has_dirty_flag && no_dirty_ops))
return ERR_PTR(-EOPNOTSUPP);
- domain = mock_domain_alloc_paging(NULL);
+ domain = mock_domain_alloc_paging(dev);
if (!domain)
return ERR_PTR(-ENOMEM);
if (has_dirty_flag)
@@ -350,6 +399,9 @@ static int mock_domain_map_pages(struct iommu_domain *domain,
if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
flags = MOCK_PFN_LAST_IOVA;
+ if (pgsize != MOCK_IO_PAGE_SIZE)
+ flags |= MOCK_PFN_HUGE_IOVA;
old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
flags),
@@ -394,20 +446,27 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
/*
* iommufd generates unmaps that must be a strict
- * superset of the map's performend So every starting
- * IOVA should have been an iova passed to map, and the
+ * superset of the maps performed. So every
+ * starting/ending IOVA should have been an iova passed
+ * to map.
*
- * First IOVA must be present and have been a first IOVA
- * passed to map_pages
+ * This simple logic doesn't work when HUGE_PAGE is
+ * turned on, since the core code will automatically
+ * switch between the two page sizes, creating a break
+ * in the unmap calls. The break can land in the middle
+ * of a contiguous IOVA range.
*/
- if (first) {
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_START_IOVA));
- first = false;
+ if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
+ if (first) {
+ WARN_ON(ent && !(xa_to_value(ent) &
+ MOCK_PFN_START_IOVA));
+ first = false;
+ }
+ if (pgcount == 1 &&
+ cur + MOCK_IO_PAGE_SIZE == pgsize)
+ WARN_ON(ent && !(xa_to_value(ent) &
+ MOCK_PFN_LAST_IOVA));
}
- if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_LAST_IOVA));
iova += MOCK_IO_PAGE_SIZE;
ret += MOCK_IO_PAGE_SIZE;
@@ -595,7 +654,7 @@ static void mock_dev_release(struct device *dev)
{
struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
- atomic_dec(&mock_dev_num);
+ ida_free(&mock_dev_ida, mdev->id);
kfree(mdev);
}
@@ -604,7 +663,8 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
struct mock_dev *mdev;
int rc;
- if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY))
+ if (dev_flags &
+ ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
return ERR_PTR(-EINVAL);
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
@@ -616,8 +676,12 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
mdev->dev.release = mock_dev_release;
mdev->dev.bus = &iommufd_mock_bus_type.bus;
- rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
- atomic_inc_return(&mock_dev_num));
+ rc = ida_alloc(&mock_dev_ida, GFP_KERNEL);
+ if (rc < 0)
+ goto err_put;
+ mdev->id = rc;
+
+ rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id);
if (rc)
goto err_put;
@@ -1119,7 +1183,7 @@ static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
}
if (flags & MOCK_FLAGS_ACCESS_SYZ)
- iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
+ iova = iommufd_test_syz_conv_iova(staccess->access,
&cmd->access_pages.iova);
npages = (ALIGN(iova + length, PAGE_SIZE) -
@@ -1221,8 +1285,8 @@ static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
}
if (flags & MOCK_FLAGS_ACCESS_SYZ)
- iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
- &cmd->access_rw.iova);
+ iova = iommufd_test_syz_conv_iova(staccess->access,
+ &cmd->access_rw.iova);
rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
if (rc)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index d30e453d0fb4..d59d0ea2fd21 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -24,24 +24,8 @@ static bool iova_rcache_insert(struct iova_domain *iovad,
static unsigned long iova_rcache_get(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn);
-static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
-
-unsigned long iova_rcache_range(void)
-{
- return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
-}
-
-static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
-{
- struct iova_domain *iovad;
-
- iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
-
- free_cpu_cached_iovas(cpu, iovad);
- return 0;
-}
-
+static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_global_cached_iovas(struct iova_domain *iovad);
static struct iova *to_iova(struct rb_node *node)
@@ -252,54 +236,6 @@ static void free_iova_mem(struct iova *iova)
kmem_cache_free(iova_cache, iova);
}
-int iova_cache_get(void)
-{
- mutex_lock(&iova_cache_mutex);
- if (!iova_cache_users) {
- int ret;
-
- ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
- iova_cpuhp_dead);
- if (ret) {
- mutex_unlock(&iova_cache_mutex);
- pr_err("Couldn't register cpuhp handler\n");
- return ret;
- }
-
- iova_cache = kmem_cache_create(
- "iommu_iova", sizeof(struct iova), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!iova_cache) {
- cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
- mutex_unlock(&iova_cache_mutex);
- pr_err("Couldn't create iova cache\n");
- return -ENOMEM;
- }
- }
-
- iova_cache_users++;
- mutex_unlock(&iova_cache_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(iova_cache_get);
-
-void iova_cache_put(void)
-{
- mutex_lock(&iova_cache_mutex);
- if (WARN_ON(!iova_cache_users)) {
- mutex_unlock(&iova_cache_mutex);
- return;
- }
- iova_cache_users--;
- if (!iova_cache_users) {
- cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
- kmem_cache_destroy(iova_cache);
- }
- mutex_unlock(&iova_cache_mutex);
-}
-EXPORT_SYMBOL_GPL(iova_cache_put);
-
/**
* alloc_iova - allocates an iova
* @iovad: - iova domain in question
@@ -654,11 +590,18 @@ struct iova_rcache {
struct delayed_work work;
};
+static struct kmem_cache *iova_magazine_cache;
+
+unsigned long iova_rcache_range(void)
+{
+ return PAGE_SIZE << (IOVA_RANGE_CACHE_MAX_SIZE - 1);
+}
+
static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
struct iova_magazine *mag;
- mag = kmalloc(sizeof(*mag), flags);
+ mag = kmem_cache_alloc(iova_magazine_cache, flags);
if (mag)
mag->size = 0;
@@ -667,7 +610,7 @@ static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
static void iova_magazine_free(struct iova_magazine *mag)
{
- kfree(mag);
+ kmem_cache_free(iova_magazine_cache, mag);
}
static void
@@ -990,5 +933,71 @@ static void free_global_cached_iovas(struct iova_domain *iovad)
spin_unlock_irqrestore(&rcache->lock, flags);
}
}
+
+static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct iova_domain *iovad;
+
+ iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
+
+ free_cpu_cached_iovas(cpu, iovad);
+ return 0;
+}
+
+int iova_cache_get(void)
+{
+ int err = -ENOMEM;
+
+ mutex_lock(&iova_cache_mutex);
+ if (!iova_cache_users) {
+ iova_cache = kmem_cache_create("iommu_iova", sizeof(struct iova), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!iova_cache)
+ goto out_err;
+
+ iova_magazine_cache = kmem_cache_create("iommu_iova_magazine",
+ sizeof(struct iova_magazine),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!iova_magazine_cache)
+ goto out_err;
+
+ err = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead",
+ NULL, iova_cpuhp_dead);
+ if (err) {
+ pr_err("IOVA: Couldn't register cpuhp handler: %pe\n", ERR_PTR(err));
+ goto out_err;
+ }
+ }
+
+ iova_cache_users++;
+ mutex_unlock(&iova_cache_mutex);
+
+ return 0;
+
+out_err:
+ kmem_cache_destroy(iova_cache);
+ kmem_cache_destroy(iova_magazine_cache);
+ mutex_unlock(&iova_cache_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(iova_cache_get);
+
+void iova_cache_put(void)
+{
+ mutex_lock(&iova_cache_mutex);
+ if (WARN_ON(!iova_cache_users)) {
+ mutex_unlock(&iova_cache_mutex);
+ return;
+ }
+ iova_cache_users--;
+ if (!iova_cache_users) {
+ cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
+ kmem_cache_destroy(iova_cache);
+ kmem_cache_destroy(iova_magazine_cache);
+ }
+ mutex_unlock(&iova_cache_mutex);
+}
+EXPORT_SYMBOL_GPL(iova_cache_put);
+
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ace1fc4bd34b..b657cc09605f 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -709,7 +709,7 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
}
static int ipmmu_init_platform_device(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *ipmmu_pdev;
@@ -773,7 +773,7 @@ static bool ipmmu_device_is_allowed(struct device *dev)
}
static int ipmmu_of_xlate(struct device *dev,
- struct of_phandle_args *spec)
+ const struct of_phandle_args *spec)
{
if (!ipmmu_device_is_allowed(dev))
return -ENODEV;
@@ -1005,7 +1005,6 @@ static const struct of_device_id ipmmu_of_ids[] = {
static int ipmmu_probe(struct platform_device *pdev)
{
struct ipmmu_vmsa_device *mmu;
- struct resource *res;
int irq;
int ret;
@@ -1025,8 +1024,7 @@ static int ipmmu_probe(struct platform_device *pdev)
return ret;
/* Map I/O memory and request IRQ. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmu->base = devm_ioremap_resource(&pdev->dev, res);
+ mmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmu->base))
return PTR_ERR(mmu->base);
@@ -1123,7 +1121,6 @@ static void ipmmu_remove(struct platform_device *pdev)
ipmmu_device_reset(mmu);
}
-#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
@@ -1153,18 +1150,14 @@ static int ipmmu_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops ipmmu_pm = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
-#define DEV_PM_OPS &ipmmu_pm
-#else
-#define DEV_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
static struct platform_driver ipmmu_driver = {
.driver = {
.name = "ipmmu-vmsa",
- .of_match_table = of_match_ptr(ipmmu_of_ids),
- .pm = DEV_PM_OPS,
+ .of_match_table = ipmmu_of_ids,
+ .pm = pm_sleep_ptr(&ipmmu_pm),
},
.probe = ipmmu_probe,
.remove_new = ipmmu_remove,
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 83314b9d8f38..ee59647c2050 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -99,7 +99,8 @@ int __init irq_remapping_prepare(void)
if (disable_irq_remap)
return -ENOSYS;
- if (intel_irq_remap_ops.prepare() == 0)
+ if (IS_ENABLED(CONFIG_INTEL_IOMMU) &&
+ intel_irq_remap_ops.prepare() == 0)
remap_ops = &intel_irq_remap_ops;
else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
amd_iommu_irq_ops.prepare() == 0)
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index f86af9815d6f..989e0869d805 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -598,7 +598,7 @@ static void print_ctx_regs(void __iomem *base, int ctx)
static int insert_iommu_master(struct device *dev,
struct msm_iommu_dev **iommu,
- struct of_phandle_args *spec)
+ const struct of_phandle_args *spec)
{
struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev);
int sid;
@@ -626,7 +626,7 @@ static int insert_iommu_master(struct device *dev,
}
static int qcom_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *spec)
+ const struct of_phandle_args *spec)
{
struct msm_iommu_dev *iommu = NULL, *iter;
unsigned long flags;
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 7abe9e85a570..b8c47f18bc26 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -957,7 +957,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
return group;
}
-static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int mtk_iommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct platform_device *m4updev;
@@ -1264,7 +1265,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->plat_data = of_device_get_match_data(dev);
/* Protect memory. HW accesses this region on a translation fault. */
- protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
+ protect = devm_kcalloc(dev, 2, MTK_PROTECT_PA_ALIGN, GFP_KERNEL);
if (!protect)
return -ENOMEM;
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 25b41222abae..a9fa2a54dc9b 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -398,7 +398,8 @@ static const struct iommu_ops mtk_iommu_v1_ops;
* MTK generation one iommu HW supports only one iommu domain; all the
* clients share the same iova address space.
*/
-static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_args *args)
+static int mtk_iommu_v1_create_mapping(struct device *dev,
+ const struct of_phandle_args *args)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct mtk_iommu_v1_data *data;
@@ -621,8 +622,8 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
data->dev = dev;
/* Protect memory. HW accesses this region on a translation fault. */
- protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2,
- GFP_KERNEL | GFP_DMA);
+ protect = devm_kcalloc(dev, 2, MTK_PROTECT_PA_ALIGN,
+ GFP_KERNEL | GFP_DMA);
if (!protect)
return -ENOMEM;
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 719652b60840..3afe0b48a48d 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -29,7 +29,7 @@ static int of_iommu_xlate(struct device *dev,
!of_device_is_available(iommu_spec->np))
return -ENODEV;
- ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
+ ret = iommu_fwspec_init(dev, fwnode, ops);
if (ret)
return ret;
/*
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 2685861c0a12..da79d9f4cf63 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1140,7 +1140,7 @@ static void rk_iommu_release_device(struct device *dev)
}
static int rk_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *iommu_dev;
struct rk_iommudata *data;
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 537359f10997..ba53571a8239 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -390,7 +390,8 @@ static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
return &sdev->iommu;
}
-static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int sprd_iommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct platform_device *pdev;
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 41484a5a399b..decd52cba998 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -819,7 +819,7 @@ static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
}
static int sun50i_iommu_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
unsigned id = args->args[0];
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 310871728ab4..14e525bd0d9b 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -830,7 +830,7 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
}
static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
const struct iommu_ops *ops = smmu->iommu.ops;
int err;
@@ -959,7 +959,7 @@ static struct iommu_group *tegra_smmu_device_group(struct device *dev)
}
static int tegra_smmu_of_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
struct tegra_mc *mc = platform_get_drvdata(iommu_pdev);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 34db37fd9675..04048f64a2c0 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1051,7 +1051,8 @@ static struct iommu_group *viommu_device_group(struct device *dev)
return generic_device_group(dev);
}
-static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static int viommu_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
return iommu_fwspec_add_ids(dev, args->args, 1);
}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index f7149d0f3d45..72c07a12f5e1 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -546,6 +546,17 @@ config SIFIVE_PLIC
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
+config STARFIVE_JH8100_INTC
+ bool "StarFive JH8100 External Interrupt Controller"
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ default ARCH_STARFIVE
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ This enables support for the INTC chip found in the StarFive
+ JH8100 SoC.
+
+ If you don't know what to do here, say Y.
+
config EXYNOS_IRQ_COMBINER
bool "Samsung Exynos IRQ combiner support" if COMPILE_TEST
depends on (ARCH_EXYNOS && ARM) || COMPILE_TEST
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index ffd945fe71aa..ec4a18380998 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
+obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_IMX_INTMUX) += irq-imx-intmux.o
obj-$(CONFIG_IMX_MU_MSI) += irq-imx-mu-msi.o
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index 9745a119d0e6..eb02d203c963 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -242,7 +242,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn,
else if (intc->n_words != n_words)
return -EINVAL;
- cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+ cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, enable_cache, n_words),
GFP_KERNEL);
if (!cpu)
return -ENOMEM;
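
struct_size() computes the allocation size for a struct with a trailing flexible array, saturating to SIZE_MAX on arithmetic overflow so the allocator fails cleanly rather than under-allocating. A minimal sketch with a hypothetical struct:

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct demo_cpu {
		void __iomem	*map_base;
		u32		enable_cache[];	/* flexible array member */
	};

	static struct demo_cpu *demo_alloc(size_t n_words)
	{
		struct demo_cpu *cpu;

		/* == kzalloc(sizeof(*cpu) + n_words * sizeof(u32), ...),
		 * but overflow-safe. */
		cpu = kzalloc(struct_size(cpu, enable_cache, n_words), GFP_KERNEL);
		return cpu;
	}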
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 24ca1d656adc..36e71af054e9 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -249,7 +249,7 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
return -EINVAL;
}
- cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+ cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, mask_cache, n_words),
GFP_KERNEL);
if (!cpu)
return -ENOMEM;
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 5559c943f03f..2b0b3175cea0 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -2,7 +2,7 @@
/*
* Generic Broadcom Set Top Box Level 2 Interrupt controller driver
*
- * Copyright (C) 2014-2017 Broadcom
+ * Copyright (C) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -112,6 +112,9 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
generic_handle_domain_irq(b->domain, irq);
} while (status);
out:
+ /* Don't ack parent before all device writes are done */
+ wmb();
+
chained_irq_exit(chip, desc);
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index d097001c1e3e..fca888b36680 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -207,6 +207,11 @@ static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
}
+static bool rdists_support_shareable(void)
+{
+ return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
+}
+
static u16 get_its_list(struct its_vm *vm)
{
struct its_node *its;
@@ -2710,10 +2715,12 @@ static u64 inherit_vpe_l1_table_from_its(void)
break;
}
val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
- val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
- FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
- val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
- FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+ if (rdists_support_shareable()) {
+ val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
+ FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
+ val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
+ FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+ }
val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
return val;
@@ -2936,8 +2943,10 @@ static int allocate_vpe_l1_table(void)
WARN_ON(!IS_ALIGNED(pa, psz));
val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
- val |= GICR_VPROPBASER_RaWb;
- val |= GICR_VPROPBASER_InnerShareable;
+ if (rdists_support_shareable()) {
+ val |= GICR_VPROPBASER_RaWb;
+ val |= GICR_VPROPBASER_InnerShareable;
+ }
val |= GICR_VPROPBASER_4_1_Z;
val |= GICR_VPROPBASER_4_1_VALID;
@@ -3126,7 +3135,7 @@ static void its_cpu_init_lpis(void)
gicr_write_propbaser(val, rbase + GICR_PROPBASER);
tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
- if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+ if (!rdists_support_shareable())
tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
@@ -3153,7 +3162,7 @@ static void its_cpu_init_lpis(void)
gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
- if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+ if (!rdists_support_shareable())
tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
@@ -3172,6 +3181,7 @@ static void its_cpu_init_lpis(void)
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
+out:
if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
@@ -3207,7 +3217,6 @@ static void its_cpu_init_lpis(void)
/* Make sure the GIC has seen the above */
dsb(sy);
-out:
gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
smp_processor_id(),
@@ -3817,8 +3826,9 @@ static int its_vpe_set_affinity(struct irq_data *d,
bool force)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
- int from, cpu = cpumask_first(mask_val);
+ struct cpumask common, *table_mask;
unsigned long flags;
+ int from, cpu;
/*
* Changing affinity is mega expensive, so let's be as lazy as
@@ -3834,19 +3844,22 @@ static int its_vpe_set_affinity(struct irq_data *d,
* taken on any vLPI handling path that evaluates vpe->col_idx.
*/
from = vpe_to_cpuid_lock(vpe, &flags);
- if (from == cpu)
- goto out;
-
- vpe->col_idx = cpu;
+ table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
/*
- * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
- * is sharing its VPE table with the current one.
+ * If we are offered another CPU in the same GICv4.1 ITS
+ * affinity, pick this one. Otherwise, any CPU will do.
*/
- if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
- cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
+ if (table_mask && cpumask_and(&common, mask_val, table_mask))
+ cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
+ else
+ cpu = cpumask_first(mask_val);
+
+ if (from == cpu)
goto out;
+ vpe->col_idx = cpu;
+
its_send_vmovp(vpe);
its_vpe_db_proxy_move(vpe, from, cpu);
@@ -3880,14 +3893,18 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
GENMASK_ULL(51, 12);
val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
- val |= GICR_VPROPBASER_RaWb;
- val |= GICR_VPROPBASER_InnerShareable;
+ if (rdists_support_shareable()) {
+ val |= GICR_VPROPBASER_RaWb;
+ val |= GICR_VPROPBASER_InnerShareable;
+ }
gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
val = virt_to_phys(page_address(vpe->vpt_page)) &
GENMASK_ULL(51, 16);
- val |= GICR_VPENDBASER_RaWaWb;
- val |= GICR_VPENDBASER_InnerShareable;
+ if (rdists_support_shareable()) {
+ val |= GICR_VPENDBASER_RaWaWb;
+ val |= GICR_VPENDBASER_InnerShareable;
+ }
/*
* There is no good way of finding out if the pending table is
* empty as we can race against the doorbell interrupt very
@@ -4419,12 +4436,12 @@ static const struct irq_domain_ops its_sgi_domain_ops = {
static int its_vpe_id_alloc(void)
{
- return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
+ return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL);
}
static void its_vpe_id_free(u16 id)
{
- ida_simple_remove(&its_vpeid_ida, id);
+ ida_free(&its_vpeid_ida, id);
}
static int its_vpe_init(struct its_vpe *vpe)
@@ -5078,6 +5095,8 @@ static int __init its_probe_one(struct its_node *its)
u32 ctlr;
int err;
+ its_enable_quirks(its);
+
if (is_v4(its)) {
if (!(its->typer & GITS_TYPER_VMOVP)) {
err = its_compute_its_list_map(its);
@@ -5429,7 +5448,6 @@ static int __init its_of_probe(struct device_node *node)
if (!its)
return -ENOMEM;
- its_enable_quirks(its);
err = its_probe_one(its);
if (err) {
its_node_destroy(its);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 98b0329b7154..6fb276504bcc 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -19,6 +19,7 @@
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/iopoll.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
@@ -180,11 +181,6 @@ static enum gic_intid_range get_intid_range(struct irq_data *d)
return __get_intid_range(d->hwirq);
}
-static inline unsigned int gic_irq(struct irq_data *d)
-{
- return d->hwirq;
-}
-
static inline bool gic_irq_in_rdist(struct irq_data *d)
{
switch (get_intid_range(d)) {
@@ -251,17 +247,13 @@ static inline void __iomem *gic_dist_base(struct irq_data *d)
static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
- u32 count = 1000000; /* 1s! */
+ u32 val;
+ int ret;
- while (readl_relaxed(base + GICD_CTLR) & bit) {
- count--;
- if (!count) {
- pr_err_ratelimited("RWP timeout, gone fishing\n");
- return;
- }
- cpu_relax();
- udelay(1);
- }
+ ret = readl_relaxed_poll_timeout_atomic(base + GICD_CTLR, val, !(val & bit),
+ 1, USEC_PER_SEC);
+ if (ret == -ETIMEDOUT)
+ pr_err_ratelimited("RWP timeout, gone fishing\n");
}
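
For reference, the poll helper takes (addr, val, cond, delay_us, timeout_us) and returns 0 or -ETIMEDOUT. A generic sketch with hypothetical register names:

	u32 val;
	int ret;

	/* Re-read every 1 us, give up after 1 s. Safe in atomic context. */
	ret = readl_relaxed_poll_timeout_atomic(base + MY_STATUS, val,
						!(val & MY_BUSY),
						1, USEC_PER_SEC);
	if (ret == -ETIMEDOUT)
		pr_err("device stuck busy\n");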
/* Wait for completion of a distributor change */
@@ -279,8 +271,8 @@ static void gic_redist_wait_for_rwp(void)
static void gic_enable_redist(bool enable)
{
void __iomem *rbase;
- u32 count = 1000000; /* 1s! */
u32 val;
+ int ret;
if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
return;
@@ -301,16 +293,13 @@ static void gic_enable_redist(bool enable)
return; /* No PM support in this redistributor */
}
- while (--count) {
- val = readl_relaxed(rbase + GICR_WAKER);
- if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
- break;
- cpu_relax();
- udelay(1);
- }
- if (!count)
+ ret = readl_relaxed_poll_timeout_atomic(rbase + GICR_WAKER, val,
+ enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep),
+ 1, USEC_PER_SEC);
+ if (ret == -ETIMEDOUT) {
pr_err_ratelimited("redistributor failed to %s...\n",
enable ? "wakeup" : "sleep");
+ }
}
/*
@@ -548,7 +537,7 @@ static int gic_irq_nmi_setup(struct irq_data *d)
* A secondary irq_chip should be in charge of LPI request,
* it should not be possible to get there
*/
- if (WARN_ON(gic_irq(d) >= 8192))
+ if (WARN_ON(irqd_to_hwirq(d) >= 8192))
return -EINVAL;
/* desc lock should already be held */
@@ -588,7 +577,7 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
* A secondary irq_chip should be in charge of LPI request,
* it should not be possible to get there
*/
- if (WARN_ON(gic_irq(d) >= 8192))
+ if (WARN_ON(irqd_to_hwirq(d) >= 8192))
return;
/* desc lock should already be held */
@@ -626,7 +615,7 @@ static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
static void gic_eoi_irq(struct irq_data *d)
{
- write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
+ write_gicreg(irqd_to_hwirq(d), ICC_EOIR1_EL1);
isb();
if (gic_arm64_erratum_2941627_needed(d)) {
@@ -646,19 +635,19 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d)
* No need to deactivate an LPI, or an interrupt that
* is getting forwarded to a vcpu.
*/
- if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
+ if (irqd_to_hwirq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
return;
if (!gic_arm64_erratum_2941627_needed(d))
- gic_write_dir(gic_irq(d));
+ gic_write_dir(irqd_to_hwirq(d));
else
gic_poke_irq(d, GICD_ICACTIVER);
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
+ irq_hw_number_t irq = irqd_to_hwirq(d);
enum gic_intid_range range;
- unsigned int irq = gic_irq(d);
void __iomem *base;
u32 offset, index;
int ret;
@@ -684,7 +673,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
ret = gic_configure_irq(index, type, base + offset, NULL);
if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
/* Misconfigured PPIs are usually not fatal */
- pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
+ pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq);
ret = 0;
}
@@ -1702,9 +1691,13 @@ static int gic_irq_domain_select(struct irq_domain *d,
irq_hw_number_t hwirq;
/* Not for us */
- if (fwspec->fwnode != d->fwnode)
+ if (fwspec->fwnode != d->fwnode)
return 0;
+ /* Handle pure domain searches */
+ if (!fwspec->param_count)
+ return d->bus_token == bus_token;
+
/* If this is not DT, then we have a single domain */
if (!is_of_node(fwspec->fwnode))
return 1;
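For reference, the two polling conversions above share one shape: a busy-wait helper from <linux/iopoll.h> replaces the hand-rolled one-second loop. A minimal sketch of the pattern, assuming a hypothetical status register whose ready bit must clear; names here are illustrative, not from the patch:

#include <linux/iopoll.h>

/*
 * Spin without sleeping until @bit clears in @reg, polling every 1us
 * for up to one second, the same budget the removed loops used.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int example_wait_bit_clear(void __iomem *reg, u32 bit)
{
	u32 val;

	return readl_relaxed_poll_timeout_atomic(reg, val, !(val & bit),
						 1, USEC_PER_SEC);
}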
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 412196a7dad5..98aa383e39db 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -162,11 +162,6 @@ static inline void __iomem *gic_cpu_base(struct irq_data *d)
return gic_data_cpu_base(gic_data);
}
-static inline unsigned int gic_irq(struct irq_data *d)
-{
- return d->hwirq;
-}
-
static inline bool cascading_gic_irq(struct irq_data *d)
{
void *data = irq_data_get_irq_handler_data(d);
@@ -183,14 +178,16 @@ static inline bool cascading_gic_irq(struct irq_data *d)
*/
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
- u32 mask = 1 << (gic_irq(d) % 32);
- writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
+ u32 mask = 1 << (irqd_to_hwirq(d) % 32);
+
+ writel_relaxed(mask, gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4);
}
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
- u32 mask = 1 << (gic_irq(d) % 32);
- return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
+ u32 mask = 1 << (irqd_to_hwirq(d) % 32);
+
+ return !!(readl_relaxed(gic_dist_base(d) + offset + (irqd_to_hwirq(d) / 32) * 4) & mask);
}
static void gic_mask_irq(struct irq_data *d)
@@ -220,7 +217,7 @@ static void gic_unmask_irq(struct irq_data *d)
static void gic_eoi_irq(struct irq_data *d)
{
- u32 hwirq = gic_irq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
if (hwirq < 16)
hwirq = this_cpu_read(sgi_intid);
@@ -230,7 +227,7 @@ static void gic_eoi_irq(struct irq_data *d)
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
- u32 hwirq = gic_irq(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
/* Do not deactivate an IRQ forwarded to a vcpu. */
if (irqd_is_forwarded_to_vcpu(d))
@@ -293,8 +290,8 @@ static int gic_irq_get_irqchip_state(struct irq_data *d,
static int gic_set_type(struct irq_data *d, unsigned int type)
{
+ irq_hw_number_t gicirq = irqd_to_hwirq(d);
void __iomem *base = gic_dist_base(d);
- unsigned int gicirq = gic_irq(d);
int ret;
/* Interrupt configuration for SGIs can't be changed */
@@ -309,7 +306,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL);
if (ret && gicirq < 32) {
/* Misconfigured PPIs are usually not fatal */
- pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16);
+ pr_warn("GIC: PPI%ld is secure or misconfigured\n", gicirq - 16);
ret = 0;
}
@@ -319,7 +316,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
- if (cascading_gic_irq(d) || gic_irq(d) < 16)
+ if (cascading_gic_irq(d) || irqd_to_hwirq(d) < 16)
return -EINVAL;
if (vcpu)
@@ -796,7 +793,7 @@ static void rmw_writeb(u8 bval, void __iomem *addr)
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + irqd_to_hwirq(d);
struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
unsigned int cpu;
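The gic_irq() removals above are mechanical: irqd_to_hwirq() returns d->hwirq as irq_hw_number_t (an unsigned long), which is also why the printk specifiers change from %d to %ld. A hedged sketch of the resulting accessor pattern; example_base() and the register layout are hypothetical:

static void example_poke(struct irq_data *d, u32 offset)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);	/* was: gic_irq(d) */
	u32 mask = BIT(hwirq % 32);

	/* One 32-bit register covers 32 interrupts in this sketch. */
	writel_relaxed(mask, example_base(d) + offset + (hwirq / 32) * 4);
}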
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 5831be454673..b42ed68acfa6 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -461,12 +461,11 @@ err_generic:
return ret;
}
-static int pdc_intc_remove(struct platform_device *pdev)
+static void pdc_intc_remove(struct platform_device *pdev)
{
struct pdc_intc_priv *priv = platform_get_drvdata(pdev);
irq_domain_remove(priv->domain);
- return 0;
}
static const struct of_device_id pdc_intc_match[] = {
@@ -479,8 +478,8 @@ static struct platform_driver pdc_intc_driver = {
.name = "pdc-intc",
.of_match_table = pdc_intc_match,
},
- .probe = pdc_intc_probe,
- .remove = pdc_intc_remove,
+ .probe = pdc_intc_probe,
+ .remove_new = pdc_intc_remove,
};
static int __init pdc_intc_init(void)
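The .remove to .remove_new conversions repeated throughout the drivers below all follow the same recipe: the callback returns void, since the driver core ignored the old int return anyway. A minimal sketch with placeholder names ("foo" is not from the patch):

static void foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	irq_domain_remove(priv->domain);
	/* no "return 0;" needed; the core never acted on it */
}

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.remove_new	= foo_remove,	/* void-returning variant */
	.driver		= {
		.name	= "foo",
	},
};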
diff --git a/drivers/irqchip/irq-imx-intmux.c b/drivers/irqchip/irq-imx-intmux.c
index aa041e4dfee0..511adfaeec82 100644
--- a/drivers/irqchip/irq-imx-intmux.c
+++ b/drivers/irqchip/irq-imx-intmux.c
@@ -166,6 +166,10 @@ static int imx_intmux_irq_select(struct irq_domain *d, struct irq_fwspec *fwspec
if (fwspec->fwnode != d->fwnode)
return false;
+ /* Handle pure domain searches */
+ if (!fwspec->param_count)
+ return d->bus_token == bus_token;
+
return irqchip_data->chanidx == fwspec->param[1];
}
@@ -282,7 +286,7 @@ out:
return ret;
}
-static int imx_intmux_remove(struct platform_device *pdev)
+static void imx_intmux_remove(struct platform_device *pdev)
{
struct intmux_data *data = platform_get_drvdata(pdev);
int i;
@@ -298,8 +302,6 @@ static int imx_intmux_remove(struct platform_device *pdev)
}
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -354,11 +356,11 @@ static const struct of_device_id imx_intmux_id[] = {
static struct platform_driver imx_intmux_driver = {
.driver = {
- .name = "imx-intmux",
- .of_match_table = imx_intmux_id,
- .pm = &imx_intmux_pm_ops,
+ .name = "imx-intmux",
+ .of_match_table = imx_intmux_id,
+ .pm = &imx_intmux_pm_ops,
},
- .probe = imx_intmux_probe,
- .remove = imx_intmux_remove,
+ .probe = imx_intmux_probe,
+ .remove_new = imx_intmux_remove,
};
builtin_platform_driver(imx_intmux_driver);
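Both select() fixes (here and in irq-gic-v3.c above) guard against a fwspec with no parameters, which the IRQ core issues for pure bus-token domain lookups. A sketch of a full callback under that assumption; example_chanidx() is hypothetical:

static int example_select(struct irq_domain *d, struct irq_fwspec *fwspec,
			  enum irq_domain_bus_token bus_token)
{
	/* Not for us */
	if (fwspec->fwnode != d->fwnode)
		return 0;

	/* Pure domain search: no parameters, match on the token alone */
	if (!fwspec->param_count)
		return d->bus_token == bus_token;

	/* Parameterised search: match the device-specific cells */
	return example_chanidx(d) == fwspec->param[1];
}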
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index bd9543314539..20cf7a9e9ece 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -231,7 +231,7 @@ out:
return ret;
}
-static int imx_irqsteer_remove(struct platform_device *pdev)
+static void imx_irqsteer_remove(struct platform_device *pdev)
{
struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
int i;
@@ -243,8 +243,6 @@ static int imx_irqsteer_remove(struct platform_device *pdev)
irq_domain_remove(irqsteer_data->domain);
clk_disable_unprepare(irqsteer_data->ipg_clk);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -307,11 +305,11 @@ static const struct of_device_id imx_irqsteer_dt_ids[] = {
static struct platform_driver imx_irqsteer_driver = {
.driver = {
- .name = "imx-irqsteer",
- .of_match_table = imx_irqsteer_dt_ids,
- .pm = &imx_irqsteer_pm_ops,
+ .name = "imx-irqsteer",
+ .of_match_table = imx_irqsteer_dt_ids,
+ .pm = &imx_irqsteer_pm_ops,
},
- .probe = imx_irqsteer_probe,
- .remove = imx_irqsteer_remove,
+ .probe = imx_irqsteer_probe,
+ .remove_new = imx_irqsteer_remove,
};
builtin_platform_driver(imx_irqsteer_driver);
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index a36396db4b08..30f1979fa124 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -190,7 +190,7 @@ static int keystone_irq_probe(struct platform_device *pdev)
return 0;
}
-static int keystone_irq_remove(struct platform_device *pdev)
+static void keystone_irq_remove(struct platform_device *pdev)
{
struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
int hwirq;
@@ -201,7 +201,6 @@ static int keystone_irq_remove(struct platform_device *pdev)
irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
irq_domain_remove(kirq->irqd);
- return 0;
}
static const struct of_device_id keystone_irq_dt_ids[] = {
@@ -212,7 +211,7 @@ MODULE_DEVICE_TABLE(of, keystone_irq_dt_ids);
static struct platform_driver keystone_irq_device_driver = {
.probe = keystone_irq_probe,
- .remove = keystone_irq_remove,
+ .remove_new = keystone_irq_remove,
.driver = {
.name = "keystone_irq",
.of_match_table = of_match_ptr(keystone_irq_dt_ids),
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index 1623cd779175..b64cbe3052e8 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -198,6 +198,12 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
+
+ /* Skip handling if pending bitmap is zero */
+ if (!pending)
+ continue;
+
+ /* Clear the IRQs */
iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
while (pending) {
int bit = __ffs(pending);
@@ -241,7 +247,7 @@ static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
int ret;
unsigned int i, type;
unsigned long hwirq = 0;
- struct eiointc *priv = domain->host_data;
+ struct eiointc_priv *priv = domain->host_data;
ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
if (ret)
@@ -304,23 +310,7 @@ static int eiointc_suspend(void)
static void eiointc_resume(void)
{
- int i, j;
- struct irq_desc *desc;
- struct irq_data *irq_data;
-
eiointc_router_init(0);
-
- for (i = 0; i < nr_pics; i++) {
- for (j = 0; j < eiointc_priv[0]->vec_count; j++) {
- desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j);
- if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) {
- raw_spin_lock(&desc->lock);
- irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc));
- eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0);
- raw_spin_unlock(&desc->lock);
- }
- }
- }
}
static struct syscore_ops eiointc_syscore_ops = {
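A sketch of the dispatch flow after the eiointc fix, assuming hypothetical example_read_isr()/example_ack_isr() accessors and an example_domain pointer: the early return avoids the ack write when nothing is pending in a given word.

static void example_dispatch_word(struct irq_domain *example_domain)
{
	unsigned long pending = example_read_isr();

	/* Skip handling (and the ack write) if the pending bitmap is zero */
	if (!pending)
		return;

	/* Clear the IRQs, then walk the set bits */
	example_ack_isr(pending);

	while (pending) {
		int bit = __ffs(pending);

		generic_handle_domain_irq(example_domain, bit);
		pending &= ~BIT(bit);
	}
}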
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 15cf80b46322..1aef5c4d27c6 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -398,7 +398,7 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
return 0;
}
-static int ls_scfg_msi_remove(struct platform_device *pdev)
+static void ls_scfg_msi_remove(struct platform_device *pdev)
{
struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
int i;
@@ -410,17 +410,15 @@ static int ls_scfg_msi_remove(struct platform_device *pdev)
irq_domain_remove(msi_data->parent);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static struct platform_driver ls_scfg_msi_driver = {
.driver = {
- .name = "ls-scfg-msi",
- .of_match_table = ls_scfg_msi_id,
+ .name = "ls-scfg-msi",
+ .of_match_table = ls_scfg_msi_id,
},
- .probe = ls_scfg_msi_probe,
- .remove = ls_scfg_msi_remove,
+ .probe = ls_scfg_msi_probe,
+ .remove_new = ls_scfg_msi_remove,
};
module_platform_driver(ls_scfg_msi_driver);
diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c
index 3eb1f8cdf674..acceb6e7fa95 100644
--- a/drivers/irqchip/irq-madera.c
+++ b/drivers/irqchip/irq-madera.c
@@ -222,7 +222,7 @@ static int madera_irq_probe(struct platform_device *pdev)
return 0;
}
-static int madera_irq_remove(struct platform_device *pdev)
+static void madera_irq_remove(struct platform_device *pdev)
{
struct madera *madera = dev_get_drvdata(pdev->dev.parent);
@@ -232,13 +232,11 @@ static int madera_irq_remove(struct platform_device *pdev)
*/
madera->irq_dev = NULL;
regmap_del_irq_chip(madera->irq, madera->irq_data);
-
- return 0;
}
static struct platform_driver madera_irq_driver = {
- .probe = &madera_irq_probe,
- .remove = &madera_irq_remove,
+ .probe = madera_irq_probe,
+ .remove_new = madera_irq_remove,
.driver = {
.name = "madera-irq",
.pm = &madera_irq_pm_ops,
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 5101a3fb11df..58881d313979 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -235,22 +235,17 @@ static const struct irq_domain_ops mbigen_domain_ops = {
static int mbigen_of_create_domain(struct platform_device *pdev,
struct mbigen_device *mgn_chip)
{
- struct device *parent;
struct platform_device *child;
struct irq_domain *domain;
struct device_node *np;
u32 num_pins;
int ret = 0;
- parent = bus_get_dev_root(&platform_bus_type);
- if (!parent)
- return -ENODEV;
-
for_each_child_of_node(pdev->dev.of_node, np) {
if (!of_property_read_bool(np, "interrupt-controller"))
continue;
- child = of_platform_device_create(np, NULL, parent);
+ child = of_platform_device_create(np, NULL, NULL);
if (!child) {
ret = -ENOMEM;
break;
@@ -273,7 +268,6 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
}
}
- put_device(parent);
if (ret)
of_node_put(np);
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index f88df39f4129..9a1791908598 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -154,6 +154,10 @@ static const struct meson_gpio_irq_params c3_params = {
INIT_MESON_S4_COMMON_DATA(55)
};
+static const struct meson_gpio_irq_params t7_params = {
+ INIT_MESON_S4_COMMON_DATA(157)
+};
+
static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
@@ -165,6 +169,7 @@ static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
{ .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params },
{ .compatible = "amlogic,c3-gpio-intc", .data = &c3_params },
+ { .compatible = "amlogic,t7-gpio-intc", .data = &t7_params },
{ }
};
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index ef3d3646ccc2..d17d9c0e2880 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -167,14 +167,12 @@ static int mvebu_pic_probe(struct platform_device *pdev)
return 0;
}
-static int mvebu_pic_remove(struct platform_device *pdev)
+static void mvebu_pic_remove(struct platform_device *pdev)
{
struct mvebu_pic *pic = platform_get_drvdata(pdev);
on_each_cpu(mvebu_pic_disable_percpu_irq, pic, 1);
irq_domain_remove(pic->domain);
-
- return 0;
}
static const struct of_device_id mvebu_pic_of_match[] = {
@@ -184,11 +182,11 @@ static const struct of_device_id mvebu_pic_of_match[] = {
MODULE_DEVICE_TABLE(of, mvebu_pic_of_match);
static struct platform_driver mvebu_pic_driver = {
- .probe = mvebu_pic_probe,
- .remove = mvebu_pic_remove,
+ .probe = mvebu_pic_probe,
+ .remove_new = mvebu_pic_remove,
.driver = {
- .name = "mvebu-pic",
- .of_match_table = mvebu_pic_of_match,
+ .name = "mvebu-pic",
+ .of_match_table = mvebu_pic_of_match,
},
};
module_platform_driver(mvebu_pic_driver);
diff --git a/drivers/irqchip/irq-pruss-intc.c b/drivers/irqchip/irq-pruss-intc.c
index 0f64ecb9b1f4..060eb000e9d3 100644
--- a/drivers/irqchip/irq-pruss-intc.c
+++ b/drivers/irqchip/irq-pruss-intc.c
@@ -599,7 +599,7 @@ fail_irq:
return ret;
}
-static int pruss_intc_remove(struct platform_device *pdev)
+static void pruss_intc_remove(struct platform_device *pdev)
{
struct pruss_intc *intc = platform_get_drvdata(pdev);
u8 max_system_events = intc->soc_config->num_system_events;
@@ -616,8 +616,6 @@ static int pruss_intc_remove(struct platform_device *pdev)
irq_dispose_mapping(irq_find_mapping(intc->domain, hwirq));
irq_domain_remove(intc->domain);
-
- return 0;
}
static const struct pruss_intc_match_data pruss_intc_data = {
@@ -645,12 +643,12 @@ MODULE_DEVICE_TABLE(of, pruss_intc_of_match);
static struct platform_driver pruss_intc_driver = {
.driver = {
- .name = "pruss-intc",
- .of_match_table = pruss_intc_of_match,
- .suppress_bind_attrs = true,
+ .name = "pruss-intc",
+ .of_match_table = pruss_intc_of_match,
+ .suppress_bind_attrs = true,
},
- .probe = pruss_intc_probe,
- .remove = pruss_intc_remove,
+ .probe = pruss_intc_probe,
+ .remove_new = pruss_intc_remove,
};
module_platform_driver(pruss_intc_driver);
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index cda5838d2232..7942d8eb3d00 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -389,8 +389,8 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
/* Don't use devm_ioremap_resource, as we're accessing a shared region. */
priv->base = devm_ioremap(dev, res.start, resource_size(&res));
of_node_put(msgram_np);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ if (!priv->base)
+ return -ENOMEM;
} else {
/* Otherwise, fall back to simple MMIO. */
priv->base = devm_platform_ioremap_resource(pdev, 0);
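The qcom-mpm fix above turns on a return-convention difference that is easy to miss: devm_ioremap() reports failure with NULL, whereas devm_ioremap_resource() and devm_platform_ioremap_resource() return an ERR_PTR(). A sketch, with illustrative names:

static void __iomem *example_map_shared(struct device *dev,
					struct resource *res)
{
	void __iomem *base;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)	/* NULL on failure, never an ERR_PTR */
		return ERR_PTR(-ENOMEM);

	return base;
}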
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index fa19585f3dee..9ad37237ba95 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -561,14 +561,13 @@ err0:
return ret;
}
-static int intc_irqpin_remove(struct platform_device *pdev)
+static void intc_irqpin_remove(struct platform_device *pdev)
{
struct intc_irqpin_priv *p = platform_get_drvdata(pdev);
irq_domain_remove(p->irq_domain);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int __maybe_unused intc_irqpin_suspend(struct device *dev)
@@ -585,11 +584,11 @@ static SIMPLE_DEV_PM_OPS(intc_irqpin_pm_ops, intc_irqpin_suspend, NULL);
static struct platform_driver intc_irqpin_device_driver = {
.probe = intc_irqpin_probe,
- .remove = intc_irqpin_remove,
+ .remove_new = intc_irqpin_remove,
.driver = {
- .name = "renesas_intc_irqpin",
- .of_match_table = intc_irqpin_dt_ids,
- .pm = &intc_irqpin_pm_ops,
+ .name = "renesas_intc_irqpin",
+ .of_match_table = intc_irqpin_dt_ids,
+ .pm = &intc_irqpin_pm_ops,
}
};
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 49b446b396f9..76026e0b8e20 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -218,14 +218,13 @@ err_runtime_pm_disable:
return ret;
}
-static int irqc_remove(struct platform_device *pdev)
+static void irqc_remove(struct platform_device *pdev)
{
struct irqc_priv *p = platform_get_drvdata(pdev);
irq_domain_remove(p->irq_domain);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- return 0;
}
static int __maybe_unused irqc_suspend(struct device *dev)
@@ -248,11 +247,11 @@ MODULE_DEVICE_TABLE(of, irqc_dt_ids);
static struct platform_driver irqc_device_driver = {
.probe = irqc_probe,
- .remove = irqc_remove,
+ .remove_new = irqc_remove,
.driver = {
- .name = "renesas_irqc",
+ .name = "renesas_irqc",
.of_match_table = irqc_dt_ids,
- .pm = &irqc_pm_ops,
+ .pm = &irqc_pm_ops,
}
};
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
index e4c99c2e0373..f05afe82db4d 100644
--- a/drivers/irqchip/irq-renesas-rza1.c
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -244,12 +244,11 @@ out_put_node:
return ret;
}
-static int rza1_irqc_remove(struct platform_device *pdev)
+static void rza1_irqc_remove(struct platform_device *pdev)
{
struct rza1_irqc_priv *priv = platform_get_drvdata(pdev);
irq_domain_remove(priv->irq_domain);
- return 0;
}
static const struct of_device_id rza1_irqc_dt_ids[] = {
@@ -260,9 +259,9 @@ MODULE_DEVICE_TABLE(of, rza1_irqc_dt_ids);
static struct platform_driver rza1_irqc_device_driver = {
.probe = rza1_irqc_probe,
- .remove = rza1_irqc_remove,
+ .remove_new = rza1_irqc_remove,
.driver = {
- .name = "renesas_rza1_irqc",
+ .name = "renesas_rza1_irqc",
.of_match_table = rza1_irqc_dt_ids,
}
};
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index e8d01b14ccdd..f87aeab460eb 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -17,17 +17,29 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/smp.h>
+#include <linux/soc/andes/irq.h>
+
+#include <asm/hwcap.h>
static struct irq_domain *intc_domain;
+static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
+static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
+static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
{
unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
- if (unlikely(cause >= BITS_PER_LONG))
- panic("unexpected interrupt cause");
+ if (generic_handle_domain_irq(intc_domain, cause))
+ pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n", cause);
+}
+
+static asmlinkage void riscv_intc_aia_irq(struct pt_regs *regs)
+{
+ unsigned long topi;
- generic_handle_domain_irq(intc_domain, cause);
+ while ((topi = csr_read(CSR_TOPI)))
+ generic_handle_domain_irq(intc_domain, topi >> TOPI_IID_SHIFT);
}
/*
@@ -39,12 +51,43 @@ static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
static void riscv_intc_irq_mask(struct irq_data *d)
{
- csr_clear(CSR_IE, BIT(d->hwirq));
+ if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG)
+ csr_clear(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG));
+ else
+ csr_clear(CSR_IE, BIT(d->hwirq));
}
static void riscv_intc_irq_unmask(struct irq_data *d)
{
- csr_set(CSR_IE, BIT(d->hwirq));
+ if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG)
+ csr_set(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG));
+ else
+ csr_set(CSR_IE, BIT(d->hwirq));
+}
+
+static void andes_intc_irq_mask(struct irq_data *d)
+{
+ /*
+ * Andes-specific S-mode local interrupt causes (hwirq)
+ * are defined as (256 + n) and controlled by the n-th
+ * bit of SLIE.
+ */
+ unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
+
+ if (d->hwirq < ANDES_SLI_CAUSE_BASE)
+ csr_clear(CSR_IE, mask);
+ else
+ csr_clear(ANDES_CSR_SLIE, mask);
+}
+
+static void andes_intc_irq_unmask(struct irq_data *d)
+{
+ unsigned int mask = BIT(d->hwirq % BITS_PER_LONG);
+
+ if (d->hwirq < ANDES_SLI_CAUSE_BASE)
+ csr_set(CSR_IE, mask);
+ else
+ csr_set(ANDES_CSR_SLIE, mask);
}
static void riscv_intc_irq_eoi(struct irq_data *d)
@@ -70,12 +113,21 @@ static struct irq_chip riscv_intc_chip = {
.irq_eoi = riscv_intc_irq_eoi,
};
+static struct irq_chip andes_intc_chip = {
+ .name = "RISC-V INTC",
+ .irq_mask = andes_intc_irq_mask,
+ .irq_unmask = andes_intc_irq_unmask,
+ .irq_eoi = riscv_intc_irq_eoi,
+};
+
static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
+ struct irq_chip *chip = d->host_data;
+
irq_set_percpu_devid(irq);
- irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
- handle_percpu_devid_irq, NULL, NULL);
+ irq_domain_set_info(d, irq, hwirq, chip, NULL, handle_percpu_devid_irq,
+ NULL, NULL);
return 0;
}
@@ -93,6 +145,14 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain,
if (ret)
return ret;
+ /*
+ * Only allow hwirq for which we have corresponding standard or
+ * custom interrupt enable register.
+ */
+ if ((hwirq >= riscv_intc_nr_irqs && hwirq < riscv_intc_custom_base) ||
+ (hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs))
+ return -EINVAL;
+
for (i = 0; i < nr_irqs; i++) {
ret = riscv_intc_domain_map(domain, virq + i, hwirq + i);
if (ret)
@@ -113,18 +173,20 @@ static struct fwnode_handle *riscv_intc_hwnode(void)
return intc_domain->fwnode;
}
-static int __init riscv_intc_init_common(struct fwnode_handle *fn)
+static int __init riscv_intc_init_common(struct fwnode_handle *fn, struct irq_chip *chip)
{
int rc;
- intc_domain = irq_domain_create_linear(fn, BITS_PER_LONG,
- &riscv_intc_domain_ops, NULL);
+ intc_domain = irq_domain_create_tree(fn, &riscv_intc_domain_ops, chip);
if (!intc_domain) {
pr_err("unable to add IRQ domain\n");
return -ENXIO;
}
- rc = set_handle_irq(&riscv_intc_irq);
+ if (riscv_isa_extension_available(NULL, SxAIA))
+ rc = set_handle_irq(&riscv_intc_aia_irq);
+ else
+ rc = set_handle_irq(&riscv_intc_irq);
if (rc) {
pr_err("failed to set irq handler\n");
return rc;
@@ -132,7 +194,11 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
riscv_set_intc_hwnode_fn(riscv_intc_hwnode);
- pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
+ pr_info("%d local interrupts mapped%s\n",
+ riscv_isa_extension_available(NULL, SxAIA) ? 64 : riscv_intc_nr_irqs,
+ riscv_isa_extension_available(NULL, SxAIA) ? " using AIA" : "");
+ if (riscv_intc_custom_nr_irqs)
+ pr_info("%d custom local interrupts mapped\n", riscv_intc_custom_nr_irqs);
return 0;
}
@@ -140,8 +206,9 @@ static int __init riscv_intc_init_common(struct fwnode_handle *fn)
static int __init riscv_intc_init(struct device_node *node,
struct device_node *parent)
{
- int rc;
+ struct irq_chip *chip = &riscv_intc_chip;
unsigned long hartid;
+ int rc;
rc = riscv_of_parent_hartid(node, &hartid);
if (rc < 0) {
@@ -166,10 +233,17 @@ static int __init riscv_intc_init(struct device_node *node,
return 0;
}
- return riscv_intc_init_common(of_node_to_fwnode(node));
+ if (of_device_is_compatible(node, "andestech,cpu-intc")) {
+ riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE;
+ riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST;
+ chip = &andes_intc_chip;
+ }
+
+ return riscv_intc_init_common(of_node_to_fwnode(node), chip);
}
IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
+IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
#ifdef CONFIG_ACPI
@@ -196,7 +270,7 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
return -ENOMEM;
}
- return riscv_intc_init_common(fn);
+ return riscv_intc_init_common(fn, &riscv_intc_chip);
}
IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
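The allocation check added to riscv_intc_domain_alloc() accepts exactly two hwirq windows. Rewritten as a predicate for clarity; this sketch is, by inspection, the negation of the rejection test in the hunk:

static bool example_hwirq_valid(irq_hw_number_t hwirq)
{
	/* Standard local interrupts: [0, riscv_intc_nr_irqs) */
	if (hwirq < riscv_intc_nr_irqs)
		return true;

	/* Custom (vendor) window: [base, base + nr) */
	return hwirq >= riscv_intc_custom_base &&
	       hwirq < riscv_intc_custom_base + riscv_intc_custom_nr_irqs;
}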
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 5b7bc4fd9517..f3d4cb9e34f7 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -3,7 +3,6 @@
* Copyright (C) 2017 SiFive
* Copyright (C) 2018 Christoph Hellwig
*/
-#define pr_fmt(fmt) "plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -64,6 +63,7 @@
#define PLIC_QUIRK_EDGE_INTERRUPT 0
struct plic_priv {
+ struct device *dev;
struct cpumask lmask;
struct irq_domain *irqdomain;
void __iomem *regs;
@@ -103,9 +103,11 @@ static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
- raw_spin_lock(&handler->enable_lock);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&handler->enable_lock, flags);
__plic_toggle(handler->enable_base, hwirq, enable);
- raw_spin_unlock(&handler->enable_lock);
+ raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
static inline void plic_irq_toggle(const struct cpumask *mask,
@@ -148,7 +150,13 @@ static void plic_irq_eoi(struct irq_data *d)
{
struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
- writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ if (unlikely(irqd_irq_disabled(d))) {
+ plic_toggle(handler, d->hwirq, 1);
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ plic_toggle(handler, d->hwirq, 0);
+ } else {
+ writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+ }
}
#ifdef CONFIG_SMP
@@ -236,6 +244,7 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type)
static int plic_irq_suspend(void)
{
unsigned int i, cpu;
+ unsigned long flags;
u32 __iomem *reg;
struct plic_priv *priv;
@@ -253,12 +262,12 @@ static int plic_irq_suspend(void)
if (!handler->present)
continue;
- raw_spin_lock(&handler->enable_lock);
+ raw_spin_lock_irqsave(&handler->enable_lock, flags);
for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
reg = handler->enable_base + i * sizeof(u32);
handler->enable_save[i] = readl(reg);
}
- raw_spin_unlock(&handler->enable_lock);
+ raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
return 0;
@@ -267,6 +276,7 @@ static int plic_irq_suspend(void)
static void plic_irq_resume(void)
{
unsigned int i, index, cpu;
+ unsigned long flags;
u32 __iomem *reg;
struct plic_priv *priv;
@@ -284,12 +294,12 @@ static void plic_irq_resume(void)
if (!handler->present)
continue;
- raw_spin_lock(&handler->enable_lock);
+ raw_spin_lock_irqsave(&handler->enable_lock, flags);
for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
reg = handler->enable_base + i * sizeof(u32);
writel(handler->enable_save[i], reg);
}
- raw_spin_unlock(&handler->enable_lock);
+ raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
}
@@ -370,9 +380,10 @@ static void plic_handle_irq(struct irq_desc *desc)
while ((hwirq = readl(claim))) {
int err = generic_handle_domain_irq(handler->priv->irqdomain,
hwirq);
- if (unlikely(err))
- pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
- hwirq);
+ if (unlikely(err)) {
+ dev_warn_ratelimited(handler->priv->dev,
+ "can't find mapping for hwirq %lu\n", hwirq);
+ }
}
chained_irq_exit(chip, desc);
@@ -400,63 +411,122 @@ static int plic_starting_cpu(unsigned int cpu)
enable_percpu_irq(plic_parent_irq,
irq_get_trigger_type(plic_parent_irq));
else
- pr_warn("cpu%d: parent irq not available\n", cpu);
+ dev_warn(handler->priv->dev, "cpu%d: parent irq not available\n", cpu);
plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);
return 0;
}
-static int __init __plic_init(struct device_node *node,
- struct device_node *parent,
- unsigned long plic_quirks)
+static const struct of_device_id plic_match[] = {
+ { .compatible = "sifive,plic-1.0.0" },
+ { .compatible = "riscv,plic0" },
+ { .compatible = "andestech,nceplic100",
+ .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
+ { .compatible = "thead,c900-plic",
+ .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
+ {}
+};
+
+static int plic_parse_nr_irqs_and_contexts(struct platform_device *pdev,
+ u32 *nr_irqs, u32 *nr_contexts)
{
- int error = 0, nr_contexts, nr_handlers = 0, i;
- u32 nr_irqs;
- struct plic_priv *priv;
- struct plic_handler *handler;
- unsigned int cpu;
+ struct device *dev = &pdev->dev;
+ int rc;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ /*
+ * Currently, only an OF fwnode is supported; extend this
+ * function for ACPI support.
+ */
+ if (!is_of_node(dev->fwnode))
+ return -EINVAL;
- priv->plic_quirks = plic_quirks;
+ rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,ndev", nr_irqs);
+ if (rc) {
+ dev_err(dev, "riscv,ndev property not available\n");
+ return rc;
+ }
- priv->regs = of_iomap(node, 0);
- if (WARN_ON(!priv->regs)) {
- error = -EIO;
- goto out_free_priv;
+ *nr_contexts = of_irq_count(to_of_node(dev->fwnode));
+ if (WARN_ON(!(*nr_contexts))) {
+ dev_err(dev, "no PLIC context available\n");
+ return -EINVAL;
}
- error = -EINVAL;
- of_property_read_u32(node, "riscv,ndev", &nr_irqs);
- if (WARN_ON(!nr_irqs))
- goto out_iounmap;
+ return 0;
+}
- priv->nr_irqs = nr_irqs;
+static int plic_parse_context_parent(struct platform_device *pdev, u32 context,
+ u32 *parent_hwirq, int *parent_cpu)
+{
+ struct device *dev = &pdev->dev;
+ struct of_phandle_args parent;
+ unsigned long hartid;
+ int rc;
- priv->prio_save = bitmap_alloc(nr_irqs, GFP_KERNEL);
- if (!priv->prio_save)
- goto out_free_priority_reg;
+ /*
+ * Currently, only an OF fwnode is supported; extend this
+ * function for ACPI support.
+ */
+ if (!is_of_node(dev->fwnode))
+ return -EINVAL;
- nr_contexts = of_irq_count(node);
- if (WARN_ON(!nr_contexts))
- goto out_free_priority_reg;
+ rc = of_irq_parse_one(to_of_node(dev->fwnode), context, &parent);
+ if (rc)
+ return rc;
- error = -ENOMEM;
- priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
- &plic_irqdomain_ops, priv);
- if (WARN_ON(!priv->irqdomain))
- goto out_free_priority_reg;
+ rc = riscv_of_parent_hartid(parent.np, &hartid);
+ if (rc)
+ return rc;
- for (i = 0; i < nr_contexts; i++) {
- struct of_phandle_args parent;
- irq_hw_number_t hwirq;
- int cpu;
- unsigned long hartid;
+ *parent_hwirq = parent.args[0];
+ *parent_cpu = riscv_hartid_to_cpuid(hartid);
+ return 0;
+}
+
+static int plic_probe(struct platform_device *pdev)
+{
+ int error = 0, nr_contexts, nr_handlers = 0, cpu, i;
+ struct device *dev = &pdev->dev;
+ unsigned long plic_quirks = 0;
+ struct plic_handler *handler;
+ u32 nr_irqs, parent_hwirq;
+ struct irq_domain *domain;
+ struct plic_priv *priv;
+ irq_hw_number_t hwirq;
+ bool cpuhp_setup;
- if (of_irq_parse_one(node, i, &parent)) {
- pr_err("failed to parse parent for context %d.\n", i);
+ if (is_of_node(dev->fwnode)) {
+ const struct of_device_id *id;
+
+ id = of_match_node(plic_match, to_of_node(dev->fwnode));
+ if (id)
+ plic_quirks = (unsigned long)id->data;
+ }
+
+ error = plic_parse_nr_irqs_and_contexts(pdev, &nr_irqs, &nr_contexts);
+ if (error)
+ return error;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->plic_quirks = plic_quirks;
+ priv->nr_irqs = nr_irqs;
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(!priv->regs))
+ return -EIO;
+
+ priv->prio_save = devm_bitmap_zalloc(dev, nr_irqs, GFP_KERNEL);
+ if (!priv->prio_save)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_contexts; i++) {
+ error = plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu);
+ if (error) {
+ dev_warn(dev, "hwirq for context%d not found\n", i);
continue;
}
@@ -464,7 +534,7 @@ static int __init __plic_init(struct device_node *node,
* Skip contexts other than external interrupts for our
* privilege level.
*/
- if (parent.args[0] != RV_IRQ_EXT) {
+ if (parent_hwirq != RV_IRQ_EXT) {
/* Disable S-mode enable bits if running in M-mode. */
if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
void __iomem *enable_base = priv->regs +
@@ -477,24 +547,17 @@ static int __init __plic_init(struct device_node *node,
continue;
}
- error = riscv_of_parent_hartid(parent.np, &hartid);
- if (error < 0) {
- pr_warn("failed to parse hart ID for context %d.\n", i);
- continue;
- }
-
- cpu = riscv_hartid_to_cpuid(hartid);
if (cpu < 0) {
- pr_warn("Invalid cpuid for context %d\n", i);
+ dev_warn(dev, "Invalid cpuid for context %d\n", i);
continue;
}
/* Find parent domain and register chained handler */
- if (!plic_parent_irq && irq_find_host(parent.np)) {
- plic_parent_irq = irq_of_parse_and_map(node, i);
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
+ if (!plic_parent_irq && domain) {
+ plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
if (plic_parent_irq)
- irq_set_chained_handler(plic_parent_irq,
- plic_handle_irq);
+ irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
}
/*
@@ -504,7 +567,7 @@ static int __init __plic_init(struct device_node *node,
*/
handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present) {
- pr_warn("handler already present for context %d.\n", i);
+ dev_warn(dev, "handler already present for context %d.\n", i);
plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
goto done;
}
@@ -518,10 +581,10 @@ static int __init __plic_init(struct device_node *node,
i * CONTEXT_ENABLE_SIZE;
handler->priv = priv;
- handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
- sizeof(*handler->enable_save), GFP_KERNEL);
+ handler->enable_save = devm_kcalloc(dev, DIV_ROUND_UP(nr_irqs, 32),
+ sizeof(*handler->enable_save), GFP_KERNEL);
if (!handler->enable_save)
- goto out_free_enable_reg;
+ goto fail_cleanup_contexts;
done:
for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
plic_toggle(handler, hwirq, 0);
@@ -531,52 +594,60 @@ done:
nr_handlers++;
}
+ priv->irqdomain = irq_domain_add_linear(to_of_node(dev->fwnode), nr_irqs + 1,
+ &plic_irqdomain_ops, priv);
+ if (WARN_ON(!priv->irqdomain))
+ goto fail_cleanup_contexts;
+
/*
* We can have multiple PLIC instances so setup cpuhp state
- * and register syscore operations only when context handler
- * for current/boot CPU is present.
+ * and register syscore operations only once after context
+ * handlers of all online CPUs are initialized.
*/
- handler = this_cpu_ptr(&plic_handlers);
- if (handler->present && !plic_cpuhp_setup_done) {
- cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
- "irqchip/sifive/plic:starting",
- plic_starting_cpu, plic_dying_cpu);
- register_syscore_ops(&plic_irq_syscore_ops);
- plic_cpuhp_setup_done = true;
+ if (!plic_cpuhp_setup_done) {
+ cpuhp_setup = true;
+ for_each_online_cpu(cpu) {
+ handler = per_cpu_ptr(&plic_handlers, cpu);
+ if (!handler->present) {
+ cpuhp_setup = false;
+ break;
+ }
+ }
+ if (cpuhp_setup) {
+ cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ "irqchip/sifive/plic:starting",
+ plic_starting_cpu, plic_dying_cpu);
+ register_syscore_ops(&plic_irq_syscore_ops);
+ plic_cpuhp_setup_done = true;
+ }
}
- pr_info("%pOFP: mapped %d interrupts with %d handlers for"
- " %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
+ dev_info(dev, "mapped %d interrupts with %d handlers for %d contexts.\n",
+ nr_irqs, nr_handlers, nr_contexts);
return 0;
-out_free_enable_reg:
- for_each_cpu(cpu, cpu_present_mask) {
+fail_cleanup_contexts:
+ for (i = 0; i < nr_contexts; i++) {
+ if (plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu))
+ continue;
+ if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
+ continue;
+
handler = per_cpu_ptr(&plic_handlers, cpu);
- kfree(handler->enable_save);
+ handler->present = false;
+ handler->hart_base = NULL;
+ handler->enable_base = NULL;
+ handler->enable_save = NULL;
+ handler->priv = NULL;
}
-out_free_priority_reg:
- kfree(priv->prio_save);
-out_iounmap:
- iounmap(priv->regs);
-out_free_priv:
- kfree(priv);
- return error;
+ return -ENOMEM;
}
-static int __init plic_init(struct device_node *node,
- struct device_node *parent)
-{
- return __plic_init(node, parent, 0);
-}
-
-IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
-IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
-
-static int __init plic_edge_init(struct device_node *node,
- struct device_node *parent)
-{
- return __plic_init(node, parent, BIT(PLIC_QUIRK_EDGE_INTERRUPT));
-}
-
-IRQCHIP_DECLARE(andestech_nceplic100, "andestech,nceplic100", plic_edge_init);
-IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_edge_init);
+static struct platform_driver plic_driver = {
+ .driver = {
+ .name = "riscv-plic",
+ .of_match_table = plic_match,
+ },
+ .probe = plic_probe,
+};
+builtin_platform_driver(plic_driver);
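The platform-driver conversion above also moves every allocation to devm, which is why the old out_free_*/out_iounmap unwinding disappears. A minimal sketch of that shape with placeholder names; note that devm_platform_ioremap_resource() returns an ERR_PTR() on failure, checked with IS_ERR():

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct example_priv *priv;
	u32 nr_irqs = 64;	/* placeholder; parsed from firmware in real code */

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	priv->prio_save = devm_bitmap_zalloc(dev, nr_irqs, GFP_KERNEL);
	if (!priv->prio_save)
		return -ENOMEM;

	return 0;	/* everything above is released with the device */
}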
diff --git a/drivers/irqchip/irq-starfive-jh8100-intc.c b/drivers/irqchip/irq-starfive-jh8100-intc.c
new file mode 100644
index 000000000000..0f5837176e53
--- /dev/null
+++ b/drivers/irqchip/irq-starfive-jh8100-intc.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive JH8100 External Interrupt Controller driver
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Changhuang Liang <changhuang.liang@starfivetech.com>
+ */
+
+#define pr_fmt(fmt) "irq-starfive-jh8100: " fmt
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define STARFIVE_INTC_SRC0_CLEAR 0x10
+#define STARFIVE_INTC_SRC0_MASK 0x14
+#define STARFIVE_INTC_SRC0_INT 0x1c
+
+#define STARFIVE_INTC_SRC_IRQ_NUM 32
+
+struct starfive_irq_chip {
+ void __iomem *base;
+ struct irq_domain *domain;
+ raw_spinlock_t lock;
+};
+
+static void starfive_intc_bit_set(struct starfive_irq_chip *irqc,
+ u32 reg, u32 bit_mask)
+{
+ u32 value;
+
+ value = ioread32(irqc->base + reg);
+ value |= bit_mask;
+ iowrite32(value, irqc->base + reg);
+}
+
+static void starfive_intc_bit_clear(struct starfive_irq_chip *irqc,
+ u32 reg, u32 bit_mask)
+{
+ u32 value;
+
+ value = ioread32(irqc->base + reg);
+ value &= ~bit_mask;
+ iowrite32(value, irqc->base + reg);
+}
+
+static void starfive_intc_unmask(struct irq_data *d)
+{
+ struct starfive_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
+ raw_spin_lock(&irqc->lock);
+ starfive_intc_bit_clear(irqc, STARFIVE_INTC_SRC0_MASK, BIT(d->hwirq));
+ raw_spin_unlock(&irqc->lock);
+}
+
+static void starfive_intc_mask(struct irq_data *d)
+{
+ struct starfive_irq_chip *irqc = irq_data_get_irq_chip_data(d);
+
+ raw_spin_lock(&irqc->lock);
+ starfive_intc_bit_set(irqc, STARFIVE_INTC_SRC0_MASK, BIT(d->hwirq));
+ raw_spin_unlock(&irqc->lock);
+}
+
+static struct irq_chip intc_dev = {
+ .name = "StarFive JH8100 INTC",
+ .irq_unmask = starfive_intc_unmask,
+ .irq_mask = starfive_intc_mask,
+};
+
+static int starfive_intc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_domain_set_info(d, irq, hwirq, &intc_dev, d->host_data,
+ handle_level_irq, NULL, NULL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops starfive_intc_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = starfive_intc_map,
+};
+
+static void starfive_intc_irq_handler(struct irq_desc *desc)
+{
+ struct starfive_irq_chip *irqc = irq_data_get_irq_handler_data(&desc->irq_data);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long value;
+ int hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ value = ioread32(irqc->base + STARFIVE_INTC_SRC0_INT);
+ while (value) {
+ hwirq = ffs(value) - 1;
+
+ generic_handle_domain_irq(irqc->domain, hwirq);
+
+ starfive_intc_bit_set(irqc, STARFIVE_INTC_SRC0_CLEAR, BIT(hwirq));
+ starfive_intc_bit_clear(irqc, STARFIVE_INTC_SRC0_CLEAR, BIT(hwirq));
+
+ __clear_bit(hwirq, &value);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int __init starfive_intc_init(struct device_node *intc,
+ struct device_node *parent)
+{
+ struct starfive_irq_chip *irqc;
+ struct reset_control *rst;
+ struct clk *clk;
+ int parent_irq;
+ int ret;
+
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ irqc->base = of_iomap(intc, 0);
+ if (!irqc->base) {
+ pr_err("Unable to map registers\n");
+ ret = -ENXIO;
+ goto err_free;
+ }
+
+ rst = of_reset_control_get_exclusive(intc, NULL);
+ if (IS_ERR(rst)) {
+ pr_err("Unable to get reset control %pe\n", rst);
+ ret = PTR_ERR(rst);
+ goto err_unmap;
+ }
+
+ clk = of_clk_get(intc, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Unable to get clock %pe\n", clk);
+ ret = PTR_ERR(clk);
+ goto err_reset_put;
+ }
+
+ ret = reset_control_deassert(rst);
+ if (ret)
+ goto err_clk_put;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_reset_assert;
+
+ raw_spin_lock_init(&irqc->lock);
+
+ irqc->domain = irq_domain_add_linear(intc, STARFIVE_INTC_SRC_IRQ_NUM,
+ &starfive_intc_domain_ops, irqc);
+ if (!irqc->domain) {
+ pr_err("Unable to create IRQ domain\n");
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ parent_irq = of_irq_get(intc, 0);
+ if (parent_irq < 0) {
+ pr_err("Failed to get main IRQ: %d\n", parent_irq);
+ ret = parent_irq;
+ goto err_remove_domain;
+ }
+
+ irq_set_chained_handler_and_data(parent_irq, starfive_intc_irq_handler,
+ irqc);
+
+ pr_info("Interrupt controller register, nr_irqs %d\n",
+ STARFIVE_INTC_SRC_IRQ_NUM);
+
+ return 0;
+
+err_remove_domain:
+ irq_domain_remove(irqc->domain);
+err_clk_disable:
+ clk_disable_unprepare(clk);
+err_reset_assert:
+ reset_control_assert(rst);
+err_clk_put:
+ clk_put(clk);
+err_reset_put:
+ reset_control_put(rst);
+err_unmap:
+ iounmap(irqc->base);
+err_free:
+ kfree(irqc);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(starfive_intc)
+IRQCHIP_MATCH("starfive,jh8100-intc", starfive_intc_init)
+IRQCHIP_PLATFORM_DRIVER_END(starfive_intc)
+
+MODULE_DESCRIPTION("StarFive JH8100 External Interrupt Controller");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Changhuang Liang <changhuang.liang@starfivetech.com>");
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 971240e2e31b..26a5193d0ae4 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -898,10 +898,9 @@ static void stm32_exti_remove_irq(void *data)
irq_domain_remove(domain);
}
-static int stm32_exti_remove(struct platform_device *pdev)
+static void stm32_exti_remove(struct platform_device *pdev)
{
stm32_exti_h_syscore_deinit();
- return 0;
}
static int stm32_exti_probe(struct platform_device *pdev)
@@ -991,10 +990,10 @@ MODULE_DEVICE_TABLE(of, stm32_exti_ids);
static struct platform_driver stm32_exti_driver = {
.probe = stm32_exti_probe,
- .remove = stm32_exti_remove,
+ .remove_new = stm32_exti_remove,
.driver = {
- .name = "stm32_exti",
- .of_match_table = stm32_exti_ids,
+ .name = "stm32_exti",
+ .of_match_table = stm32_exti_ids,
},
};
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index b2d61d4f6fe6..57f610dab6b8 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -139,13 +139,11 @@ static int ts4800_ic_probe(struct platform_device *pdev)
return 0;
}
-static int ts4800_ic_remove(struct platform_device *pdev)
+static void ts4800_ic_remove(struct platform_device *pdev)
{
struct ts4800_irq_data *data = platform_get_drvdata(pdev);
irq_domain_remove(data->domain);
-
- return 0;
}
static const struct of_device_id ts4800_ic_of_match[] = {
@@ -155,11 +153,11 @@ static const struct of_device_id ts4800_ic_of_match[] = {
MODULE_DEVICE_TABLE(of, ts4800_ic_of_match);
static struct platform_driver ts4800_ic_driver = {
- .probe = ts4800_ic_probe,
- .remove = ts4800_ic_remove,
+ .probe = ts4800_ic_probe,
+ .remove_new = ts4800_ic_remove,
.driver = {
- .name = "ts4800-irqc",
- .of_match_table = ts4800_ic_of_match,
+ .name = "ts4800-irqc",
+ .of_match_table = ts4800_ic_of_match,
},
};
module_platform_driver(ts4800_ic_driver);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 9e3d5561e04e..ea93e7236c4a 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -47,9 +47,8 @@
/**
* struct vic_device - VIC PM device
- * @parent_irq: The parent IRQ number of the VIC if cascaded, or 0.
- * @irq: The IRQ number for the base of the VIC.
* @base: The register base for the VIC.
+ * @irq: The IRQ number for the base of the VIC.
* @valid_sources: A bitmask of valid interrupts
* @resume_sources: A bitmask of interrupts for resume.
* @resume_irqs: The IRQs enabled for resume.
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 6e80d7bd3c4d..3ed257334562 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -49,7 +49,9 @@ MODULE_LICENSE("GPL");
/* -------- driver information -------------------------------------- */
static DEFINE_MUTEX(capi_mutex);
-static struct class *capi_class;
+static const struct class capi_class = {
+ .name = "capi",
+};
static int capi_major = 68; /* allocated */
module_param_named(major, capi_major, uint, 0);
@@ -1393,18 +1395,19 @@ static int __init capi_init(void)
kcapi_exit();
return major_ret;
}
- capi_class = class_create("capi");
- if (IS_ERR(capi_class)) {
+
+ ret = class_register(&capi_class);
+ if (ret) {
unregister_chrdev(capi_major, "capi20");
kcapi_exit();
- return PTR_ERR(capi_class);
+ return ret;
}
- device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi20");
+ device_create(&capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi20");
if (capinc_tty_init() < 0) {
- device_destroy(capi_class, MKDEV(capi_major, 0));
- class_destroy(capi_class);
+ device_destroy(&capi_class, MKDEV(capi_major, 0));
+ class_unregister(&capi_class);
unregister_chrdev(capi_major, "capi20");
kcapi_exit();
return -ENOMEM;
@@ -1427,8 +1430,8 @@ static void __exit capi_exit(void)
{
proc_exit();
- device_destroy(capi_class, MKDEV(capi_major, 0));
- class_destroy(capi_class);
+ device_destroy(&capi_class, MKDEV(capi_major, 0));
+ class_unregister(&capi_class);
unregister_chrdev(capi_major, "capi20");
capinc_tty_exit();
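The capi conversion (and the mISDN one just below) apply the same class API migration: a statically defined const struct class registered with class_register() replaces the class_create()/class_destroy() pair. A condensed sketch with placeholder names:

static const struct class example_class = {
	.name = "example",
};

static unsigned int example_major;	/* assumed assigned elsewhere */

static int __init example_init(void)
{
	int ret;

	ret = class_register(&example_class);
	if (ret)
		return ret;

	device_create(&example_class, NULL, MKDEV(example_major, 0),
		      NULL, "example0");
	return 0;
}

static void __exit example_exit(void)
{
	device_destroy(&example_class, MKDEV(example_major, 0));
	class_unregister(&example_class);
}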
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index 09b72f14d4b7..b4ed0bb8ddfb 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -31,7 +31,9 @@ struct dsp_element_entry {
static LIST_HEAD(dsp_elements);
/* sysfs */
-static struct class *elements_class;
+static const struct class elements_class = {
+ .name = "dsp_pipeline",
+};
static ssize_t
attr_show_args(struct device *dev, struct device_attribute *attr, char *buf)
@@ -80,7 +82,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
INIT_LIST_HEAD(&entry->list);
entry->elem = elem;
- entry->dev.class = elements_class;
+ entry->dev.class = &elements_class;
entry->dev.release = mISDN_dsp_dev_release;
dev_set_drvdata(&entry->dev, elem);
dev_set_name(&entry->dev, "%s", elem->name);
@@ -131,9 +133,11 @@ EXPORT_SYMBOL(mISDN_dsp_element_unregister);
int dsp_pipeline_module_init(void)
{
- elements_class = class_create("dsp_pipeline");
- if (IS_ERR(elements_class))
- return PTR_ERR(elements_class);
+ int err;
+
+ err = class_register(&elements_class);
+ if (err)
+ return err;
dsp_hwec_init();
@@ -146,7 +150,7 @@ void dsp_pipeline_module_exit(void)
dsp_hwec_exit();
- class_destroy(elements_class);
+ class_unregister(&elements_class);
list_for_each_entry_safe(entry, n, &dsp_elements, list) {
list_del(&entry->list);
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index 156b73d1f4a2..0a7acf59a420 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -77,7 +77,7 @@ struct lpg {
struct mutex lock;
- struct pwm_chip pwm;
+ struct pwm_chip *pwm;
const struct lpg_data *data;
@@ -978,7 +978,7 @@ static int lpg_pattern_mc_clear(struct led_classdev *cdev)
static inline struct lpg *lpg_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct lpg, pwm);
+ return pwmchip_get_drvdata(chip);
}
static int lpg_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -1093,13 +1093,17 @@ static const struct pwm_ops lpg_pwm_ops = {
static int lpg_add_pwm(struct lpg *lpg)
{
+ struct pwm_chip *chip;
int ret;
- lpg->pwm.dev = lpg->dev;
- lpg->pwm.npwm = lpg->num_channels;
- lpg->pwm.ops = &lpg_pwm_ops;
+ lpg->pwm = chip = devm_pwmchip_alloc(lpg->dev, lpg->num_channels, 0);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
- ret = devm_pwmchip_add(lpg->dev, &lpg->pwm);
+ chip->ops = &lpg_pwm_ops;
+ pwmchip_set_drvdata(chip, lpg);
+
+ ret = devm_pwmchip_add(lpg->dev, chip);
if (ret)
dev_err_probe(lpg->dev, ret, "failed to add PWM chip\n");
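After this conversion the driver no longer embeds struct pwm_chip, so container_of() gives way to the chip drvdata accessors. A minimal sketch of the allocation side, with placeholder names:

static int example_add_pwm(struct example *ex)
{
	struct pwm_chip *chip;

	/* 0 = no extra per-chip private data; drvdata carries ex instead */
	chip = devm_pwmchip_alloc(ex->dev, ex->num_channels, 0);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	chip->ops = &example_pwm_ops;
	pwmchip_set_drvdata(chip, ex);

	return devm_pwmchip_add(ex->dev, chip);
}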
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index e3e28a4f7d01..b1abc2a0c971 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1587,8 +1587,8 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
}
/* Allocate platform MSIs for each ring */
- ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
- flexrm_mbox_msi_write);
+ ret = platform_device_msi_init_and_alloc_irqs(dev, mbox->num_rings,
+ flexrm_mbox_msi_write);
if (ret)
goto fail_destroy_cmpl_pool;
@@ -1641,7 +1641,7 @@ skip_debugfs:
fail_free_debugfs_root:
debugfs_remove_recursive(mbox->root);
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
fail_destroy_cmpl_pool:
dma_pool_destroy(mbox->cmpl_pool);
fail_destroy_bd_pool:
@@ -1657,7 +1657,7 @@ static void flexrm_mbox_remove(struct platform_device *pdev)
debugfs_remove_recursive(mbox->root);
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
dma_pool_destroy(mbox->cmpl_pool);
dma_pool_destroy(mbox->bd_pool);
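The flexrm hunks are a pure rename of the platform-MSI entry points; the write-message callback keeps its prototype. A sketch of the pair, with hypothetical names:

static void example_msi_write(struct msi_desc *desc, struct msi_msg *msg)
{
	/* program the doorbell from msg->address_hi/lo and msg->data */
}

static int example_alloc(struct device *dev, unsigned int nvec)
{
	return platform_device_msi_init_and_alloc_irqs(dev, nvec,
						       example_msi_write);
}

static void example_free(struct device *dev)
{
	platform_device_msi_free_irqs_all(dev);
}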
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index a743e2c572fc..68ce56fc61d0 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -634,4 +634,6 @@ config DM_AUDIT
Enables audit logging of several security relevant events in the
particular device-mapper targets, especially the integrity target.
+source "drivers/md/dm-vdo/Kconfig"
+
endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 027d7cfeca3f..476a214e4bdc 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_DM_ZERO) += dm-zero.o
obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
+obj-$(CONFIG_DM_VDO) += dm-vdo/
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
obj-$(CONFIG_DM_EBS) += dm-ebs.o
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 6ae2329052c9..4e6afa89921f 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -300,7 +300,7 @@ struct cached_dev {
struct list_head list;
struct bcache_device disk;
struct block_device *bdev;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct cache_sb sb;
struct cache_sb_disk *sb_disk;
@@ -423,7 +423,7 @@ struct cache {
struct kobject kobj;
struct block_device *bdev;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct task_struct *alloc_thread;
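With the bdev_handle to file conversion, a cached struct file * stands in for the old handle; the block_device is derived on demand with file_bdev() and the reference is dropped with fput(). A sketch of the open/close ends of that lifecycle, assuming bdev_file_open_by_path() as the opener (names illustrative):

static int example_attach(struct example *ex, const char *path)
{
	struct file *bdev_file;

	bdev_file = bdev_file_open_by_path(path,
					   BLK_OPEN_READ | BLK_OPEN_WRITE,
					   ex, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	ex->bdev_file = bdev_file;
	ex->bdev = file_bdev(bdev_file);	/* no separate handle->bdev */
	return 0;
}

static void example_detach(struct example *ex)
{
	fput(ex->bdev_file);	/* drops the open reference */
}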
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index dc3f50f69714..330bcd9ea4a9 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -900,9 +900,23 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
struct request_queue *q;
const size_t max_stripes = min_t(size_t, INT_MAX,
SIZE_MAX / sizeof(atomic_t));
+ struct queue_limits lim = {
+ .max_hw_sectors = UINT_MAX,
+ .max_sectors = UINT_MAX,
+ .max_segment_size = UINT_MAX,
+ .max_segments = BIO_MAX_VECS,
+ .max_hw_discard_sectors = UINT_MAX,
+ .io_min = block_size,
+ .logical_block_size = block_size,
+ .physical_block_size = block_size,
+ };
uint64_t n;
int idx;
+ if (cached_bdev) {
+ d->stripe_size = bdev_io_opt(cached_bdev) >> SECTOR_SHIFT;
+ lim.io_opt = umax(block_size, bdev_io_opt(cached_bdev));
+ }
if (!d->stripe_size)
d->stripe_size = 1 << 31;
else if (d->stripe_size < BCH_MIN_STRIPE_SZ)
@@ -935,8 +949,21 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
goto out_ida_remove;
- d->disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!d->disk)
+ if (lim.logical_block_size > PAGE_SIZE && cached_bdev) {
+ /*
+ * This should only happen with BCACHE_SB_VERSION_BDEV.
+ * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
+ */
+ pr_info("bcache%i: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
+ idx, lim.logical_block_size,
+ PAGE_SIZE, bdev_logical_block_size(cached_bdev));
+
+ /* This also adjusts physical block size/min io size if needed */
+ lim.logical_block_size = bdev_logical_block_size(cached_bdev);
+ }
+
+ d->disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(d->disk))
goto out_bioset_exit;
set_capacity(d->disk, sectors);
@@ -949,27 +976,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
d->disk->private_data = d;
q = d->disk->queue;
- q->limits.max_hw_sectors = UINT_MAX;
- q->limits.max_sectors = UINT_MAX;
- q->limits.max_segment_size = UINT_MAX;
- q->limits.max_segments = BIO_MAX_VECS;
- blk_queue_max_discard_sectors(q, UINT_MAX);
- q->limits.io_min = block_size;
- q->limits.logical_block_size = block_size;
- q->limits.physical_block_size = block_size;
-
- if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) {
- /*
- * This should only happen with BCACHE_SB_VERSION_BDEV.
- * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
- */
- pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
- d->disk->disk_name, q->limits.logical_block_size,
- PAGE_SIZE, bdev_logical_block_size(cached_bdev));
-
- /* This also adjusts physical block size/min io size if needed */
- blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
- }
blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
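A condensed sketch of the allocation pattern the hunk above adopts: the limits travel in a stack-local struct queue_limits passed to blk_alloc_disk(), which now reports failure with an ERR_PTR() rather than NULL (field values here are illustrative):

static struct gendisk *example_alloc_disk(unsigned int block_size)
{
	struct queue_limits lim = {
		.logical_block_size	= block_size,
		.physical_block_size	= block_size,
		.io_min			= block_size,
		.max_hw_sectors		= UINT_MAX,
	};

	/* Check with IS_ERR(); blk_alloc_disk() no longer returns NULL */
	return blk_alloc_disk(&lim, NUMA_NO_NODE);
}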
@@ -1369,8 +1375,8 @@ static CLOSURE_CALLBACK(cached_dev_free)
if (dc->sb_disk)
put_page(virt_to_page(dc->sb_disk));
- if (dc->bdev_handle)
- bdev_release(dc->bdev_handle);
+ if (dc->bdev_file)
+ fput(dc->bdev_file);
wake_up(&unregister_wait);
@@ -1416,9 +1422,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
}
- dc->disk.stripe_size = q->limits.io_opt >> 9;
-
- if (dc->disk.stripe_size)
+ if (bdev_io_opt(dc->bdev))
dc->partial_stripes_expensive =
q->limits.raid_partial_stripes_expensive;
@@ -1428,9 +1432,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
if (ret)
return ret;
- blk_queue_io_opt(dc->disk.disk->queue,
- max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q)));
-
atomic_set(&dc->io_errors, 0);
dc->io_disable = false;
dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
@@ -1445,7 +1446,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
/* Cached device - bcache superblock */
static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
- struct bdev_handle *bdev_handle,
+ struct file *bdev_file,
struct cached_dev *dc)
{
const char *err = "cannot allocate memory";
@@ -1453,8 +1454,8 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
int ret = -ENOMEM;
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
- dc->bdev_handle = bdev_handle;
- dc->bdev = bdev_handle->bdev;
+ dc->bdev_file = bdev_file;
+ dc->bdev = file_bdev(bdev_file);
dc->sb_disk = sb_disk;
if (cached_dev_init(dc, sb->block_size << 9))
@@ -2218,8 +2219,8 @@ void bch_cache_release(struct kobject *kobj)
if (ca->sb_disk)
put_page(virt_to_page(ca->sb_disk));
- if (ca->bdev_handle)
- bdev_release(ca->bdev_handle);
+ if (ca->bdev_file)
+ fput(ca->bdev_file);
kfree(ca);
module_put(THIS_MODULE);
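Both release paths above follow the same substitution: the opaque struct bdev_handle is gone, the owner keeps the struct file returned at open time, derives the block_device on demand, and drops its reference with a plain fput(). A sketch of the lifecycle under the file-based API, with path and holder supplied by the caller:

	struct file *bdev_file;
	struct block_device *bdev;

	bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ, holder, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	bdev = file_bdev(bdev_file);	/* was bdev_handle->bdev */
	/* ... use bdev ... */
	fput(bdev_file);		/* was bdev_release(bdev_handle) */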
@@ -2339,18 +2340,18 @@ err_free:
}
static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
- struct bdev_handle *bdev_handle,
+ struct file *bdev_file,
struct cache *ca)
{
const char *err = NULL; /* must be set for any error case */
int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
- ca->bdev_handle = bdev_handle;
- ca->bdev = bdev_handle->bdev;
+ ca->bdev_file = bdev_file;
+ ca->bdev = file_bdev(bdev_file);
ca->sb_disk = sb_disk;
- if (bdev_max_discard_sectors((bdev_handle->bdev)))
+ if (bdev_max_discard_sectors(file_bdev(bdev_file)))
ca->discard = CACHE_DISCARD(&ca->sb);
ret = cache_alloc(ca);
@@ -2361,20 +2362,20 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
err = "cache_alloc(): cache device is too small";
else
err = "cache_alloc(): unknown error";
- pr_notice("error %pg: %s\n", bdev_handle->bdev, err);
+ pr_notice("error %pg: %s\n", file_bdev(bdev_file), err);
/*
* If we failed here, it means ca->kobj is not initialized yet,
* kobject_put() won't be called and there is no chance to
- * call bdev_release() to bdev in bch_cache_release(). So
- * we explicitly call bdev_release() here.
+ * call fput() on the bdev_file in bch_cache_release(). So
+ * we explicitly call fput() here.
*/
- bdev_release(bdev_handle);
+ fput(bdev_file);
return ret;
}
- if (kobject_add(&ca->kobj, bdev_kobj(bdev_handle->bdev), "bcache")) {
+ if (kobject_add(&ca->kobj, bdev_kobj(file_bdev(bdev_file)), "bcache")) {
pr_notice("error %pg: error calling kobject_add\n",
- bdev_handle->bdev);
+ file_bdev(bdev_file));
ret = -ENOMEM;
goto out;
}
@@ -2388,7 +2389,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto out;
}
- pr_info("registered cache device %pg\n", ca->bdev_handle->bdev);
+ pr_info("registered cache device %pg\n", file_bdev(ca->bdev_file));
out:
kobject_put(&ca->kobj);
@@ -2446,7 +2447,7 @@ struct async_reg_args {
char *path;
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
void *holder;
};
@@ -2457,7 +2458,7 @@ static void register_bdev_worker(struct work_struct *work)
container_of(work, struct async_reg_args, reg_work.work);
mutex_lock(&bch_register_lock);
- if (register_bdev(args->sb, args->sb_disk, args->bdev_handle,
+ if (register_bdev(args->sb, args->sb_disk, args->bdev_file,
args->holder) < 0)
fail = true;
mutex_unlock(&bch_register_lock);
@@ -2478,7 +2479,7 @@ static void register_cache_worker(struct work_struct *work)
container_of(work, struct async_reg_args, reg_work.work);
/* fput() will be called in bch_cache_release() */
- if (register_cache(args->sb, args->sb_disk, args->bdev_handle,
+ if (register_cache(args->sb, args->sb_disk, args->bdev_file,
args->holder))
fail = true;
@@ -2516,7 +2517,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
char *path = NULL;
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
- struct bdev_handle *bdev_handle, *bdev_handle2;
+ struct file *bdev_file, *bdev_file2;
void *holder = NULL;
ssize_t ret;
bool async_registration = false;
@@ -2549,15 +2550,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
ret = -EINVAL;
err = "failed to open device";
- bdev_handle = bdev_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
- if (IS_ERR(bdev_handle))
+ bdev_file = bdev_file_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
+ if (IS_ERR(bdev_file))
goto out_free_sb;
err = "failed to set blocksize";
- if (set_blocksize(bdev_handle->bdev, 4096))
+ if (set_blocksize(file_bdev(bdev_file), 4096))
goto out_blkdev_put;
- err = read_super(sb, bdev_handle->bdev, &sb_disk);
+ err = read_super(sb, file_bdev(bdev_file), &sb_disk);
if (err)
goto out_blkdev_put;
@@ -2569,13 +2570,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
}
/* Now reopen in exclusive mode with proper holder */
- bdev_handle2 = bdev_open_by_dev(bdev_handle->bdev->bd_dev,
+ bdev_file2 = bdev_file_open_by_dev(file_bdev(bdev_file)->bd_dev,
BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
- bdev_release(bdev_handle);
- bdev_handle = bdev_handle2;
- if (IS_ERR(bdev_handle)) {
- ret = PTR_ERR(bdev_handle);
- bdev_handle = NULL;
+ fput(bdev_file);
+ bdev_file = bdev_file2;
+ if (IS_ERR(bdev_file)) {
+ ret = PTR_ERR(bdev_file);
+ bdev_file = NULL;
if (ret == -EBUSY) {
dev_t dev;
@@ -2610,7 +2611,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
args->path = path;
args->sb = sb;
args->sb_disk = sb_disk;
- args->bdev_handle = bdev_handle;
+ args->bdev_file = bdev_file;
args->holder = holder;
register_device_async(args);
/* No wait and returns to user space */
@@ -2619,14 +2620,14 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (SB_IS_BDEV(sb)) {
mutex_lock(&bch_register_lock);
- ret = register_bdev(sb, sb_disk, bdev_handle, holder);
+ ret = register_bdev(sb, sb_disk, bdev_file, holder);
mutex_unlock(&bch_register_lock);
/* fput() will be called in cached_dev_free() */
if (ret < 0)
goto out_free_sb;
} else {
/* fput() will be called in bch_cache_release() */
- ret = register_cache(sb, sb_disk, bdev_handle, holder);
+ ret = register_cache(sb, sb_disk, bdev_file, holder);
if (ret)
goto out_free_sb;
}
@@ -2642,8 +2643,8 @@ out_free_holder:
out_put_sb_page:
put_page(virt_to_page(sb_disk));
out_blkdev_put:
- if (bdev_handle)
- bdev_release(bdev_handle);
+ if (bdev_file)
+ fput(bdev_file);
out_free_sb:
kfree(sb);
out_free_path:
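register_bcache() keeps its two-phase open across the conversion: probe the superblock through a read-only open with no holder, then reopen the same device exclusively by dev_t once read_super() has determined the proper holder object. Sketched with the new API, error handling trimmed for brevity:

	struct file *probe, *excl;

	probe = bdev_file_open_by_path(path, BLK_OPEN_READ, NULL, NULL);
	/* ... read_super() decides what the holder object will be ... */
	excl = bdev_file_open_by_dev(file_bdev(probe)->bd_dev,
				     BLK_OPEN_READ | BLK_OPEN_WRITE,
				     holder, NULL);
	fput(probe);			/* drop the probe-only reference */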
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index 9ab32abe5ed4..bca0f39e15b8 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -489,5 +489,5 @@ module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);
MODULE_DESCRIPTION(DM_NAME " bio prison");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 13c65b7e1ed6..098bf526136c 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1292,7 +1292,8 @@ static void dmio_complete(unsigned long error, void *context)
}
static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
- unsigned int n_sectors, unsigned int offset)
+ unsigned int n_sectors, unsigned int offset,
+ unsigned short ioprio)
{
int r;
struct dm_io_request io_req = {
@@ -1315,7 +1316,7 @@ static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
io_req.mem.ptr.vma = (char *)b->data + offset;
}
- r = dm_io(&io_req, 1, &region, NULL);
+ r = dm_io(&io_req, 1, &region, NULL, ioprio);
if (unlikely(r))
b->end_io(b, errno_to_blk_status(r));
}
@@ -1331,7 +1332,8 @@ static void bio_complete(struct bio *bio)
}
static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
- unsigned int n_sectors, unsigned int offset)
+ unsigned int n_sectors, unsigned int offset,
+ unsigned short ioprio)
{
struct bio *bio;
char *ptr;
@@ -1339,13 +1341,14 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
if (!bio) {
- use_dmio(b, op, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset, ioprio);
return;
}
bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_complete;
bio->bi_private = b;
+ bio->bi_ioprio = ioprio;
ptr = (char *)b->data + offset;
len = n_sectors << SECTOR_SHIFT;
@@ -1368,7 +1371,7 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
return sector;
}
-static void submit_io(struct dm_buffer *b, enum req_op op,
+static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
void (*end_io)(struct dm_buffer *, blk_status_t))
{
unsigned int n_sectors;
@@ -1398,9 +1401,9 @@ static void submit_io(struct dm_buffer *b, enum req_op op,
}
if (b->data_mode != DATA_MODE_VMALLOC)
- use_bio(b, op, sector, n_sectors, offset);
+ use_bio(b, op, sector, n_sectors, offset, ioprio);
else
- use_dmio(b, op, sector, n_sectors, offset);
+ use_dmio(b, op, sector, n_sectors, offset, ioprio);
}
/*
@@ -1456,7 +1459,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
b->write_end = b->dirty_end;
if (!write_list)
- submit_io(b, REQ_OP_WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
else
list_add_tail(&b->write_list, write_list);
}
@@ -1470,7 +1473,7 @@ static void __flush_write_list(struct list_head *write_list)
struct dm_buffer *b =
list_entry(write_list->next, struct dm_buffer, write_list);
list_del(&b->write_list);
- submit_io(b, REQ_OP_WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
cond_resched();
}
blk_finish_plug(&plug);
@@ -1852,7 +1855,8 @@ static void read_endio(struct dm_buffer *b, blk_status_t status)
* and uses dm_bufio_mark_buffer_dirty to write new data back).
*/
static void *new_read(struct dm_bufio_client *c, sector_t block,
- enum new_flag nf, struct dm_buffer **bp)
+ enum new_flag nf, struct dm_buffer **bp,
+ unsigned short ioprio)
{
int need_submit = 0;
struct dm_buffer *b;
@@ -1905,7 +1909,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
return NULL;
if (need_submit)
- submit_io(b, REQ_OP_READ, read_endio);
+ submit_io(b, REQ_OP_READ, ioprio, read_endio);
if (nf != NF_GET) /* we already tested this condition above */
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
@@ -1926,32 +1930,46 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
struct dm_buffer **bp)
{
- return new_read(c, block, NF_GET, bp);
+ return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);
-void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
- struct dm_buffer **bp)
+static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp, unsigned short ioprio)
{
if (WARN_ON_ONCE(dm_bufio_in_request()))
return ERR_PTR(-EINVAL);
- return new_read(c, block, NF_READ, bp);
+ return new_read(c, block, NF_READ, bp, ioprio);
+}
+
+void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp)
+{
+ return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);
+void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp, unsigned short ioprio)
+{
+ return __dm_bufio_read(c, block, bp, ioprio);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio);
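With the _with_ioprio variants exported, a dm-bufio user can tag background metadata reads so they do not compete with foreground I/O. A hypothetical caller (the client c, the block number, and the priority choice are illustrative, not from the patch):

	struct dm_buffer *b;
	void *data;

	data = dm_bufio_read_with_ioprio(c, block, &b,
			IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
	if (!IS_ERR_OR_NULL(data)) {
		/* ... parse the block ... */
		dm_bufio_release(b);
	}

Callers that do not care keep the old entry points, which now simply forward IOPRIO_DEFAULT.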
+
void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
struct dm_buffer **bp)
{
if (WARN_ON_ONCE(dm_bufio_in_request()))
return ERR_PTR(-EINVAL);
- return new_read(c, block, NF_FRESH, bp);
+ return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
-void dm_bufio_prefetch(struct dm_bufio_client *c,
- sector_t block, unsigned int n_blocks)
+static void __dm_bufio_prefetch(struct dm_bufio_client *c,
+ sector_t block, unsigned int n_blocks,
+ unsigned short ioprio)
{
struct blk_plug plug;
@@ -1987,7 +2005,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
dm_bufio_unlock(c);
if (need_submit)
- submit_io(b, REQ_OP_READ, read_endio);
+ submit_io(b, REQ_OP_READ, ioprio, read_endio);
dm_bufio_release(b);
cond_resched();
@@ -2002,8 +2020,20 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
flush_plug:
blk_finish_plug(&plug);
}
+
+void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks)
+{
+ return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT);
+}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
+void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block,
+ unsigned int n_blocks, unsigned short ioprio)
+{
+ return __dm_bufio_prefetch(c, block, n_blocks, ioprio);
+}
+EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio);
+
void dm_bufio_release(struct dm_buffer *b)
{
struct dm_bufio_client *c = b->c;
@@ -2167,7 +2197,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
if (WARN_ON_ONCE(dm_bufio_in_request()))
return -EINVAL;
- return dm_io(&io_req, 1, &io_reg, NULL);
+ return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
@@ -2191,7 +2221,7 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c
if (WARN_ON_ONCE(dm_bufio_in_request()))
return -EINVAL; /* discards are optional */
- return dm_io(&io_req, 1, &io_reg, NULL);
+ return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
@@ -2968,6 +2998,6 @@ MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
-MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
+MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 8bd2ad743d9a..2ed894155cab 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1947,7 +1947,7 @@ static void __exit smq_exit(void)
module_init(smq_init);
module_exit(smq_exit);
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f745f8508243..9a74c6316c5d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -53,15 +53,17 @@
struct convert_context {
struct completion restart;
struct bio *bio_in;
- struct bio *bio_out;
struct bvec_iter iter_in;
+ struct bio *bio_out;
struct bvec_iter iter_out;
- u64 cc_sector;
atomic_t cc_pending;
+ u64 cc_sector;
union {
struct skcipher_request *req;
struct aead_request *req_aead;
} r;
+ bool aead_recheck;
+ bool aead_failed;
};
@@ -82,6 +84,8 @@ struct dm_crypt_io {
blk_status_t error;
sector_t sector;
+ struct bvec_iter saved_bi_iter;
+
struct rb_node rb_node;
} CRYPTO_MINALIGN_ATTR;
@@ -1370,10 +1374,13 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
if (r == -EBADMSG) {
sector_t s = le64_to_cpu(*sector);
- DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
- ctx->bio_in->bi_bdev, s);
- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
- ctx->bio_in, s, 0);
+ ctx->aead_failed = true;
+ if (ctx->aead_recheck) {
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+ ctx->bio_in, s, 0);
+ }
}
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
@@ -1681,6 +1688,7 @@ retry:
GFP_NOIO, &cc->bs);
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
+ clone->bi_ioprio = io->base_bio->bi_ioprio;
remaining_size = size;
@@ -1757,6 +1765,8 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
io->base_bio = bio;
io->sector = sector;
io->error = 0;
+ io->ctx.aead_recheck = false;
+ io->ctx.aead_failed = false;
io->ctx.r.req = NULL;
io->integrity_metadata = NULL;
io->integrity_metadata_from_pool = false;
@@ -1768,6 +1778,8 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
atomic_inc(&io->io_pending);
}
+static void kcryptd_queue_read(struct dm_crypt_io *io);
+
/*
* One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer.
@@ -1781,6 +1793,15 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
if (!atomic_dec_and_test(&io->io_pending))
return;
+ if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
+ cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) {
+ io->ctx.aead_recheck = true;
+ io->ctx.aead_failed = false;
+ io->error = 0;
+ kcryptd_queue_read(io);
+ return;
+ }
+
if (io->ctx.r.req)
crypt_free_req(cc, io->ctx.r.req, base_bio);
@@ -1816,15 +1837,19 @@ static void crypt_endio(struct bio *clone)
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->cc;
unsigned int rw = bio_data_dir(clone);
- blk_status_t error;
+ blk_status_t error = clone->bi_status;
+
+ if (io->ctx.aead_recheck && !error) {
+ kcryptd_queue_crypt(io);
+ return;
+ }
/*
* free the processed pages
*/
- if (rw == WRITE)
+ if (rw == WRITE || io->ctx.aead_recheck)
crypt_free_buffer_pages(cc, clone);
- error = clone->bi_status;
bio_put(clone);
if (rw == READ && !error) {
@@ -1845,6 +1870,22 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
struct crypt_config *cc = io->cc;
struct bio *clone;
+ if (io->ctx.aead_recheck) {
+ if (!(gfp & __GFP_DIRECT_RECLAIM))
+ return 1;
+ crypt_inc_pending(io);
+ clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+ if (unlikely(!clone)) {
+ crypt_dec_pending(io);
+ return 1;
+ }
+ clone->bi_iter.bi_sector = cc->start + io->sector;
+ crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
+ io->saved_bi_iter = clone->bi_iter;
+ dm_submit_bio_remap(io->base_bio, clone);
+ return 0;
+ }
+
/*
* We need the original biovec array in order to decrypt the whole bio
* data *afterwards* -- thanks to immutable biovecs we don't need to
@@ -1924,7 +1965,6 @@ continue_locked:
schedule();
- set_current_state(TASK_RUNNING);
spin_lock_irq(&cc->write_thread_lock);
goto continue_locked;
@@ -2071,6 +2111,12 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
io->ctx.bio_out = clone;
io->ctx.iter_out = clone->bi_iter;
+ if (crypt_integrity_aead(cc)) {
+ bio_copy_data(clone, io->base_bio);
+ io->ctx.bio_in = clone;
+ io->ctx.iter_in = clone->bi_iter;
+ }
+
sector += bio_sectors(clone);
crypt_inc_pending(io);
@@ -2107,6 +2153,14 @@ dec:
static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
{
+ if (io->ctx.aead_recheck) {
+ if (!io->error) {
+ io->ctx.bio_in->bi_iter = io->saved_bi_iter;
+ bio_copy_data(io->base_bio, io->ctx.bio_in);
+ }
+ crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
+ bio_put(io->ctx.bio_in);
+ }
crypt_dec_pending(io);
}
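Taken together, the dm-crypt hunks implement a read retry for AEAD tag mismatches: the first failing pass only records ctx.aead_failed, crypt_dec_pending() then restarts the read with aead_recheck set, the data is re-read into a private clone, decrypted a second time, and only a failure on that pass is logged and surfaced as BLK_STS_PROTECTION. The retry decision, condensed from the hunks above (a restatement, not new logic):

	/* in crypt_dec_pending(), once the last reference is dropped */
	if (!io->ctx.aead_recheck && io->ctx.aead_failed &&
	    cc->on_disk_tag_size && bio_data_dir(io->base_bio) == READ) {
		io->ctx.aead_recheck = true;	/* second, authoritative pass */
		io->ctx.aead_failed = false;
		io->error = 0;
		kcryptd_queue_read(io);		/* re-read into a clone */
		return;				/* completion deferred */
	}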
@@ -2136,11 +2190,17 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
crypt_inc_pending(io);
- crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
- io->sector);
+ if (io->ctx.aead_recheck) {
+ io->ctx.cc_sector = io->sector + cc->iv_offset;
+ r = crypt_convert(cc, &io->ctx,
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ } else {
+ crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
+ io->sector);
- r = crypt_convert(cc, &io->ctx,
- test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ r = crypt_convert(cc, &io->ctx,
+ test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+ }
/*
* Crypto API backlogged the request, because its queue was full
* and we're in softirq context, so continue from a workqueue
@@ -2182,10 +2242,13 @@ static void kcryptd_async_done(void *data, int error)
if (error == -EBADMSG) {
sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
- DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
- ctx->bio_in->bi_bdev, s);
- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
- ctx->bio_in, s, 0);
+ ctx->aead_failed = true;
+ if (ctx->aead_recheck) {
+ DMERR_LIMIT("%pg: INTEGRITY AEAD ERROR, sector %llu",
+ ctx->bio_in->bi_bdev, s);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-aead",
+ ctx->bio_in, s, 0);
+ }
io->error = BLK_STS_PROTECTION;
} else if (error < 0)
io->error = BLK_STS_IOERR;
@@ -2233,7 +2296,11 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
* irqs_disabled(): the kernel may run some IO completion from the idle thread, but
* it is being executed with irqs disabled.
*/
- if (!(in_hardirq() || irqs_disabled())) {
+ if (in_hardirq() || irqs_disabled()) {
+ INIT_WORK(&io->work, kcryptd_crypt);
+ queue_work(system_bh_wq, &io->work);
+ return;
+ } else {
kcryptd_crypt(&io->work);
return;
}
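The inverted condition in kcryptd_queue_crypt() routes completions that arrive in atomic context onto the BH workqueue introduced in the same release, replacing the earlier tasklet path; when the context allows it, the work still runs inline as before. As a standalone sketch of the dispatch:

	if (in_hardirq() || irqs_disabled()) {
		/* atomic context: defer to softirq via the BH workqueue */
		INIT_WORK(&io->work, kcryptd_crypt);
		queue_work(system_bh_wq, &io->work);
	} else {
		/* process context: run the crypto inline */
		kcryptd_crypt(&io->work);
	}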
@@ -3110,7 +3177,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
sval = strchr(opt_string + strlen("integrity:"), ':') + 1;
if (!strcasecmp(sval, "aead")) {
set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
- } else if (strcasecmp(sval, "none")) {
+ } else if (strcasecmp(sval, "none")) {
ti->error = "Unknown integrity profile";
return -EINVAL;
}
@@ -3639,7 +3706,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 24, 0},
+ .version = {1, 25, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 12a377e06d02..1a33820c9f46 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -573,5 +573,5 @@ static struct target_type dust_target = {
module_dm(dust);
MODULE_DESCRIPTION(DM_NAME " dust test target");
-MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
+MODULE_AUTHOR("Bryan Gurney <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 435b45201f4d..b70d4016c2ac 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -454,6 +454,6 @@ static struct target_type ebs_target = {
};
module_dm(ebs);
-MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@lists.linux.dev>");
MODULE_DESCRIPTION(DM_NAME " emulated block size target");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 7916ed9f10e8..731467d4ed10 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -690,5 +690,5 @@ static struct target_type flakey_target = {
module_dm(flakey);
MODULE_DESCRIPTION(DM_NAME " flakey target");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c5f03aab4552..d822ab2f739b 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -278,6 +278,8 @@ struct dm_integrity_c {
atomic64_t number_of_mismatches;
+ mempool_t recheck_pool;
+
struct notifier_block reboot_notifier;
};
@@ -553,7 +555,7 @@ static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
}
}
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r))
return r;
@@ -1071,7 +1073,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
io_loc.sector = ic->start + SB_SECTORS + sector;
io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
"reading journal" : "writing journal", r);
@@ -1188,7 +1190,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, u
io_loc.sector = target;
io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
fn(-1UL, data);
@@ -1517,7 +1519,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
fr.io_reg.count = 0,
fr.ic = ic;
init_completion(&fr.comp);
- r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+ r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
BUG_ON(r);
}
@@ -1689,6 +1691,77 @@ failed:
get_random_bytes(result, ic->tag_size);
}
+static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
+{
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ struct dm_integrity_c *ic = dio->ic;
+ struct bvec_iter iter;
+ struct bio_vec bv;
+ sector_t sector, logical_sector, area, offset;
+ struct page *page;
+ void *buffer;
+
+ get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
+ dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
+ &dio->metadata_offset);
+ sector = get_data_sector(ic, area, offset);
+ logical_sector = dio->range.logical_sector;
+
+ page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
+ buffer = page_to_virt(page);
+
+ __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
+ unsigned pos = 0;
+
+ do {
+ char *mem;
+ int r;
+ struct dm_io_request io_req;
+ struct dm_io_region io_loc;
+ io_req.bi_opf = REQ_OP_READ;
+ io_req.mem.type = DM_IO_KMEM;
+ io_req.mem.ptr.addr = buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = ic->io;
+ io_loc.bdev = ic->dev->bdev;
+ io_loc.sector = sector;
+ io_loc.count = ic->sectors_per_block;
+
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ if (unlikely(r)) {
+ dio->bi_status = errno_to_blk_status(r);
+ goto free_ret;
+ }
+
+ integrity_sector_checksum(ic, logical_sector, buffer, checksum);
+ r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
+ &dio->metadata_offset, ic->tag_size, TAG_CMP);
+ if (r) {
+ if (r > 0) {
+ DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
+ bio->bi_bdev, logical_sector);
+ atomic64_inc(&ic->number_of_mismatches);
+ dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
+ bio, logical_sector, 0);
+ r = -EILSEQ;
+ }
+ dio->bi_status = errno_to_blk_status(r);
+ goto free_ret;
+ }
+
+ mem = bvec_kmap_local(&bv);
+ memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
+ kunmap_local(mem);
+
+ pos += ic->sectors_per_block << SECTOR_SHIFT;
+ sector += ic->sectors_per_block;
+ logical_sector += ic->sectors_per_block;
+ } while (pos < bv.bv_len);
+ }
+free_ret:
+ mempool_free(page, &ic->recheck_pool);
+}
+
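integrity_recheck() stages every re-read through a single page drawn from the new recheck_pool, so the error path needs no allocation that could fail under memory pressure. The pool pattern in isolation, assuming a min_nr of one order-0 page as set up in the ctr below:

	mempool_t pool;
	struct page *page;
	void *buffer;

	if (mempool_init_page_pool(&pool, 1, 0))	/* one order-0 page */
		return -ENOMEM;

	page = mempool_alloc(&pool, GFP_NOIO);	/* may sleep, never fails */
	buffer = page_to_virt(page);
	/* ... one ic->sectors_per_block read at a time into buffer ... */
	mempool_free(page, &pool);
	mempool_exit(&pool);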
static void integrity_metadata(struct work_struct *w)
{
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
@@ -1776,15 +1849,8 @@ again:
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
if (r > 0) {
- sector_t s;
-
- s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
- DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
- bio->bi_bdev, s);
- r = -EILSEQ;
- atomic64_inc(&ic->number_of_mismatches);
- dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
- bio, s, 0);
+ integrity_recheck(dio, checksums);
+ goto skip_io;
}
if (likely(checksums != checksums_onstack))
kfree(checksums);
@@ -2740,7 +2806,7 @@ next_chunk:
io_loc.sector = get_data_sector(ic, area, offset);
io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dm_integrity_io_error(ic, "reading data", r);
goto err;
@@ -3419,6 +3485,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
limits->dma_alignment = limits->logical_block_size - 1;
}
+ limits->max_integrity_segments = USHRT_MAX;
}
static void calculate_journal_section_size(struct dm_integrity_c *ic)
@@ -3586,7 +3653,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
blk_integrity_register(disk, &bi);
- blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}
static void dm_integrity_free_page_list(struct page_list *pl)
@@ -4261,6 +4327,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
goto bad;
}
+ r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
+ if (r) {
+ ti->error = "Cannot allocate mempool";
+ goto bad;
+ }
+
ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
if (!ic->metadata_wq) {
@@ -4609,6 +4681,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
+ mempool_exit(&ic->recheck_pool);
mempool_exit(&ic->journal_io_mempool);
if (ic->io)
dm_io_client_destroy(ic->io);
@@ -4661,7 +4734,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index f053ce245814..7409490259d1 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -305,7 +305,7 @@ static void km_dp_init(struct dpages *dp, void *data)
*/
static void do_region(const blk_opf_t opf, unsigned int region,
struct dm_io_region *where, struct dpages *dp,
- struct io *io)
+ struct io *io, unsigned short ioprio)
{
struct bio *bio;
struct page *page;
@@ -354,6 +354,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
&io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_end_io = endio;
+ bio->bi_ioprio = ioprio;
store_io_and_region_in_bio(bio, io, region);
if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
@@ -383,7 +384,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
struct dm_io_region *where, struct dpages *dp,
- struct io *io, int sync)
+ struct io *io, int sync, unsigned short ioprio)
{
int i;
struct dpages old_pages = *dp;
@@ -400,7 +401,7 @@ static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
for (i = 0; i < num_regions; i++) {
*dp = old_pages;
if (where[i].count || (opf & REQ_PREFLUSH))
- do_region(opf, i, where + i, dp, io);
+ do_region(opf, i, where + i, dp, io, ioprio);
}
/*
@@ -425,7 +426,7 @@ static void sync_io_complete(unsigned long error, void *context)
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
- unsigned long *error_bits)
+ unsigned long *error_bits, unsigned short ioprio)
{
struct io *io;
struct sync_io sio;
@@ -447,7 +448,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(opf, num_regions, where, dp, io, 1);
+ dispatch_io(opf, num_regions, where, dp, io, 1, ioprio);
wait_for_completion_io(&sio.wait);
@@ -459,7 +460,8 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
static int async_io(struct dm_io_client *client, unsigned int num_regions,
struct dm_io_region *where, blk_opf_t opf,
- struct dpages *dp, io_notify_fn fn, void *context)
+ struct dpages *dp, io_notify_fn fn, void *context,
+ unsigned short ioprio)
{
struct io *io;
@@ -479,7 +481,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(opf, num_regions, where, dp, io, 0);
+ dispatch_io(opf, num_regions, where, dp, io, 0, ioprio);
return 0;
}
@@ -521,7 +523,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
}
int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
- struct dm_io_region *where, unsigned long *sync_error_bits)
+ struct dm_io_region *where, unsigned long *sync_error_bits,
+ unsigned short ioprio)
{
int r;
struct dpages dp;
@@ -532,11 +535,11 @@ int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
if (!io_req->notify.fn)
return sync_io(io_req->client, num_regions, where,
- io_req->bi_opf, &dp, sync_error_bits);
+ io_req->bi_opf, &dp, sync_error_bits, ioprio);
return async_io(io_req->client, num_regions, where,
io_req->bi_opf, &dp, io_req->notify.fn,
- io_req->notify.context);
+ io_req->notify.context, ioprio);
}
EXPORT_SYMBOL(dm_io);
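After the signature change every dm_io() caller states a priority explicitly: existing users pass IOPRIO_DEFAULT, while targets that care can forward the priority of an originating bio. A synchronous read under the new signature, with bdev, buf and client supplied by the caller:

	struct dm_io_request req = {
		.bi_opf		= REQ_OP_READ,
		.mem.type	= DM_IO_KMEM,
		.mem.ptr.addr	= buf,
		.notify.fn	= NULL,		/* NULL => synchronous */
		.client		= client,
	};
	struct dm_io_region where = {
		.bdev	= bdev,
		.sector	= 0,
		.count	= 8,
	};

	return dm_io(&req, 1, &where, NULL, IOPRIO_DEFAULT);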
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 3b1ad7127cb8..c2c07bfa6471 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -25,7 +25,7 @@
#include <linux/ima.h>
#define DM_MSG_PREFIX "ioctl"
-#define DM_DRIVER_EMAIL "dm-devel@redhat.com"
+#define DM_DRIVER_EMAIL "dm-devel@lists.linux.dev"
struct dm_file {
/*
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 36bcfdccae04..6ea75436a433 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -578,9 +578,9 @@ static int run_io_job(struct kcopyd_job *job)
io_job_start(job->kc->throttle);
if (job->op == REQ_OP_READ)
- r = dm_io(&io_req, 1, &job->source, NULL);
+ r = dm_io(&io_req, 1, &job->source, NULL, IOPRIO_DEFAULT);
else
- r = dm_io(&io_req, job->num_dests, job->dests, NULL);
+ r = dm_io(&io_req, job->num_dests, job->dests, NULL, IOPRIO_DEFAULT);
return r;
}
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 7e4f27e86150..9fbb4b48fb2b 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -926,5 +926,5 @@ module_init(userspace_dirty_log_init);
module_exit(userspace_dirty_log_exit);
MODULE_DESCRIPTION(DM_NAME " userspace dirty log link");
-MODULE_AUTHOR("Jonathan Brassow <dm-devel@redhat.com>");
+MODULE_AUTHOR("Jonathan Brassow <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index f9f84236dfcd..9d85d045f9d9 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -300,7 +300,7 @@ static int rw_header(struct log_c *lc, enum req_op op)
{
lc->io_req.bi_opf = op;
- return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
+ return dm_io(&lc->io_req, 1, &lc->header_location, NULL, IOPRIO_DEFAULT);
}
static int flush_header(struct log_c *lc)
@@ -313,7 +313,7 @@ static int flush_header(struct log_c *lc)
lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
- return dm_io(&lc->io_req, 1, &null_location, NULL);
+ return dm_io(&lc->io_req, 1, &null_location, NULL, IOPRIO_DEFAULT);
}
static int read_header(struct log_c *log)
@@ -908,5 +908,5 @@ module_init(dm_dirty_log_init);
module_exit(dm_dirty_log_exit);
MODULE_DESCRIPTION(DM_NAME " dirty region log");
-MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index bea3cda9938e..05d1328d1811 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -2266,5 +2266,5 @@ module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs,
MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
MODULE_DESCRIPTION(DM_NAME " multipath target");
-MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
+MODULE_AUTHOR("Sistina Software <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-ps-round-robin.c b/drivers/md/dm-ps-round-robin.c
index 0f04b673597a..d1745b123dc1 100644
--- a/drivers/md/dm-ps-round-robin.c
+++ b/drivers/md/dm-ps-round-robin.c
@@ -240,5 +240,5 @@ module_init(dm_rr_init);
module_exit(dm_rr_exit);
MODULE_DESCRIPTION(DM_NAME " round-robin multipath path selector");
-MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
+MODULE_AUTHOR("Sistina Software <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index eb009d6bb03a..abe88d1e6735 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -213,6 +213,7 @@ struct raid_dev {
#define RT_FLAG_RS_IN_SYNC 6
#define RT_FLAG_RS_RESYNCING 7
#define RT_FLAG_RS_GROW 8
+#define RT_FLAG_RS_FROZEN 9
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -3240,11 +3241,12 @@ size_check:
rs->md.ro = 1;
rs->md.in_sync = 1;
- /* Keep array frozen until resume. */
- set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
-
/* Has to be held on running the array */
mddev_suspend_and_lock_nointr(&rs->md);
+
+ /* Keep array frozen until resume. */
+ md_frozen_sync_thread(&rs->md);
+
r = md_run(&rs->md);
rs->md.in_sync = 0; /* Assume already marked dirty */
if (r) {
@@ -3329,17 +3331,18 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
struct mddev *mddev = &rs->md;
/*
- * If we're reshaping to add disk(s)), ti->len and
+ * If we're reshaping to add disk(s), ti->len and
* mddev->array_sectors will differ during the process
* (ti->len > mddev->array_sectors), so we have to requeue
* bios with addresses > mddev->array_sectors here or
* there will occur accesses past EOD of the component
* data images thus erroring the raid set.
*/
- if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
+ if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors))
return DM_MAPIO_REQUEUE;
- md_handle_request(mddev, bio);
+ if (unlikely(!md_handle_request(mddev, bio)))
+ return DM_MAPIO_REQUEUE;
return DM_MAPIO_SUBMITTED;
}
@@ -3718,21 +3721,33 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
+ int ret = 0;
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
- if (!strcasecmp(argv[0], "frozen"))
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- else
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (test_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags) ||
+ test_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags))
+ return -EBUSY;
- if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
- if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
- }
- } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
+ if (!strcasecmp(argv[0], "frozen")) {
+ ret = mddev_lock(mddev);
+ if (ret)
+ return ret;
+
+ md_frozen_sync_thread(mddev);
+ mddev_unlock(mddev);
+ } else if (!strcasecmp(argv[0], "idle")) {
+ ret = mddev_lock(mddev);
+ if (ret)
+ return ret;
+
+ md_idle_sync_thread(mddev);
+ mddev_unlock(mddev);
+ }
+
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
return -EBUSY;
else if (!strcasecmp(argv[0], "resync"))
; /* MD_RECOVERY_NEEDED set below */
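raid_message() now changes sync_thread state only under mddev_lock() and through the md helpers added in the same cycle, instead of poking MD_RECOVERY_FROZEN and reaping the thread by hand. The locked transition, shown standalone (md_frozen_sync_thread() and md_idle_sync_thread() are the new md helpers used above):

	int ret = mddev_lock(mddev);	/* interruptible: may return -EINTR */

	if (ret)
		return ret;
	md_frozen_sync_thread(mddev);	/* or md_idle_sync_thread(mddev) */
	mddev_unlock(mddev);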
@@ -3791,15 +3806,46 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
}
+static void raid_presuspend(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+ struct mddev *mddev = &rs->md;
+
+ /*
+ * From now on, disallow raid_message() to change sync_thread until
+ * resume, raid_postsuspend() is too late.
+ */
+ set_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
+
+ if (!reshape_interrupted(mddev))
+ return;
+
+ /*
+ * For raid456, if reshape is interrupted, IO across the reshape
+ * position will never make progress, while the caller waits for it
+ * to complete. Inform raid456 to handle such IO to prevent deadlock.
+ */
+ if (mddev->pers && mddev->pers->prepare_suspend)
+ mddev->pers->prepare_suspend(mddev);
+}
+
+static void raid_presuspend_undo(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+
+ clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
+}
+
static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
- /* Writes have to be stopped before suspending to avoid deadlocks. */
- if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
- md_stop_writes(&rs->md);
-
+ /*
+ * sync_thread must be stopped during suspend, and writes have
+ * to be stopped before suspending to avoid deadlocks.
+ */
+ md_stop_writes(&rs->md);
mddev_suspend(&rs->md, false);
}
}
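The new hooks give dm-raid a fence around the entire suspend window. Assuming the usual dm core ordering (an assumption about the caller, not stated in these hunks), the cycle looks roughly like this:

	/* dm core, on suspend: */
	raid_presuspend(ti);	/* sets RT_FLAG_RS_FROZEN; preps raid456   */
	raid_postsuspend(ti);	/* md_stop_writes() + mddev_suspend()      */
	/* raid_message() answers -EBUSY anywhere in this window           */
	raid_preresume(ti);	/* resize/reshape checks                   */
	raid_resume(ti);	/* md_unfrozen_sync_thread(); clears flag  */
	/* on an aborted suspend, raid_presuspend_undo() clears the flag
	 * instead, so messages work again without a resume */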
@@ -4012,8 +4058,6 @@ static int raid_preresume(struct dm_target *ti)
}
/* Check for any resize/reshape on @rs and adjust/initiate */
- /* Be prepared for mddev_resume() in raid_resume() */
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
mddev->resync_min = mddev->recovery_cp;
@@ -4047,7 +4091,9 @@ static void raid_resume(struct dm_target *ti)
* Take this opportunity to check whether any failed
* devices are reachable again.
*/
+ mddev_lock_nointr(mddev);
attempt_restore_of_faulty_devices(rs);
+ mddev_unlock(mddev);
}
if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
@@ -4055,10 +4101,13 @@ static void raid_resume(struct dm_target *ti)
if (mddev->delta_disks < 0)
rs_set_capacity(rs);
+ WARN_ON_ONCE(!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery));
+ WARN_ON_ONCE(test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+ clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
mddev_lock_nointr(mddev);
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev->ro = 0;
mddev->in_sync = 0;
+ md_unfrozen_sync_thread(mddev);
mddev_unlock_and_resume(mddev);
}
}
@@ -4074,6 +4123,8 @@ static struct target_type raid_target = {
.message = raid_message,
.iterate_devices = raid_iterate_devices,
.io_hints = raid_io_hints,
+ .presuspend = raid_presuspend,
+ .presuspend_undo = raid_presuspend_undo,
.postsuspend = raid_postsuspend,
.preresume = raid_preresume,
.resume = raid_resume,
@@ -4091,6 +4142,6 @@ MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
-MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
-MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Neil Brown <dm-devel@lists.linux.dev>");
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ddcb2bc4a617..9511dae5b556 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -278,7 +278,7 @@ static int mirror_flush(struct dm_target *ti)
}
error_bits = -1;
- dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
+ dm_io(&io_req, ms->nr_mirrors, io, &error_bits, IOPRIO_DEFAULT);
if (unlikely(error_bits != 0)) {
for (i = 0; i < ms->nr_mirrors; i++)
if (test_bit(i, &error_bits))
@@ -554,7 +554,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
map_region(&io, m, bio);
bio_set_m(bio, m);
- BUG_ON(dm_io(&io_req, 1, &io, NULL));
+ BUG_ON(dm_io(&io_req, 1, &io, NULL, IOPRIO_DEFAULT));
}
static inline int region_in_sync(struct mirror_set *ms, region_t region,
@@ -681,7 +681,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
*/
bio_set_m(bio, get_default_mirror(ms));
- BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
+ BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL, IOPRIO_DEFAULT));
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 852cfa37d48a..a4550975c27d 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -723,5 +723,5 @@ void dm_rh_start_recovery(struct dm_region_hash *rh)
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
MODULE_DESCRIPTION(DM_NAME " region hash");
-MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 15649921f2a9..568d10842b1f 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -223,7 +223,7 @@ static void do_metadata(struct work_struct *work)
{
struct mdata_req *req = container_of(work, struct mdata_req, work);
- req->result = dm_io(req->io_req, 1, req->where, NULL);
+ req->result = dm_io(req->io_req, 1, req->where, NULL, IOPRIO_DEFAULT);
}
/*
@@ -247,7 +247,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
struct mdata_req req;
if (!metadata)
- return dm_io(&io_req, 1, &where, NULL);
+ return dm_io(&io_req, 1, &where, NULL, IOPRIO_DEFAULT);
req.where = &where;
req.io_req = &io_req;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 07c7f9795b10..4793ad2aa1f7 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -453,12 +453,13 @@ static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bi
cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
- if (r)
+ if (r) {
/*
* We reused an old cell; we can get rid of
* the new one.
*/
dm_bio_prison_free_cell(pool->prison, cell_prealloc);
+ }
return r;
}
@@ -707,9 +708,10 @@ static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
(void) sector_div(e, pool->sectors_per_block);
}
- if (e < b)
+ if (e < b) {
/* Can happen if the bio is within a single block. */
e = b;
+ }
*begin = b;
*end = e;
@@ -721,13 +723,14 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
sector_t bi_sector = bio->bi_iter.bi_sector;
bio_set_dev(bio, tc->pool_dev->bdev);
- if (block_size_is_power_of_two(pool))
+ if (block_size_is_power_of_two(pool)) {
bio->bi_iter.bi_sector =
(block << pool->sectors_per_block_shift) |
(bi_sector & (pool->sectors_per_block - 1));
- else
+ } else {
bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
sector_div(bi_sector, pool->sectors_per_block);
+ }
}
static void remap_to_origin(struct thin_c *tc, struct bio *bio)
@@ -1401,9 +1404,10 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
if (pool->pf.zero_new_blocks) {
if (io_overwrites_block(pool, bio))
remap_and_issue_overwrite(tc, bio, data_block, m);
- else
+ else {
ll_zero(tc, m, data_block * pool->sectors_per_block,
(data_block + 1) * pool->sectors_per_block);
+ }
} else
process_prepared_mapping(m);
}
@@ -1416,17 +1420,17 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
sector_t virt_block_begin = virt_block * pool->sectors_per_block;
sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;
- if (virt_block_end <= tc->origin_size)
+ if (virt_block_end <= tc->origin_size) {
schedule_copy(tc, virt_block, tc->origin_dev,
virt_block, data_dest, cell, bio,
pool->sectors_per_block);
- else if (virt_block_begin < tc->origin_size)
+ } else if (virt_block_begin < tc->origin_size) {
schedule_copy(tc, virt_block, tc->origin_dev,
virt_block, data_dest, cell, bio,
tc->origin_size - virt_block_begin);
- else
+ } else
schedule_zero(tc, virt_block, data_dest, cell, bio);
}
@@ -4560,5 +4564,5 @@ module_param_named(no_space_timeout, no_space_timeout_secs, uint, 0644);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-vdo/Kconfig b/drivers/md/dm-vdo/Kconfig
new file mode 100644
index 000000000000..111ecd2c2a24
--- /dev/null
+++ b/drivers/md/dm-vdo/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DM_VDO
+ tristate "VDO: deduplication and compression target"
+ depends on 64BIT
+ depends on BLK_DEV_DM
+ select DM_BUFIO
+ select LZ4_COMPRESS
+ select LZ4_DECOMPRESS
+ help
+ This device mapper target presents a block device with
+ deduplication, compression and thin-provisioning.
+
+ To compile this code as a module, choose M here: the module will
+ be called dm-vdo.
+
+ If unsure, say N.
diff --git a/drivers/md/dm-vdo/Makefile b/drivers/md/dm-vdo/Makefile
new file mode 100644
index 000000000000..33e09abc6acd
--- /dev/null
+++ b/drivers/md/dm-vdo/Makefile
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+ccflags-y := -I$(srctree)/$(src) -I$(srctree)/$(src)/indexer
+
+obj-$(CONFIG_DM_VDO) += dm-vdo.o
+
+dm-vdo-objs := \
+ action-manager.o \
+ admin-state.o \
+ block-map.o \
+ completion.o \
+ data-vio.o \
+ dedupe.o \
+ dm-vdo-target.o \
+ dump.o \
+ encodings.o \
+ errors.o \
+ flush.o \
+ funnel-queue.o \
+ funnel-workqueue.o \
+ int-map.o \
+ io-submitter.o \
+ logger.o \
+ logical-zone.o \
+ memory-alloc.o \
+ message-stats.o \
+ murmurhash3.o \
+ packer.o \
+ permassert.o \
+ physical-zone.o \
+ priority-table.o \
+ recovery-journal.o \
+ repair.o \
+ slab-depot.o \
+ status-codes.o \
+ string-utils.o \
+ thread-device.o \
+ thread-registry.o \
+ thread-utils.o \
+ vdo.o \
+ vio.o \
+ wait-queue.o \
+ indexer/chapter-index.o \
+ indexer/config.o \
+ indexer/delta-index.o \
+ indexer/funnel-requestqueue.o \
+ indexer/geometry.o \
+ indexer/index.o \
+ indexer/index-layout.o \
+ indexer/index-page-map.o \
+ indexer/index-session.o \
+ indexer/io-factory.o \
+ indexer/open-chapter.o \
+ indexer/radix-sort.o \
+ indexer/sparse-cache.o \
+ indexer/volume.o \
+ indexer/volume-index.o
diff --git a/drivers/md/dm-vdo/action-manager.c b/drivers/md/dm-vdo/action-manager.c
new file mode 100644
index 000000000000..a0e5e7077d13
--- /dev/null
+++ b/drivers/md/dm-vdo/action-manager.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "action-manager.h"
+
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "admin-state.h"
+#include "completion.h"
+#include "status-codes.h"
+#include "types.h"
+#include "vdo.h"
+
+/**
+ * struct action - An action to be performed in each of a set of zones.
+ * @in_use: Whether this structure is in use.
+ * @operation: The admin operation associated with this action.
+ * @preamble: The method to run on the initiator thread before the action is applied to each zone.
+ * @zone_action: The action to be performed in each zone.
+ * @conclusion: The method to run on the initiator thread after the action is applied to each zone.
+ * @parent: The object to notify when the action is complete.
+ * @context: The action specific context.
+ * @next: The action to perform after this one.
+ */
+struct action {
+ bool in_use;
+ const struct admin_state_code *operation;
+ vdo_action_preamble_fn preamble;
+ vdo_zone_action_fn zone_action;
+ vdo_action_conclusion_fn conclusion;
+ struct vdo_completion *parent;
+ void *context;
+ struct action *next;
+};
+
+/**
+ * struct action_manager - Definition of an action manager.
+ * @completion: The completion for performing actions.
+ * @state: The state of this action manager.
+ * @actions: The two action slots.
+ * @current_action: The current action slot.
+ * @zones: The number of zones in which an action is to be applied.
+ * @scheduler: A function to schedule a default next action.
+ * @get_zone_thread_id: A function to get the id of the thread on which to apply an action to a
+ * zone.
+ * @initiator_thread_id: The ID of the thread on which actions may be initiated.
+ * @context: Opaque data associated with this action manager.
+ * @acting_zone: The zone currently being acted upon.
+ */
+struct action_manager {
+ struct vdo_completion completion;
+ struct admin_state state;
+ struct action actions[2];
+ struct action *current_action;
+ zone_count_t zones;
+ vdo_action_scheduler_fn scheduler;
+ vdo_zone_thread_getter_fn get_zone_thread_id;
+ thread_id_t initiator_thread_id;
+ void *context;
+ zone_count_t acting_zone;
+};
+
+static inline struct action_manager *as_action_manager(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_ACTION_COMPLETION);
+ return container_of(completion, struct action_manager, completion);
+}
+
+/* Implements vdo_action_scheduler_fn. */
+static bool no_default_action(void *context __always_unused)
+{
+ return false;
+}
+
+/* Implements vdo_action_preamble_fn. */
+static void no_preamble(void *context __always_unused, struct vdo_completion *completion)
+{
+ vdo_finish_completion(completion);
+}
+
+/* Implements vdo_action_conclusion_fn. */
+static int no_conclusion(void *context __always_unused)
+{
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_make_action_manager() - Make an action manager.
+ * @zones: The number of zones to which actions will be applied.
+ * @get_zone_thread_id: A function to get the thread id associated with a zone.
+ * @initiator_thread_id: The thread on which actions may be initiated.
+ * @context: The object which holds the per-zone context for the action.
+ * @scheduler: A function to schedule a next action after an action concludes if there is no
+ * pending action (may be NULL).
+ * @vdo: The vdo used to initialize completions.
+ * @manager_ptr: A pointer to hold the new action manager.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_make_action_manager(zone_count_t zones,
+ vdo_zone_thread_getter_fn get_zone_thread_id,
+ thread_id_t initiator_thread_id, void *context,
+ vdo_action_scheduler_fn scheduler, struct vdo *vdo,
+ struct action_manager **manager_ptr)
+{
+ struct action_manager *manager;
+ int result = vdo_allocate(1, struct action_manager, __func__, &manager);
+
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *manager = (struct action_manager) {
+ .zones = zones,
+ .scheduler =
+ ((scheduler == NULL) ? no_default_action : scheduler),
+ .get_zone_thread_id = get_zone_thread_id,
+ .initiator_thread_id = initiator_thread_id,
+ .context = context,
+ };
+
+ manager->actions[0].next = &manager->actions[1];
+ manager->current_action = manager->actions[1].next =
+ &manager->actions[0];
+ vdo_set_admin_state_code(&manager->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ vdo_initialize_completion(&manager->completion, vdo, VDO_ACTION_COMPLETION);
+ *manager_ptr = manager;
+ return VDO_SUCCESS;
+}
+
+const struct admin_state_code *vdo_get_current_manager_operation(struct action_manager *manager)
+{
+ return vdo_get_admin_state_code(&manager->state);
+}
+
+void *vdo_get_current_action_context(struct action_manager *manager)
+{
+ return manager->current_action->in_use ? manager->current_action->context : NULL;
+}
+
+static void finish_action_callback(struct vdo_completion *completion);
+static void apply_to_zone(struct vdo_completion *completion);
+
+static thread_id_t get_acting_zone_thread_id(struct action_manager *manager)
+{
+ return manager->get_zone_thread_id(manager->context, manager->acting_zone);
+}
+
+static void preserve_error(struct vdo_completion *completion)
+{
+ if (completion->parent != NULL)
+ vdo_set_completion_result(completion->parent, completion->result);
+
+ vdo_reset_completion(completion);
+ vdo_run_completion(completion);
+}
+
+static void prepare_for_next_zone(struct action_manager *manager)
+{
+ vdo_prepare_completion_for_requeue(&manager->completion, apply_to_zone,
+ preserve_error,
+ get_acting_zone_thread_id(manager),
+ manager->current_action->parent);
+}
+
+static void prepare_for_conclusion(struct action_manager *manager)
+{
+ vdo_prepare_completion_for_requeue(&manager->completion, finish_action_callback,
+ preserve_error, manager->initiator_thread_id,
+ manager->current_action->parent);
+}
+
+static void apply_to_zone(struct vdo_completion *completion)
+{
+ zone_count_t zone;
+ struct action_manager *manager = as_action_manager(completion);
+
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == get_acting_zone_thread_id(manager)),
+ "%s() called on acting zones's thread", __func__);
+
+ zone = manager->acting_zone++;
+ if (manager->acting_zone == manager->zones) {
+ /*
+ * We are about to apply to the last zone. Once that is finished, we're done, so go
+ * back to the initiator thread and finish up.
+ */
+ prepare_for_conclusion(manager);
+ } else {
+ /* Prepare to come back on the next zone */
+ prepare_for_next_zone(manager);
+ }
+
+ manager->current_action->zone_action(manager->context, zone, completion);
+}
+
+static void handle_preamble_error(struct vdo_completion *completion)
+{
+ /* Skip the zone actions since the preamble failed. */
+ completion->callback = finish_action_callback;
+ preserve_error(completion);
+}
+
+static void launch_current_action(struct action_manager *manager)
+{
+ struct action *action = manager->current_action;
+ int result = vdo_start_operation(&manager->state, action->operation);
+
+ if (result != VDO_SUCCESS) {
+ if (action->parent != NULL)
+ vdo_set_completion_result(action->parent, result);
+
+ /* We aren't going to run the preamble, so don't run the conclusion */
+ action->conclusion = no_conclusion;
+ finish_action_callback(&manager->completion);
+ return;
+ }
+
+ if (action->zone_action == NULL) {
+ prepare_for_conclusion(manager);
+ } else {
+ manager->acting_zone = 0;
+ vdo_prepare_completion_for_requeue(&manager->completion, apply_to_zone,
+ handle_preamble_error,
+ get_acting_zone_thread_id(manager),
+ manager->current_action->parent);
+ }
+
+ action->preamble(manager->context, &manager->completion);
+}
+
+/**
+ * vdo_schedule_default_action() - Attempt to schedule the default action.
+ * @manager: The action manager.
+ *
+ * If the manager is not operating normally, the action will not be scheduled.
+ *
+ * Return: true if an action was scheduled.
+ */
+bool vdo_schedule_default_action(struct action_manager *manager)
+{
+	/* Don't schedule a default action if an operation is in progress or we are not in normal operation. */
+ const struct admin_state_code *code = vdo_get_current_manager_operation(manager);
+
+ return ((code == VDO_ADMIN_STATE_NORMAL_OPERATION) &&
+ manager->scheduler(manager->context));
+}
+
+static void finish_action_callback(struct vdo_completion *completion)
+{
+ bool has_next_action;
+ int result;
+ struct action_manager *manager = as_action_manager(completion);
+ struct action action = *(manager->current_action);
+
+ manager->current_action->in_use = false;
+ manager->current_action = manager->current_action->next;
+
+ /*
+ * We need to check this now to avoid use-after-free issues if running the conclusion or
+ * notifying the parent results in the manager being freed.
+ */
+ has_next_action =
+ (manager->current_action->in_use || vdo_schedule_default_action(manager));
+ result = action.conclusion(manager->context);
+ vdo_finish_operation(&manager->state, VDO_SUCCESS);
+ if (action.parent != NULL)
+ vdo_continue_completion(action.parent, result);
+
+ if (has_next_action)
+ launch_current_action(manager);
+}
+
+/**
+ * vdo_schedule_action() - Schedule an action to be applied to all zones.
+ * @manager: The action manager to schedule the action on.
+ * @preamble: A method to be invoked on the initiator thread once this action is started but before
+ * applying to each zone; may be NULL.
+ * @action: The action to apply to each zone; may be NULL.
+ * @conclusion: A method to be invoked back on the initiator thread once the action has been
+ * applied to all zones; may be NULL.
+ * @parent: The object to notify once the action is complete or if the action cannot be scheduled;
+ * may be NULL.
+ *
+ * The action will be launched immediately if there is no current action, or as soon as the current
+ * action completes. If there is already a pending action, this action will not be scheduled, and,
+ * if it has a parent, that parent will be notified. At least one of the preamble, action, or
+ * conclusion must not be NULL.
+ *
+ * Return: true if the action was scheduled.
+ */
+bool vdo_schedule_action(struct action_manager *manager, vdo_action_preamble_fn preamble,
+ vdo_zone_action_fn action, vdo_action_conclusion_fn conclusion,
+ struct vdo_completion *parent)
+{
+ return vdo_schedule_operation(manager, VDO_ADMIN_STATE_OPERATING, preamble,
+ action, conclusion, parent);
+}
+
+/**
+ * vdo_schedule_operation() - Schedule an operation to be applied to all zones.
+ * @manager: The action manager to schedule the action on.
+ * @operation: The operation this action will perform
+ * @preamble: A method to be invoked on the initiator thread once this action is started but before
+ * applying to each zone; may be NULL.
+ * @action: The action to apply to each zone; may be NULL.
+ * @conclusion: A method to be invoked back on the initiator thread once the action has been
+ * applied to all zones; may be NULL.
+ * @parent: The object to notify once the action is complete or if the action cannot be scheduled;
+ * may be NULL.
+ *
+ * The operation's action will be launched immediately if there is no current action, or as soon as
+ * the current action completes. If there is already a pending action, this operation will not be
+ * scheduled, and, if it has a parent, that parent will be notified. At least one of the preamble,
+ * action, or conclusion must not be NULL.
+ *
+ * Return: true if the action was scheduled.
+ */
+bool vdo_schedule_operation(struct action_manager *manager,
+ const struct admin_state_code *operation,
+ vdo_action_preamble_fn preamble, vdo_zone_action_fn action,
+ vdo_action_conclusion_fn conclusion,
+ struct vdo_completion *parent)
+{
+ return vdo_schedule_operation_with_context(manager, operation, preamble, action,
+ conclusion, NULL, parent);
+}
+
+/**
+ * vdo_schedule_operation_with_context() - Schedule an operation on all zones.
+ * @manager: The action manager to schedule the action on.
+ * @operation: The operation this action will perform.
+ * @preamble: A method to be invoked on the initiator thread once this action is started but before
+ * applying to each zone; may be NULL.
+ * @action: The action to apply to each zone; may be NULL.
+ * @conclusion: A method to be invoked back on the initiator thread once the action has been
+ * applied to all zones; may be NULL.
+ * @context: An action-specific context which may be retrieved via
+ * vdo_get_current_action_context(); may be NULL.
+ * @parent: The object to notify once the action is complete or if the action cannot be scheduled;
+ * may be NULL.
+ *
+ * The operation's action will be launched immediately if there is no current action, or as soon as
+ * the current action completes. If there is already a pending action, this operation will not be
+ * scheduled, and, if it has a parent, that parent will be notified. At least one of the preamble,
+ * action, or conclusion must not be NULL.
+ *
+ * Return: true if the action was scheduled.
+ */
+bool vdo_schedule_operation_with_context(struct action_manager *manager,
+ const struct admin_state_code *operation,
+ vdo_action_preamble_fn preamble,
+ vdo_zone_action_fn action,
+ vdo_action_conclusion_fn conclusion,
+ void *context, struct vdo_completion *parent)
+{
+ struct action *current_action;
+
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == manager->initiator_thread_id),
+ "action initiated from correct thread");
+ if (!manager->current_action->in_use) {
+ current_action = manager->current_action;
+ } else if (!manager->current_action->next->in_use) {
+ current_action = manager->current_action->next;
+ } else {
+ if (parent != NULL)
+ vdo_continue_completion(parent, VDO_COMPONENT_BUSY);
+
+ return false;
+ }
+
+ *current_action = (struct action) {
+ .in_use = true,
+ .operation = operation,
+ .preamble = (preamble == NULL) ? no_preamble : preamble,
+ .zone_action = action,
+ .conclusion = (conclusion == NULL) ? no_conclusion : conclusion,
+ .context = context,
+ .parent = parent,
+ .next = current_action->next,
+ };
+
+ if (current_action == manager->current_action)
+ launch_current_action(manager);
+
+ return true;
+}
diff --git a/drivers/md/dm-vdo/action-manager.h b/drivers/md/dm-vdo/action-manager.h
new file mode 100644
index 000000000000..b0a8d3ddf3db
--- /dev/null
+++ b/drivers/md/dm-vdo/action-manager.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_ACTION_MANAGER_H
+#define VDO_ACTION_MANAGER_H
+
+#include "admin-state.h"
+#include "types.h"
+
+/*
+ * An action_manager provides a generic mechanism for applying actions to multi-zone entities (such
+ * as the block map or slab depot). Each action manager is tied to a specific context for which it
+ * manages actions. The manager ensures that only one action is active on that context at a time,
+ * and supports at most one pending action. Calls to schedule an action when there is already a
+ * pending action will result in VDO_COMPONENT_BUSY errors. Actions may only be submitted to the
+ * action manager from a single thread (which thread is determined when the action manager is
+ * constructed).
+ *
+ * A scheduled action consists of four components:
+ *
+ * preamble
+ * an optional method to be run on the initiator thread before applying the action to all zones
+ * zone_action
+ * an optional method to be applied to each of the zones
+ * conclusion
+ * an optional method to be run on the initiator thread once the per-zone method has been
+ * applied to all zones
+ * parent
+ * an optional completion to be finished once the conclusion is done
+ *
+ * At least one of the three methods must be provided.
+ */
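+
+/*
+ * As an illustrative sketch only (the component, its fields, and the callback
+ * names below are hypothetical, not part of this patch): a component with
+ * per-zone state might suspend all of its zones like so:
+ *
+ *	static void suspend_zone(void *context, zone_count_t zone_number,
+ *				 struct vdo_completion *parent)
+ *	{
+ *		struct my_component *component = context;
+ *
+ *		drain_my_zone(&component->zones[zone_number], parent);
+ *	}
+ *
+ *	static int finish_suspend(void *context)
+ *	{
+ *		return VDO_SUCCESS;
+ *	}
+ *
+ *	vdo_schedule_operation(component->manager, VDO_ADMIN_STATE_SUSPENDING,
+ *			       NULL, suspend_zone, finish_suspend, parent);
+ */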
+
+/*
+ * A function which is to be applied asynchronously to a set of zones.
+ * @context: The object which holds the per-zone context for the action
+ * @zone_number: The number of the zone to which the action is being applied
+ * @parent: The object to notify when the action is complete
+ */
+typedef void (*vdo_zone_action_fn)(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent);
+
+/*
+ * A function which is to be applied asynchronously on an action manager's initiator thread as the
+ * preamble of an action.
+ * @context: The object which holds the per-zone context for the action
+ * @parent: The object to notify when the action is complete
+ */
+typedef void (*vdo_action_preamble_fn)(void *context, struct vdo_completion *parent);
+
+/*
+ * A function which will run on the action manager's initiator thread as the conclusion of an
+ * action.
+ * @context: The object which holds the per-zone context for the action
+ *
+ * Return: VDO_SUCCESS or an error
+ */
+typedef int (*vdo_action_conclusion_fn)(void *context);
+
+/*
+ * A function to schedule an action.
+ * @context: The object which holds the per-zone context for the action
+ *
+ * Return: true if an action was scheduled
+ */
+typedef bool (*vdo_action_scheduler_fn)(void *context);
+
+/*
+ * A function to get the id of the thread associated with a given zone.
+ * @context: The action context
+ * @zone_number: The number of the zone for which the thread ID is desired
+ */
+typedef thread_id_t (*vdo_zone_thread_getter_fn)(void *context, zone_count_t zone_number);
+
+struct action_manager;
+
+int __must_check vdo_make_action_manager(zone_count_t zones,
+ vdo_zone_thread_getter_fn get_zone_thread_id,
+ thread_id_t initiator_thread_id, void *context,
+ vdo_action_scheduler_fn scheduler,
+ struct vdo *vdo,
+ struct action_manager **manager_ptr);
+
+const struct admin_state_code *__must_check
+vdo_get_current_manager_operation(struct action_manager *manager);
+
+void * __must_check vdo_get_current_action_context(struct action_manager *manager);
+
+bool vdo_schedule_default_action(struct action_manager *manager);
+
+bool vdo_schedule_action(struct action_manager *manager, vdo_action_preamble_fn preamble,
+ vdo_zone_action_fn action, vdo_action_conclusion_fn conclusion,
+ struct vdo_completion *parent);
+
+bool vdo_schedule_operation(struct action_manager *manager,
+ const struct admin_state_code *operation,
+ vdo_action_preamble_fn preamble, vdo_zone_action_fn action,
+ vdo_action_conclusion_fn conclusion,
+ struct vdo_completion *parent);
+
+bool vdo_schedule_operation_with_context(struct action_manager *manager,
+ const struct admin_state_code *operation,
+ vdo_action_preamble_fn preamble,
+ vdo_zone_action_fn action,
+ vdo_action_conclusion_fn conclusion,
+ void *context, struct vdo_completion *parent);
+
+#endif /* VDO_ACTION_MANAGER_H */
diff --git a/drivers/md/dm-vdo/admin-state.c b/drivers/md/dm-vdo/admin-state.c
new file mode 100644
index 000000000000..3f9dba525154
--- /dev/null
+++ b/drivers/md/dm-vdo/admin-state.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "admin-state.h"
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "completion.h"
+#include "types.h"
+
+static const struct admin_state_code VDO_CODE_NORMAL_OPERATION = {
+ .name = "VDO_ADMIN_STATE_NORMAL_OPERATION",
+ .normal = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_NORMAL_OPERATION = &VDO_CODE_NORMAL_OPERATION;
+static const struct admin_state_code VDO_CODE_OPERATING = {
+ .name = "VDO_ADMIN_STATE_OPERATING",
+ .normal = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_OPERATING = &VDO_CODE_OPERATING;
+static const struct admin_state_code VDO_CODE_FORMATTING = {
+ .name = "VDO_ADMIN_STATE_FORMATTING",
+ .operating = true,
+ .loading = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_FORMATTING = &VDO_CODE_FORMATTING;
+static const struct admin_state_code VDO_CODE_PRE_LOADING = {
+ .name = "VDO_ADMIN_STATE_PRE_LOADING",
+ .operating = true,
+ .loading = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_PRE_LOADING = &VDO_CODE_PRE_LOADING;
+static const struct admin_state_code VDO_CODE_PRE_LOADED = {
+ .name = "VDO_ADMIN_STATE_PRE_LOADED",
+};
+const struct admin_state_code *VDO_ADMIN_STATE_PRE_LOADED = &VDO_CODE_PRE_LOADED;
+static const struct admin_state_code VDO_CODE_LOADING = {
+ .name = "VDO_ADMIN_STATE_LOADING",
+ .normal = true,
+ .operating = true,
+ .loading = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_LOADING = &VDO_CODE_LOADING;
+static const struct admin_state_code VDO_CODE_LOADING_FOR_RECOVERY = {
+ .name = "VDO_ADMIN_STATE_LOADING_FOR_RECOVERY",
+ .operating = true,
+ .loading = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_LOADING_FOR_RECOVERY =
+ &VDO_CODE_LOADING_FOR_RECOVERY;
+static const struct admin_state_code VDO_CODE_LOADING_FOR_REBUILD = {
+ .name = "VDO_ADMIN_STATE_LOADING_FOR_REBUILD",
+ .operating = true,
+ .loading = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_LOADING_FOR_REBUILD = &VDO_CODE_LOADING_FOR_REBUILD;
+static const struct admin_state_code VDO_CODE_WAITING_FOR_RECOVERY = {
+ .name = "VDO_ADMIN_STATE_WAITING_FOR_RECOVERY",
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_WAITING_FOR_RECOVERY =
+ &VDO_CODE_WAITING_FOR_RECOVERY;
+static const struct admin_state_code VDO_CODE_NEW = {
+ .name = "VDO_ADMIN_STATE_NEW",
+ .quiescent = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_NEW = &VDO_CODE_NEW;
+static const struct admin_state_code VDO_CODE_INITIALIZED = {
+ .name = "VDO_ADMIN_STATE_INITIALIZED",
+};
+const struct admin_state_code *VDO_ADMIN_STATE_INITIALIZED = &VDO_CODE_INITIALIZED;
+static const struct admin_state_code VDO_CODE_RECOVERING = {
+ .name = "VDO_ADMIN_STATE_RECOVERING",
+ .draining = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_RECOVERING = &VDO_CODE_RECOVERING;
+static const struct admin_state_code VDO_CODE_REBUILDING = {
+ .name = "VDO_ADMIN_STATE_REBUILDING",
+ .draining = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_REBUILDING = &VDO_CODE_REBUILDING;
+static const struct admin_state_code VDO_CODE_SAVING = {
+ .name = "VDO_ADMIN_STATE_SAVING",
+ .draining = true,
+ .quiescing = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SAVING = &VDO_CODE_SAVING;
+static const struct admin_state_code VDO_CODE_SAVED = {
+ .name = "VDO_ADMIN_STATE_SAVED",
+ .quiescent = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SAVED = &VDO_CODE_SAVED;
+static const struct admin_state_code VDO_CODE_SCRUBBING = {
+ .name = "VDO_ADMIN_STATE_SCRUBBING",
+ .draining = true,
+ .loading = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SCRUBBING = &VDO_CODE_SCRUBBING;
+static const struct admin_state_code VDO_CODE_SAVE_FOR_SCRUBBING = {
+ .name = "VDO_ADMIN_STATE_SAVE_FOR_SCRUBBING",
+ .draining = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SAVE_FOR_SCRUBBING = &VDO_CODE_SAVE_FOR_SCRUBBING;
+static const struct admin_state_code VDO_CODE_STOPPING = {
+ .name = "VDO_ADMIN_STATE_STOPPING",
+ .draining = true,
+ .quiescing = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_STOPPING = &VDO_CODE_STOPPING;
+static const struct admin_state_code VDO_CODE_STOPPED = {
+ .name = "VDO_ADMIN_STATE_STOPPED",
+ .quiescent = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_STOPPED = &VDO_CODE_STOPPED;
+static const struct admin_state_code VDO_CODE_SUSPENDING = {
+ .name = "VDO_ADMIN_STATE_SUSPENDING",
+ .draining = true,
+ .quiescing = true,
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SUSPENDING = &VDO_CODE_SUSPENDING;
+static const struct admin_state_code VDO_CODE_SUSPENDED = {
+ .name = "VDO_ADMIN_STATE_SUSPENDED",
+ .quiescent = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SUSPENDED = &VDO_CODE_SUSPENDED;
+static const struct admin_state_code VDO_CODE_SUSPENDED_OPERATION = {
+ .name = "VDO_ADMIN_STATE_SUSPENDED_OPERATION",
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_SUSPENDED_OPERATION = &VDO_CODE_SUSPENDED_OPERATION;
+static const struct admin_state_code VDO_CODE_RESUMING = {
+ .name = "VDO_ADMIN_STATE_RESUMING",
+ .operating = true,
+};
+const struct admin_state_code *VDO_ADMIN_STATE_RESUMING = &VDO_CODE_RESUMING;
+
+/**
+ * get_next_state() - Determine the state which should be set after a given operation completes
+ * based on the operation and the current state.
+ * @operation: The operation to be started.
+ *
+ * Return: The state to set when the operation completes or NULL if the operation can not be
+ * started in the current state.
+ */
+static const struct admin_state_code *get_next_state(const struct admin_state *state,
+ const struct admin_state_code *operation)
+{
+ const struct admin_state_code *code = vdo_get_admin_state_code(state);
+
+ if (code->operating)
+ return NULL;
+
+ if (operation == VDO_ADMIN_STATE_SAVING)
+ return (code == VDO_ADMIN_STATE_NORMAL_OPERATION ? VDO_ADMIN_STATE_SAVED : NULL);
+
+ if (operation == VDO_ADMIN_STATE_SUSPENDING) {
+ return (code == VDO_ADMIN_STATE_NORMAL_OPERATION
+ ? VDO_ADMIN_STATE_SUSPENDED
+ : NULL);
+ }
+
+ if (operation == VDO_ADMIN_STATE_STOPPING)
+ return (code == VDO_ADMIN_STATE_NORMAL_OPERATION ? VDO_ADMIN_STATE_STOPPED : NULL);
+
+ if (operation == VDO_ADMIN_STATE_PRE_LOADING)
+ return (code == VDO_ADMIN_STATE_INITIALIZED ? VDO_ADMIN_STATE_PRE_LOADED : NULL);
+
+ if (operation == VDO_ADMIN_STATE_SUSPENDED_OPERATION) {
+ return (((code == VDO_ADMIN_STATE_SUSPENDED) ||
+ (code == VDO_ADMIN_STATE_SAVED)) ? code : NULL);
+ }
+
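+	/* Any other operation returns the state machine to normal operation when it completes. */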
+ return VDO_ADMIN_STATE_NORMAL_OPERATION;
+}
+
+/**
+ * vdo_finish_operation() - Finish the current operation.
+ *
+ * Will notify the operation waiter if there is one. This method should be used for operations
+ * started with vdo_start_operation(). For operations which were started with vdo_start_draining(),
+ * use vdo_finish_draining() instead.
+ *
+ * Return: true if there was an operation to finish.
+ */
+bool vdo_finish_operation(struct admin_state *state, int result)
+{
+ if (!vdo_get_admin_state_code(state)->operating)
+ return false;
+
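+	/*
+	 * If the operation completes while its initiator is still running,
+	 * just record that fact; begin_operation() will finish the operation
+	 * once the initiator returns.
+	 */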
+ state->complete = state->starting;
+ if (state->waiter != NULL)
+ vdo_set_completion_result(state->waiter, result);
+
+ if (!state->starting) {
+ vdo_set_admin_state_code(state, state->next_state);
+ if (state->waiter != NULL)
+ vdo_launch_completion(vdo_forget(state->waiter));
+ }
+
+ return true;
+}
+
+/**
+ * begin_operation() - Begin an operation if it may be started given the current state.
+ * @waiter: A completion to notify when the operation is complete; may be NULL.
+ * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check begin_operation(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter,
+ vdo_admin_initiator_fn initiator)
+{
+ int result;
+ const struct admin_state_code *next_state = get_next_state(state, operation);
+
+ if (next_state == NULL) {
+ result = vdo_log_error_strerror(VDO_INVALID_ADMIN_STATE,
+ "Can't start %s from %s",
+ operation->name,
+ vdo_get_admin_state_code(state)->name);
+ } else if (state->waiter != NULL) {
+ result = vdo_log_error_strerror(VDO_COMPONENT_BUSY,
+ "Can't start %s with extant waiter",
+ operation->name);
+ } else {
+ state->waiter = waiter;
+ state->next_state = next_state;
+ vdo_set_admin_state_code(state, operation);
+ if (initiator != NULL) {
+ state->starting = true;
+ initiator(state);
+ state->starting = false;
+ if (state->complete)
+ vdo_finish_operation(state, VDO_SUCCESS);
+ }
+
+ return VDO_SUCCESS;
+ }
+
+ if (waiter != NULL)
+ vdo_continue_completion(waiter, result);
+
+ return result;
+}
+
+/**
+ * start_operation() - Start an operation if it may be started given the current state.
+ * @waiter: A completion to notify when the operation is complete.
+ * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ *
+ * Return: true if the operation was started.
+ */
+static inline bool __must_check start_operation(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter,
+ vdo_admin_initiator_fn initiator)
+{
+ return (begin_operation(state, operation, waiter, initiator) == VDO_SUCCESS);
+}
+
+/**
+ * check_code() - Check the result of a state validation.
+ * @valid: true if the code is of an appropriate type.
+ * @code: The code which failed to be of the correct type.
+ * @what: What the code failed to be, for logging.
+ * @waiter: The completion to notify of the error; may be NULL.
+ *
+ * If the check fails, log an invalid state error and, if there is a waiter, notify it.
+ *
+ * Return: The result of the check.
+ */
+static bool check_code(bool valid, const struct admin_state_code *code, const char *what,
+ struct vdo_completion *waiter)
+{
+ int result;
+
+ if (valid)
+ return true;
+
+ result = vdo_log_error_strerror(VDO_INVALID_ADMIN_STATE,
+ "%s is not a %s", code->name, what);
+ if (waiter != NULL)
+ vdo_continue_completion(waiter, result);
+
+ return false;
+}
+
+/**
+ * assert_vdo_drain_operation() - Check that an operation is a drain.
+ * @waiter: The completion to finish with an error if the operation is not a drain.
+ *
+ * Return: true if the specified operation is a drain.
+ */
+static bool __must_check assert_vdo_drain_operation(const struct admin_state_code *operation,
+ struct vdo_completion *waiter)
+{
+ return check_code(operation->draining, operation, "drain operation", waiter);
+}
+
+/**
+ * vdo_start_draining() - Initiate a drain operation if the current state permits it.
+ * @operation: The type of drain to initiate.
+ * @waiter: The completion to notify when the drain is complete.
+ * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ *
+ * Return: true if the drain was initiated; if not, the waiter will be notified.
+ */
+bool vdo_start_draining(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter, vdo_admin_initiator_fn initiator)
+{
+ const struct admin_state_code *code = vdo_get_admin_state_code(state);
+
+ if (!assert_vdo_drain_operation(operation, waiter))
+ return false;
+
+ if (code->quiescent) {
+ vdo_launch_completion(waiter);
+ return false;
+ }
+
+ if (!code->normal) {
+ vdo_log_error_strerror(VDO_INVALID_ADMIN_STATE, "can't start %s from %s",
+ operation->name, code->name);
+ vdo_continue_completion(waiter, VDO_INVALID_ADMIN_STATE);
+ return false;
+ }
+
+ return start_operation(state, operation, waiter, initiator);
+}
+
+/**
+ * vdo_finish_draining() - Finish a drain operation if one was in progress.
+ *
+ * Return: true if the state was draining; will notify the waiter if so.
+ */
+bool vdo_finish_draining(struct admin_state *state)
+{
+ return vdo_finish_draining_with_result(state, VDO_SUCCESS);
+}
+
+/**
+ * vdo_finish_draining_with_result() - Finish a drain operation with a status code.
+ *
+ * Return: true if the state was draining; will notify the waiter if so.
+ */
+bool vdo_finish_draining_with_result(struct admin_state *state, int result)
+{
+ return (vdo_is_state_draining(state) && vdo_finish_operation(state, result));
+}
+
+/**
+ * vdo_assert_load_operation() - Check that an operation is a load.
+ * @waiter: The completion to finish with an error if the operation is not a load.
+ *
+ * Return: true if the specified operation is a load.
+ */
+bool vdo_assert_load_operation(const struct admin_state_code *operation,
+ struct vdo_completion *waiter)
+{
+ return check_code(operation->loading, operation, "load operation", waiter);
+}
+
+/**
+ * vdo_start_loading() - Initiate a load operation if the current state permits it.
+ * @operation: The type of load to initiate.
+ * @waiter: The completion to notify when the load is complete (may be NULL).
+ * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ *
+ * Return: true if the load was initiated; if not, the waiter will be notified.
+ */
+bool vdo_start_loading(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter, vdo_admin_initiator_fn initiator)
+{
+ return (vdo_assert_load_operation(operation, waiter) &&
+ start_operation(state, operation, waiter, initiator));
+}
+
+/**
+ * vdo_finish_loading() - Finish a load operation if one was in progress.
+ *
+ * Return: true if the state was loading; will notify the waiter if so.
+ */
+bool vdo_finish_loading(struct admin_state *state)
+{
+ return vdo_finish_loading_with_result(state, VDO_SUCCESS);
+}
+
+/**
+ * vdo_finish_loading_with_result() - Finish a load operation with a status code.
+ * @result: The result of the load operation.
+ *
+ * Return: true if the state was loading; will notify the waiter if so.
+ */
+bool vdo_finish_loading_with_result(struct admin_state *state, int result)
+{
+ return (vdo_is_state_loading(state) && vdo_finish_operation(state, result));
+}
+
+/**
+ * assert_vdo_resume_operation() - Check whether an admin_state_code is a resume operation.
+ * @waiter: The completion to notify if the operation is not a resume operation; may be NULL.
+ *
+ * Return: true if the code is a resume operation.
+ */
+static bool __must_check assert_vdo_resume_operation(const struct admin_state_code *operation,
+ struct vdo_completion *waiter)
+{
+ return check_code(operation == VDO_ADMIN_STATE_RESUMING, operation,
+ "resume operation", waiter);
+}
+
+/**
+ * vdo_start_resuming() - Initiate a resume operation if the current state permits it.
+ * @operation: The type of resume to start.
+ * @waiter: The completion to notify when the resume is complete (may be NULL).
+ * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ *
+ * Return: true if the resume was initiated; if not, the waiter will be notified.
+ */
+bool vdo_start_resuming(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter, vdo_admin_initiator_fn initiator)
+{
+ return (assert_vdo_resume_operation(operation, waiter) &&
+ start_operation(state, operation, waiter, initiator));
+}
+
+/**
+ * vdo_finish_resuming() - Finish a resume operation if one was in progress.
+ *
+ * Return: true if the state was resuming; will notify the waiter if so.
+ */
+bool vdo_finish_resuming(struct admin_state *state)
+{
+ return vdo_finish_resuming_with_result(state, VDO_SUCCESS);
+}
+
+/**
+ * vdo_finish_resuming_with_result() - Finish a resume operation with a status code.
+ * @result: The result of the resume operation.
+ *
+ * Return: true if the state was resuming; will notify the waiter if so.
+ */
+bool vdo_finish_resuming_with_result(struct admin_state *state, int result)
+{
+ return (vdo_is_state_resuming(state) && vdo_finish_operation(state, result));
+}
+
+/**
+ * vdo_resume_if_quiescent() - Change the state to normal operation if the current state is
+ * quiescent.
+ *
+ * Return: VDO_SUCCESS if the state resumed, VDO_INVALID_ADMIN_STATE otherwise.
+ */
+int vdo_resume_if_quiescent(struct admin_state *state)
+{
+ if (!vdo_is_state_quiescent(state))
+ return VDO_INVALID_ADMIN_STATE;
+
+ vdo_set_admin_state_code(state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_start_operation() - Attempt to start an operation.
+ *
+ * Return: VDO_SUCCESS if the operation was started, VDO_INVALID_ADMIN_STATE if not.
+ */
+int vdo_start_operation(struct admin_state *state,
+ const struct admin_state_code *operation)
+{
+ return vdo_start_operation_with_waiter(state, operation, NULL, NULL);
+}
+
+/**
+ * vdo_start_operation_with_waiter() - Attempt to start an operation.
+ * @waiter: The completion to notify when the operation completes or fails to start; may be NULL.
+ * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ *
+ * Return: VDO_SUCCESS if the operation was started, VDO_INVALID_ADMIN_STATE if not.
+ */
+int vdo_start_operation_with_waiter(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter,
+ vdo_admin_initiator_fn initiator)
+{
+ return (check_code(operation->operating, operation, "operation", waiter) ?
+ begin_operation(state, operation, waiter, initiator) :
+ VDO_INVALID_ADMIN_STATE);
+}
diff --git a/drivers/md/dm-vdo/admin-state.h b/drivers/md/dm-vdo/admin-state.h
new file mode 100644
index 000000000000..a7d6ac2c30a6
--- /dev/null
+++ b/drivers/md/dm-vdo/admin-state.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_ADMIN_STATE_H
+#define VDO_ADMIN_STATE_H
+
+#include "completion.h"
+#include "types.h"
+
+struct admin_state_code {
+ const char *name;
+ /* Normal operation, data_vios may be active */
+ bool normal;
+ /* I/O is draining, new requests should not start */
+ bool draining;
+ /* This is a startup time operation */
+ bool loading;
+ /* The next state will be quiescent */
+ bool quiescing;
+ /* The VDO is quiescent, there should be no I/O */
+ bool quiescent;
+ /* Whether an operation is in progress and so no other operation may be started */
+ bool operating;
+};
+
+extern const struct admin_state_code *VDO_ADMIN_STATE_NORMAL_OPERATION;
+extern const struct admin_state_code *VDO_ADMIN_STATE_OPERATING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_FORMATTING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_PRE_LOADING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_PRE_LOADED;
+extern const struct admin_state_code *VDO_ADMIN_STATE_LOADING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_LOADING_FOR_RECOVERY;
+extern const struct admin_state_code *VDO_ADMIN_STATE_LOADING_FOR_REBUILD;
+extern const struct admin_state_code *VDO_ADMIN_STATE_WAITING_FOR_RECOVERY;
+extern const struct admin_state_code *VDO_ADMIN_STATE_NEW;
+extern const struct admin_state_code *VDO_ADMIN_STATE_INITIALIZED;
+extern const struct admin_state_code *VDO_ADMIN_STATE_RECOVERING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_REBUILDING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SAVING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SAVED;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SCRUBBING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SAVE_FOR_SCRUBBING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_STOPPING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_STOPPED;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SUSPENDING;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SUSPENDED;
+extern const struct admin_state_code *VDO_ADMIN_STATE_SUSPENDED_OPERATION;
+extern const struct admin_state_code *VDO_ADMIN_STATE_RESUMING;
+
+struct admin_state {
+ const struct admin_state_code *current_state;
+ /* The next administrative state (when the current operation finishes) */
+ const struct admin_state_code *next_state;
+ /* A completion waiting on a state change */
+ struct vdo_completion *waiter;
+ /* Whether an operation is being initiated */
+ bool starting;
+ /* Whether an operation has completed in the initiator */
+ bool complete;
+};
+
+/**
+ * typedef vdo_admin_initiator_fn - A method to be called once an admin operation may be initiated.
+ */
+typedef void (*vdo_admin_initiator_fn)(struct admin_state *state);
+
+static inline const struct admin_state_code * __must_check
+vdo_get_admin_state_code(const struct admin_state *state)
+{
+ return READ_ONCE(state->current_state);
+}
+
+/**
+ * vdo_set_admin_state_code() - Set the current admin state code.
+ *
+ * This function should be used primarily for initialization and by admin_state internals. Most uses
+ * should go through the operation interfaces.
+ */
+static inline void vdo_set_admin_state_code(struct admin_state *state,
+ const struct admin_state_code *code)
+{
+ WRITE_ONCE(state->current_state, code);
+}
+
+static inline bool __must_check vdo_is_state_normal(const struct admin_state *state)
+{
+ return vdo_get_admin_state_code(state)->normal;
+}
+
+static inline bool __must_check vdo_is_state_suspending(const struct admin_state *state)
+{
+ return (vdo_get_admin_state_code(state) == VDO_ADMIN_STATE_SUSPENDING);
+}
+
+static inline bool __must_check vdo_is_state_saving(const struct admin_state *state)
+{
+ return (vdo_get_admin_state_code(state) == VDO_ADMIN_STATE_SAVING);
+}
+
+static inline bool __must_check vdo_is_state_saved(const struct admin_state *state)
+{
+ return (vdo_get_admin_state_code(state) == VDO_ADMIN_STATE_SAVED);
+}
+
+static inline bool __must_check vdo_is_state_draining(const struct admin_state *state)
+{
+ return vdo_get_admin_state_code(state)->draining;
+}
+
+static inline bool __must_check vdo_is_state_loading(const struct admin_state *state)
+{
+ return vdo_get_admin_state_code(state)->loading;
+}
+
+static inline bool __must_check vdo_is_state_resuming(const struct admin_state *state)
+{
+ return (vdo_get_admin_state_code(state) == VDO_ADMIN_STATE_RESUMING);
+}
+
+static inline bool __must_check vdo_is_state_clean_load(const struct admin_state *state)
+{
+ const struct admin_state_code *code = vdo_get_admin_state_code(state);
+
+ return ((code == VDO_ADMIN_STATE_FORMATTING) || (code == VDO_ADMIN_STATE_LOADING));
+}
+
+static inline bool __must_check vdo_is_state_quiescing(const struct admin_state *state)
+{
+ return vdo_get_admin_state_code(state)->quiescing;
+}
+
+static inline bool __must_check vdo_is_state_quiescent(const struct admin_state *state)
+{
+ return vdo_get_admin_state_code(state)->quiescent;
+}
+
+bool __must_check vdo_assert_load_operation(const struct admin_state_code *operation,
+ struct vdo_completion *waiter);
+
+bool vdo_start_loading(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter, vdo_admin_initiator_fn initiator);
+
+bool vdo_finish_loading(struct admin_state *state);
+
+bool vdo_finish_loading_with_result(struct admin_state *state, int result);
+
+bool vdo_start_resuming(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter, vdo_admin_initiator_fn initiator);
+
+bool vdo_finish_resuming(struct admin_state *state);
+
+bool vdo_finish_resuming_with_result(struct admin_state *state, int result);
+
+int vdo_resume_if_quiescent(struct admin_state *state);
+
+bool vdo_start_draining(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter, vdo_admin_initiator_fn initiator);
+
+bool vdo_finish_draining(struct admin_state *state);
+
+bool vdo_finish_draining_with_result(struct admin_state *state, int result);
+
+int vdo_start_operation(struct admin_state *state,
+ const struct admin_state_code *operation);
+
+int vdo_start_operation_with_waiter(struct admin_state *state,
+ const struct admin_state_code *operation,
+ struct vdo_completion *waiter,
+ vdo_admin_initiator_fn initiator);
+
+bool vdo_finish_operation(struct admin_state *state, int result);
+
+#endif /* VDO_ADMIN_STATE_H */
diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c
new file mode 100644
index 000000000000..a0a7c1bd634e
--- /dev/null
+++ b/drivers/md/dm-vdo/block-map.c
@@ -0,0 +1,3318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "block-map.h"
+
+#include <linux/bio.h>
+#include <linux/ratelimit.h>
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "action-manager.h"
+#include "admin-state.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "encodings.h"
+#include "io-submitter.h"
+#include "physical-zone.h"
+#include "recovery-journal.h"
+#include "slab-depot.h"
+#include "status-codes.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+/**
+ * DOC: Block map eras
+ *
+ * The block map era, or maximum age, is used as follows:
+ *
+ * Each block map page, when dirty, records the earliest recovery journal block sequence number of
+ * the changes reflected in that dirty block. Sequence numbers are classified into eras: every
+ * @maximum_age sequence numbers, we switch to a new era. Block map pages are assigned to eras
+ * according to the sequence number they record.
+ *
+ * In the current (newest) era, block map pages are not written unless there is cache pressure. In
+ * the next oldest era, each time a new journal block is written, 1/@maximum_age of the pages in
+ * this era are issued for write. In all older eras, pages are issued for write immediately.
+ */
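+
+/*
+ * Put another way (an illustrative sketch, not code from this file): a dirty
+ * page that recorded recovery journal sequence number "seq" belongs to era
+ * (seq / maximum_age). Only the two newest eras get any write-back leniency;
+ * pages in all older eras are written out immediately.
+ */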
+
+struct page_descriptor {
+ root_count_t root_index;
+ height_t height;
+ page_number_t page_index;
+ slot_number_t slot;
+} __packed;
+
+union page_key {
+ struct page_descriptor descriptor;
+ u64 key;
+};
+
+struct write_if_not_dirtied_context {
+ struct block_map_zone *zone;
+ u8 generation;
+};
+
+struct block_map_tree_segment {
+ struct tree_page *levels[VDO_BLOCK_MAP_TREE_HEIGHT];
+};
+
+struct block_map_tree {
+ struct block_map_tree_segment *segments;
+};
+
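+/* A forest holds the complete collection of block map trees, one per tree root. */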
+struct forest {
+ struct block_map *map;
+ size_t segments;
+ struct boundary *boundaries;
+ struct tree_page **pages;
+ struct block_map_tree trees[];
+};
+
+struct cursor_level {
+ page_number_t page_index;
+ slot_number_t slot;
+};
+
+struct cursors;
+
+struct cursor {
+ struct vdo_waiter waiter;
+ struct block_map_tree *tree;
+ height_t height;
+ struct cursors *parent;
+ struct boundary boundary;
+ struct cursor_level levels[VDO_BLOCK_MAP_TREE_HEIGHT];
+ struct pooled_vio *vio;
+};
+
+struct cursors {
+ struct block_map_zone *zone;
+ struct vio_pool *pool;
+ vdo_entry_callback_fn entry_callback;
+ struct vdo_completion *completion;
+ root_count_t active_roots;
+ struct cursor cursors[];
+};
+
+static const physical_block_number_t NO_PAGE = 0xFFFFFFFFFFFFFFFF;
+
+/* Used to indicate that the page holding the location of a tree root has been "loaded". */
+static const physical_block_number_t VDO_INVALID_PBN = 0xFFFFFFFFFFFFFFFF;
+
+const struct block_map_entry UNMAPPED_BLOCK_MAP_ENTRY = {
+ .mapping_state = VDO_MAPPING_STATE_UNMAPPED & 0x0F,
+ .pbn_high_nibble = 0,
+ .pbn_low_word = __cpu_to_le32(VDO_ZERO_BLOCK & UINT_MAX),
+};
+
+#define LOG_INTERVAL 4000
+#define DISPLAY_INTERVAL 100000
+
+/*
+ * For adjusting VDO page cache statistic fields which are only mutated on the logical zone thread.
+ * Prevents any compiler shenanigans from affecting other threads reading those stats.
+ */
+#define ADD_ONCE(value, delta) WRITE_ONCE(value, (value) + (delta))
+
+static inline bool is_dirty(const struct page_info *info)
+{
+ return info->state == PS_DIRTY;
+}
+
+static inline bool is_present(const struct page_info *info)
+{
+ return (info->state == PS_RESIDENT) || (info->state == PS_DIRTY);
+}
+
+static inline bool is_in_flight(const struct page_info *info)
+{
+ return (info->state == PS_INCOMING) || (info->state == PS_OUTGOING);
+}
+
+static inline bool is_incoming(const struct page_info *info)
+{
+ return info->state == PS_INCOMING;
+}
+
+static inline bool is_outgoing(const struct page_info *info)
+{
+ return info->state == PS_OUTGOING;
+}
+
+static inline bool is_valid(const struct page_info *info)
+{
+ return is_present(info) || is_outgoing(info);
+}
+
+static char *get_page_buffer(struct page_info *info)
+{
+ struct vdo_page_cache *cache = info->cache;
+
+ return &cache->pages[(info - cache->infos) * VDO_BLOCK_SIZE];
+}
+
+static inline struct vdo_page_completion *page_completion_from_waiter(struct vdo_waiter *waiter)
+{
+ struct vdo_page_completion *completion;
+
+ if (waiter == NULL)
+ return NULL;
+
+ completion = container_of(waiter, struct vdo_page_completion, waiter);
+ vdo_assert_completion_type(&completion->completion, VDO_PAGE_COMPLETION);
+ return completion;
+}
+
+/**
+ * initialize_info() - Initialize all page info structures and put them on the free list.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int initialize_info(struct vdo_page_cache *cache)
+{
+ struct page_info *info;
+
+ INIT_LIST_HEAD(&cache->free_list);
+ for (info = cache->infos; info < cache->infos + cache->page_count; info++) {
+ int result;
+
+ info->cache = cache;
+ info->state = PS_FREE;
+ info->pbn = NO_PAGE;
+
+ result = create_metadata_vio(cache->vdo, VIO_TYPE_BLOCK_MAP,
+ VIO_PRIORITY_METADATA, info,
+ get_page_buffer(info), &info->vio);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* The thread ID should never change. */
+ info->vio->completion.callback_thread_id = cache->zone->thread_id;
+
+ INIT_LIST_HEAD(&info->state_entry);
+ list_add_tail(&info->state_entry, &cache->free_list);
+ INIT_LIST_HEAD(&info->lru_entry);
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * allocate_cache_components() - Allocate components of the cache which require their own
+ * allocation.
+ * @maximum_age: The number of journal blocks before a dirtied page is considered old and must be
+ * written out.
+ *
+ * The caller is responsible for all clean up on errors.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check allocate_cache_components(struct vdo_page_cache *cache)
+{
+ u64 size = cache->page_count * (u64) VDO_BLOCK_SIZE;
+ int result;
+
+ result = vdo_allocate(cache->page_count, struct page_info, "page infos",
+ &cache->infos);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate_memory(size, VDO_BLOCK_SIZE, "cache pages", &cache->pages);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_int_map_create(cache->page_count, &cache->page_map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return initialize_info(cache);
+}
+
+/**
+ * assert_on_cache_thread() - Assert that a function has been called on the VDO page cache's
+ * thread.
+ */
+static inline void assert_on_cache_thread(struct vdo_page_cache *cache,
+ const char *function_name)
+{
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id),
+ "%s() must only be called on cache thread %d, not thread %d",
+ function_name, cache->zone->thread_id, thread_id);
+}
+
+/** assert_io_allowed() - Assert that a page cache may issue I/O. */
+static inline void assert_io_allowed(struct vdo_page_cache *cache)
+{
+ VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state),
+ "VDO page cache may issue I/O");
+}
+
+/** report_cache_pressure() - Log and, if enabled, report cache pressure. */
+static void report_cache_pressure(struct vdo_page_cache *cache)
+{
+ ADD_ONCE(cache->stats.cache_pressure, 1);
+ if (cache->waiter_count > cache->page_count) {
+ if ((cache->pressure_report % LOG_INTERVAL) == 0)
+ vdo_log_info("page cache pressure %u", cache->stats.cache_pressure);
+
+ if (++cache->pressure_report >= DISPLAY_INTERVAL)
+ cache->pressure_report = 0;
+ }
+}
+
+/**
+ * get_page_state_name() - Return the name of a page state.
+ *
+ * If the page state is invalid, a static string is returned and the invalid state is logged.
+ *
+ * Return: A pointer to a static page state name.
+ */
+static const char * __must_check get_page_state_name(enum vdo_page_buffer_state state)
+{
+ int result;
+ static const char * const state_names[] = {
+ "FREE", "INCOMING", "FAILED", "RESIDENT", "DIRTY", "OUTGOING"
+ };
+
+ BUILD_BUG_ON(ARRAY_SIZE(state_names) != PAGE_STATE_COUNT);
+
+ result = VDO_ASSERT(state < ARRAY_SIZE(state_names),
+ "Unknown page_state value %d", state);
+ if (result != VDO_SUCCESS)
+ return "[UNKNOWN PAGE STATE]";
+
+ return state_names[state];
+}
+
+/**
+ * update_counter() - Update the counter associated with a given state.
+ * @info: The page info to count.
+ * @delta: The delta to apply to the counter.
+ */
+static void update_counter(struct page_info *info, s32 delta)
+{
+ struct block_map_statistics *stats = &info->cache->stats;
+
+ switch (info->state) {
+ case PS_FREE:
+ ADD_ONCE(stats->free_pages, delta);
+ return;
+
+ case PS_INCOMING:
+ ADD_ONCE(stats->incoming_pages, delta);
+ return;
+
+ case PS_OUTGOING:
+ ADD_ONCE(stats->outgoing_pages, delta);
+ return;
+
+ case PS_FAILED:
+ ADD_ONCE(stats->failed_pages, delta);
+ return;
+
+ case PS_RESIDENT:
+ ADD_ONCE(stats->clean_pages, delta);
+ return;
+
+ case PS_DIRTY:
+ ADD_ONCE(stats->dirty_pages, delta);
+ return;
+
+ default:
+ return;
+ }
+}
+
+/** update_lru() - Update the lru information for an active page. */
+static void update_lru(struct page_info *info)
+{
+ if (info->cache->lru_list.prev != &info->lru_entry)
+ list_move_tail(&info->lru_entry, &info->cache->lru_list);
+}
+
+/**
+ * set_info_state() - Set the state of a page_info and put it on the right list, adjusting
+ * counters.
+ */
+static void set_info_state(struct page_info *info, enum vdo_page_buffer_state new_state)
+{
+ if (new_state == info->state)
+ return;
+
+ update_counter(info, -1);
+ info->state = new_state;
+ update_counter(info, 1);
+
+ switch (info->state) {
+ case PS_FREE:
+ case PS_FAILED:
+ list_move_tail(&info->state_entry, &info->cache->free_list);
+ return;
+
+ case PS_OUTGOING:
+ list_move_tail(&info->state_entry, &info->cache->outgoing_list);
+ return;
+
+ case PS_DIRTY:
+ return;
+
+ default:
+ list_del_init(&info->state_entry);
+ }
+}
+
+/** set_info_pbn() - Set the pbn for an info, updating the map as needed. */
+static int __must_check set_info_pbn(struct page_info *info, physical_block_number_t pbn)
+{
+ struct vdo_page_cache *cache = info->cache;
+
+ /* Either the new or the old page number must be NO_PAGE. */
+ int result = VDO_ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE),
+ "Must free a page before reusing it.");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (info->pbn != NO_PAGE)
+ vdo_int_map_remove(cache->page_map, info->pbn);
+
+ info->pbn = pbn;
+
+ if (pbn != NO_PAGE) {
+ result = vdo_int_map_put(cache->page_map, pbn, info, true, NULL);
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+ return VDO_SUCCESS;
+}
+
+/** reset_page_info() - Reset page info to represent an unallocated page. */
+static int reset_page_info(struct page_info *info)
+{
+ int result;
+
+ result = VDO_ASSERT(info->busy == 0, "VDO Page must not be busy");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(!vdo_waitq_has_waiters(&info->waiting),
+ "VDO Page must not have waiters");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = set_info_pbn(info, NO_PAGE);
+ set_info_state(info, PS_FREE);
+ list_del_init(&info->lru_entry);
+ return result;
+}
+
+/**
+ * find_free_page() - Find a free page.
+ *
+ * Return: A pointer to the page info structure (if found), NULL otherwise.
+ */
+static struct page_info * __must_check find_free_page(struct vdo_page_cache *cache)
+{
+ struct page_info *info;
+
+ info = list_first_entry_or_null(&cache->free_list, struct page_info,
+ state_entry);
+ if (info != NULL)
+ list_del_init(&info->state_entry);
+
+ return info;
+}
+
+/**
+ * find_page() - Find the page info (if any) associated with a given pbn.
+ * @pbn: The absolute physical block number of the page.
+ *
+ * Return: The page info for the page if available, or NULL if not.
+ */
+static struct page_info * __must_check find_page(struct vdo_page_cache *cache,
+ physical_block_number_t pbn)
+{
+ if ((cache->last_found != NULL) && (cache->last_found->pbn == pbn))
+ return cache->last_found;
+
+ cache->last_found = vdo_int_map_get(cache->page_map, pbn);
+ return cache->last_found;
+}
+
+/**
+ * select_lru_page() - Determine which page is least recently used.
+ *
+ * Picks the least recently used from among the non-busy entries at the front of the LRU ring.
+ * Since whenever we mark a page busy we also move it to the end of the ring, it is unlikely that
+ * the entries at the front are busy unless the queue is very short, but it is not impossible.
+ *
+ * Return: A pointer to the info structure for a relevant page, or NULL if no such page can be
+ * found. The page can be dirty or resident.
+ */
+static struct page_info * __must_check select_lru_page(struct vdo_page_cache *cache)
+{
+ struct page_info *info;
+
+ list_for_each_entry(info, &cache->lru_list, lru_entry)
+ if ((info->busy == 0) && !is_in_flight(info))
+ return info;
+
+ return NULL;
+}
+
+/* ASYNCHRONOUS INTERFACE BEYOND THIS POINT */
+
+/**
+ * complete_with_page() - Helper to complete the VDO Page Completion request successfully.
+ * @info: The page info representing the result page.
+ * @vdo_page_comp: The VDO page completion to complete.
+ */
+static void complete_with_page(struct page_info *info,
+ struct vdo_page_completion *vdo_page_comp)
+{
+ bool available = vdo_page_comp->writable ? is_present(info) : is_valid(info);
+
+ if (!available) {
+ vdo_log_error_strerror(VDO_BAD_PAGE,
+ "Requested cache page %llu in state %s is not %s",
+ (unsigned long long) info->pbn,
+ get_page_state_name(info->state),
+ vdo_page_comp->writable ? "present" : "valid");
+ vdo_fail_completion(&vdo_page_comp->completion, VDO_BAD_PAGE);
+ return;
+ }
+
+ vdo_page_comp->info = info;
+ vdo_page_comp->ready = true;
+ vdo_finish_completion(&vdo_page_comp->completion);
+}
+
+/**
+ * complete_waiter_with_error() - Complete a page completion with an error code.
+ * @waiter: The page completion, as a waiter.
+ * @result_ptr: A pointer to the error code.
+ *
+ * Implements waiter_callback_fn.
+ */
+static void complete_waiter_with_error(struct vdo_waiter *waiter, void *result_ptr)
+{
+ int *result = result_ptr;
+
+ vdo_fail_completion(&page_completion_from_waiter(waiter)->completion, *result);
+}
+
+/**
+ * complete_waiter_with_page() - Complete a page completion with a page.
+ * @waiter: The page completion, as a waiter.
+ * @page_info: The page info to complete with.
+ *
+ * Implements waiter_callback_fn.
+ */
+static void complete_waiter_with_page(struct vdo_waiter *waiter, void *page_info)
+{
+ complete_with_page(page_info, page_completion_from_waiter(waiter));
+}
+
+/**
+ * distribute_page_over_waitq() - Complete a waitq of VDO page completions with a page result.
+ *
+ * Upon completion the waitq will be empty.
+ *
+ * Return: The number of pages distributed.
+ */
+static unsigned int distribute_page_over_waitq(struct page_info *info,
+ struct vdo_wait_queue *waitq)
+{
+ size_t num_pages;
+
+ update_lru(info);
+ num_pages = vdo_waitq_num_waiters(waitq);
+
+ /*
+ * Increment the busy count once for each pending completion so that this page does not
+ * stop being busy until all completions have been processed.
+ */
+ info->busy += num_pages;
+
+ vdo_waitq_notify_all_waiters(waitq, complete_waiter_with_page, info);
+ return num_pages;
+}
+
+/**
+ * set_persistent_error() - Set a persistent error which all requests will receive in the future.
+ * @context: A string describing what triggered the error.
+ *
+ * Once triggered, all enqueued completions will get this error. Any future requests will result in
+ * this error as well.
+ */
+static void set_persistent_error(struct vdo_page_cache *cache, const char *context,
+ int result)
+{
+ struct page_info *info;
+ /* If we're already read-only, there's no need to log. */
+ struct vdo *vdo = cache->vdo;
+
+ if ((result != VDO_READ_ONLY) && !vdo_is_read_only(vdo)) {
+ vdo_log_error_strerror(result, "VDO Page Cache persistent error: %s",
+ context);
+ vdo_enter_read_only_mode(vdo, result);
+ }
+
+ assert_on_cache_thread(cache, __func__);
+
+ vdo_waitq_notify_all_waiters(&cache->free_waiters,
+ complete_waiter_with_error, &result);
+ cache->waiter_count = 0;
+
+ for (info = cache->infos; info < cache->infos + cache->page_count; info++) {
+ vdo_waitq_notify_all_waiters(&info->waiting,
+ complete_waiter_with_error, &result);
+ }
+}
+
+/**
+ * validate_completed_page() - Check that a page completion which is being freed to the cache
+ * referred to a valid page and is in a valid state.
+ * @writable: Whether a writable page is required.
+ *
+ * Return: VDO_SUCCESS if the page was valid, otherwise an error.
+ */
+static int __must_check validate_completed_page(struct vdo_page_completion *completion,
+ bool writable)
+{
+ int result;
+
+ result = VDO_ASSERT(completion->ready, "VDO Page completion not ready");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(completion->info != NULL,
+ "VDO Page Completion must be complete");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(completion->info->pbn == completion->pbn,
+ "VDO Page Completion pbn must be consistent");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(is_valid(completion->info),
+ "VDO Page Completion page must be valid");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (writable) {
+ result = VDO_ASSERT(completion->writable,
+ "VDO Page Completion must be writable");
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ return VDO_SUCCESS;
+}
+
+static void check_for_drain_complete(struct block_map_zone *zone)
+{
+ if (vdo_is_state_draining(&zone->state) &&
+ (zone->active_lookups == 0) &&
+ !vdo_waitq_has_waiters(&zone->flush_waiters) &&
+ !is_vio_pool_busy(zone->vio_pool) &&
+ (zone->page_cache.outstanding_reads == 0) &&
+ (zone->page_cache.outstanding_writes == 0)) {
+ vdo_finish_draining_with_result(&zone->state,
+ (vdo_is_read_only(zone->block_map->vdo) ?
+ VDO_READ_ONLY : VDO_SUCCESS));
+ }
+}
+
+static void enter_zone_read_only_mode(struct block_map_zone *zone, int result)
+{
+ vdo_enter_read_only_mode(zone->block_map->vdo, result);
+
+ /*
+ * We are in read-only mode, so we won't ever write any page out.
+ * Just take all waiters off the waitq so the zone can drain.
+ */
+ vdo_waitq_init(&zone->flush_waiters);
+ check_for_drain_complete(zone);
+}
+
+static bool __must_check
+validate_completed_page_or_enter_read_only_mode(struct vdo_page_completion *completion,
+ bool writable)
+{
+ int result = validate_completed_page(completion, writable);
+
+ if (result == VDO_SUCCESS)
+ return true;
+
+ enter_zone_read_only_mode(completion->info->cache->zone, result);
+ return false;
+}
+
+/**
+ * handle_load_error() - Handle page load errors.
+ * @completion: The page read vio.
+ */
+static void handle_load_error(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct page_info *info = completion->parent;
+ struct vdo_page_cache *cache = info->cache;
+
+ assert_on_cache_thread(cache, __func__);
+ vio_record_metadata_io_error(as_vio(completion));
+ vdo_enter_read_only_mode(cache->zone->block_map->vdo, result);
+ ADD_ONCE(cache->stats.failed_reads, 1);
+ set_info_state(info, PS_FAILED);
+ vdo_waitq_notify_all_waiters(&info->waiting, complete_waiter_with_error, &result);
+ reset_page_info(info);
+
+ /*
+ * Don't decrement until right before calling check_for_drain_complete() to
+ * ensure that the above work can't cause the page cache to be freed out from under us.
+ */
+ cache->outstanding_reads--;
+ check_for_drain_complete(cache->zone);
+}
+
+/**
+ * page_is_loaded() - Callback used when a page has been loaded.
+ * @completion: The vio which has loaded the page. Its parent is the page_info.
+ */
+static void page_is_loaded(struct vdo_completion *completion)
+{
+ struct page_info *info = completion->parent;
+ struct vdo_page_cache *cache = info->cache;
+ nonce_t nonce = info->cache->zone->block_map->nonce;
+ struct block_map_page *page;
+ enum block_map_page_validity validity;
+
+ assert_on_cache_thread(cache, __func__);
+
+ page = (struct block_map_page *) get_page_buffer(info);
+ validity = vdo_validate_block_map_page(page, nonce, info->pbn);
+ if (validity == VDO_BLOCK_MAP_PAGE_BAD) {
+ physical_block_number_t pbn = vdo_get_block_map_page_pbn(page);
+ int result = vdo_log_error_strerror(VDO_BAD_PAGE,
+ "Expected page %llu but got page %llu instead",
+ (unsigned long long) info->pbn,
+ (unsigned long long) pbn);
+
+ vdo_continue_completion(completion, result);
+ return;
+ }
+
+ if (validity == VDO_BLOCK_MAP_PAGE_INVALID)
+ vdo_format_block_map_page(page, nonce, info->pbn, false);
+
+ info->recovery_lock = 0;
+ set_info_state(info, PS_RESIDENT);
+ distribute_page_over_waitq(info, &info->waiting);
+
+ /*
+ * Don't decrement until right before calling check_for_drain_complete() to
+ * ensure that the above work can't cause the page cache to be freed out from under us.
+ */
+ cache->outstanding_reads--;
+ check_for_drain_complete(cache->zone);
+}
+
+/**
+ * handle_rebuild_read_error() - Handle a read error during a read-only rebuild.
+ * @completion: The page load completion.
+ */
+static void handle_rebuild_read_error(struct vdo_completion *completion)
+{
+ struct page_info *info = completion->parent;
+ struct vdo_page_cache *cache = info->cache;
+
+ assert_on_cache_thread(cache, __func__);
+
+ /*
+ * We are doing a read-only rebuild, so treat this as a successful read
+ * of an uninitialized page.
+ */
+ vio_record_metadata_io_error(as_vio(completion));
+ ADD_ONCE(cache->stats.failed_reads, 1);
+ memset(get_page_buffer(info), 0, VDO_BLOCK_SIZE);
+ vdo_reset_completion(completion);
+ page_is_loaded(completion);
+}
+
+static void load_cache_page_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct page_info *info = vio->completion.parent;
+
+ continue_vio_after_io(vio, page_is_loaded, info->cache->zone->thread_id);
+}
+
+/**
+ * launch_page_load() - Begin the process of loading a page.
+ * @info: The page_info to load the page into.
+ * @pbn: The absolute physical block number of the page to load.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check launch_page_load(struct page_info *info,
+ physical_block_number_t pbn)
+{
+ int result;
+ vdo_action_fn callback;
+ struct vdo_page_cache *cache = info->cache;
+
+ assert_io_allowed(cache);
+
+ result = set_info_pbn(info, pbn);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT((info->busy == 0), "Page is not busy before loading.");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ set_info_state(info, PS_INCOMING);
+ cache->outstanding_reads++;
+ ADD_ONCE(cache->stats.pages_loaded, 1);
+ callback = (cache->rebuilding ? handle_rebuild_read_error : handle_load_error);
+ vdo_submit_metadata_vio(info->vio, pbn, load_cache_page_endio,
+ callback, REQ_OP_READ | REQ_PRIO);
+ return VDO_SUCCESS;
+}
+
+static void write_pages(struct vdo_completion *completion);
+
+/** handle_flush_error() - Handle errors flushing the layer. */
+static void handle_flush_error(struct vdo_completion *completion)
+{
+ struct page_info *info = completion->parent;
+
+ vio_record_metadata_io_error(as_vio(completion));
+ set_persistent_error(info->cache, "flush failed", completion->result);
+ write_pages(completion);
+}
+
+static void flush_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct page_info *info = vio->completion.parent;
+
+ continue_vio_after_io(vio, write_pages, info->cache->zone->thread_id);
+}
+
+/** save_pages() - Attempt to save the outgoing pages by first flushing the layer. */
+static void save_pages(struct vdo_page_cache *cache)
+{
+ struct page_info *info;
+ struct vio *vio;
+
+ if ((cache->pages_in_flush > 0) || (cache->pages_to_flush == 0))
+ return;
+
+ assert_io_allowed(cache);
+
+ info = list_first_entry(&cache->outgoing_list, struct page_info, state_entry);
+
+ cache->pages_in_flush = cache->pages_to_flush;
+ cache->pages_to_flush = 0;
+ ADD_ONCE(cache->stats.flush_count, 1);
+
+ vio = info->vio;
+
+ /*
+ * We must make sure that the recovery journal entries that changed these pages were
+ * successfully persisted, and thus must issue a flush before each batch of pages is
+ * written to ensure this.
+ */
+ vdo_submit_flush_vio(vio, flush_endio, handle_flush_error);
+}
+
+/**
+ * schedule_page_save() - Add a page to the outgoing list of pages waiting to be saved.
+ *
+ * Once in the list, a page may not be used until it has been written out.
+ */
+static void schedule_page_save(struct page_info *info)
+{
+ if (info->busy > 0) {
+ info->write_status = WRITE_STATUS_DEFERRED;
+ return;
+ }
+
+ info->cache->pages_to_flush++;
+ info->cache->outstanding_writes++;
+ set_info_state(info, PS_OUTGOING);
+}
+
+/**
+ * launch_page_save() - Add a page to outgoing pages waiting to be saved, and then start saving
+ * pages if another save is not in progress.
+ */
+static void launch_page_save(struct page_info *info)
+{
+ schedule_page_save(info);
+ save_pages(info->cache);
+}
+
+/**
+ * completion_needs_page() - Determine whether a given vdo_page_completion (as a waiter) is
+ * requesting a given page number.
+ * @waiter: The waiter in question.
+ * @context: A pointer to the pbn of the desired page.
+ *
+ * Implements waiter_match_fn.
+ *
+ * Return: true if the page completion is for the desired page number.
+ */
+static bool completion_needs_page(struct vdo_waiter *waiter, void *context)
+{
+ physical_block_number_t *pbn = context;
+
+ return (page_completion_from_waiter(waiter)->pbn == *pbn);
+}
+
+/**
+ * allocate_free_page() - Allocate a free page to the first completion in the waiting queue, and
+ * any other completions that match it in page number.
+ */
+static void allocate_free_page(struct page_info *info)
+{
+ int result;
+ struct vdo_waiter *oldest_waiter;
+ physical_block_number_t pbn;
+ struct vdo_page_cache *cache = info->cache;
+
+ assert_on_cache_thread(cache, __func__);
+
+ if (!vdo_waitq_has_waiters(&cache->free_waiters)) {
+ if (cache->stats.cache_pressure > 0) {
+ vdo_log_info("page cache pressure relieved");
+ WRITE_ONCE(cache->stats.cache_pressure, 0);
+ }
+
+ return;
+ }
+
+ result = reset_page_info(info);
+ if (result != VDO_SUCCESS) {
+ set_persistent_error(cache, "cannot reset page info", result);
+ return;
+ }
+
+ oldest_waiter = vdo_waitq_get_first_waiter(&cache->free_waiters);
+ pbn = page_completion_from_waiter(oldest_waiter)->pbn;
+
+ /*
+ * Remove all entries which match the page number in question and push them onto the page
+ * info's waitq.
+ */
+ vdo_waitq_dequeue_matching_waiters(&cache->free_waiters, completion_needs_page,
+ &pbn, &info->waiting);
+ cache->waiter_count -= vdo_waitq_num_waiters(&info->waiting);
+
+ result = launch_page_load(info, pbn);
+ if (result != VDO_SUCCESS) {
+ vdo_waitq_notify_all_waiters(&info->waiting,
+ complete_waiter_with_error, &result);
+ }
+}
+
+/**
+ * discard_a_page() - Begin the process of discarding a page.
+ *
+ * If no page is discardable, increments a count of deferred frees so that the next release of a
+ * page which is no longer busy will kick off another discard cycle. This is an indication that the
+ * cache is not big enough.
+ *
+ * If the selected page is not dirty, immediately allocates the page to the oldest completion
+ * waiting for a free page.
+ */
+static void discard_a_page(struct vdo_page_cache *cache)
+{
+ struct page_info *info = select_lru_page(cache);
+
+ if (info == NULL) {
+ report_cache_pressure(cache);
+ return;
+ }
+
+ if (!is_dirty(info)) {
+ allocate_free_page(info);
+ return;
+ }
+
+ VDO_ASSERT_LOG_ONLY(!is_in_flight(info),
+ "page selected for discard is not in flight");
+
+ cache->discard_count++;
+ info->write_status = WRITE_STATUS_DISCARD;
+ launch_page_save(info);
+}
+
+/**
+ * discard_page_for_completion() - Helper used to trigger a discard so that the completion can get
+ * a different page.
+ */
+static void discard_page_for_completion(struct vdo_page_completion *vdo_page_comp)
+{
+ struct vdo_page_cache *cache = vdo_page_comp->cache;
+
+ cache->waiter_count++;
+ vdo_waitq_enqueue_waiter(&cache->free_waiters, &vdo_page_comp->waiter);
+ discard_a_page(cache);
+}
+
+/**
+ * discard_page_if_needed() - Helper used to trigger a discard if the cache needs another free
+ * page.
+ * @cache: The page cache.
+ */
+static void discard_page_if_needed(struct vdo_page_cache *cache)
+{
+ if (cache->waiter_count > cache->discard_count)
+ discard_a_page(cache);
+}
+
+/**
+ * write_has_finished() - Inform the cache that a write has finished (possibly with an error).
+ * @info: The info structure for the page whose write just completed.
+ *
+ * Return: true if the page write was a discard.
+ */
+static bool write_has_finished(struct page_info *info)
+{
+ bool was_discard = (info->write_status == WRITE_STATUS_DISCARD);
+
+ assert_on_cache_thread(info->cache, __func__);
+ info->cache->outstanding_writes--;
+
+ info->write_status = WRITE_STATUS_NORMAL;
+ return was_discard;
+}
+
+/**
+ * handle_page_write_error() - Handler for page write errors.
+ * @completion: The page write vio.
+ */
+static void handle_page_write_error(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct page_info *info = completion->parent;
+ struct vdo_page_cache *cache = info->cache;
+
+ vio_record_metadata_io_error(as_vio(completion));
+
+ /* If we're already read-only, write failures are to be expected. */
+ if (result != VDO_READ_ONLY) {
+ vdo_log_ratelimit(vdo_log_error,
+ "failed to write block map page %llu",
+ (unsigned long long) info->pbn);
+ }
+
+ set_info_state(info, PS_DIRTY);
+ ADD_ONCE(cache->stats.failed_writes, 1);
+ set_persistent_error(cache, "cannot write page", result);
+
+ if (!write_has_finished(info))
+ discard_page_if_needed(cache);
+
+ check_for_drain_complete(cache->zone);
+}
+
+static void page_is_written_out(struct vdo_completion *completion);
+
+static void write_cache_page_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct page_info *info = vio->completion.parent;
+
+ continue_vio_after_io(vio, page_is_written_out, info->cache->zone->thread_id);
+}
+
+/**
+ * page_is_written_out() - Callback used when a page has been written out.
+ * @completion: The vio which wrote the page. Its parent is a page_info.
+ */
+static void page_is_written_out(struct vdo_completion *completion)
+{
+ bool was_discard, reclaimed;
+ u32 reclamations;
+ struct page_info *info = completion->parent;
+ struct vdo_page_cache *cache = info->cache;
+ struct block_map_page *page = (struct block_map_page *) get_page_buffer(info);
+
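+	/*
+	 * Torn write protection: the first write of a freshly formatted page
+	 * goes out with the header's "initialized" marker still clear; only
+	 * once that write has completed is the page rewritten, behind a flush,
+	 * with the marker set. A torn first write therefore reads back as an
+	 * uninitialized page on the next load rather than being trusted.
+	 */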
+ if (!page->header.initialized) {
+ page->header.initialized = true;
+ vdo_submit_metadata_vio(info->vio, info->pbn,
+ write_cache_page_endio,
+ handle_page_write_error,
+ REQ_OP_WRITE | REQ_PRIO | REQ_PREFLUSH);
+ return;
+ }
+
+ /* Handle journal updates and torn write protection. */
+ vdo_release_recovery_journal_block_reference(cache->zone->block_map->journal,
+ info->recovery_lock,
+ VDO_ZONE_TYPE_LOGICAL,
+ cache->zone->zone_number);
+ info->recovery_lock = 0;
+ was_discard = write_has_finished(info);
+ reclaimed = (!was_discard || (info->busy > 0) || vdo_waitq_has_waiters(&info->waiting));
+
+ set_info_state(info, PS_RESIDENT);
+
+ reclamations = distribute_page_over_waitq(info, &info->waiting);
+ ADD_ONCE(cache->stats.reclaimed, reclamations);
+
+ if (was_discard)
+ cache->discard_count--;
+
+ if (reclaimed)
+ discard_page_if_needed(cache);
+ else
+ allocate_free_page(info);
+
+ check_for_drain_complete(cache->zone);
+}
+
+/**
+ * write_pages() - Write the batch of pages which were covered by the layer flush which just
+ * completed.
+ * @flush_completion: The flush vio.
+ *
+ * This callback is registered in save_pages().
+ */
+static void write_pages(struct vdo_completion *flush_completion)
+{
+ struct vdo_page_cache *cache = ((struct page_info *) flush_completion->parent)->cache;
+
+ /*
+ * We need to cache these two values on the stack since it is possible for the last
+ * page info to cause the page cache to get freed. Hence, once we launch the last page,
+ * it may be unsafe to dereference the cache.
+ */
+ bool has_unflushed_pages = (cache->pages_to_flush > 0);
+ page_count_t pages_in_flush = cache->pages_in_flush;
+
+ cache->pages_in_flush = 0;
+ while (pages_in_flush-- > 0) {
+ struct page_info *info =
+ list_first_entry(&cache->outgoing_list, struct page_info,
+ state_entry);
+
+ list_del_init(&info->state_entry);
+ if (vdo_is_read_only(info->cache->vdo)) {
+ struct vdo_completion *completion = &info->vio->completion;
+
+ vdo_reset_completion(completion);
+ completion->callback = page_is_written_out;
+ completion->error_handler = handle_page_write_error;
+ vdo_fail_completion(completion, VDO_READ_ONLY);
+ continue;
+ }
+ ADD_ONCE(info->cache->stats.pages_saved, 1);
+ vdo_submit_metadata_vio(info->vio, info->pbn, write_cache_page_endio,
+ handle_page_write_error, REQ_OP_WRITE | REQ_PRIO);
+ }
+
+ if (has_unflushed_pages) {
+ /*
+ * If there are unflushed pages, the cache can't have been freed, so this call is
+ * safe.
+ */
+ save_pages(cache);
+ }
+}
+
+/**
+ * vdo_release_page_completion() - Release a VDO Page Completion.
+ * @completion: The completion to release.
+ *
+ * The page referenced by this completion (if any) will no longer be held busy by this completion.
+ * If a page becomes discardable and there are completions awaiting free pages then a new round of
+ * page discarding is started.
+ */
+void vdo_release_page_completion(struct vdo_completion *completion)
+{
+ struct page_info *discard_info = NULL;
+ struct vdo_page_completion *page_completion = as_vdo_page_completion(completion);
+ struct vdo_page_cache *cache;
+
+ if (completion->result == VDO_SUCCESS) {
+ if (!validate_completed_page_or_enter_read_only_mode(page_completion, false))
+ return;
+
+ if (--page_completion->info->busy == 0)
+ discard_info = page_completion->info;
+ }
+
+ VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
+ "Page being released after leaving all queues");
+
+ page_completion->info = NULL;
+ cache = page_completion->cache;
+ assert_on_cache_thread(cache, __func__);
+
+ if (discard_info != NULL) {
+ if (discard_info->write_status == WRITE_STATUS_DEFERRED) {
+ discard_info->write_status = WRITE_STATUS_NORMAL;
+ launch_page_save(discard_info);
+ }
+
+ /*
+		 * If there are excess requests for pages (that have not already started
+		 * discards), we need to discard some page (which may be this one).
+ */
+ discard_page_if_needed(cache);
+ }
+}
+
+/**
+ * load_page_for_completion() - Helper function to load a page as described by a VDO Page
+ * Completion.
+ */
+static void load_page_for_completion(struct page_info *info,
+ struct vdo_page_completion *vdo_page_comp)
+{
+ int result;
+
+ vdo_waitq_enqueue_waiter(&info->waiting, &vdo_page_comp->waiter);
+ result = launch_page_load(info, vdo_page_comp->pbn);
+ if (result != VDO_SUCCESS) {
+ vdo_waitq_notify_all_waiters(&info->waiting,
+ complete_waiter_with_error, &result);
+ }
+}
+
+/**
+ * vdo_get_page() - Initialize a page completion and get a block map page.
+ * @page_completion: The vdo_page_completion to initialize.
+ * @zone: The block map zone of the desired page.
+ * @pbn: The absolute physical block of the desired page.
+ * @writable: Whether the page can be modified.
+ * @parent: The object to notify when the fetch is complete.
+ * @callback: The notification callback.
+ * @error_handler: The handler for fetch errors.
+ * @requeue: Whether we must requeue when notifying the parent.
+ *
+ * May cause another page to be discarded (potentially writing a dirty page) and the one nominated
+ * by the completion to be loaded from disk. When the callback is invoked, the page will be
+ * resident in the cache and marked busy. All callers must call vdo_release_page_completion()
+ * when they are done with the page to clear the busy mark.
+ */
+void vdo_get_page(struct vdo_page_completion *page_completion,
+ struct block_map_zone *zone, physical_block_number_t pbn,
+ bool writable, void *parent, vdo_action_fn callback,
+ vdo_action_fn error_handler, bool requeue)
+{
+ struct vdo_page_cache *cache = &zone->page_cache;
+ struct vdo_completion *completion = &page_completion->completion;
+ struct page_info *info;
+
+ assert_on_cache_thread(cache, __func__);
+ VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
+ "New page completion was not already on a wait queue");
+
+ *page_completion = (struct vdo_page_completion) {
+ .pbn = pbn,
+ .writable = writable,
+ .cache = cache,
+ };
+
+ vdo_initialize_completion(completion, cache->vdo, VDO_PAGE_COMPLETION);
+ vdo_prepare_completion(completion, callback, error_handler,
+ cache->zone->thread_id, parent);
+ completion->requeue = requeue;
+
+ if (page_completion->writable && vdo_is_read_only(cache->vdo)) {
+ vdo_fail_completion(completion, VDO_READ_ONLY);
+ return;
+ }
+
+ if (page_completion->writable)
+ ADD_ONCE(cache->stats.write_count, 1);
+ else
+ ADD_ONCE(cache->stats.read_count, 1);
+
+ info = find_page(cache, page_completion->pbn);
+ if (info != NULL) {
+ /* The page is in the cache already. */
+ if ((info->write_status == WRITE_STATUS_DEFERRED) ||
+ is_incoming(info) ||
+ (is_outgoing(info) && page_completion->writable)) {
+ /* The page is unusable until it has finished I/O. */
+ ADD_ONCE(cache->stats.wait_for_page, 1);
+ vdo_waitq_enqueue_waiter(&info->waiting, &page_completion->waiter);
+ return;
+ }
+
+ if (is_valid(info)) {
+ /* The page is usable. */
+ ADD_ONCE(cache->stats.found_in_cache, 1);
+ if (!is_present(info))
+ ADD_ONCE(cache->stats.read_outgoing, 1);
+ update_lru(info);
+ info->busy++;
+ complete_with_page(info, page_completion);
+ return;
+ }
+
+ /* Something horrible has gone wrong. */
+ VDO_ASSERT_LOG_ONLY(false, "Info found in a usable state.");
+ }
+
+ /* The page must be fetched. */
+ info = find_free_page(cache);
+ if (info != NULL) {
+ ADD_ONCE(cache->stats.fetch_required, 1);
+ load_page_for_completion(info, page_completion);
+ return;
+ }
+
+ /* The page must wait for a page to be discarded. */
+ ADD_ONCE(cache->stats.discard_required, 1);
+ discard_page_for_completion(page_completion);
+}
+
+/**
+ * vdo_request_page_write() - Request that a VDO page be written out as soon as it is not busy.
+ * @completion: The vdo_page_completion containing the page.
+ */
+void vdo_request_page_write(struct vdo_completion *completion)
+{
+ struct page_info *info;
+ struct vdo_page_completion *vdo_page_comp = as_vdo_page_completion(completion);
+
+ if (!validate_completed_page_or_enter_read_only_mode(vdo_page_comp, true))
+ return;
+
+ info = vdo_page_comp->info;
+ set_info_state(info, PS_DIRTY);
+ launch_page_save(info);
+}
+
+/**
+ * vdo_get_cached_page() - Get the block map page from a page completion.
+ * @completion: A vdo page completion whose callback has been called.
+ * @page_ptr: A pointer to hold the page.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_get_cached_page(struct vdo_completion *completion,
+ struct block_map_page **page_ptr)
+{
+ int result;
+ struct vdo_page_completion *vpc;
+
+ vpc = as_vdo_page_completion(completion);
+ result = validate_completed_page(vpc, true);
+ if (result == VDO_SUCCESS)
+ *page_ptr = (struct block_map_page *) get_page_buffer(vpc->info);
+
+ return result;
+}
+
+/**
+ * vdo_invalidate_page_cache() - Invalidate all entries in the VDO page cache.
+ * @cache: The cache to invalidate.
+ *
+ * There must not be any dirty pages in the cache.
+ *
+ * Return: A success or error code.
+ */
+int vdo_invalidate_page_cache(struct vdo_page_cache *cache)
+{
+ struct page_info *info;
+
+ assert_on_cache_thread(cache, __func__);
+
+ /* Make sure we don't throw away any dirty pages. */
+ for (info = cache->infos; info < cache->infos + cache->page_count; info++) {
+ int result = VDO_ASSERT(!is_dirty(info), "cache must have no dirty pages");
+
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ /* Reset the page map by re-allocating it. */
+ vdo_int_map_free(vdo_forget(cache->page_map));
+ return vdo_int_map_create(cache->page_count, &cache->page_map);
+}
+
+/**
+ * get_tree_page_by_index() - Get the tree page for a given height and page index.
+ * @forest: The forest to search.
+ * @root_index: The index of the tree to search.
+ * @height: The height of the desired page.
+ * @page_index: The index of the desired page.
+ *
+ * Return: The requested page.
+ */
+static struct tree_page * __must_check get_tree_page_by_index(struct forest *forest,
+ root_count_t root_index,
+ height_t height,
+ page_number_t page_index)
+{
+ page_number_t offset = 0;
+ size_t segment;
+
+ for (segment = 0; segment < forest->segments; segment++) {
+ page_number_t border = forest->boundaries[segment].levels[height - 1];
+
+ if (page_index < border) {
+ struct block_map_tree *tree = &forest->trees[root_index];
+
+ return &(tree->segments[segment].levels[height - 1][page_index - offset]);
+ }
+
+ offset = border;
+ }
+
+ return NULL;
+}
+
+/* Get the page referred to by the lock's tree slot at its current height. */
+static inline struct tree_page *get_tree_page(const struct block_map_zone *zone,
+ const struct tree_lock *lock)
+{
+ return get_tree_page_by_index(zone->block_map->forest, lock->root_index,
+ lock->height,
+ lock->tree_slots[lock->height].page_index);
+}
+
+/** vdo_copy_valid_page() - Validate and copy a buffer to a page. */
+bool vdo_copy_valid_page(char *buffer, nonce_t nonce,
+ physical_block_number_t pbn,
+ struct block_map_page *page)
+{
+ struct block_map_page *loaded = (struct block_map_page *) buffer;
+ enum block_map_page_validity validity =
+ vdo_validate_block_map_page(loaded, nonce, pbn);
+
+ if (validity == VDO_BLOCK_MAP_PAGE_VALID) {
+ memcpy(page, loaded, VDO_BLOCK_SIZE);
+ return true;
+ }
+
+ if (validity == VDO_BLOCK_MAP_PAGE_BAD) {
+ vdo_log_error_strerror(VDO_BAD_PAGE,
+ "Expected page %llu but got page %llu instead",
+ (unsigned long long) pbn,
+ (unsigned long long) vdo_get_block_map_page_pbn(loaded));
+ }
+
+ return false;
+}
+
+/**
+ * in_cyclic_range() - Check whether the given value is between the lower and upper bounds, within
+ * a cyclic range of values from 0 to (modulus - 1).
+ * @lower: The lowest value to accept.
+ * @value: The value to check.
+ * @upper: The highest value to accept.
+ * @modulus: The size of the cyclic space, no more than 2^15.
+ *
+ * The value and both bounds must be smaller than the modulus.
+ *
+ * Return: true if the value is in range.
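+ *
+ * For example, with modulus 256, lower = 250, value = 3, and upper = 10: the
+ * value wraps to 259 and the upper bound to 266, so 259 <= 266 and the value
+ * is reported as in range.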
+ */
+static bool in_cyclic_range(u16 lower, u16 value, u16 upper, u16 modulus)
+{
+ if (value < lower)
+ value += modulus;
+ if (upper < lower)
+ upper += modulus;
+ return (value <= upper);
+}
+
+/**
+ * is_not_older() - Check whether a generation is not strictly older than some other generation
+ *                  in the context of a zone's current generation range.
+ * @zone: The zone in which to do the comparison.
+ * @a: The generation in question.
+ * @b: The generation to compare to.
+ *
+ * Return: true if generation @a is not strictly older than generation @b in the context of @zone.
+ */
+static bool __must_check is_not_older(struct block_map_zone *zone, u8 a, u8 b)
+{
+ int result;
+
+ result = VDO_ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) &&
+ in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)),
+ "generation(s) %u, %u are out of range [%u, %u]",
+ a, b, zone->oldest_generation, zone->generation);
+ if (result != VDO_SUCCESS) {
+ enter_zone_read_only_mode(zone, result);
+ return true;
+ }
+
+ return in_cyclic_range(b, a, zone->generation, 1 << 8);
+}
+
+static void release_generation(struct block_map_zone *zone, u8 generation)
+{
+ int result;
+
+ result = VDO_ASSERT((zone->dirty_page_counts[generation] > 0),
+ "dirty page count underflow for generation %u", generation);
+ if (result != VDO_SUCCESS) {
+ enter_zone_read_only_mode(zone, result);
+ return;
+ }
+
+ zone->dirty_page_counts[generation]--;
+ while ((zone->dirty_page_counts[zone->oldest_generation] == 0) &&
+ (zone->oldest_generation != zone->generation))
+ zone->oldest_generation++;
+}
+
+static void set_generation(struct block_map_zone *zone, struct tree_page *page,
+ u8 new_generation)
+{
+ u32 new_count;
+ int result;
+ bool decrement_old = vdo_waiter_is_waiting(&page->waiter);
+ u8 old_generation = page->generation;
+
+ if (decrement_old && (old_generation == new_generation))
+ return;
+
+ page->generation = new_generation;
+ new_count = ++zone->dirty_page_counts[new_generation];
+ result = VDO_ASSERT((new_count != 0), "dirty page count overflow for generation %u",
+ new_generation);
+ if (result != VDO_SUCCESS) {
+ enter_zone_read_only_mode(zone, result);
+ return;
+ }
+
+ if (decrement_old)
+ release_generation(zone, old_generation);
+}
+
+static void write_page(struct tree_page *tree_page, struct pooled_vio *vio);
+
+/* Implements waiter_callback_fn */
+static void write_page_callback(struct vdo_waiter *waiter, void *context)
+{
+ write_page(container_of(waiter, struct tree_page, waiter), context);
+}
+
+static void acquire_vio(struct vdo_waiter *waiter, struct block_map_zone *zone)
+{
+ waiter->callback = write_page_callback;
+ acquire_vio_from_pool(zone->vio_pool, waiter);
+}
+
+/* Return: true if the generation was incremented; false if all possible generations were already active. */
+static bool attempt_increment(struct block_map_zone *zone)
+{
+ u8 generation = zone->generation + 1;
+
+ if (zone->oldest_generation == generation)
+ return false;
+
+ zone->generation = generation;
+ return true;
+}
+
+/* Launches a flush if one is not already in progress. */
+static void enqueue_page(struct tree_page *page, struct block_map_zone *zone)
+{
+ if ((zone->flusher == NULL) && attempt_increment(zone)) {
+ zone->flusher = page;
+ acquire_vio(&page->waiter, zone);
+ return;
+ }
+
+ vdo_waitq_enqueue_waiter(&zone->flush_waiters, &page->waiter);
+}
+
+static void write_page_if_not_dirtied(struct vdo_waiter *waiter, void *context)
+{
+ struct tree_page *page = container_of(waiter, struct tree_page, waiter);
+ struct write_if_not_dirtied_context *write_context = context;
+
+ if (page->generation == write_context->generation) {
+ acquire_vio(waiter, write_context->zone);
+ return;
+ }
+
+ enqueue_page(page, write_context->zone);
+}
+
+static void return_to_pool(struct block_map_zone *zone, struct pooled_vio *vio)
+{
+ return_vio_to_pool(zone->vio_pool, vio);
+ check_for_drain_complete(zone);
+}
+
+/* This callback is registered in write_initialized_page(). */
+static void finish_page_write(struct vdo_completion *completion)
+{
+ bool dirty;
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = container_of(vio, struct pooled_vio, vio);
+ struct tree_page *page = completion->parent;
+ struct block_map_zone *zone = pooled->context;
+
+ vdo_release_recovery_journal_block_reference(zone->block_map->journal,
+ page->writing_recovery_lock,
+ VDO_ZONE_TYPE_LOGICAL,
+ zone->zone_number);
+
+ dirty = (page->writing_generation != page->generation);
+ release_generation(zone, page->writing_generation);
+ page->writing = false;
+
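+	/*
+	 * If this page was the flusher for its generation, pages which were
+	 * waiting on that flush and have not been re-dirtied since can now be
+	 * written without another flush (see write_page_if_not_dirtied());
+	 * any page dirtied in a newer generation must wait for the next flush.
+	 */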
+ if (zone->flusher == page) {
+ struct write_if_not_dirtied_context context = {
+ .zone = zone,
+ .generation = page->writing_generation,
+ };
+
+ vdo_waitq_notify_all_waiters(&zone->flush_waiters,
+ write_page_if_not_dirtied, &context);
+ if (dirty && attempt_increment(zone)) {
+ write_page(page, pooled);
+ return;
+ }
+
+ zone->flusher = NULL;
+ }
+
+ if (dirty) {
+ enqueue_page(page, zone);
+ } else if ((zone->flusher == NULL) && vdo_waitq_has_waiters(&zone->flush_waiters) &&
+ attempt_increment(zone)) {
+ zone->flusher = container_of(vdo_waitq_dequeue_waiter(&zone->flush_waiters),
+ struct tree_page, waiter);
+ write_page(zone->flusher, pooled);
+ return;
+ }
+
+ return_to_pool(zone, pooled);
+}
+
+static void handle_write_error(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = container_of(vio, struct pooled_vio, vio);
+ struct block_map_zone *zone = pooled->context;
+
+ vio_record_metadata_io_error(vio);
+ enter_zone_read_only_mode(zone, result);
+ return_to_pool(zone, pooled);
+}
+
+static void write_page_endio(struct bio *bio);
+
+static void write_initialized_page(struct vdo_completion *completion)
+{
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = container_of(vio, struct pooled_vio, vio);
+ struct block_map_zone *zone = pooled->context;
+ struct tree_page *tree_page = completion->parent;
+ struct block_map_page *page = (struct block_map_page *) vio->data;
+ blk_opf_t operation = REQ_OP_WRITE | REQ_PRIO;
+
+ /*
+ * Now that we know the page has been written at least once, mark the copy we are writing
+ * as initialized.
+ */
+ page->header.initialized = true;
+
+ if (zone->flusher == tree_page)
+ operation |= REQ_PREFLUSH;
+
+ vdo_submit_metadata_vio(vio, vdo_get_block_map_page_pbn(page),
+ write_page_endio, handle_write_error,
+ operation);
+}
+
+static void write_page_endio(struct bio *bio)
+{
+ struct pooled_vio *vio = bio->bi_private;
+ struct block_map_zone *zone = vio->context;
+ struct block_map_page *page = (struct block_map_page *) vio->vio.data;
+
+ continue_vio_after_io(&vio->vio,
+ (page->header.initialized ?
+ finish_page_write : write_initialized_page),
+ zone->thread_id);
+}
+
+static void write_page(struct tree_page *tree_page, struct pooled_vio *vio)
+{
+ struct vdo_completion *completion = &vio->vio.completion;
+ struct block_map_zone *zone = vio->context;
+ struct block_map_page *page = vdo_as_block_map_page(tree_page);
+
+ if ((zone->flusher != tree_page) &&
+ is_not_older(zone, tree_page->generation, zone->generation)) {
+ /*
+ * This page was re-dirtied after the last flush was issued, hence we need to do
+ * another flush.
+ */
+ enqueue_page(tree_page, zone);
+ return_to_pool(zone, vio);
+ return;
+ }
+
+ completion->parent = tree_page;
+ memcpy(vio->vio.data, tree_page->page_buffer, VDO_BLOCK_SIZE);
+ completion->callback_thread_id = zone->thread_id;
+
+ tree_page->writing = true;
+ tree_page->writing_generation = tree_page->generation;
+ tree_page->writing_recovery_lock = tree_page->recovery_lock;
+
+ /* Clear this now so that we know this page is not on any dirty list. */
+ tree_page->recovery_lock = 0;
+
+ /*
+ * We've already copied the page into the vio which will write it, so if it was not yet
+ * initialized, the first write will indicate that (for torn write protection). It is now
+ * safe to mark it as initialized in memory since if the write fails, the in memory state
+ * will become irrelevant.
+ */
+ if (page->header.initialized) {
+ write_initialized_page(completion);
+ return;
+ }
+
+ page->header.initialized = true;
+ vdo_submit_metadata_vio(&vio->vio, vdo_get_block_map_page_pbn(page),
+ write_page_endio, handle_write_error,
+ REQ_OP_WRITE | REQ_PRIO);
+}
+
+/* Release a lock on a page which was being loaded or allocated. */
+static void release_page_lock(struct data_vio *data_vio, char *what)
+{
+ struct block_map_zone *zone;
+ struct tree_lock *lock_holder;
+ struct tree_lock *lock = &data_vio->tree_lock;
+
+ VDO_ASSERT_LOG_ONLY(lock->locked,
+ "release of unlocked block map page %s for key %llu in tree %u",
+ what, (unsigned long long) lock->key, lock->root_index);
+
+ zone = data_vio->logical.zone->block_map_zone;
+ lock_holder = vdo_int_map_remove(zone->loading_pages, lock->key);
+ VDO_ASSERT_LOG_ONLY((lock_holder == lock),
+ "block map page %s mismatch for key %llu in tree %u",
+ what, (unsigned long long) lock->key, lock->root_index);
+ lock->locked = false;
+}
+
+static void finish_lookup(struct data_vio *data_vio, int result)
+{
+ data_vio->tree_lock.height = 0;
+
+ --data_vio->logical.zone->block_map_zone->active_lookups;
+
+ set_data_vio_logical_callback(data_vio, continue_data_vio_with_block_map_slot);
+ data_vio->vio.completion.error_handler = handle_data_vio_error;
+ continue_data_vio_with_error(data_vio, result);
+}
+
+static void abort_lookup_for_waiter(struct vdo_waiter *waiter, void *context)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+ int result = *((int *) context);
+
+ if (!data_vio->write) {
+ if (result == VDO_NO_SPACE)
+ result = VDO_SUCCESS;
+ } else if (result != VDO_NO_SPACE) {
+ result = VDO_READ_ONLY;
+ }
+
+ finish_lookup(data_vio, result);
+}
+
+static void abort_lookup(struct data_vio *data_vio, int result, char *what)
+{
+ if (result != VDO_NO_SPACE)
+ enter_zone_read_only_mode(data_vio->logical.zone->block_map_zone, result);
+
+ if (data_vio->tree_lock.locked) {
+ release_page_lock(data_vio, what);
+ vdo_waitq_notify_all_waiters(&data_vio->tree_lock.waiters,
+ abort_lookup_for_waiter,
+ &result);
+ }
+
+ finish_lookup(data_vio, result);
+}
+
+static void abort_load(struct data_vio *data_vio, int result)
+{
+ abort_lookup(data_vio, result, "load");
+}
+
+static bool __must_check is_invalid_tree_entry(const struct vdo *vdo,
+ const struct data_location *mapping,
+ height_t height)
+{
+ if (!vdo_is_valid_location(mapping) ||
+ vdo_is_state_compressed(mapping->state) ||
+ (vdo_is_mapped_location(mapping) && (mapping->pbn == VDO_ZERO_BLOCK)))
+ return true;
+
+ /* Roots aren't physical data blocks, so we can't check their PBNs. */
+ if (height == VDO_BLOCK_MAP_TREE_HEIGHT)
+ return false;
+
+ return !vdo_is_physical_data_block(vdo->depot, mapping->pbn);
+}
+
+static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio);
+static void allocate_block_map_page(struct block_map_zone *zone,
+ struct data_vio *data_vio);
+
+static void continue_with_loaded_page(struct data_vio *data_vio,
+ struct block_map_page *page)
+{
+ struct tree_lock *lock = &data_vio->tree_lock;
+ struct block_map_tree_slot slot = lock->tree_slots[lock->height];
+ struct data_location mapping =
+ vdo_unpack_block_map_entry(&page->entries[slot.block_map_slot.slot]);
+
+ if (is_invalid_tree_entry(vdo_from_data_vio(data_vio), &mapping, lock->height)) {
+ vdo_log_error_strerror(VDO_BAD_MAPPING,
+ "Invalid block map tree PBN: %llu with state %u for page index %u at height %u",
+ (unsigned long long) mapping.pbn, mapping.state,
+ lock->tree_slots[lock->height - 1].page_index,
+ lock->height - 1);
+ abort_load(data_vio, VDO_BAD_MAPPING);
+ return;
+ }
+
+ if (!vdo_is_mapped_location(&mapping)) {
+ /* The page we need is unallocated */
+ allocate_block_map_page(data_vio->logical.zone->block_map_zone,
+ data_vio);
+ return;
+ }
+
+ lock->tree_slots[lock->height - 1].block_map_slot.pbn = mapping.pbn;
+ if (lock->height == 1) {
+ finish_lookup(data_vio, VDO_SUCCESS);
+ return;
+ }
+
+ /* We know what page we need to load next */
+ load_block_map_page(data_vio->logical.zone->block_map_zone, data_vio);
+}
+
+static void continue_load_for_waiter(struct vdo_waiter *waiter, void *context)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+
+ data_vio->tree_lock.height--;
+ continue_with_loaded_page(data_vio, context);
+}
+
+static void finish_block_map_page_load(struct vdo_completion *completion)
+{
+ physical_block_number_t pbn;
+ struct tree_page *tree_page;
+ struct block_map_page *page;
+ nonce_t nonce;
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = vio_as_pooled_vio(vio);
+ struct data_vio *data_vio = completion->parent;
+ struct block_map_zone *zone = pooled->context;
+ struct tree_lock *tree_lock = &data_vio->tree_lock;
+
+ tree_lock->height--;
+ pbn = tree_lock->tree_slots[tree_lock->height].block_map_slot.pbn;
+ tree_page = get_tree_page(zone, tree_lock);
+ page = (struct block_map_page *) tree_page->page_buffer;
+ nonce = zone->block_map->nonce;
+
+ if (!vdo_copy_valid_page(vio->data, nonce, pbn, page))
+ vdo_format_block_map_page(page, nonce, pbn, false);
+ return_vio_to_pool(zone->vio_pool, pooled);
+
+ /* Release our claim to the load and wake any waiters */
+ release_page_lock(data_vio, "load");
+ vdo_waitq_notify_all_waiters(&tree_lock->waiters, continue_load_for_waiter, page);
+ continue_with_loaded_page(data_vio, page);
+}
+
+static void handle_io_error(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = container_of(vio, struct pooled_vio, vio);
+ struct data_vio *data_vio = completion->parent;
+ struct block_map_zone *zone = pooled->context;
+
+ vio_record_metadata_io_error(vio);
+ return_vio_to_pool(zone->vio_pool, pooled);
+ abort_load(data_vio, result);
+}
+
+static void load_page_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct data_vio *data_vio = vio->completion.parent;
+
+ continue_vio_after_io(vio, finish_block_map_page_load,
+ data_vio->logical.zone->thread_id);
+}
+
+static void load_page(struct vdo_waiter *waiter, void *context)
+{
+ struct pooled_vio *pooled = context;
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+ struct tree_lock *lock = &data_vio->tree_lock;
+ physical_block_number_t pbn = lock->tree_slots[lock->height - 1].block_map_slot.pbn;
+
+ pooled->vio.completion.parent = data_vio;
+ vdo_submit_metadata_vio(&pooled->vio, pbn, load_page_endio,
+ handle_io_error, REQ_OP_READ | REQ_PRIO);
+}
+
+/*
+ * If the page is already locked, queue up to wait for the lock to be released. If the lock is
+ * acquired, @data_vio->tree_lock.locked will be true.
+ */
+static int attempt_page_lock(struct block_map_zone *zone, struct data_vio *data_vio)
+{
+ int result;
+ struct tree_lock *lock_holder;
+ struct tree_lock *lock = &data_vio->tree_lock;
+ height_t height = lock->height;
+ struct block_map_tree_slot tree_slot = lock->tree_slots[height];
+ union page_key key;
+
+ key.descriptor = (struct page_descriptor) {
+ .root_index = lock->root_index,
+ .height = height,
+ .page_index = tree_slot.page_index,
+ .slot = tree_slot.block_map_slot.slot,
+ };
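+	/*
+	 * The descriptor packs the root, height, page index, and slot into a
+	 * single integer key so the lock can be indexed in the zone's
+	 * loading_pages int map.
+	 */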
+ lock->key = key.key;
+
+ result = vdo_int_map_put(zone->loading_pages, lock->key,
+ lock, false, (void **) &lock_holder);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (lock_holder == NULL) {
+ /* We got the lock */
+ data_vio->tree_lock.locked = true;
+ return VDO_SUCCESS;
+ }
+
+ /* Someone else is loading or allocating the page we need */
+ vdo_waitq_enqueue_waiter(&lock_holder->waiters, &data_vio->waiter);
+ return VDO_SUCCESS;
+}
+
+/* Load a block map tree page from disk, for the next level in the data vio tree lock. */
+static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio)
+{
+ int result;
+
+ result = attempt_page_lock(zone, data_vio);
+ if (result != VDO_SUCCESS) {
+ abort_load(data_vio, result);
+ return;
+ }
+
+ if (data_vio->tree_lock.locked) {
+ data_vio->waiter.callback = load_page;
+ acquire_vio_from_pool(zone->vio_pool, &data_vio->waiter);
+ }
+}
+
+static void allocation_failure(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ if (vdo_requeue_completion_if_needed(completion,
+ data_vio->logical.zone->thread_id))
+ return;
+
+ abort_lookup(data_vio, completion->result, "allocation");
+}
+
+static void continue_allocation_for_waiter(struct vdo_waiter *waiter, void *context)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+ struct tree_lock *tree_lock = &data_vio->tree_lock;
+ physical_block_number_t pbn = *((physical_block_number_t *) context);
+
+ tree_lock->height--;
+ data_vio->tree_lock.tree_slots[tree_lock->height].block_map_slot.pbn = pbn;
+
+ if (tree_lock->height == 0) {
+ finish_lookup(data_vio, VDO_SUCCESS);
+ return;
+ }
+
+ allocate_block_map_page(data_vio->logical.zone->block_map_zone, data_vio);
+}
+
+/** expire_oldest_list() - Expire the oldest list. */
+static void expire_oldest_list(struct dirty_lists *dirty_lists)
+{
+ block_count_t i = dirty_lists->offset++;
+
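+	/*
+	 * The eras array is a ring of maximum_age period lists; offset tracks
+	 * the slot holding the oldest period, so the slot just claimed holds
+	 * the pages now due to expire.
+	 */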
+ dirty_lists->oldest_period++;
+ if (!list_empty(&dirty_lists->eras[i][VDO_TREE_PAGE])) {
+ list_splice_tail_init(&dirty_lists->eras[i][VDO_TREE_PAGE],
+ &dirty_lists->expired[VDO_TREE_PAGE]);
+ }
+
+ if (!list_empty(&dirty_lists->eras[i][VDO_CACHE_PAGE])) {
+ list_splice_tail_init(&dirty_lists->eras[i][VDO_CACHE_PAGE],
+ &dirty_lists->expired[VDO_CACHE_PAGE]);
+ }
+
+ if (dirty_lists->offset == dirty_lists->maximum_age)
+ dirty_lists->offset = 0;
+}
+
+/** update_period() - Update the dirty_lists period if necessary. */
+static void update_period(struct dirty_lists *dirty, sequence_number_t period)
+{
+ while (dirty->next_period <= period) {
+ if ((dirty->next_period - dirty->oldest_period) == dirty->maximum_age)
+ expire_oldest_list(dirty);
+ dirty->next_period++;
+ }
+}
+
+/** write_expired_elements() - Write out the expired list. */
+static void write_expired_elements(struct block_map_zone *zone)
+{
+ struct tree_page *page, *ttmp;
+ struct page_info *info, *ptmp;
+ struct list_head *expired;
+ u8 generation = zone->generation;
+
+ expired = &zone->dirty_lists->expired[VDO_TREE_PAGE];
+ list_for_each_entry_safe(page, ttmp, expired, entry) {
+ int result;
+
+ list_del_init(&page->entry);
+
+ result = VDO_ASSERT(!vdo_waiter_is_waiting(&page->waiter),
+ "Newly expired page not already waiting to write");
+ if (result != VDO_SUCCESS) {
+ enter_zone_read_only_mode(zone, result);
+ continue;
+ }
+
+ set_generation(zone, page, generation);
+ if (!page->writing)
+ enqueue_page(page, zone);
+ }
+
+ expired = &zone->dirty_lists->expired[VDO_CACHE_PAGE];
+ list_for_each_entry_safe(info, ptmp, expired, state_entry) {
+ list_del_init(&info->state_entry);
+ schedule_page_save(info);
+ }
+
+ save_pages(&zone->page_cache);
+}
+
+/**
+ * add_to_dirty_lists() - Add an element to the dirty lists.
+ * @zone: The zone in which we are operating.
+ * @entry: The list entry of the element to add.
+ * @type: The type of page.
+ * @old_period: The period in which the element was previously dirtied, or 0 if it was not dirty.
+ * @new_period: The period in which the element has now been dirtied, or 0 if it does not hold a
+ * lock.
+ */
+static void add_to_dirty_lists(struct block_map_zone *zone,
+ struct list_head *entry,
+ enum block_map_page_type type,
+ sequence_number_t old_period,
+ sequence_number_t new_period)
+{
+ struct dirty_lists *dirty_lists = zone->dirty_lists;
+
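+	/*
+	 * An element which is already dirty in the same or an older period can
+	 * stay on its current list; it is already scheduled to be written out
+	 * at least that soon.
+	 */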
+ if ((old_period == new_period) || ((old_period != 0) && (old_period < new_period)))
+ return;
+
+ if (new_period < dirty_lists->oldest_period) {
+ list_move_tail(entry, &dirty_lists->expired[type]);
+ } else {
+ update_period(dirty_lists, new_period);
+ list_move_tail(entry,
+ &dirty_lists->eras[new_period % dirty_lists->maximum_age][type]);
+ }
+
+ write_expired_elements(zone);
+}
+
+/*
+ * Record the allocation in the tree and wake any waiters now that the write lock has been
+ * released.
+ */
+static void finish_block_map_allocation(struct vdo_completion *completion)
+{
+ physical_block_number_t pbn;
+ struct tree_page *tree_page;
+ struct block_map_page *page;
+ sequence_number_t old_lock;
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
+ struct tree_lock *tree_lock = &data_vio->tree_lock;
+ height_t height = tree_lock->height;
+
+ assert_data_vio_in_logical_zone(data_vio);
+
+ tree_page = get_tree_page(zone, tree_lock);
+ pbn = tree_lock->tree_slots[height - 1].block_map_slot.pbn;
+
+ /* Record the allocation. */
+ page = (struct block_map_page *) tree_page->page_buffer;
+ old_lock = tree_page->recovery_lock;
+ vdo_update_block_map_page(page, data_vio, pbn,
+ VDO_MAPPING_STATE_UNCOMPRESSED,
+ &tree_page->recovery_lock);
+
+ if (vdo_waiter_is_waiting(&tree_page->waiter)) {
+ /* This page is waiting to be written out. */
+ if (zone->flusher != tree_page) {
+ /*
+ * The outstanding flush won't cover the update we just made,
+ * so mark the page as needing another flush.
+ */
+ set_generation(zone, tree_page, zone->generation);
+ }
+ } else {
+ /* Put the page on a dirty list */
+ if (old_lock == 0)
+ INIT_LIST_HEAD(&tree_page->entry);
+ add_to_dirty_lists(zone, &tree_page->entry, VDO_TREE_PAGE,
+ old_lock, tree_page->recovery_lock);
+ }
+
+ tree_lock->height--;
+ if (height > 1) {
+ /* Format the interior node we just allocated (in memory). */
+ tree_page = get_tree_page(zone, tree_lock);
+ vdo_format_block_map_page(tree_page->page_buffer,
+ zone->block_map->nonce,
+ pbn, false);
+ }
+
+ /* Release our claim to the allocation and wake any waiters */
+ release_page_lock(data_vio, "allocation");
+ vdo_waitq_notify_all_waiters(&tree_lock->waiters,
+ continue_allocation_for_waiter, &pbn);
+ if (tree_lock->height == 0) {
+ finish_lookup(data_vio, VDO_SUCCESS);
+ return;
+ }
+
+ allocate_block_map_page(zone, data_vio);
+}
+
+static void release_block_map_write_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_allocated_zone(data_vio);
+
+ release_data_vio_allocation_lock(data_vio, true);
+ launch_data_vio_logical_callback(data_vio, finish_block_map_allocation);
+}
+
+/*
+ * Newly allocated block map pages are set to have MAXIMUM_REFERENCES after they are journaled,
+ * to prevent deduplication against the block after we release the write lock on it, but before we
+ * write out the page.
+ */
+static void set_block_map_page_reference_count(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_allocated_zone(data_vio);
+
+ completion->callback = release_block_map_write_lock;
+ vdo_modify_reference_count(completion, &data_vio->increment_updater);
+}
+
+static void journal_block_map_allocation(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_journal_zone(data_vio);
+
+ set_data_vio_allocated_zone_callback(data_vio,
+ set_block_map_page_reference_count);
+ vdo_add_recovery_journal_entry(completion->vdo->recovery_journal, data_vio);
+}
+
+static void allocate_block(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct tree_lock *lock = &data_vio->tree_lock;
+ physical_block_number_t pbn;
+
+ assert_data_vio_in_allocated_zone(data_vio);
+
+ if (!vdo_allocate_block_in_zone(data_vio))
+ return;
+
+ pbn = data_vio->allocation.pbn;
+ lock->tree_slots[lock->height - 1].block_map_slot.pbn = pbn;
+ data_vio->increment_updater = (struct reference_updater) {
+ .operation = VDO_JOURNAL_BLOCK_MAP_REMAPPING,
+ .increment = true,
+ .zpbn = {
+ .pbn = pbn,
+ .state = VDO_MAPPING_STATE_UNCOMPRESSED,
+ },
+ .lock = data_vio->allocation.lock,
+ };
+
+ launch_data_vio_journal_callback(data_vio, journal_block_map_allocation);
+}
+
+static void allocate_block_map_page(struct block_map_zone *zone,
+ struct data_vio *data_vio)
+{
+ int result;
+
+ if (!data_vio->write || data_vio->is_discard) {
+ /* This is a pure read or a discard, so there's nothing left to do here. */
+ finish_lookup(data_vio, VDO_SUCCESS);
+ return;
+ }
+
+ result = attempt_page_lock(zone, data_vio);
+ if (result != VDO_SUCCESS) {
+ abort_lookup(data_vio, result, "allocation");
+ return;
+ }
+
+ if (!data_vio->tree_lock.locked)
+ return;
+
+ data_vio_allocate_data_block(data_vio, VIO_BLOCK_MAP_WRITE_LOCK,
+ allocate_block, allocation_failure);
+}
+
+/**
+ * vdo_find_block_map_slot() - Find the block map slot in which the block map entry for a data_vio
+ * resides and cache that result in the data_vio.
+ *
+ * All ancestors in the tree will be allocated or loaded, as needed.
+ */
+void vdo_find_block_map_slot(struct data_vio *data_vio)
+{
+ page_number_t page_index;
+ struct block_map_tree_slot tree_slot;
+ struct data_location mapping;
+ struct block_map_page *page = NULL;
+ struct tree_lock *lock = &data_vio->tree_lock;
+ struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
+
+ zone->active_lookups++;
+ if (vdo_is_state_draining(&zone->state)) {
+ finish_lookup(data_vio, VDO_SHUTTING_DOWN);
+ return;
+ }
+
+ lock->tree_slots[0].block_map_slot.slot =
+ data_vio->logical.lbn % VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+ page_index = (lock->tree_slots[0].page_index / zone->block_map->root_count);
+ tree_slot = (struct block_map_tree_slot) {
+ .page_index = page_index / VDO_BLOCK_MAP_ENTRIES_PER_PAGE,
+ .block_map_slot = {
+ .pbn = 0,
+ .slot = page_index % VDO_BLOCK_MAP_ENTRIES_PER_PAGE,
+ },
+ };
+
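+	/*
+	 * Walk up from the leaf level: at each level the slot is the page
+	 * index's remainder modulo the entries per page, and the quotient
+	 * becomes the page index for the level above.
+	 */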
+ for (lock->height = 1; lock->height <= VDO_BLOCK_MAP_TREE_HEIGHT; lock->height++) {
+ physical_block_number_t pbn;
+
+ lock->tree_slots[lock->height] = tree_slot;
+ page = (struct block_map_page *) (get_tree_page(zone, lock)->page_buffer);
+ pbn = vdo_get_block_map_page_pbn(page);
+ if (pbn != VDO_ZERO_BLOCK) {
+ lock->tree_slots[lock->height].block_map_slot.pbn = pbn;
+ break;
+ }
+
+ /* Calculate the index and slot for the next level. */
+ tree_slot.block_map_slot.slot =
+ tree_slot.page_index % VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+ tree_slot.page_index = tree_slot.page_index / VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+ }
+
+ /* The page at this height has been allocated and loaded. */
+ mapping = vdo_unpack_block_map_entry(&page->entries[tree_slot.block_map_slot.slot]);
+ if (is_invalid_tree_entry(vdo_from_data_vio(data_vio), &mapping, lock->height)) {
+ vdo_log_error_strerror(VDO_BAD_MAPPING,
+ "Invalid block map tree PBN: %llu with state %u for page index %u at height %u",
+ (unsigned long long) mapping.pbn, mapping.state,
+ lock->tree_slots[lock->height - 1].page_index,
+ lock->height - 1);
+ abort_load(data_vio, VDO_BAD_MAPPING);
+ return;
+ }
+
+ if (!vdo_is_mapped_location(&mapping)) {
+ /* The page we want one level down has not been allocated, so allocate it. */
+ allocate_block_map_page(zone, data_vio);
+ return;
+ }
+
+ lock->tree_slots[lock->height - 1].block_map_slot.pbn = mapping.pbn;
+ if (lock->height == 1) {
+ /* This is the ultimate block map page, so we're done */
+ finish_lookup(data_vio, VDO_SUCCESS);
+ return;
+ }
+
+ /* We know what page we need to load. */
+ load_block_map_page(zone, data_vio);
+}
+
+/*
+ * Find the PBN of a leaf block map page. This method may only be used after all allocated tree
+ * pages have been loaded; otherwise, it may give the wrong answer (0).
+ */
+physical_block_number_t vdo_find_block_map_page_pbn(struct block_map *map,
+ page_number_t page_number)
+{
+ struct data_location mapping;
+ struct tree_page *tree_page;
+ struct block_map_page *page;
+ root_count_t root_index = page_number % map->root_count;
+ page_number_t page_index = page_number / map->root_count;
+ slot_number_t slot = page_index % VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+
+ page_index /= VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+
+ tree_page = get_tree_page_by_index(map->forest, root_index, 1, page_index);
+ page = (struct block_map_page *) tree_page->page_buffer;
+ if (!page->header.initialized)
+ return VDO_ZERO_BLOCK;
+
+ mapping = vdo_unpack_block_map_entry(&page->entries[slot]);
+ if (!vdo_is_valid_location(&mapping) || vdo_is_state_compressed(mapping.state))
+ return VDO_ZERO_BLOCK;
+ return mapping.pbn;
+}
+
+/*
+ * Write a tree page or indicate that it has been re-dirtied if it is already being written. This
+ * method is used when correcting errors in the tree during read-only rebuild.
+ */
+void vdo_write_tree_page(struct tree_page *page, struct block_map_zone *zone)
+{
+ bool waiting = vdo_waiter_is_waiting(&page->waiter);
+
+ if (waiting && (zone->flusher == page))
+ return;
+
+ set_generation(zone, page, zone->generation);
+ if (waiting || page->writing)
+ return;
+
+ enqueue_page(page, zone);
+}
+
+static int make_segment(struct forest *old_forest, block_count_t new_pages,
+ struct boundary *new_boundary, struct forest *forest)
+{
+ size_t index = (old_forest == NULL) ? 0 : old_forest->segments;
+ struct tree_page *page_ptr;
+ page_count_t segment_sizes[VDO_BLOCK_MAP_TREE_HEIGHT];
+ height_t height;
+ root_count_t root;
+ int result;
+
+ forest->segments = index + 1;
+
+ result = vdo_allocate(forest->segments, struct boundary,
+ "forest boundary array", &forest->boundaries);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(forest->segments, struct tree_page *,
+ "forest page pointers", &forest->pages);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(new_pages, struct tree_page,
+ "new forest pages", &forest->pages[index]);
+ if (result != VDO_SUCCESS)
+ return result;
+
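+	/* When expanding an existing forest, carry over the old boundaries and page pointers. */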
+ if (index > 0) {
+ memcpy(forest->boundaries, old_forest->boundaries,
+ index * sizeof(struct boundary));
+ memcpy(forest->pages, old_forest->pages,
+ index * sizeof(struct tree_page *));
+ }
+
+ memcpy(&(forest->boundaries[index]), new_boundary, sizeof(struct boundary));
+
+ for (height = 0; height < VDO_BLOCK_MAP_TREE_HEIGHT; height++) {
+ segment_sizes[height] = new_boundary->levels[height];
+ if (index > 0)
+ segment_sizes[height] -= old_forest->boundaries[index - 1].levels[height];
+ }
+
+ page_ptr = forest->pages[index];
+ for (root = 0; root < forest->map->root_count; root++) {
+ struct block_map_tree_segment *segment;
+ struct block_map_tree *tree = &(forest->trees[root]);
+ height_t height;
+
+ int result = vdo_allocate(forest->segments,
+ struct block_map_tree_segment,
+ "tree root segments", &tree->segments);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (index > 0) {
+ memcpy(tree->segments, old_forest->trees[root].segments,
+ index * sizeof(struct block_map_tree_segment));
+ }
+
+ segment = &(tree->segments[index]);
+ for (height = 0; height < VDO_BLOCK_MAP_TREE_HEIGHT; height++) {
+ if (segment_sizes[height] == 0)
+ continue;
+
+ segment->levels[height] = page_ptr;
+ if (height == (VDO_BLOCK_MAP_TREE_HEIGHT - 1)) {
+ /* Record the root. */
+ struct block_map_page *page =
+ vdo_format_block_map_page(page_ptr->page_buffer,
+ forest->map->nonce,
+ VDO_INVALID_PBN, true);
+ page->entries[0] =
+ vdo_pack_block_map_entry(forest->map->root_origin + root,
+ VDO_MAPPING_STATE_UNCOMPRESSED);
+ }
+ page_ptr += segment_sizes[height];
+ }
+ }
+
+ return VDO_SUCCESS;
+}
+
+static void deforest(struct forest *forest, size_t first_page_segment)
+{
+ root_count_t root;
+
+ if (forest->pages != NULL) {
+ size_t segment;
+
+ for (segment = first_page_segment; segment < forest->segments; segment++)
+ vdo_free(forest->pages[segment]);
+ vdo_free(forest->pages);
+ }
+
+ for (root = 0; root < forest->map->root_count; root++)
+ vdo_free(forest->trees[root].segments);
+
+ vdo_free(forest->boundaries);
+ vdo_free(forest);
+}
+
+/**
+ * make_forest() - Make a collection of trees for a block_map, expanding the existing forest if
+ * there is one.
+ * @map: The block map.
+ * @entries: The number of entries the block map will hold.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int make_forest(struct block_map *map, block_count_t entries)
+{
+ struct forest *forest, *old_forest = map->forest;
+ struct boundary new_boundary, *old_boundary = NULL;
+ block_count_t new_pages;
+ int result;
+
+ if (old_forest != NULL)
+ old_boundary = &(old_forest->boundaries[old_forest->segments - 1]);
+
+ new_pages = vdo_compute_new_forest_pages(map->root_count, old_boundary,
+ entries, &new_boundary);
+ if (new_pages == 0) {
+ map->next_entry_count = entries;
+ return VDO_SUCCESS;
+ }
+
+ result = vdo_allocate_extended(struct forest, map->root_count,
+ struct block_map_tree, __func__,
+ &forest);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ forest->map = map;
+ result = make_segment(old_forest, new_pages, &new_boundary, forest);
+ if (result != VDO_SUCCESS) {
+ deforest(forest, forest->segments - 1);
+ return result;
+ }
+
+ map->next_forest = forest;
+ map->next_entry_count = entries;
+ return VDO_SUCCESS;
+}
+
+/**
+ * replace_forest() - Replace a block_map's forest with the already-prepared larger forest.
+ */
+static void replace_forest(struct block_map *map)
+{
+ if (map->next_forest != NULL) {
+ if (map->forest != NULL)
+ deforest(map->forest, map->forest->segments);
+ map->forest = vdo_forget(map->next_forest);
+ }
+
+ map->entry_count = map->next_entry_count;
+ map->next_entry_count = 0;
+}
+
+/**
+ * finish_cursor() - Finish the traversal of a single tree. If it was the last cursor, finish
+ *                   the traversal of the entire forest.
+ */
+static void finish_cursor(struct cursor *cursor)
+{
+ struct cursors *cursors = cursor->parent;
+ struct vdo_completion *completion = cursors->completion;
+
+ return_vio_to_pool(cursors->pool, vdo_forget(cursor->vio));
+ if (--cursors->active_roots > 0)
+ return;
+
+ vdo_free(cursors);
+
+ vdo_finish_completion(completion);
+}
+
+static void traverse(struct cursor *cursor);
+
+/**
+ * continue_traversal() - Continue traversing a block map tree.
+ * @completion: The VIO doing a read or write.
+ */
+static void continue_traversal(struct vdo_completion *completion)
+{
+ vio_record_metadata_io_error(as_vio(completion));
+ traverse(completion->parent);
+}
+
+/**
+ * finish_traversal_load() - Continue traversing a block map tree now that a page has been loaded.
+ * @completion: The VIO doing the read.
+ */
+static void finish_traversal_load(struct vdo_completion *completion)
+{
+ struct cursor *cursor = completion->parent;
+ height_t height = cursor->height;
+ struct cursor_level *level = &cursor->levels[height];
+ struct tree_page *tree_page =
+ &(cursor->tree->segments[0].levels[height][level->page_index]);
+ struct block_map_page *page = (struct block_map_page *) tree_page->page_buffer;
+
+ vdo_copy_valid_page(cursor->vio->vio.data,
+ cursor->parent->zone->block_map->nonce,
+ pbn_from_vio_bio(cursor->vio->vio.bio), page);
+ traverse(cursor);
+}
+
+static void traversal_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct cursor *cursor = vio->completion.parent;
+
+ continue_vio_after_io(vio, finish_traversal_load,
+ cursor->parent->zone->thread_id);
+}
+
+/**
+ * traverse() - Traverse a single block map tree.
+ *
+ * This is the recursive heart of the traversal process.
+ */
+static void traverse(struct cursor *cursor)
+{
+ for (; cursor->height < VDO_BLOCK_MAP_TREE_HEIGHT; cursor->height++) {
+ height_t height = cursor->height;
+ struct cursor_level *level = &cursor->levels[height];
+ struct tree_page *tree_page =
+ &(cursor->tree->segments[0].levels[height][level->page_index]);
+ struct block_map_page *page = (struct block_map_page *) tree_page->page_buffer;
+
+ if (!page->header.initialized)
+ continue;
+
+ for (; level->slot < VDO_BLOCK_MAP_ENTRIES_PER_PAGE; level->slot++) {
+ struct cursor_level *next_level;
+ page_number_t entry_index =
+ (VDO_BLOCK_MAP_ENTRIES_PER_PAGE * level->page_index) + level->slot;
+ struct data_location location =
+ vdo_unpack_block_map_entry(&page->entries[level->slot]);
+
+ if (!vdo_is_valid_location(&location)) {
+ /* This entry is invalid, so remove it from the page. */
+ page->entries[level->slot] = UNMAPPED_BLOCK_MAP_ENTRY;
+ vdo_write_tree_page(tree_page, cursor->parent->zone);
+ continue;
+ }
+
+ if (!vdo_is_mapped_location(&location))
+ continue;
+
+ /* Erase mapped entries past the end of the logical space. */
+ if (entry_index >= cursor->boundary.levels[height]) {
+ page->entries[level->slot] = UNMAPPED_BLOCK_MAP_ENTRY;
+ vdo_write_tree_page(tree_page, cursor->parent->zone);
+ continue;
+ }
+
+ if (cursor->height < VDO_BLOCK_MAP_TREE_HEIGHT - 1) {
+ int result = cursor->parent->entry_callback(location.pbn,
+ cursor->parent->completion);
+ if (result != VDO_SUCCESS) {
+ page->entries[level->slot] = UNMAPPED_BLOCK_MAP_ENTRY;
+ vdo_write_tree_page(tree_page, cursor->parent->zone);
+ continue;
+ }
+ }
+
+ if (cursor->height == 0)
+ continue;
+
+ cursor->height--;
+ next_level = &cursor->levels[cursor->height];
+ next_level->page_index = entry_index;
+ next_level->slot = 0;
+ level->slot++;
+ vdo_submit_metadata_vio(&cursor->vio->vio, location.pbn,
+ traversal_endio, continue_traversal,
+ REQ_OP_READ | REQ_PRIO);
+ return;
+ }
+ }
+
+ finish_cursor(cursor);
+}
+
+/**
+ * launch_cursor() - Start traversing a single block map tree now that the cursor has a VIO with
+ * which to load pages.
+ * @context: The pooled_vio just acquired.
+ *
+ * Implements waiter_callback_fn.
+ */
+static void launch_cursor(struct vdo_waiter *waiter, void *context)
+{
+ struct cursor *cursor = container_of(waiter, struct cursor, waiter);
+ struct pooled_vio *pooled = context;
+
+ cursor->vio = pooled;
+ pooled->vio.completion.parent = cursor;
+ pooled->vio.completion.callback_thread_id = cursor->parent->zone->thread_id;
+ traverse(cursor);
+}
+
+/**
+ * compute_boundary() - Compute the number of pages used at each level of the given root's tree.
+ *
+ * Return: The list of page counts as a boundary structure.
+ */
+static struct boundary compute_boundary(struct block_map *map, root_count_t root_index)
+{
+ struct boundary boundary;
+ height_t height;
+ page_count_t leaf_pages = vdo_compute_block_map_page_count(map->entry_count);
+ /*
+ * Compute the leaf pages for this root. If the number of leaf pages does not distribute
+ * evenly, we must determine if this root gets an extra page. Extra pages are assigned to
+ * roots starting from tree 0.
+ */
+ page_count_t last_tree_root = (leaf_pages - 1) % map->root_count;
+ page_count_t level_pages = leaf_pages / map->root_count;
+
+ if (root_index <= last_tree_root)
+ level_pages++;
+
+ for (height = 0; height < VDO_BLOCK_MAP_TREE_HEIGHT - 1; height++) {
+ boundary.levels[height] = level_pages;
+ level_pages = DIV_ROUND_UP(level_pages, VDO_BLOCK_MAP_ENTRIES_PER_PAGE);
+ }
+
+ /* The root node always exists, even if the root is otherwise unused. */
+ boundary.levels[VDO_BLOCK_MAP_TREE_HEIGHT - 1] = 1;
+
+ return boundary;
+}
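+
+/*
+ * Worked example (illustrative, not part of the original patch): with 125 leaf pages and 60
+ * roots, level_pages = 125 / 60 = 2 and last_tree_root = 124 % 60 = 4, so roots 0 through 4 get
+ * 3 leaf pages each and roots 5 through 59 get 2 (5 * 3 + 55 * 2 = 125).
+ */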
+
+/**
+ * vdo_traverse_forest() - Walk the entire forest of a block map.
+ * @callback: A function to call with the pbn of each allocated node in the forest.
+ * @completion: The completion to notify on each traversed PBN, and when traversal completes.
+ */
+void vdo_traverse_forest(struct block_map *map, vdo_entry_callback_fn callback,
+ struct vdo_completion *completion)
+{
+ root_count_t root;
+ struct cursors *cursors;
+ int result;
+
+ result = vdo_allocate_extended(struct cursors, map->root_count,
+ struct cursor, __func__, &cursors);
+ if (result != VDO_SUCCESS) {
+ vdo_fail_completion(completion, result);
+ return;
+ }
+
+ cursors->zone = &map->zones[0];
+ cursors->pool = cursors->zone->vio_pool;
+ cursors->entry_callback = callback;
+ cursors->completion = completion;
+ cursors->active_roots = map->root_count;
+ for (root = 0; root < map->root_count; root++) {
+ struct cursor *cursor = &cursors->cursors[root];
+
+ *cursor = (struct cursor) {
+ .tree = &map->forest->trees[root],
+ .height = VDO_BLOCK_MAP_TREE_HEIGHT - 1,
+ .parent = cursors,
+ .boundary = compute_boundary(map, root),
+ };
+
+ cursor->waiter.callback = launch_cursor;
+ acquire_vio_from_pool(cursors->pool, &cursor->waiter);
+ }
+}
+
+/**
+ * initialize_block_map_zone() - Initialize the per-zone portions of the block map.
+ * @maximum_age: The number of journal blocks before a dirtied page is considered old and must be
+ * written out.
+ */
+static int __must_check initialize_block_map_zone(struct block_map *map,
+ zone_count_t zone_number,
+ page_count_t cache_size,
+ block_count_t maximum_age)
+{
+ int result;
+ block_count_t i;
+ struct vdo *vdo = map->vdo;
+ struct block_map_zone *zone = &map->zones[zone_number];
+
+ BUILD_BUG_ON(sizeof(struct page_descriptor) != sizeof(u64));
+
+ zone->zone_number = zone_number;
+ zone->thread_id = vdo->thread_config.logical_threads[zone_number];
+ zone->block_map = map;
+
+ result = vdo_allocate_extended(struct dirty_lists, maximum_age,
+ dirty_era_t, __func__,
+ &zone->dirty_lists);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ zone->dirty_lists->maximum_age = maximum_age;
+ INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_TREE_PAGE]);
+ INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_CACHE_PAGE]);
+
+ for (i = 0; i < maximum_age; i++) {
+ INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_TREE_PAGE]);
+ INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_CACHE_PAGE]);
+ }
+
+ result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->loading_pages);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = make_vio_pool(vdo, BLOCK_MAP_VIO_POOL_SIZE,
+ zone->thread_id, VIO_TYPE_BLOCK_MAP_INTERIOR,
+ VIO_PRIORITY_METADATA, zone, &zone->vio_pool);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+
+ zone->page_cache.zone = zone;
+ zone->page_cache.vdo = vdo;
+ zone->page_cache.page_count = cache_size / map->zone_count;
+ zone->page_cache.stats.free_pages = zone->page_cache.page_count;
+
+ result = allocate_cache_components(&zone->page_cache);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* initialize empty circular queues */
+ INIT_LIST_HEAD(&zone->page_cache.lru_list);
+ INIT_LIST_HEAD(&zone->page_cache.outgoing_list);
+
+ return VDO_SUCCESS;
+}
+
+/* Implements vdo_zone_thread_getter_fn */
+static thread_id_t get_block_map_zone_thread_id(void *context, zone_count_t zone_number)
+{
+ struct block_map *map = context;
+
+ return map->zones[zone_number].thread_id;
+}
+
+/* Implements vdo_action_preamble_fn */
+static void prepare_for_era_advance(void *context, struct vdo_completion *parent)
+{
+ struct block_map *map = context;
+
+ map->current_era_point = map->pending_era_point;
+ vdo_finish_completion(parent);
+}
+
+/* Implements vdo_zone_action_fn */
+static void advance_block_map_zone_era(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct block_map *map = context;
+ struct block_map_zone *zone = &map->zones[zone_number];
+
+ update_period(zone->dirty_lists, map->current_era_point);
+ write_expired_elements(zone);
+ vdo_finish_completion(parent);
+}
+
+/*
+ * Schedule an era advance if necessary. This method should not be called directly. Rather, call
+ * vdo_schedule_default_action() on the block map's action manager.
+ *
+ * Implements vdo_action_scheduler_fn.
+ */
+static bool schedule_era_advance(void *context)
+{
+ struct block_map *map = context;
+
+ if (map->current_era_point == map->pending_era_point)
+ return false;
+
+ return vdo_schedule_action(map->action_manager, prepare_for_era_advance,
+ advance_block_map_zone_era, NULL, NULL);
+}
+
+static void uninitialize_block_map_zone(struct block_map_zone *zone)
+{
+ struct vdo_page_cache *cache = &zone->page_cache;
+
+ vdo_free(vdo_forget(zone->dirty_lists));
+ free_vio_pool(vdo_forget(zone->vio_pool));
+ vdo_int_map_free(vdo_forget(zone->loading_pages));
+ if (cache->infos != NULL) {
+ struct page_info *info;
+
+ for (info = cache->infos; info < cache->infos + cache->page_count; info++)
+ free_vio(vdo_forget(info->vio));
+ }
+
+ vdo_int_map_free(vdo_forget(cache->page_map));
+ vdo_free(vdo_forget(cache->infos));
+ vdo_free(vdo_forget(cache->pages));
+}
+
+void vdo_free_block_map(struct block_map *map)
+{
+ zone_count_t zone;
+
+ if (map == NULL)
+ return;
+
+ for (zone = 0; zone < map->zone_count; zone++)
+ uninitialize_block_map_zone(&map->zones[zone]);
+
+ vdo_abandon_block_map_growth(map);
+ if (map->forest != NULL)
+ deforest(vdo_forget(map->forest), 0);
+ vdo_free(vdo_forget(map->action_manager));
+ vdo_free(map);
+}
+
+/* @journal may be NULL. */
+int vdo_decode_block_map(struct block_map_state_2_0 state, block_count_t logical_blocks,
+ struct vdo *vdo, struct recovery_journal *journal,
+ nonce_t nonce, page_count_t cache_size, block_count_t maximum_age,
+ struct block_map **map_ptr)
+{
+ struct block_map *map;
+ int result;
+ zone_count_t zone = 0;
+
+ BUILD_BUG_ON(VDO_BLOCK_MAP_ENTRIES_PER_PAGE !=
+ ((VDO_BLOCK_SIZE - sizeof(struct block_map_page)) /
+ sizeof(struct block_map_entry)));
+ result = VDO_ASSERT(cache_size > 0, "block map cache size is specified");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate_extended(struct block_map,
+ vdo->thread_config.logical_zone_count,
+ struct block_map_zone, __func__, &map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ map->vdo = vdo;
+ map->root_origin = state.root_origin;
+ map->root_count = state.root_count;
+ map->entry_count = logical_blocks;
+ map->journal = journal;
+ map->nonce = nonce;
+
+ result = make_forest(map, map->entry_count);
+ if (result != VDO_SUCCESS) {
+ vdo_free_block_map(map);
+ return result;
+ }
+
+ replace_forest(map);
+
+ map->zone_count = vdo->thread_config.logical_zone_count;
+ for (zone = 0; zone < map->zone_count; zone++) {
+ result = initialize_block_map_zone(map, zone, cache_size, maximum_age);
+ if (result != VDO_SUCCESS) {
+ vdo_free_block_map(map);
+ return result;
+ }
+ }
+
+ result = vdo_make_action_manager(map->zone_count, get_block_map_zone_thread_id,
+ vdo_get_recovery_journal_thread_id(journal),
+ map, schedule_era_advance, vdo,
+ &map->action_manager);
+ if (result != VDO_SUCCESS) {
+ vdo_free_block_map(map);
+ return result;
+ }
+
+ *map_ptr = map;
+ return VDO_SUCCESS;
+}
+
+struct block_map_state_2_0 vdo_record_block_map(const struct block_map *map)
+{
+ return (struct block_map_state_2_0) {
+ .flat_page_origin = VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN,
+ /* This is the flat page count, which has turned out to always be 0. */
+ .flat_page_count = 0,
+ .root_origin = map->root_origin,
+ .root_count = map->root_count,
+ };
+}
+
+/* The block map needs to know the journal's sequence number to initialize the eras. */
+void vdo_initialize_block_map_from_journal(struct block_map *map,
+ struct recovery_journal *journal)
+{
+ zone_count_t z = 0;
+
+ map->current_era_point = vdo_get_recovery_journal_current_sequence_number(journal);
+ map->pending_era_point = map->current_era_point;
+
+ for (z = 0; z < map->zone_count; z++) {
+ struct dirty_lists *dirty_lists = map->zones[z].dirty_lists;
+
+ VDO_ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set");
+ dirty_lists->oldest_period = map->current_era_point;
+ dirty_lists->next_period = map->current_era_point + 1;
+ dirty_lists->offset = map->current_era_point % dirty_lists->maximum_age;
+ }
+}
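+
+/*
+ * Illustrative example (not part of the original patch): if the journal's current sequence
+ * number is 47 and maximum_age is 10, each zone starts with oldest_period = 47, next_period = 48,
+ * and offset = 47 % 10 = 7, so eras[7] holds the dirty pages of the oldest period.
+ */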
+
+/* Compute the logical zone for the LBN of a data vio. */
+zone_count_t vdo_compute_logical_zone(struct data_vio *data_vio)
+{
+ struct block_map *map = vdo_from_data_vio(data_vio)->block_map;
+ struct tree_lock *tree_lock = &data_vio->tree_lock;
+ page_number_t page_number = data_vio->logical.lbn / VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+
+ tree_lock->tree_slots[0].page_index = page_number;
+ tree_lock->root_index = page_number % map->root_count;
+ return (tree_lock->root_index % map->zone_count);
+}
+
+void vdo_advance_block_map_era(struct block_map *map,
+ sequence_number_t recovery_block_number)
+{
+ if (map == NULL)
+ return;
+
+ map->pending_era_point = recovery_block_number;
+ vdo_schedule_default_action(map->action_manager);
+}
+
+/* Implements vdo_admin_initiator_fn */
+static void initiate_drain(struct admin_state *state)
+{
+ struct block_map_zone *zone = container_of(state, struct block_map_zone, state);
+
+ VDO_ASSERT_LOG_ONLY((zone->active_lookups == 0),
+ "%s() called with no active lookups", __func__);
+
+ if (!vdo_is_state_suspending(state)) {
+ while (zone->dirty_lists->oldest_period < zone->dirty_lists->next_period)
+ expire_oldest_list(zone->dirty_lists);
+ write_expired_elements(zone);
+ }
+
+ check_for_drain_complete(zone);
+}
+
+/* Implements vdo_zone_action_fn. */
+static void drain_zone(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct block_map *map = context;
+ struct block_map_zone *zone = &map->zones[zone_number];
+
+ vdo_start_draining(&zone->state,
+ vdo_get_current_manager_operation(map->action_manager),
+ parent, initiate_drain);
+}
+
+void vdo_drain_block_map(struct block_map *map, const struct admin_state_code *operation,
+ struct vdo_completion *parent)
+{
+ vdo_schedule_operation(map->action_manager, operation, NULL, drain_zone, NULL,
+ parent);
+}
+
+/* Implements vdo_zone_action_fn. */
+static void resume_block_map_zone(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct block_map *map = context;
+ struct block_map_zone *zone = &map->zones[zone_number];
+
+ vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state));
+}
+
+void vdo_resume_block_map(struct block_map *map, struct vdo_completion *parent)
+{
+ vdo_schedule_operation(map->action_manager, VDO_ADMIN_STATE_RESUMING,
+ NULL, resume_block_map_zone, NULL, parent);
+}
+
+/* Allocate an expanded collection of trees, for future growth. */
+int vdo_prepare_to_grow_block_map(struct block_map *map,
+ block_count_t new_logical_blocks)
+{
+ if (map->next_entry_count == new_logical_blocks)
+ return VDO_SUCCESS;
+
+ if (map->next_entry_count > 0)
+ vdo_abandon_block_map_growth(map);
+
+ if (new_logical_blocks < map->entry_count) {
+ map->next_entry_count = map->entry_count;
+ return VDO_SUCCESS;
+ }
+
+ return make_forest(map, new_logical_blocks);
+}
+
+/* Implements vdo_action_preamble_fn */
+static void grow_forest(void *context, struct vdo_completion *completion)
+{
+ replace_forest(context);
+ vdo_finish_completion(completion);
+}
+
+/* Requires vdo_prepare_to_grow_block_map() to have been previously called. */
+void vdo_grow_block_map(struct block_map *map, struct vdo_completion *parent)
+{
+ vdo_schedule_operation(map->action_manager,
+ VDO_ADMIN_STATE_SUSPENDED_OPERATION,
+ grow_forest, NULL, NULL, parent);
+}
+
+void vdo_abandon_block_map_growth(struct block_map *map)
+{
+ struct forest *forest = vdo_forget(map->next_forest);
+
+ if (forest != NULL)
+ deforest(forest, forest->segments - 1);
+
+ map->next_entry_count = 0;
+}
+
+/* Release the page completion and then continue the requester. */
+static inline void finish_processing_page(struct vdo_completion *completion, int result)
+{
+ struct vdo_completion *parent = completion->parent;
+
+ vdo_release_page_completion(completion);
+ vdo_continue_completion(parent, result);
+}
+
+static void handle_page_error(struct vdo_completion *completion)
+{
+ finish_processing_page(completion, completion->result);
+}
+
+/* Fetch the mapping page for a block map update, and call the provided handler when fetched. */
+static void fetch_mapping_page(struct data_vio *data_vio, bool modifiable,
+ vdo_action_fn action)
+{
+ struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
+
+ if (vdo_is_state_draining(&zone->state)) {
+ continue_data_vio_with_error(data_vio, VDO_SHUTTING_DOWN);
+ return;
+ }
+
+ vdo_get_page(&data_vio->page_completion, zone,
+ data_vio->tree_lock.tree_slots[0].block_map_slot.pbn,
+ modifiable, &data_vio->vio.completion,
+ action, handle_page_error, false);
+}
+
+/**
+ * clear_mapped_location() - Clear a data_vio's mapped block location, setting it to be unmapped.
+ *
+ * This indicates the block map entry for the logical block is either unmapped or corrupted.
+ */
+static void clear_mapped_location(struct data_vio *data_vio)
+{
+ data_vio->mapped = (struct zoned_pbn) {
+ .state = VDO_MAPPING_STATE_UNMAPPED,
+ };
+}
+
+/**
+ * set_mapped_location() - Decode and validate a block map entry, and set the mapped location of a
+ * data_vio.
+ *
+ * Return: VDO_SUCCESS, VDO_BAD_MAPPING if the map entry is invalid, or an error code for any
+ * other failure.
+ */
+static int __must_check set_mapped_location(struct data_vio *data_vio,
+ const struct block_map_entry *entry)
+{
+ /* Unpack the PBN for logging purposes even if the entry is invalid. */
+ struct data_location mapped = vdo_unpack_block_map_entry(entry);
+
+ if (vdo_is_valid_location(&mapped)) {
+ int result;
+
+ result = vdo_get_physical_zone(vdo_from_data_vio(data_vio),
+ mapped.pbn, &data_vio->mapped.zone);
+ if (result == VDO_SUCCESS) {
+ data_vio->mapped.pbn = mapped.pbn;
+ data_vio->mapped.state = mapped.state;
+ return VDO_SUCCESS;
+ }
+
+ /*
+ * Return all errors not specifically known to be errors from validating the
+ * location.
+ */
+ if ((result != VDO_OUT_OF_RANGE) && (result != VDO_BAD_MAPPING))
+ return result;
+ }
+
+ /*
+ * Log the corruption even if we wind up ignoring it for write VIOs, converting all cases
+ * to VDO_BAD_MAPPING.
+ */
+ vdo_log_error_strerror(VDO_BAD_MAPPING,
+ "PBN %llu with state %u read from the block map was invalid",
+ (unsigned long long) mapped.pbn, mapped.state);
+
+ /*
+ * A read VIO has no option but to report the bad mapping--reading zeros would be hiding
+ * known data loss.
+ */
+ if (!data_vio->write)
+ return VDO_BAD_MAPPING;
+
+ /*
+ * A write VIO only reads this mapping to decref the old block. Treat this as an unmapped
+ * entry rather than fail the write.
+ */
+ clear_mapped_location(data_vio);
+ return VDO_SUCCESS;
+}
+
+/* This callback is registered in vdo_get_mapped_block(). */
+static void get_mapping_from_fetched_page(struct vdo_completion *completion)
+{
+ int result;
+ struct vdo_page_completion *vpc = as_vdo_page_completion(completion);
+ const struct block_map_page *page;
+ const struct block_map_entry *entry;
+ struct data_vio *data_vio = as_data_vio(completion->parent);
+ struct block_map_tree_slot *tree_slot;
+
+ if (completion->result != VDO_SUCCESS) {
+ finish_processing_page(completion, completion->result);
+ return;
+ }
+
+ result = validate_completed_page(vpc, false);
+ if (result != VDO_SUCCESS) {
+ finish_processing_page(completion, result);
+ return;
+ }
+
+ page = (const struct block_map_page *) get_page_buffer(vpc->info);
+ tree_slot = &data_vio->tree_lock.tree_slots[0];
+ entry = &page->entries[tree_slot->block_map_slot.slot];
+
+ result = set_mapped_location(data_vio, entry);
+ finish_processing_page(completion, result);
+}
+
+void vdo_update_block_map_page(struct block_map_page *page, struct data_vio *data_vio,
+ physical_block_number_t pbn,
+ enum block_mapping_state mapping_state,
+ sequence_number_t *recovery_lock)
+{
+ struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
+ struct block_map *block_map = zone->block_map;
+ struct recovery_journal *journal = block_map->journal;
+ sequence_number_t old_locked, new_locked;
+ struct tree_lock *tree_lock = &data_vio->tree_lock;
+
+ /* Encode the new mapping. */
+ page->entries[tree_lock->tree_slots[tree_lock->height].block_map_slot.slot] =
+ vdo_pack_block_map_entry(pbn, mapping_state);
+
+ /* Adjust references on the recovery journal blocks. */
+ old_locked = *recovery_lock;
+ new_locked = data_vio->recovery_sequence_number;
+
+ if ((old_locked == 0) || (old_locked > new_locked)) {
+ vdo_acquire_recovery_journal_block_reference(journal, new_locked,
+ VDO_ZONE_TYPE_LOGICAL,
+ zone->zone_number);
+
+ if (old_locked > 0) {
+ vdo_release_recovery_journal_block_reference(journal, old_locked,
+ VDO_ZONE_TYPE_LOGICAL,
+ zone->zone_number);
+ }
+
+ *recovery_lock = new_locked;
+ }
+
+ /*
+	 * Release the per-entry journal lock transferred from the data_vio. The page holds a
+	 * block reference on the earliest journal block with uncommitted updates to it, and
+	 * since journal blocks are reaped in order, that reference also protects this entry's
+	 * block.
+ */
+ vdo_release_journal_entry_lock(journal, new_locked);
+ data_vio->recovery_sequence_number = 0;
+}
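+
+/*
+ * Illustrative example (not part of the original patch): if the page's recovery_lock is journal
+ * block 12 and the data_vio's entry is in block 15, the condition above is false and the page
+ * keeps its reference on block 12. Since journal blocks are reaped in order, that earlier
+ * reference also protects block 15 until the page is written out.
+ */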
+
+static void put_mapping_in_fetched_page(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion->parent);
+ sequence_number_t old_lock;
+ struct vdo_page_completion *vpc;
+ struct page_info *info;
+ int result;
+
+ if (completion->result != VDO_SUCCESS) {
+ finish_processing_page(completion, completion->result);
+ return;
+ }
+
+ vpc = as_vdo_page_completion(completion);
+ result = validate_completed_page(vpc, true);
+ if (result != VDO_SUCCESS) {
+ finish_processing_page(completion, result);
+ return;
+ }
+
+ info = vpc->info;
+ old_lock = info->recovery_lock;
+ vdo_update_block_map_page((struct block_map_page *) get_page_buffer(info),
+ data_vio, data_vio->new_mapped.pbn,
+ data_vio->new_mapped.state, &info->recovery_lock);
+ set_info_state(info, PS_DIRTY);
+ add_to_dirty_lists(info->cache->zone, &info->state_entry,
+ VDO_CACHE_PAGE, old_lock, info->recovery_lock);
+ finish_processing_page(completion, VDO_SUCCESS);
+}
+
+/* Read a stored block mapping into a data_vio. */
+void vdo_get_mapped_block(struct data_vio *data_vio)
+{
+ if (data_vio->tree_lock.tree_slots[0].block_map_slot.pbn == VDO_ZERO_BLOCK) {
+ /*
+ * We know that the block map page for this LBN has not been allocated, so the
+ * block must be unmapped.
+ */
+ clear_mapped_location(data_vio);
+ continue_data_vio(data_vio);
+ return;
+ }
+
+ fetch_mapping_page(data_vio, false, get_mapping_from_fetched_page);
+}
+
+/* Update a stored block mapping to reflect a data_vio's new mapping. */
+void vdo_put_mapped_block(struct data_vio *data_vio)
+{
+ fetch_mapping_page(data_vio, true, put_mapping_in_fetched_page);
+}
+
+struct block_map_statistics vdo_get_block_map_statistics(struct block_map *map)
+{
+ zone_count_t zone = 0;
+ struct block_map_statistics totals;
+
+ memset(&totals, 0, sizeof(struct block_map_statistics));
+ for (zone = 0; zone < map->zone_count; zone++) {
+ const struct block_map_statistics *stats =
+ &(map->zones[zone].page_cache.stats);
+
+ totals.dirty_pages += READ_ONCE(stats->dirty_pages);
+ totals.clean_pages += READ_ONCE(stats->clean_pages);
+ totals.free_pages += READ_ONCE(stats->free_pages);
+ totals.failed_pages += READ_ONCE(stats->failed_pages);
+ totals.incoming_pages += READ_ONCE(stats->incoming_pages);
+ totals.outgoing_pages += READ_ONCE(stats->outgoing_pages);
+ totals.cache_pressure += READ_ONCE(stats->cache_pressure);
+ totals.read_count += READ_ONCE(stats->read_count);
+ totals.write_count += READ_ONCE(stats->write_count);
+ totals.failed_reads += READ_ONCE(stats->failed_reads);
+ totals.failed_writes += READ_ONCE(stats->failed_writes);
+ totals.reclaimed += READ_ONCE(stats->reclaimed);
+ totals.read_outgoing += READ_ONCE(stats->read_outgoing);
+ totals.found_in_cache += READ_ONCE(stats->found_in_cache);
+ totals.discard_required += READ_ONCE(stats->discard_required);
+ totals.wait_for_page += READ_ONCE(stats->wait_for_page);
+ totals.fetch_required += READ_ONCE(stats->fetch_required);
+ totals.pages_loaded += READ_ONCE(stats->pages_loaded);
+ totals.pages_saved += READ_ONCE(stats->pages_saved);
+ totals.flush_count += READ_ONCE(stats->flush_count);
+ }
+
+ return totals;
+}
diff --git a/drivers/md/dm-vdo/block-map.h b/drivers/md/dm-vdo/block-map.h
new file mode 100644
index 000000000000..39a13039e4a3
--- /dev/null
+++ b/drivers/md/dm-vdo/block-map.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_BLOCK_MAP_H
+#define VDO_BLOCK_MAP_H
+
+#include <linux/list.h>
+
+#include "numeric.h"
+
+#include "admin-state.h"
+#include "completion.h"
+#include "encodings.h"
+#include "int-map.h"
+#include "statistics.h"
+#include "types.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+/*
+ * The block map is responsible for tracking all the logical to physical mappings of a VDO. It
+ * consists of a collection of 60 radix trees gradually allocated as logical addresses are used.
+ * Each tree is assigned to a logical zone such that it is easy to compute which zone must handle
+ * each logical address. Each logical zone also has a dedicated portion of the leaf page cache.
+ *
+ * Each logical zone has a single dedicated queue and thread for performing all updates to the
+ * radix trees assigned to that zone. The concurrency guarantees of this single-threaded model
+ * allow the code to omit more fine-grained locking for the block map structures.
+ *
+ * Load operations must be performed on the admin thread. Normal operations, such as reading and
+ * updating mappings, must be performed on the appropriate logical zone thread. Save operations
+ * must be launched from the same admin thread as the original load operation.
+ */
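+
+/*
+ * Illustrative example (not part of the original patch): with the default 60 roots and, say,
+ * 3 logical zones, LBN 1,000,000 falls on block map page 1000000 / 812 = 1231, which lives in
+ * tree 1231 % 60 = 31 and is therefore handled by logical zone 31 % 3 = 1. All updates to tree
+ * 31 happen on zone 1's thread.
+ */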
+
+enum {
+ BLOCK_MAP_VIO_POOL_SIZE = 64,
+};
+
+/*
+ * Generation counter for page references.
+ */
+typedef u32 vdo_page_generation;
+
+extern const struct block_map_entry UNMAPPED_BLOCK_MAP_ENTRY;
+
+/* The VDO Page Cache abstraction. */
+struct vdo_page_cache {
+ /* the VDO which owns this cache */
+ struct vdo *vdo;
+ /* number of pages in cache */
+ page_count_t page_count;
+ /* number of pages to write in the current batch */
+ page_count_t pages_in_batch;
+ /* Whether the VDO is doing a read-only rebuild */
+ bool rebuilding;
+
+ /* array of page information entries */
+ struct page_info *infos;
+ /* raw memory for pages */
+ char *pages;
+ /* cache last found page info */
+ struct page_info *last_found;
+ /* map of page number to info */
+ struct int_map *page_map;
+ /* main LRU list (all infos) */
+ struct list_head lru_list;
+ /* free page list (oldest first) */
+ struct list_head free_list;
+ /* outgoing page list */
+ struct list_head outgoing_list;
+ /* number of read I/O operations pending */
+ page_count_t outstanding_reads;
+ /* number of write I/O operations pending */
+ page_count_t outstanding_writes;
+ /* number of pages covered by the current flush */
+ page_count_t pages_in_flush;
+ /* number of pages waiting to be included in the next flush */
+ page_count_t pages_to_flush;
+ /* number of discards in progress */
+ unsigned int discard_count;
+	/* how many VPCs are waiting for a free page */
+ unsigned int waiter_count;
+ /* queue of waiters who want a free page */
+ struct vdo_wait_queue free_waiters;
+ /*
+ * Statistics are only updated on the logical zone thread, but are accessed from other
+ * threads.
+ */
+ struct block_map_statistics stats;
+ /* counter for pressure reports */
+ u32 pressure_report;
+ /* the block map zone to which this cache belongs */
+ struct block_map_zone *zone;
+};
+
+/*
+ * The state of a page buffer. If the page buffer is free, no particular page is bound to it;
+ * otherwise the page buffer is bound to a particular page whose absolute pbn is in the pbn field.
+ * If the page is resident or dirty, the page data is stable and may be accessed. Otherwise the
+ * page is in flight (incoming or outgoing) and its data should not be accessed.
+ *
+ * @note Update the static data in get_page_state_name() if you change this enumeration.
+ */
+enum vdo_page_buffer_state {
+ /* this page buffer is not being used */
+ PS_FREE,
+ /* this page is being read from store */
+ PS_INCOMING,
+ /* attempt to load this page failed */
+ PS_FAILED,
+ /* this page is valid and un-modified */
+ PS_RESIDENT,
+ /* this page is valid and modified */
+ PS_DIRTY,
+ /* this page is being written and should not be used */
+ PS_OUTGOING,
+ /* not a state */
+ PAGE_STATE_COUNT,
+} __packed;
+
+/*
+ * The write status of a page.
+ */
+enum vdo_page_write_status {
+ WRITE_STATUS_NORMAL,
+ WRITE_STATUS_DISCARD,
+ WRITE_STATUS_DEFERRED,
+} __packed;
+
+/* Per-page-slot information. */
+struct page_info {
+ /* Preallocated page struct vio */
+ struct vio *vio;
+ /* back-link for references */
+ struct vdo_page_cache *cache;
+ /* the pbn of the page */
+ physical_block_number_t pbn;
+ /* page is busy (temporarily locked) */
+ u16 busy;
+	/* the write status of the page */
+ enum vdo_page_write_status write_status;
+ /* page state */
+ enum vdo_page_buffer_state state;
+ /* queue of completions awaiting this item */
+ struct vdo_wait_queue waiting;
+ /* state linked list entry */
+ struct list_head state_entry;
+ /* LRU entry */
+ struct list_head lru_entry;
+ /*
+ * The earliest recovery journal block containing uncommitted updates to the block map page
+ * associated with this page_info. A reference (lock) is held on that block to prevent it
+ * from being reaped. When this value changes, the reference on the old value must be
+ * released and a reference on the new value must be acquired.
+ */
+ sequence_number_t recovery_lock;
+};
+
+/*
+ * A completion awaiting a specific page. Also a live reference into the page once completed, until
+ * freed.
+ */
+struct vdo_page_completion {
+ /* The generic completion */
+ struct vdo_completion completion;
+ /* The cache involved */
+ struct vdo_page_cache *cache;
+ /* The waiter for the pending list */
+ struct vdo_waiter waiter;
+ /* The absolute physical block number of the page on disk */
+ physical_block_number_t pbn;
+ /* Whether the page may be modified */
+ bool writable;
+ /* Whether the page is available */
+ bool ready;
+ /* The info structure for the page, only valid when ready */
+ struct page_info *info;
+};
+
+struct forest;
+
+struct tree_page {
+ struct vdo_waiter waiter;
+
+ /* Dirty list entry */
+ struct list_head entry;
+
+ /* If dirty, the tree zone flush generation in which it was last dirtied. */
+ u8 generation;
+
+ /* Whether this page is an interior tree page being written out. */
+ bool writing;
+
+ /* If writing, the tree zone flush generation of the copy being written. */
+ u8 writing_generation;
+
+ /*
+ * Sequence number of the earliest recovery journal block containing uncommitted updates to
+ * this page
+ */
+ sequence_number_t recovery_lock;
+
+	/* The value of recovery_lock when this page last started writing */
+ sequence_number_t writing_recovery_lock;
+
+ char page_buffer[VDO_BLOCK_SIZE];
+};
+
+enum block_map_page_type {
+ VDO_TREE_PAGE,
+ VDO_CACHE_PAGE,
+};
+
+typedef struct list_head dirty_era_t[2];
+
+struct dirty_lists {
+ /* The number of periods after which an element will be expired */
+ block_count_t maximum_age;
+ /* The oldest period which has unexpired elements */
+ sequence_number_t oldest_period;
+ /* One more than the current period */
+ sequence_number_t next_period;
+ /* The offset in the array of lists of the oldest period */
+ block_count_t offset;
+ /* Expired pages */
+ dirty_era_t expired;
+ /* The lists of dirty pages */
+ dirty_era_t eras[];
+};
+
+struct block_map_zone {
+ zone_count_t zone_number;
+ thread_id_t thread_id;
+ struct admin_state state;
+ struct block_map *block_map;
+	/* Dirty pages, by era */
+ struct dirty_lists *dirty_lists;
+ struct vdo_page_cache page_cache;
+ data_vio_count_t active_lookups;
+ struct int_map *loading_pages;
+ struct vio_pool *vio_pool;
+ /* The tree page which has issued or will be issuing a flush */
+ struct tree_page *flusher;
+ struct vdo_wait_queue flush_waiters;
+ /* The generation after the most recent flush */
+ u8 generation;
+ u8 oldest_generation;
+ /* The counts of dirty pages in each generation */
+ u32 dirty_page_counts[256];
+};
+
+struct block_map {
+ struct vdo *vdo;
+ struct action_manager *action_manager;
+ /* The absolute PBN of the first root of the tree part of the block map */
+ physical_block_number_t root_origin;
+ block_count_t root_count;
+
+ /* The era point we are currently distributing to the zones */
+ sequence_number_t current_era_point;
+ /* The next era point */
+ sequence_number_t pending_era_point;
+
+ /* The number of entries in block map */
+ block_count_t entry_count;
+ nonce_t nonce;
+ struct recovery_journal *journal;
+
+ /* The trees for finding block map pages */
+ struct forest *forest;
+ /* The expanded trees awaiting growth */
+ struct forest *next_forest;
+ /* The number of entries after growth */
+ block_count_t next_entry_count;
+
+ zone_count_t zone_count;
+ struct block_map_zone zones[];
+};
+
+/**
+ * typedef vdo_entry_callback_fn - A function to be called for each allocated PBN when traversing
+ * the forest.
+ * @pbn: A PBN of a tree node.
+ * @completion: The parent completion of the traversal.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+typedef int (*vdo_entry_callback_fn)(physical_block_number_t pbn,
+ struct vdo_completion *completion);
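+
+/*
+ * A hypothetical entry callback sketch (names are illustrative, not part of this patch):
+ *
+ *	static int count_tree_node(physical_block_number_t pbn,
+ *				   struct vdo_completion *completion)
+ *	{
+ *		// The caller stashed its counter in the completion's parent.
+ *		struct node_counter *counter = completion->parent;
+ *
+ *		counter->count++;
+ *		return VDO_SUCCESS;
+ *	}
+ *
+ * passed as the callback argument to vdo_traverse_forest().
+ */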
+
+static inline struct vdo_page_completion *as_vdo_page_completion(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_PAGE_COMPLETION);
+ return container_of(completion, struct vdo_page_completion, completion);
+}
+
+void vdo_release_page_completion(struct vdo_completion *completion);
+
+void vdo_get_page(struct vdo_page_completion *page_completion,
+ struct block_map_zone *zone, physical_block_number_t pbn,
+ bool writable, void *parent, vdo_action_fn callback,
+ vdo_action_fn error_handler, bool requeue);
+
+void vdo_request_page_write(struct vdo_completion *completion);
+
+int __must_check vdo_get_cached_page(struct vdo_completion *completion,
+ struct block_map_page **page_ptr);
+
+int __must_check vdo_invalidate_page_cache(struct vdo_page_cache *cache);
+
+static inline struct block_map_page * __must_check
+vdo_as_block_map_page(struct tree_page *tree_page)
+{
+ return (struct block_map_page *) tree_page->page_buffer;
+}
+
+bool vdo_copy_valid_page(char *buffer, nonce_t nonce,
+ physical_block_number_t pbn,
+ struct block_map_page *page);
+
+void vdo_find_block_map_slot(struct data_vio *data_vio);
+
+physical_block_number_t vdo_find_block_map_page_pbn(struct block_map *map,
+ page_number_t page_number);
+
+void vdo_write_tree_page(struct tree_page *page, struct block_map_zone *zone);
+
+void vdo_traverse_forest(struct block_map *map, vdo_entry_callback_fn callback,
+ struct vdo_completion *completion);
+
+int __must_check vdo_decode_block_map(struct block_map_state_2_0 state,
+ block_count_t logical_blocks, struct vdo *vdo,
+ struct recovery_journal *journal, nonce_t nonce,
+ page_count_t cache_size, block_count_t maximum_age,
+ struct block_map **map_ptr);
+
+void vdo_drain_block_map(struct block_map *map, const struct admin_state_code *operation,
+ struct vdo_completion *parent);
+
+void vdo_resume_block_map(struct block_map *map, struct vdo_completion *parent);
+
+int __must_check vdo_prepare_to_grow_block_map(struct block_map *map,
+ block_count_t new_logical_blocks);
+
+void vdo_grow_block_map(struct block_map *map, struct vdo_completion *parent);
+
+void vdo_abandon_block_map_growth(struct block_map *map);
+
+void vdo_free_block_map(struct block_map *map);
+
+struct block_map_state_2_0 __must_check vdo_record_block_map(const struct block_map *map);
+
+void vdo_initialize_block_map_from_journal(struct block_map *map,
+ struct recovery_journal *journal);
+
+zone_count_t vdo_compute_logical_zone(struct data_vio *data_vio);
+
+void vdo_advance_block_map_era(struct block_map *map,
+ sequence_number_t recovery_block_number);
+
+void vdo_update_block_map_page(struct block_map_page *page, struct data_vio *data_vio,
+ physical_block_number_t pbn,
+ enum block_mapping_state mapping_state,
+ sequence_number_t *recovery_lock);
+
+void vdo_get_mapped_block(struct data_vio *data_vio);
+
+void vdo_put_mapped_block(struct data_vio *data_vio);
+
+struct block_map_statistics __must_check vdo_get_block_map_statistics(struct block_map *map);
+
+/**
+ * vdo_convert_maximum_age() - Convert the maximum age to reflect the new recovery journal format
+ * @age: The configured maximum age
+ *
+ * Return: The converted age
+ *
+ * In the old recovery journal format, each journal block held 311 entries, and every write bio
+ * made two entries. The old maximum age was half the usable journal length. In the new format,
+ * each block holds only 217 entries, but each bio only makes one entry. We convert the configured
+ * age so that the number of writes in a block map era is the same in the old and new formats. This
+ * keeps the bound on the amount of work required to recover the block map from the recovery
+ * journal the same across the format change. It also keeps the amortization of block map page
+ * writes to write bios the same.
+ */
+static inline block_count_t vdo_convert_maximum_age(block_count_t age)
+{
+ return DIV_ROUND_UP(age * RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK,
+ 2 * RECOVERY_JOURNAL_ENTRIES_PER_BLOCK);
+}
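+
+/*
+ * Worked example (illustrative): a configured age of 500 old-format blocks covered
+ * 500 * 311 / 2 = 77,750 write bios; vdo_convert_maximum_age(500) =
+ * DIV_ROUND_UP(500 * 311, 434) = 359 new-format blocks, which cover at least as many
+ * single-entry writes (359 * 217 = 77,903).
+ */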
+
+#endif /* VDO_BLOCK_MAP_H */
diff --git a/drivers/md/dm-vdo/completion.c b/drivers/md/dm-vdo/completion.c
new file mode 100644
index 000000000000..5ad85334632d
--- /dev/null
+++ b/drivers/md/dm-vdo/completion.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "completion.h"
+
+#include <linux/kernel.h>
+
+#include "logger.h"
+#include "permassert.h"
+
+#include "status-codes.h"
+#include "types.h"
+#include "vio.h"
+#include "vdo.h"
+
+/**
+ * DOC: vdo completions.
+ *
+ * Most of vdo's data structures are lock free, each either belonging to a single "zone," or
+ * divided into a number of zones whose accesses to the structure do not overlap. During normal
+ * operation, at most one thread will be operating in any given zone. Each zone has a
+ * vdo_work_queue which holds vdo_completions that are to be run in that zone. A completion may
+ * only be enqueued on one queue or operating in a single zone at a time.
+ *
+ * At each step of a multi-threaded operation, the completion performing the operation is given a
+ * callback, error handler, and thread id for the next step. A completion is "run" when it is
+ * operating on the correct thread (as specified by its callback_thread_id). If the value of its
+ * "result" field is an error (i.e. not VDO_SUCCESS), the function in its "error_handler" will be
+ * invoked. If the error_handler is NULL, or there is no error, the function set as its "callback"
+ * will be invoked. Generally, a completion will not be run directly, but rather will be
+ * "launched." In this case, it will check whether it is operating on the correct thread. If it is,
+ * it will run immediately. Otherwise, it will be enqueued on the vdo_work_queue associated with the
+ * completion's "callback_thread_id". When it is dequeued, it will be on the correct thread, and
+ * will get run. In some cases, the completion should get queued instead of running immediately,
+ * even if it is being launched from the correct thread. This is usually in cases where there is a
+ * long chain of callbacks, all on the same thread, which could overflow the stack. In such cases,
+ * the completion's "requeue" field should be set to true. Doing so will skip the current thread
+ * check and simply enqueue the completion.
+ *
+ * A completion may be "finished," in which case its "complete" field will be set to true before it
+ * is next run. It is a bug to attempt to set the result or re-finish a finished completion.
+ * Because a completion's fields are not safe to examine from any thread other than the one on
+ * which the completion is currently operating, this field is used only to aid in detecting
+ * programming errors. It cannot be used for cross-thread checking on the status of an operation.
+ * A completion must be "reset" before it can be reused after it has been finished. Resetting will
+ * also clear any error from the result field.
+ */
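+
+/*
+ * A minimal usage sketch (hypothetical caller, not part of this patch): a two-step operation
+ * whose second step must run on the journal thread.
+ *
+ *	static void step_two(struct vdo_completion *completion)
+ *	{
+ *		// Runs on journal_thread_id once dequeued there.
+ *		vdo_finish_completion(completion);
+ *	}
+ *
+ *	vdo_prepare_completion(completion, step_two, handle_error,
+ *			       journal_thread_id, parent);
+ *	vdo_launch_completion(completion);
+ *
+ * Here handle_error, journal_thread_id, and parent are placeholders supplied by the caller.
+ */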
+
+void vdo_initialize_completion(struct vdo_completion *completion,
+ struct vdo *vdo,
+ enum vdo_completion_type type)
+{
+ memset(completion, 0, sizeof(*completion));
+ completion->vdo = vdo;
+ completion->type = type;
+ vdo_reset_completion(completion);
+}
+
+static inline void assert_incomplete(struct vdo_completion *completion)
+{
+ VDO_ASSERT_LOG_ONLY(!completion->complete, "completion is not complete");
+}
+
+/**
+ * vdo_set_completion_result() - Set the result of a completion.
+ *
+ * Older errors will not be masked.
+ */
+void vdo_set_completion_result(struct vdo_completion *completion, int result)
+{
+ assert_incomplete(completion);
+ if (completion->result == VDO_SUCCESS)
+ completion->result = result;
+}
+
+/**
+ * vdo_launch_completion_with_priority() - Run or enqueue a completion.
+ * @priority: The priority at which to enqueue the completion.
+ *
+ * If called on the correct thread (i.e. the one specified in the completion's callback_thread_id
+ * field) and not marked for requeue, the completion will be run immediately. Otherwise, the
+ * completion will be enqueued on the specified thread.
+ */
+void vdo_launch_completion_with_priority(struct vdo_completion *completion,
+ enum vdo_completion_priority priority)
+{
+ thread_id_t callback_thread = completion->callback_thread_id;
+
+ if (completion->requeue || (callback_thread != vdo_get_callback_thread_id())) {
+ vdo_enqueue_completion(completion, priority);
+ return;
+ }
+
+ vdo_run_completion(completion);
+}
+
+/** vdo_finish_completion() - Mark a completion as complete and then launch it. */
+void vdo_finish_completion(struct vdo_completion *completion)
+{
+ assert_incomplete(completion);
+ completion->complete = true;
+ if (completion->callback != NULL)
+ vdo_launch_completion(completion);
+}
+
+void vdo_enqueue_completion(struct vdo_completion *completion,
+ enum vdo_completion_priority priority)
+{
+ struct vdo *vdo = completion->vdo;
+ thread_id_t thread_id = completion->callback_thread_id;
+
+ if (VDO_ASSERT(thread_id < vdo->thread_config.thread_count,
+ "thread_id %u (completion type %d) is less than thread count %u",
+ thread_id, completion->type,
+ vdo->thread_config.thread_count) != VDO_SUCCESS)
+ BUG();
+
+ completion->requeue = false;
+ completion->priority = priority;
+ completion->my_queue = NULL;
+ vdo_enqueue_work_queue(vdo->threads[thread_id].queue, completion);
+}
+
+/**
+ * vdo_requeue_completion_if_needed() - Requeue a completion if not called on the specified thread.
+ *
+ * Return: True if the completion was requeued; callers may not access the completion in this case.
+ */
+bool vdo_requeue_completion_if_needed(struct vdo_completion *completion,
+ thread_id_t callback_thread_id)
+{
+ if (vdo_get_callback_thread_id() == callback_thread_id)
+ return false;
+
+ completion->callback_thread_id = callback_thread_id;
+ vdo_enqueue_completion(completion, VDO_WORK_Q_DEFAULT_PRIORITY);
+ return true;
+}
diff --git a/drivers/md/dm-vdo/completion.h b/drivers/md/dm-vdo/completion.h
new file mode 100644
index 000000000000..3407f34ce58c
--- /dev/null
+++ b/drivers/md/dm-vdo/completion.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_COMPLETION_H
+#define VDO_COMPLETION_H
+
+#include "permassert.h"
+
+#include "status-codes.h"
+#include "types.h"
+
+/**
+ * vdo_run_completion() - Run a completion's callback or error handler on the current thread.
+ *
+ * Context: This function must be called from the correct callback thread.
+ */
+static inline void vdo_run_completion(struct vdo_completion *completion)
+{
+ if ((completion->result != VDO_SUCCESS) && (completion->error_handler != NULL)) {
+ completion->error_handler(completion);
+ return;
+ }
+
+ completion->callback(completion);
+}
+
+void vdo_set_completion_result(struct vdo_completion *completion, int result);
+
+void vdo_initialize_completion(struct vdo_completion *completion, struct vdo *vdo,
+ enum vdo_completion_type type);
+
+/**
+ * vdo_reset_completion() - Reset a completion to a clean state, while keeping the type, vdo and
+ * parent information.
+ */
+static inline void vdo_reset_completion(struct vdo_completion *completion)
+{
+ completion->result = VDO_SUCCESS;
+ completion->complete = false;
+}
+
+void vdo_launch_completion_with_priority(struct vdo_completion *completion,
+ enum vdo_completion_priority priority);
+
+/**
+ * vdo_launch_completion() - Launch a completion with default priority.
+ */
+static inline void vdo_launch_completion(struct vdo_completion *completion)
+{
+ vdo_launch_completion_with_priority(completion, VDO_WORK_Q_DEFAULT_PRIORITY);
+}
+
+/**
+ * vdo_continue_completion() - Continue processing a completion.
+ * @result: The current result (will not mask older errors).
+ *
+ * Continue processing a completion by setting the current result and calling
+ * vdo_launch_completion().
+ */
+static inline void vdo_continue_completion(struct vdo_completion *completion, int result)
+{
+ vdo_set_completion_result(completion, result);
+ vdo_launch_completion(completion);
+}
+
+void vdo_finish_completion(struct vdo_completion *completion);
+
+/**
+ * vdo_fail_completion() - Set the result of a completion if it does not already have an error,
+ * then finish it.
+ */
+static inline void vdo_fail_completion(struct vdo_completion *completion, int result)
+{
+ vdo_set_completion_result(completion, result);
+ vdo_finish_completion(completion);
+}
+
+/**
+ * vdo_assert_completion_type() - Assert that a completion is of the correct type.
+ *
+ * Return: VDO_SUCCESS or an error
+ */
+static inline int vdo_assert_completion_type(struct vdo_completion *completion,
+ enum vdo_completion_type expected)
+{
+ return VDO_ASSERT(expected == completion->type,
+ "completion type should be %u, not %u", expected,
+ completion->type);
+}
+
+static inline void vdo_set_completion_callback(struct vdo_completion *completion,
+ vdo_action_fn callback,
+ thread_id_t callback_thread_id)
+{
+ completion->callback = callback;
+ completion->callback_thread_id = callback_thread_id;
+}
+
+/**
+ * vdo_launch_completion_callback() - Set the callback for a completion and launch it immediately.
+ */
+static inline void vdo_launch_completion_callback(struct vdo_completion *completion,
+ vdo_action_fn callback,
+ thread_id_t callback_thread_id)
+{
+ vdo_set_completion_callback(completion, callback, callback_thread_id);
+ vdo_launch_completion(completion);
+}
+
+/**
+ * vdo_prepare_completion() - Prepare a completion for launch.
+ *
+ * Resets the completion, and then sets its callback, error handler, callback thread, and parent.
+ */
+static inline void vdo_prepare_completion(struct vdo_completion *completion,
+ vdo_action_fn callback,
+ vdo_action_fn error_handler,
+ thread_id_t callback_thread_id, void *parent)
+{
+ vdo_reset_completion(completion);
+ vdo_set_completion_callback(completion, callback, callback_thread_id);
+ completion->error_handler = error_handler;
+ completion->parent = parent;
+}
+
+/**
+ * vdo_prepare_completion_for_requeue() - Prepare a completion for launch ensuring that it will
+ * always be requeued.
+ *
+ * Resets the completion, and then sets its callback, error handler, callback thread, and parent.
+ */
+static inline void vdo_prepare_completion_for_requeue(struct vdo_completion *completion,
+ vdo_action_fn callback,
+ vdo_action_fn error_handler,
+ thread_id_t callback_thread_id,
+ void *parent)
+{
+ vdo_prepare_completion(completion, callback, error_handler,
+ callback_thread_id, parent);
+ completion->requeue = true;
+}
+
+void vdo_enqueue_completion(struct vdo_completion *completion,
+ enum vdo_completion_priority priority);
+
+bool vdo_requeue_completion_if_needed(struct vdo_completion *completion,
+ thread_id_t callback_thread_id);
+
+#endif /* VDO_COMPLETION_H */
diff --git a/drivers/md/dm-vdo/constants.h b/drivers/md/dm-vdo/constants.h
new file mode 100644
index 000000000000..a8c4d6e24b38
--- /dev/null
+++ b/drivers/md/dm-vdo/constants.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_CONSTANTS_H
+#define VDO_CONSTANTS_H
+
+#include <linux/blkdev.h>
+
+#include "types.h"
+
+enum {
+ /*
+ * The maximum number of contiguous PBNs which will go to a single bio submission queue,
+ * assuming there is more than one queue.
+ */
+ VDO_BIO_ROTATION_INTERVAL_LIMIT = 1024,
+
+ /* The number of entries on a block map page */
+ VDO_BLOCK_MAP_ENTRIES_PER_PAGE = 812,
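+	/*
+	 * (Illustrative arithmetic, assuming a 5-byte packed block_map_entry, consistent
+	 * with the BUILD_BUG_ON in vdo_decode_block_map(): 812 * 5 = 4060 bytes of
+	 * entries, leaving 36 of the 4096 bytes in the block for the page header.)
+	 */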
+
+ /* The origin of the flat portion of the block map */
+ VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN = 1,
+
+ /*
+ * The height of a block map tree. Assuming a root count of 60 and 812 entries per page,
+ * this is big enough to represent almost 95 PB of logical space.
+ */
+ VDO_BLOCK_MAP_TREE_HEIGHT = 5,
+
+ /* The default number of bio submission queues. */
+ DEFAULT_VDO_BIO_SUBMIT_QUEUE_COUNT = 4,
+
+ /* The number of contiguous PBNs to be submitted to a single bio queue. */
+ DEFAULT_VDO_BIO_SUBMIT_QUEUE_ROTATE_INTERVAL = 64,
+
+ /* The number of trees in the arboreal block map */
+ DEFAULT_VDO_BLOCK_MAP_TREE_ROOT_COUNT = 60,
+
+ /* The default size of the recovery journal, in blocks */
+ DEFAULT_VDO_RECOVERY_JOURNAL_SIZE = 32 * 1024,
+
+ /* The default size of each slab journal, in blocks */
+ DEFAULT_VDO_SLAB_JOURNAL_SIZE = 224,
+
+ /* Unit test minimum */
+ MINIMUM_VDO_SLAB_JOURNAL_BLOCKS = 2,
+
+ /*
+ * The initial size of lbn_operations and pbn_operations, which is based upon the expected
+ * maximum number of outstanding VIOs. This value was chosen to make it highly unlikely
+ * that the maps would need to be resized.
+ */
+ VDO_LOCK_MAP_CAPACITY = 10000,
+
+ /* The maximum number of logical zones */
+ MAX_VDO_LOGICAL_ZONES = 60,
+
+ /* The maximum number of physical zones */
+ MAX_VDO_PHYSICAL_ZONES = 16,
+
+ /* The base-2 logarithm of the maximum blocks in one slab */
+ MAX_VDO_SLAB_BITS = 23,
+
+ /* The maximum number of slabs the slab depot supports */
+ MAX_VDO_SLABS = 8192,
+
+ /*
+ * The maximum number of block map pages to load simultaneously during recovery or rebuild.
+ */
+ MAXIMUM_SIMULTANEOUS_VDO_BLOCK_MAP_RESTORATION_READS = 1024,
+
+ /* The maximum number of entries in the slab summary */
+ MAXIMUM_VDO_SLAB_SUMMARY_ENTRIES = MAX_VDO_SLABS * MAX_VDO_PHYSICAL_ZONES,
+
+ /* The maximum number of total threads in a VDO thread configuration. */
+ MAXIMUM_VDO_THREADS = 100,
+
+ /* The maximum number of VIOs in the system at once */
+ MAXIMUM_VDO_USER_VIOS = 2048,
+
+ /* The only physical block size supported by VDO */
+ VDO_BLOCK_SIZE = 4096,
+
+ /* The number of sectors per block */
+ VDO_SECTORS_PER_BLOCK = (VDO_BLOCK_SIZE >> SECTOR_SHIFT),
+
+ /* The size of a sector that will not be torn */
+ VDO_SECTOR_SIZE = 512,
+
+ /* The physical block number reserved for storing the zero block */
+ VDO_ZERO_BLOCK = 0,
+};
+
+#endif /* VDO_CONSTANTS_H */
diff --git a/drivers/md/dm-vdo/cpu.h b/drivers/md/dm-vdo/cpu.h
new file mode 100644
index 000000000000..d6a2615ba657
--- /dev/null
+++ b/drivers/md/dm-vdo/cpu.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_CPU_H
+#define UDS_CPU_H
+
+#include <linux/cache.h>
+
+/**
+ * uds_prefetch_address() - Minimize cache-miss latency by attempting to move data into a CPU cache
+ * before it is accessed.
+ *
+ * @address: the address to fetch (may be invalid)
+ * @for_write: must be constant at compile time--false if for reading, true if for writing
+ */
+static inline void uds_prefetch_address(const void *address, bool for_write)
+{
+ /*
+ * for_write won't be a constant if we are compiled with optimization turned off, in which
+ * case prefetching really doesn't matter. clang can't figure out that if for_write is a
+ * constant, it can be passed as the second, mandatorily constant argument to prefetch(),
+ * at least currently on llvm 12.
+ */
+ if (__builtin_constant_p(for_write)) {
+ if (for_write)
+ __builtin_prefetch(address, true);
+ else
+ __builtin_prefetch(address, false);
+ }
+}
+
+/**
+ * uds_prefetch_range() - Minimize cache-miss latency by attempting to move a range of addresses
+ * into a CPU cache before they are accessed.
+ *
+ * @start: the starting address to fetch (may be invalid)
+ * @size: the number of bytes in the address range
+ * @for_write: must be constant at compile time--false if for reading, true if for writing
+ */
+static inline void uds_prefetch_range(const void *start, unsigned int size,
+ bool for_write)
+{
+ /*
+ * Count the number of cache lines to fetch, allowing for the address range to span an
+ * extra cache line boundary due to address alignment.
+ */
+ const char *address = (const char *) start;
+ unsigned int offset = ((uintptr_t) address % L1_CACHE_BYTES);
+ unsigned int cache_lines = (1 + ((size + offset) / L1_CACHE_BYTES));
+
+ while (cache_lines-- > 0) {
+ uds_prefetch_address(address, for_write);
+ address += L1_CACHE_BYTES;
+ }
+}
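+
+/*
+ * Worked example (illustrative): with 64-byte cache lines, a 200-byte range starting 36 bytes
+ * into a line gives offset = 36 and cache_lines = 1 + (200 + 36) / 64 = 4, matching the four
+ * lines the range actually touches.
+ */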
+
+#endif /* UDS_CPU_H */
diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c
new file mode 100644
index 000000000000..94f6f1ccfb7d
--- /dev/null
+++ b/drivers/md/dm-vdo/data-vio.c
@@ -0,0 +1,2063 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "data-vio.h"
+
+#include <linux/atomic.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/device-mapper.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lz4.h>
+#include <linux/minmax.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "murmurhash3.h"
+#include "permassert.h"
+
+#include "block-map.h"
+#include "dump.h"
+#include "encodings.h"
+#include "int-map.h"
+#include "io-submitter.h"
+#include "logical-zone.h"
+#include "packer.h"
+#include "recovery-journal.h"
+#include "slab-depot.h"
+#include "status-codes.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+/**
+ * DOC: Bio flags.
+ *
+ * For certain flags set on user bios, if the user bio has not yet been acknowledged, setting those
+ * flags on our own bio(s) for that request may help underlying layers better fulfill the user
+ * bio's needs. This constant contains the aggregate of those flags; VDO strips all the other
+ * flags, as they convey incorrect information.
+ *
+ * These flags are always irrelevant if we have already finished the user bio, as they are only
+ * hints on IO importance. Once VDO has finished the user bio, any remaining IO it issues no
+ * longer needs to convey that importance.
+ *
+ * Note that bio.c contains the complete list of flags we believe may be set; the following list
+ * explains the action taken with each of those flags VDO could receive:
+ *
+ * * REQ_SYNC: Passed down if the user bio is not yet completed, since it indicates the user bio
+ * completion is required for further work to be done by the issuer.
+ * * REQ_META: Passed down if the user bio is not yet completed, since it may mean the lower layer
+ * treats it as more urgent, similar to REQ_SYNC.
+ * * REQ_PRIO: Passed down if the user bio is not yet completed, since it indicates the user bio is
+ * important.
+ * * REQ_NOMERGE: Set only if the incoming bio was split; irrelevant to VDO IO.
+ * * REQ_IDLE: Set if the incoming bio had more IO quickly following; VDO's IO pattern doesn't
+ * match incoming IO, so this flag is incorrect for it.
+ * * REQ_FUA: Handled separately, and irrelevant to VDO IO otherwise.
+ * * REQ_RAHEAD: Passed down, since for reads it only indicates trivial importance.
+ * * REQ_BACKGROUND: Not passed down, as VIOs are a limited resource and VDO needs them recycled
+ * ASAP to service heavy load, which is the only place where REQ_BACKGROUND might aid in load
+ * prioritization.
+ */
+static blk_opf_t PASSTHROUGH_FLAGS = (REQ_PRIO | REQ_META | REQ_SYNC | REQ_RAHEAD);
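+
+/*
+ * For example (illustrative, not from the original patch): a not-yet-finished user bio carrying
+ * REQ_SYNC | REQ_IDLE | REQ_BACKGROUND contributes only REQ_SYNC to VDO's own bios, since the
+ * other two flags are stripped by this mask.
+ */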
+
+/**
+ * DOC:
+ *
+ * The data_vio_pool maintains the pool of data_vios which a vdo uses to service incoming bios. For
+ * correctness, and in order to avoid potentially expensive or blocking memory allocations during
+ * normal operation, the number of concurrently active data_vios is capped. Furthermore, in order
+ * to avoid starvation of reads and writes, at most 75% of the data_vios may be used for
+ * discards. The data_vio_pool is responsible for enforcing these limits. Threads submitting bios
+ * for which a data_vio or discard permit is not available will block until the necessary
+ * resources are available. The pool is also responsible for distributing resources to blocked
+ * threads and waking them. Finally, the pool attempts to batch the work of recycling data_vios by
+ * performing the work of actually assigning resources to blocked threads or placing data_vios back
+ * into the pool on a single cpu at a time.
+ *
+ * The pool contains two "limiters", one for tracking data_vios and one for tracking discard
+ * permits. The limiters also provide safe cross-thread access to pool statistics without the need
+ * to take the pool's lock. When a thread submits a bio to a vdo device, it will first attempt to
+ * get a discard permit if it is a discard, and then to get a data_vio. If the necessary resources
+ * are available, the incoming bio will be assigned to the acquired data_vio, and it will be
+ * launched. However, if either of these are unavailable, the arrival time of the bio is recorded
+ * in the bio's bi_private field, the bio and its submitter are both queued on the appropriate
+ * limiter and the submitting thread will then put itself to sleep. (Note that this mechanism
+ * will break if jiffies are only 32 bits.)
+ *
+ * Whenever a data_vio has completed processing for the bio it was servicing, release_data_vio()
+ * will be called on it. This function will add the data_vio to a funnel queue, and then check the
+ * state of the pool. If the pool is not currently processing released data_vios, the pool's
+ * completion will be enqueued on a cpu queue. This obviates the need for the releasing threads to
+ * hold the pool's lock, and also batches release work while avoiding starvation of the cpu
+ * threads.
+ *
+ * Whenever the pool's completion is run on a cpu thread, it calls process_release_callback() which
+ * processes a batch of returned data_vios (at most DATA_VIO_RELEASE_BATCH_SIZE, currently 128)
+ * from the pool's funnel queue. For
+ * each data_vio, it first checks whether that data_vio was processing a discard. If so, and there
+ * is a blocked bio waiting for a discard permit, that permit is notionally transferred to the
+ * eldest discard waiter, and that waiter is moved to the end of the list of discard bios waiting
+ * for a data_vio. If there are no discard waiters, the discard permit is returned to the pool.
+ * Next, the data_vio is assigned to the oldest blocked bio which either has a discard permit or
+ * doesn't need one, and is relaunched. If no such bio exists, the data_vio is returned to the
+ * pool. Finally, if any waiting bios were launched, the threads which blocked trying to submit
+ * them are awakened.
+ */
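+
+/*
+ * A rough sketch of the life cycle described above:
+ *
+ *	submit:  vdo_launch_bio() -> acquire discard permit and/or data_vio,
+ *	         or sleep in wait_permit() until process_release_callback() wakes us
+ *	         -> launch_bio() -> launch_data_vio() -> attempt_logical_block_lock() -> ...
+ *	release: release_data_vio() -> funnel queue -> schedule_releases()
+ *	         -> process_release_callback() -> reuse_or_release_resources() -> wake waiters
+ */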
+
+#define DATA_VIO_RELEASE_BATCH_SIZE 128
+
+static const unsigned int VDO_SECTORS_PER_BLOCK_MASK = VDO_SECTORS_PER_BLOCK - 1;
+static const u32 COMPRESSION_STATUS_MASK = 0xff;
+static const u32 MAY_NOT_COMPRESS_MASK = 0x80000000;
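+
+/*
+ * A sketch of the packed layout implied by these masks: the compression stage occupies the low
+ * byte of the u32 and the may-not-compress flag is the top bit, so a (hypothetical) stage value
+ * of 0x02 with the flag set packs to 0x80000002. See pack_status().
+ */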
+
+struct limiter;
+typedef void (*assigner_fn)(struct limiter *limiter);
+
+/* Bookkeeping structure for a single type of resource. */
+struct limiter {
+ /* The data_vio_pool to which this limiter belongs */
+ struct data_vio_pool *pool;
+ /* The maximum number of data_vios available */
+ data_vio_count_t limit;
+ /* The number of resources in use */
+ data_vio_count_t busy;
+ /* The maximum number of resources ever simultaneously in use */
+ data_vio_count_t max_busy;
+ /* The number of resources to release */
+ data_vio_count_t release_count;
+ /* The number of waiters to wake */
+ data_vio_count_t wake_count;
+ /* The list of waiting bios which are known to process_release_callback() */
+ struct bio_list waiters;
+ /* The list of waiting bios which are not yet known to process_release_callback() */
+ struct bio_list new_waiters;
+ /* The list of waiters which have their permits */
+ struct bio_list *permitted_waiters;
+ /* The function for assigning a resource to a waiter */
+ assigner_fn assigner;
+ /* The queue of blocked threads */
+ wait_queue_head_t blocked_threads;
+ /* The arrival time of the eldest waiter */
+ u64 arrival;
+};
+
+/*
+ * A data_vio_pool is a collection of preallocated data_vios which may be acquired from any thread,
+ * and are released in batches.
+ */
+struct data_vio_pool {
+ /* Completion for scheduling releases */
+ struct vdo_completion completion;
+ /* The administrative state of the pool */
+ struct admin_state state;
+ /* Lock protecting the pool */
+ spinlock_t lock;
+ /* The main limiter controlling the total data_vios in the pool. */
+ struct limiter limiter;
+ /* The limiter controlling data_vios for discard */
+ struct limiter discard_limiter;
+ /* The list of bios which have discard permits but still need a data_vio */
+ struct bio_list permitted_discards;
+ /* The list of available data_vios */
+ struct list_head available;
+ /* The queue of data_vios waiting to be returned to the pool */
+ struct funnel_queue *queue;
+ /* Whether the pool is processing, or scheduled to process releases */
+ atomic_t processing;
+ /* The data vios in the pool */
+ struct data_vio data_vios[];
+};
+
+static const char * const ASYNC_OPERATION_NAMES[] = {
+ "launch",
+ "acknowledge_write",
+ "acquire_hash_lock",
+ "attempt_logical_block_lock",
+ "lock_duplicate_pbn",
+ "check_for_duplication",
+ "cleanup",
+ "compress_data_vio",
+ "find_block_map_slot",
+ "get_mapped_block_for_read",
+ "get_mapped_block_for_write",
+ "hash_data_vio",
+ "journal_remapping",
+ "vdo_attempt_packing",
+ "put_mapped_block",
+ "read_data_vio",
+ "update_dedupe_index",
+ "update_reference_counts",
+ "verify_duplication",
+ "write_data_vio",
+};
+
+/* The steps taken cleaning up a VIO, in the order they are performed. */
+enum data_vio_cleanup_stage {
+ VIO_CLEANUP_START,
+ VIO_RELEASE_HASH_LOCK = VIO_CLEANUP_START,
+ VIO_RELEASE_ALLOCATED,
+ VIO_RELEASE_RECOVERY_LOCKS,
+ VIO_RELEASE_LOGICAL,
+ VIO_CLEANUP_DONE
+};
+
+static inline struct data_vio_pool * __must_check
+as_data_vio_pool(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_DATA_VIO_POOL_COMPLETION);
+ return container_of(completion, struct data_vio_pool, completion);
+}
+
+static inline u64 get_arrival_time(struct bio *bio)
+{
+ return (u64) bio->bi_private;
+}
+
+/**
+ * check_for_drain_complete_locked() - Check whether a data_vio_pool has no outstanding data_vios
+ * or waiters while holding the pool's lock.
+ */
+static bool check_for_drain_complete_locked(struct data_vio_pool *pool)
+{
+ if (pool->limiter.busy > 0)
+ return false;
+
+ VDO_ASSERT_LOG_ONLY((pool->discard_limiter.busy == 0),
+ "no outstanding discard permits");
+
+ return (bio_list_empty(&pool->limiter.new_waiters) &&
+ bio_list_empty(&pool->discard_limiter.new_waiters));
+}
+
+static void initialize_lbn_lock(struct data_vio *data_vio, logical_block_number_t lbn)
+{
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+ zone_count_t zone_number;
+ struct lbn_lock *lock = &data_vio->logical;
+
+ lock->lbn = lbn;
+ lock->locked = false;
+ vdo_waitq_init(&lock->waiters);
+ zone_number = vdo_compute_logical_zone(data_vio);
+ lock->zone = &vdo->logical_zones->zones[zone_number];
+}
+
+static void launch_locked_request(struct data_vio *data_vio)
+{
+ data_vio->logical.locked = true;
+ if (data_vio->write) {
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+
+ if (vdo_is_read_only(vdo)) {
+ continue_data_vio_with_error(data_vio, VDO_READ_ONLY);
+ return;
+ }
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_FIND_BLOCK_MAP_SLOT;
+ vdo_find_block_map_slot(data_vio);
+}
+
+static void acknowledge_data_vio(struct data_vio *data_vio)
+{
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+ struct bio *bio = data_vio->user_bio;
+ int error = vdo_status_to_errno(data_vio->vio.completion.result);
+
+ if (bio == NULL)
+ return;
+
+ VDO_ASSERT_LOG_ONLY((data_vio->remaining_discard <=
+ (u32) (VDO_BLOCK_SIZE - data_vio->offset)),
+ "data_vio to acknowledge is not an incomplete discard");
+
+ data_vio->user_bio = NULL;
+ vdo_count_bios(&vdo->stats.bios_acknowledged, bio);
+ if (data_vio->is_partial)
+ vdo_count_bios(&vdo->stats.bios_acknowledged_partial, bio);
+
+ bio->bi_status = errno_to_blk_status(error);
+ bio_endio(bio);
+}
+
+static void copy_to_bio(struct bio *bio, char *data_ptr)
+{
+ struct bio_vec biovec;
+ struct bvec_iter iter;
+
+ bio_for_each_segment(biovec, bio, iter) {
+ memcpy_to_bvec(&biovec, data_ptr);
+ data_ptr += biovec.bv_len;
+ }
+}
+
+struct data_vio_compression_status get_data_vio_compression_status(struct data_vio *data_vio)
+{
+ u32 packed = atomic_read(&data_vio->compression.status);
+
+ /* pairs with cmpxchg in set_data_vio_compression_status */
+ smp_rmb();
+ return (struct data_vio_compression_status) {
+ .stage = packed & COMPRESSION_STATUS_MASK,
+ .may_not_compress = ((packed & MAY_NOT_COMPRESS_MASK) != 0),
+ };
+}
+
+/**
+ * pack_status() - Convert a data_vio_compression_status into a u32 which may be stored
+ * atomically.
+ * @status: The state to convert.
+ *
+ * Return: The compression state packed into a u32.
+ */
+static u32 __must_check pack_status(struct data_vio_compression_status status)
+{
+ return status.stage | (status.may_not_compress ? MAY_NOT_COMPRESS_MASK : 0);
+}
+
+/**
+ * set_data_vio_compression_status() - Set the compression status of a data_vio.
+ * @data_vio: The data_vio to change.
+ * @status: The expected current status of the data_vio.
+ * @new_status: The status to set.
+ *
+ * Return: true if the new status was set, false if the data_vio's compression status did not
+ * match the expected status, and so was left unchanged.
+ */
+static bool __must_check
+set_data_vio_compression_status(struct data_vio *data_vio,
+ struct data_vio_compression_status status,
+ struct data_vio_compression_status new_status)
+{
+ u32 actual;
+ u32 expected = pack_status(status);
+ u32 replacement = pack_status(new_status);
+
+ /*
+	 * Extra barriers because this was originally developed using a CAS operation that
+	 * implicitly had them.
+ * had them.
+ */
+ smp_mb__before_atomic();
+ actual = atomic_cmpxchg(&data_vio->compression.status, expected, replacement);
+ /* same as before_atomic */
+ smp_mb__after_atomic();
+ return (expected == actual);
+}
+
+struct data_vio_compression_status advance_data_vio_compression_stage(struct data_vio *data_vio)
+{
+ for (;;) {
+ struct data_vio_compression_status status =
+ get_data_vio_compression_status(data_vio);
+ struct data_vio_compression_status new_status = status;
+
+ if (status.stage == DATA_VIO_POST_PACKER) {
+ /* We're already in the last stage. */
+ return status;
+ }
+
+ if (status.may_not_compress) {
+ /*
+ * Compression has been dis-allowed for this VIO, so skip the rest of the
+ * path and go to the end.
+ */
+ new_status.stage = DATA_VIO_POST_PACKER;
+ } else {
+ /* Go to the next state. */
+ new_status.stage++;
+ }
+
+ if (set_data_vio_compression_status(data_vio, status, new_status))
+ return new_status;
+
+ /* Another thread changed the status out from under us so try again. */
+ }
+}
+
+/**
+ * cancel_data_vio_compression() - Prevent this data_vio from being compressed or packed.
+ *
+ * Return: true if the data_vio is in the packer and the caller was the first caller to cancel it.
+ */
+bool cancel_data_vio_compression(struct data_vio *data_vio)
+{
+ struct data_vio_compression_status status, new_status;
+
+ for (;;) {
+ status = get_data_vio_compression_status(data_vio);
+ if (status.may_not_compress || (status.stage == DATA_VIO_POST_PACKER)) {
+ /* This data_vio is already set up to not block in the packer. */
+ break;
+ }
+
+ new_status.stage = status.stage;
+ new_status.may_not_compress = true;
+
+ if (set_data_vio_compression_status(data_vio, status, new_status))
+ break;
+ }
+
+ return ((status.stage == DATA_VIO_PACKING) && !status.may_not_compress);
+}
+
+/**
+ * attempt_logical_block_lock() - Attempt to acquire the lock on a logical block.
+ * @completion: The data_vio for an external data request as a completion.
+ *
+ * This is the start of the path for all external requests. It is registered in launch_data_vio().
+ */
+static void attempt_logical_block_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct lbn_lock *lock = &data_vio->logical;
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+ struct data_vio *lock_holder;
+ int result;
+
+ assert_data_vio_in_logical_zone(data_vio);
+
+ if (data_vio->logical.lbn >= vdo->states.vdo.config.logical_blocks) {
+ continue_data_vio_with_error(data_vio, VDO_OUT_OF_RANGE);
+ return;
+ }
+
+ result = vdo_int_map_put(lock->zone->lbn_operations, lock->lbn,
+ data_vio, false, (void **) &lock_holder);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ if (lock_holder == NULL) {
+ /* We got the lock */
+ launch_locked_request(data_vio);
+ return;
+ }
+
+ result = VDO_ASSERT(lock_holder->logical.locked, "logical block lock held");
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ /*
+ * If the new request is a pure read request (not read-modify-write) and the lock_holder is
+ * writing and has received an allocation, service the read request immediately by copying
+ * data from the lock_holder to avoid having to flush the write out of the packer just to
+ * prevent the read from waiting indefinitely. If the lock_holder does not yet have an
+ * allocation, prevent it from blocking in the packer and wait on it. This is necessary in
+ * order to prevent returning data that may not have actually been written.
+ */
+ if (!data_vio->write && READ_ONCE(lock_holder->allocation_succeeded)) {
+ copy_to_bio(data_vio->user_bio, lock_holder->vio.data + data_vio->offset);
+ acknowledge_data_vio(data_vio);
+ complete_data_vio(completion);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_ATTEMPT_LOGICAL_BLOCK_LOCK;
+ vdo_waitq_enqueue_waiter(&lock_holder->logical.waiters, &data_vio->waiter);
+
+ /*
+ * Prevent writes and read-modify-writes from blocking indefinitely on lock holders in the
+ * packer.
+ */
+ if (lock_holder->write && cancel_data_vio_compression(lock_holder)) {
+ data_vio->compression.lock_holder = lock_holder;
+ launch_data_vio_packer_callback(data_vio,
+ vdo_remove_lock_holder_from_packer);
+ }
+}
+
+/**
+ * launch_data_vio() - (Re)initialize a data_vio to have a new logical block number, keeping the
+ * same parent and other state, and send it on its way.
+ */
+static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lbn)
+{
+ struct vdo_completion *completion = &data_vio->vio.completion;
+
+ /*
+ * Clearing the tree lock must happen before initializing the LBN lock, which also adds
+ * information to the tree lock.
+ */
+ memset(&data_vio->tree_lock, 0, sizeof(data_vio->tree_lock));
+ initialize_lbn_lock(data_vio, lbn);
+ INIT_LIST_HEAD(&data_vio->hash_lock_entry);
+ INIT_LIST_HEAD(&data_vio->write_entry);
+
+ memset(&data_vio->allocation, 0, sizeof(data_vio->allocation));
+
+ data_vio->is_duplicate = false;
+
+ memset(&data_vio->record_name, 0, sizeof(data_vio->record_name));
+ memset(&data_vio->duplicate, 0, sizeof(data_vio->duplicate));
+ vdo_reset_completion(completion);
+ completion->error_handler = handle_data_vio_error;
+ set_data_vio_logical_callback(data_vio, attempt_logical_block_lock);
+ vdo_enqueue_completion(completion, VDO_DEFAULT_Q_MAP_BIO_PRIORITY);
+}
+
+static bool is_zero_block(char *block)
+{
+ int i;
+
+ for (i = 0; i < VDO_BLOCK_SIZE; i += sizeof(u64)) {
+ if (*((u64 *) &block[i]))
+ return false;
+ }
+
+ return true;
+}
+
+static void copy_from_bio(struct bio *bio, char *data_ptr)
+{
+ struct bio_vec biovec;
+ struct bvec_iter iter;
+
+ bio_for_each_segment(biovec, bio, iter) {
+ memcpy_from_bvec(data_ptr, &biovec);
+ data_ptr += biovec.bv_len;
+ }
+}
+
+static void launch_bio(struct vdo *vdo, struct data_vio *data_vio, struct bio *bio)
+{
+ logical_block_number_t lbn;
+ /*
+ * Zero out the fields which don't need to be preserved (i.e. which are not pointers to
+ * separately allocated objects).
+ */
+ memset(data_vio, 0, offsetof(struct data_vio, vio));
+ memset(&data_vio->compression, 0, offsetof(struct compression_state, block));
+
+ data_vio->user_bio = bio;
+ data_vio->offset = to_bytes(bio->bi_iter.bi_sector & VDO_SECTORS_PER_BLOCK_MASK);
+ data_vio->is_partial = (bio->bi_iter.bi_size < VDO_BLOCK_SIZE) || (data_vio->offset != 0);
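+	/*
+	 * For example (assuming 4K blocks, i.e. eight 512-byte sectors each): a 512-byte bio at
+	 * sector 11 lands three sectors into its block, so the offset is 1536 bytes and the bio
+	 * is partial; a full 4K bio at sector 16 has offset 0 and is not.
+	 */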
+
+ /*
+ * Discards behave very differently than other requests when coming in from device-mapper.
+ * We have to be able to handle any size discards and various sector offsets within a
+ * block.
+ */
+ if (bio_op(bio) == REQ_OP_DISCARD) {
+ data_vio->remaining_discard = bio->bi_iter.bi_size;
+ data_vio->write = true;
+ data_vio->is_discard = true;
+ if (data_vio->is_partial) {
+ vdo_count_bios(&vdo->stats.bios_in_partial, bio);
+ data_vio->read = true;
+ }
+ } else if (data_vio->is_partial) {
+ vdo_count_bios(&vdo->stats.bios_in_partial, bio);
+ data_vio->read = true;
+ if (bio_data_dir(bio) == WRITE)
+ data_vio->write = true;
+ } else if (bio_data_dir(bio) == READ) {
+ data_vio->read = true;
+ } else {
+ /*
+ * Copy the bio data to a char array so that we can continue to use the data after
+ * we acknowledge the bio.
+ */
+ copy_from_bio(bio, data_vio->vio.data);
+ data_vio->is_zero = is_zero_block(data_vio->vio.data);
+ data_vio->write = true;
+ }
+
+ if (data_vio->user_bio->bi_opf & REQ_FUA)
+ data_vio->fua = true;
+
+ lbn = (bio->bi_iter.bi_sector - vdo->starting_sector_offset) / VDO_SECTORS_PER_BLOCK;
+ launch_data_vio(data_vio, lbn);
+}
+
+static void assign_data_vio(struct limiter *limiter, struct data_vio *data_vio)
+{
+ struct bio *bio = bio_list_pop(limiter->permitted_waiters);
+
+ launch_bio(limiter->pool->completion.vdo, data_vio, bio);
+ limiter->wake_count++;
+
+ bio = bio_list_peek(limiter->permitted_waiters);
+ limiter->arrival = ((bio == NULL) ? U64_MAX : get_arrival_time(bio));
+}
+
+static void assign_discard_permit(struct limiter *limiter)
+{
+ struct bio *bio = bio_list_pop(&limiter->waiters);
+
+ if (limiter->arrival == U64_MAX)
+ limiter->arrival = get_arrival_time(bio);
+
+ bio_list_add(limiter->permitted_waiters, bio);
+}
+
+static void get_waiters(struct limiter *limiter)
+{
+ bio_list_merge(&limiter->waiters, &limiter->new_waiters);
+ bio_list_init(&limiter->new_waiters);
+}
+
+static inline struct data_vio *get_available_data_vio(struct data_vio_pool *pool)
+{
+ struct data_vio *data_vio =
+ list_first_entry(&pool->available, struct data_vio, pool_entry);
+
+ list_del_init(&data_vio->pool_entry);
+ return data_vio;
+}
+
+static void assign_data_vio_to_waiter(struct limiter *limiter)
+{
+ assign_data_vio(limiter, get_available_data_vio(limiter->pool));
+}
+
+static void update_limiter(struct limiter *limiter)
+{
+ struct bio_list *waiters = &limiter->waiters;
+ data_vio_count_t available = limiter->limit - limiter->busy;
+
+ VDO_ASSERT_LOG_ONLY((limiter->release_count <= limiter->busy),
+ "Release count %u is not more than busy count %u",
+ limiter->release_count, limiter->busy);
+
+ get_waiters(limiter);
+ for (; (limiter->release_count > 0) && !bio_list_empty(waiters); limiter->release_count--)
+ limiter->assigner(limiter);
+
+ if (limiter->release_count > 0) {
+ WRITE_ONCE(limiter->busy, limiter->busy - limiter->release_count);
+ limiter->release_count = 0;
+ return;
+ }
+
+ for (; (available > 0) && !bio_list_empty(waiters); available--)
+ limiter->assigner(limiter);
+
+ WRITE_ONCE(limiter->busy, limiter->limit - available);
+ if (limiter->max_busy < limiter->busy)
+ WRITE_ONCE(limiter->max_busy, limiter->busy);
+}
+
+/**
+ * schedule_releases() - Ensure that release processing is scheduled.
+ *
+ * If this call switches the state to processing, enqueue. Otherwise, some other thread has already
+ * done so.
+ */
+static void schedule_releases(struct data_vio_pool *pool)
+{
+ /* Pairs with the barrier in process_release_callback(). */
+ smp_mb__before_atomic();
+ if (atomic_cmpxchg(&pool->processing, false, true))
+ return;
+
+ pool->completion.requeue = true;
+ vdo_launch_completion_with_priority(&pool->completion,
+ CPU_Q_COMPLETE_VIO_PRIORITY);
+}
+
+static void reuse_or_release_resources(struct data_vio_pool *pool,
+ struct data_vio *data_vio,
+ struct list_head *returned)
+{
+ if (data_vio->remaining_discard > 0) {
+ if (bio_list_empty(&pool->discard_limiter.waiters)) {
+ /* Return the data_vio's discard permit. */
+ pool->discard_limiter.release_count++;
+ } else {
+ assign_discard_permit(&pool->discard_limiter);
+ }
+ }
+
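+	/*
+	 * Hand the data_vio to whichever eligible waiter has waited longest: each limiter's
+	 * arrival field holds the enqueue time (in jiffies) of its eldest waiter, or U64_MAX if
+	 * it has none.
+	 */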
+ if (pool->limiter.arrival < pool->discard_limiter.arrival) {
+ assign_data_vio(&pool->limiter, data_vio);
+ } else if (pool->discard_limiter.arrival < U64_MAX) {
+ assign_data_vio(&pool->discard_limiter, data_vio);
+ } else {
+ list_add(&data_vio->pool_entry, returned);
+ pool->limiter.release_count++;
+ }
+}
+
+/**
+ * process_release_callback() - Process a batch of data_vio releases.
+ * @completion: The pool with data_vios to release.
+ */
+static void process_release_callback(struct vdo_completion *completion)
+{
+ struct data_vio_pool *pool = as_data_vio_pool(completion);
+ bool reschedule;
+ bool drained;
+ data_vio_count_t processed;
+ data_vio_count_t to_wake;
+ data_vio_count_t discards_to_wake;
+ LIST_HEAD(returned);
+
+ spin_lock(&pool->lock);
+ get_waiters(&pool->discard_limiter);
+ get_waiters(&pool->limiter);
+ spin_unlock(&pool->lock);
+
+ if (pool->limiter.arrival == U64_MAX) {
+ struct bio *bio = bio_list_peek(&pool->limiter.waiters);
+
+ if (bio != NULL)
+ pool->limiter.arrival = get_arrival_time(bio);
+ }
+
+ for (processed = 0; processed < DATA_VIO_RELEASE_BATCH_SIZE; processed++) {
+ struct data_vio *data_vio;
+ struct funnel_queue_entry *entry = vdo_funnel_queue_poll(pool->queue);
+
+ if (entry == NULL)
+ break;
+
+ data_vio = as_data_vio(container_of(entry, struct vdo_completion,
+ work_queue_entry_link));
+ acknowledge_data_vio(data_vio);
+ reuse_or_release_resources(pool, data_vio, &returned);
+ }
+
+ spin_lock(&pool->lock);
+ /*
+ * There is a race where waiters could be added while we are in the unlocked section above.
+ * Those waiters could not see the resources we are now about to release, so we assign
+ * those resources now as we have no guarantee of being rescheduled. This is handled in
+ * update_limiter().
+ */
+ update_limiter(&pool->discard_limiter);
+ list_splice(&returned, &pool->available);
+ update_limiter(&pool->limiter);
+ to_wake = pool->limiter.wake_count;
+ pool->limiter.wake_count = 0;
+ discards_to_wake = pool->discard_limiter.wake_count;
+ pool->discard_limiter.wake_count = 0;
+
+ atomic_set(&pool->processing, false);
+ /* Pairs with the barrier in schedule_releases(). */
+ smp_mb();
+
+ reschedule = !vdo_is_funnel_queue_empty(pool->queue);
+ drained = (!reschedule &&
+ vdo_is_state_draining(&pool->state) &&
+ check_for_drain_complete_locked(pool));
+ spin_unlock(&pool->lock);
+
+ if (to_wake > 0)
+ wake_up_nr(&pool->limiter.blocked_threads, to_wake);
+
+ if (discards_to_wake > 0)
+ wake_up_nr(&pool->discard_limiter.blocked_threads, discards_to_wake);
+
+ if (reschedule)
+ schedule_releases(pool);
+ else if (drained)
+ vdo_finish_draining(&pool->state);
+}
+
+static void initialize_limiter(struct limiter *limiter, struct data_vio_pool *pool,
+ assigner_fn assigner, data_vio_count_t limit)
+{
+ limiter->pool = pool;
+ limiter->assigner = assigner;
+ limiter->limit = limit;
+ limiter->arrival = U64_MAX;
+ init_waitqueue_head(&limiter->blocked_threads);
+}
+
+/**
+ * initialize_data_vio() - Allocate the components of a data_vio.
+ *
+ * The caller is responsible for cleaning up the data_vio on error.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int initialize_data_vio(struct data_vio *data_vio, struct vdo *vdo)
+{
+ struct bio *bio;
+ int result;
+
+ BUILD_BUG_ON(VDO_BLOCK_SIZE > PAGE_SIZE);
+ result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "data_vio data",
+ &data_vio->vio.data);
+ if (result != VDO_SUCCESS)
+ return vdo_log_error_strerror(result,
+ "data_vio data allocation failure");
+
+ result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "compressed block",
+ &data_vio->compression.block);
+ if (result != VDO_SUCCESS) {
+ return vdo_log_error_strerror(result,
+ "data_vio compressed block allocation failure");
+ }
+
+ result = vdo_allocate_memory(VDO_BLOCK_SIZE, 0, "vio scratch",
+ &data_vio->scratch_block);
+ if (result != VDO_SUCCESS)
+ return vdo_log_error_strerror(result,
+ "data_vio scratch allocation failure");
+
+ result = vdo_create_bio(&bio);
+ if (result != VDO_SUCCESS)
+ return vdo_log_error_strerror(result,
+ "data_vio data bio allocation failure");
+
+ vdo_initialize_completion(&data_vio->decrement_completion, vdo,
+ VDO_DECREMENT_COMPLETION);
+ initialize_vio(&data_vio->vio, bio, 1, VIO_TYPE_DATA, VIO_PRIORITY_DATA, vdo);
+
+ return VDO_SUCCESS;
+}
+
+static void destroy_data_vio(struct data_vio *data_vio)
+{
+ if (data_vio == NULL)
+ return;
+
+ vdo_free_bio(vdo_forget(data_vio->vio.bio));
+ vdo_free(vdo_forget(data_vio->vio.data));
+ vdo_free(vdo_forget(data_vio->compression.block));
+ vdo_free(vdo_forget(data_vio->scratch_block));
+}
+
+/**
+ * make_data_vio_pool() - Initialize a data_vio pool.
+ * @vdo: The vdo to which the pool will belong.
+ * @pool_size: The number of data_vios in the pool.
+ * @discard_limit: The maximum number of data_vios which may be used for discards.
+ * @pool_ptr: A pointer to hold the newly allocated pool.
+ */
+int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
+ data_vio_count_t discard_limit, struct data_vio_pool **pool_ptr)
+{
+ int result;
+ struct data_vio_pool *pool;
+ data_vio_count_t i;
+
+ result = vdo_allocate_extended(struct data_vio_pool, pool_size, struct data_vio,
+ __func__, &pool);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ VDO_ASSERT_LOG_ONLY((discard_limit <= pool_size),
+ "discard limit does not exceed pool size");
+ initialize_limiter(&pool->discard_limiter, pool, assign_discard_permit,
+ discard_limit);
+ pool->discard_limiter.permitted_waiters = &pool->permitted_discards;
+ initialize_limiter(&pool->limiter, pool, assign_data_vio_to_waiter, pool_size);
+ pool->limiter.permitted_waiters = &pool->limiter.waiters;
+ INIT_LIST_HEAD(&pool->available);
+ spin_lock_init(&pool->lock);
+ vdo_set_admin_state_code(&pool->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ vdo_initialize_completion(&pool->completion, vdo, VDO_DATA_VIO_POOL_COMPLETION);
+ vdo_prepare_completion(&pool->completion, process_release_callback,
+ process_release_callback, vdo->thread_config.cpu_thread,
+ NULL);
+
+ result = vdo_make_funnel_queue(&pool->queue);
+ if (result != VDO_SUCCESS) {
+ free_data_vio_pool(vdo_forget(pool));
+ return result;
+ }
+
+ for (i = 0; i < pool_size; i++) {
+ struct data_vio *data_vio = &pool->data_vios[i];
+
+ result = initialize_data_vio(data_vio, vdo);
+ if (result != VDO_SUCCESS) {
+ destroy_data_vio(data_vio);
+ free_data_vio_pool(pool);
+ return result;
+ }
+
+ list_add(&data_vio->pool_entry, &pool->available);
+ }
+
+ *pool_ptr = pool;
+ return VDO_SUCCESS;
+}
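+
+/*
+ * A minimal usage sketch (the sizes and surrounding names here are hypothetical; the discard
+ * limit reflects the 75% cap described in the DOC comment above):
+ *
+ *	struct data_vio_pool *pool;
+ *
+ *	if (make_data_vio_pool(vdo, 2048, 1536, &pool) != VDO_SUCCESS)
+ *		return;
+ *	vdo_launch_bio(pool, bio);		(once per incoming bio; may block)
+ *	...
+ *	drain_data_vio_pool(pool, completion);	(quiesce before shutdown)
+ *	free_data_vio_pool(pool);
+ */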
+
+/**
+ * free_data_vio_pool() - Free a data_vio_pool and the data_vios in it.
+ *
+ * All data_vios must be returned to the pool before calling this function.
+ */
+void free_data_vio_pool(struct data_vio_pool *pool)
+{
+ struct data_vio *data_vio, *tmp;
+
+ if (pool == NULL)
+ return;
+
+ /*
+ * Pairs with the barrier in process_release_callback(). Possibly not needed since it
+ * caters to an enqueue vs. free race.
+ */
+ smp_mb();
+ BUG_ON(atomic_read(&pool->processing));
+
+ spin_lock(&pool->lock);
+ VDO_ASSERT_LOG_ONLY((pool->limiter.busy == 0),
+ "data_vio pool must not have %u busy entries when being freed",
+ pool->limiter.busy);
+ VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->limiter.waiters) &&
+ bio_list_empty(&pool->limiter.new_waiters)),
+ "data_vio pool must not have threads waiting to read or write when being freed");
+ VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->discard_limiter.waiters) &&
+ bio_list_empty(&pool->discard_limiter.new_waiters)),
+ "data_vio pool must not have threads waiting to discard when being freed");
+ spin_unlock(&pool->lock);
+
+ list_for_each_entry_safe(data_vio, tmp, &pool->available, pool_entry) {
+ list_del_init(&data_vio->pool_entry);
+ destroy_data_vio(data_vio);
+ }
+
+ vdo_free_funnel_queue(vdo_forget(pool->queue));
+ vdo_free(pool);
+}
+
+static bool acquire_permit(struct limiter *limiter)
+{
+ if (limiter->busy >= limiter->limit)
+ return false;
+
+ WRITE_ONCE(limiter->busy, limiter->busy + 1);
+ if (limiter->max_busy < limiter->busy)
+ WRITE_ONCE(limiter->max_busy, limiter->busy);
+ return true;
+}
+
+static void wait_permit(struct limiter *limiter, struct bio *bio)
+ __releases(&limiter->pool->lock)
+{
+ DEFINE_WAIT(wait);
+
+ bio_list_add(&limiter->new_waiters, bio);
+ prepare_to_wait_exclusive(&limiter->blocked_threads, &wait,
+ TASK_UNINTERRUPTIBLE);
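+	/*
+	 * The waiter is queued and its task state set before the pool lock is dropped, so a
+	 * wake_up_nr() from process_release_callback() cannot be missed.
+	 */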
+ spin_unlock(&limiter->pool->lock);
+ io_schedule();
+ finish_wait(&limiter->blocked_threads, &wait);
+}
+
+/**
+ * vdo_launch_bio() - Acquire a data_vio from the pool, assign the bio to it, and launch it.
+ *
+ * This will block if data_vios or discard permits are not available.
+ */
+void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio)
+{
+ struct data_vio *data_vio;
+
+ VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&pool->state),
+ "data_vio_pool not quiescent on acquire");
+
+ bio->bi_private = (void *) jiffies;
+ spin_lock(&pool->lock);
+ if ((bio_op(bio) == REQ_OP_DISCARD) &&
+ !acquire_permit(&pool->discard_limiter)) {
+ wait_permit(&pool->discard_limiter, bio);
+ return;
+ }
+
+ if (!acquire_permit(&pool->limiter)) {
+ wait_permit(&pool->limiter, bio);
+ return;
+ }
+
+ data_vio = get_available_data_vio(pool);
+ spin_unlock(&pool->lock);
+ launch_bio(pool->completion.vdo, data_vio, bio);
+}
+
+/* Implements vdo_admin_initiator_fn. */
+static void initiate_drain(struct admin_state *state)
+{
+ bool drained;
+ struct data_vio_pool *pool = container_of(state, struct data_vio_pool, state);
+
+ spin_lock(&pool->lock);
+ drained = check_for_drain_complete_locked(pool);
+ spin_unlock(&pool->lock);
+
+ if (drained)
+ vdo_finish_draining(state);
+}
+
+static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread),
+ "%s called on cpu thread", name);
+}
+
+/**
+ * drain_data_vio_pool() - Wait asynchronously for all data_vios to be returned to the pool.
+ * @completion: The completion to notify when the pool has drained.
+ */
+void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
+{
+ assert_on_vdo_cpu_thread(completion->vdo, __func__);
+ vdo_start_draining(&pool->state, VDO_ADMIN_STATE_SUSPENDING, completion,
+ initiate_drain);
+}
+
+/**
+ * resume_data_vio_pool() - Resume a data_vio pool.
+ * @pool: The pool to resume.
+ * @completion: The completion to notify when the pool has resumed.
+ */
+void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
+{
+ assert_on_vdo_cpu_thread(completion->vdo, __func__);
+ vdo_continue_completion(completion, vdo_resume_if_quiescent(&pool->state));
+}
+
+static void dump_limiter(const char *name, struct limiter *limiter)
+{
+ vdo_log_info("%s: %u of %u busy (max %u), %s", name, limiter->busy,
+ limiter->limit, limiter->max_busy,
+ ((bio_list_empty(&limiter->waiters) &&
+ bio_list_empty(&limiter->new_waiters)) ?
+ "no waiters" : "has waiters"));
+}
+
+/**
+ * dump_data_vio_pool() - Dump a data_vio pool to the log.
+ * @dump_vios: Whether to dump the details of each busy data_vio as well.
+ */
+void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios)
+{
+ /*
+ * In order that syslog can empty its buffer, sleep after 35 elements for 4ms (till the
+ * second clock tick). These numbers were picked based on experiments with lab machines.
+ */
+ static const int ELEMENTS_PER_BATCH = 35;
+ static const int SLEEP_FOR_SYSLOG = 4000;
+
+ if (pool == NULL)
+ return;
+
+ spin_lock(&pool->lock);
+ dump_limiter("data_vios", &pool->limiter);
+ dump_limiter("discard permits", &pool->discard_limiter);
+ if (dump_vios) {
+ int i;
+ int dumped = 0;
+
+ for (i = 0; i < pool->limiter.limit; i++) {
+ struct data_vio *data_vio = &pool->data_vios[i];
+
+ if (!list_empty(&data_vio->pool_entry))
+ continue;
+
+ dump_data_vio(data_vio);
+ if (++dumped >= ELEMENTS_PER_BATCH) {
+ spin_unlock(&pool->lock);
+ dumped = 0;
+ fsleep(SLEEP_FOR_SYSLOG);
+ spin_lock(&pool->lock);
+ }
+ }
+ }
+
+ spin_unlock(&pool->lock);
+}
+
+data_vio_count_t get_data_vio_pool_active_discards(struct data_vio_pool *pool)
+{
+ return READ_ONCE(pool->discard_limiter.busy);
+}
+
+data_vio_count_t get_data_vio_pool_discard_limit(struct data_vio_pool *pool)
+{
+ return READ_ONCE(pool->discard_limiter.limit);
+}
+
+data_vio_count_t get_data_vio_pool_maximum_discards(struct data_vio_pool *pool)
+{
+ return READ_ONCE(pool->discard_limiter.max_busy);
+}
+
+int set_data_vio_pool_discard_limit(struct data_vio_pool *pool, data_vio_count_t limit)
+{
+ if (get_data_vio_pool_request_limit(pool) < limit) {
+		/* The discard limit may not be higher than the data_vio limit. */
+ return -EINVAL;
+ }
+
+ spin_lock(&pool->lock);
+ pool->discard_limiter.limit = limit;
+ spin_unlock(&pool->lock);
+
+ return VDO_SUCCESS;
+}
+
+data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool)
+{
+ return READ_ONCE(pool->limiter.busy);
+}
+
+data_vio_count_t get_data_vio_pool_request_limit(struct data_vio_pool *pool)
+{
+ return READ_ONCE(pool->limiter.limit);
+}
+
+data_vio_count_t get_data_vio_pool_maximum_requests(struct data_vio_pool *pool)
+{
+ return READ_ONCE(pool->limiter.max_busy);
+}
+
+static void update_data_vio_error_stats(struct data_vio *data_vio)
+{
+ u8 index = 0;
+ static const char * const operations[] = {
+ [0] = "empty",
+ [1] = "read",
+ [2] = "write",
+ [3] = "read-modify-write",
+ [5] = "read+fua",
+ [6] = "write+fua",
+ [7] = "read-modify-write+fua",
+ };
+
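+	/* The index is a bitmask of the operation: bit 0 = read, bit 1 = write, bit 2 = FUA. */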
+ if (data_vio->read)
+ index = 1;
+
+ if (data_vio->write)
+ index += 2;
+
+ if (data_vio->fua)
+ index += 4;
+
+ update_vio_error_stats(&data_vio->vio,
+ "Completing %s vio for LBN %llu with error after %s",
+ operations[index],
+ (unsigned long long) data_vio->logical.lbn,
+ get_data_vio_operation_name(data_vio));
+}
+
+static void perform_cleanup_stage(struct data_vio *data_vio,
+ enum data_vio_cleanup_stage stage);
+
+/**
+ * release_allocated_lock() - Release the PBN lock and/or the reference on the allocated block at
+ * the end of processing a data_vio.
+ */
+static void release_allocated_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_allocated_zone(data_vio);
+ release_data_vio_allocation_lock(data_vio, false);
+ perform_cleanup_stage(data_vio, VIO_RELEASE_RECOVERY_LOCKS);
+}
+
+/** release_lock() - Release an uncontended LBN lock. */
+static void release_lock(struct data_vio *data_vio, struct lbn_lock *lock)
+{
+ struct int_map *lock_map = lock->zone->lbn_operations;
+ struct data_vio *lock_holder;
+
+ if (!lock->locked) {
+ /* The lock is not locked, so it had better not be registered in the lock map. */
+ struct data_vio *lock_holder = vdo_int_map_get(lock_map, lock->lbn);
+
+ VDO_ASSERT_LOG_ONLY((data_vio != lock_holder),
+ "no logical block lock held for block %llu",
+ (unsigned long long) lock->lbn);
+ return;
+ }
+
+ /* Release the lock by removing the lock from the map. */
+ lock_holder = vdo_int_map_remove(lock_map, lock->lbn);
+ VDO_ASSERT_LOG_ONLY((data_vio == lock_holder),
+ "logical block lock mismatch for block %llu",
+ (unsigned long long) lock->lbn);
+ lock->locked = false;
+}
+
+/** transfer_lock() - Transfer a contended LBN lock to the eldest waiter. */
+static void transfer_lock(struct data_vio *data_vio, struct lbn_lock *lock)
+{
+ struct data_vio *lock_holder, *next_lock_holder;
+ int result;
+
+ VDO_ASSERT_LOG_ONLY(lock->locked, "lbn_lock with waiters is not locked");
+
+ /* Another data_vio is waiting for the lock, transfer it in a single lock map operation. */
+ next_lock_holder =
+ vdo_waiter_as_data_vio(vdo_waitq_dequeue_waiter(&lock->waiters));
+
+ /* Transfer the remaining lock waiters to the next lock holder. */
+ vdo_waitq_transfer_all_waiters(&lock->waiters,
+ &next_lock_holder->logical.waiters);
+
+ result = vdo_int_map_put(lock->zone->lbn_operations, lock->lbn,
+ next_lock_holder, true, (void **) &lock_holder);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(next_lock_holder, result);
+ return;
+ }
+
+ VDO_ASSERT_LOG_ONLY((lock_holder == data_vio),
+ "logical block lock mismatch for block %llu",
+ (unsigned long long) lock->lbn);
+ lock->locked = false;
+
+ /*
+ * If there are still waiters, other data_vios must be trying to get the lock we just
+ * transferred. We must ensure that the new lock holder doesn't block in the packer.
+ */
+ if (vdo_waitq_has_waiters(&next_lock_holder->logical.waiters))
+ cancel_data_vio_compression(next_lock_holder);
+
+ /*
+ * Avoid stack overflow on lock transfer.
+ * FIXME: this is only an issue in the 1 thread config.
+ */
+ next_lock_holder->vio.completion.requeue = true;
+ launch_locked_request(next_lock_holder);
+}
+
+/**
+ * release_logical_lock() - Release the logical block lock and flush generation lock at the end of
+ * processing a data_vio.
+ */
+static void release_logical_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct lbn_lock *lock = &data_vio->logical;
+
+ assert_data_vio_in_logical_zone(data_vio);
+
+ if (vdo_waitq_has_waiters(&lock->waiters))
+ transfer_lock(data_vio, lock);
+ else
+ release_lock(data_vio, lock);
+
+ vdo_release_flush_generation_lock(data_vio);
+ perform_cleanup_stage(data_vio, VIO_CLEANUP_DONE);
+}
+
+/** clean_hash_lock() - Release the hash lock at the end of processing a data_vio. */
+static void clean_hash_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_hash_zone(data_vio);
+ if (completion->result != VDO_SUCCESS) {
+ vdo_clean_failed_hash_lock(data_vio);
+ return;
+ }
+
+ vdo_release_hash_lock(data_vio);
+ perform_cleanup_stage(data_vio, VIO_RELEASE_LOGICAL);
+}
+
+/**
+ * finish_cleanup() - Make some assertions about a data_vio which has finished cleaning up.
+ *
+ * If the data_vio is part of a multi-block discard, it is relaunched on the next block;
+ * otherwise, it is returned to the pool.
+ */
+static void finish_cleanup(struct data_vio *data_vio)
+{
+ struct vdo_completion *completion = &data_vio->vio.completion;
+
+ VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL,
+ "complete data_vio has no allocation lock");
+ VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL,
+ "complete data_vio has no hash lock");
+ if ((data_vio->remaining_discard <= VDO_BLOCK_SIZE) ||
+ (completion->result != VDO_SUCCESS)) {
+ struct data_vio_pool *pool = completion->vdo->data_vio_pool;
+
+ vdo_funnel_queue_put(pool->queue, &completion->work_queue_entry_link);
+ schedule_releases(pool);
+ return;
+ }
+
+ data_vio->remaining_discard -= min_t(u32, data_vio->remaining_discard,
+ VDO_BLOCK_SIZE - data_vio->offset);
+ data_vio->is_partial = (data_vio->remaining_discard < VDO_BLOCK_SIZE);
+ data_vio->read = data_vio->is_partial;
+ data_vio->offset = 0;
+ completion->requeue = true;
+ launch_data_vio(data_vio, data_vio->logical.lbn + 1);
+}
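+
+/*
+ * For example (assuming 4K blocks), a block-aligned 12K discard is serviced in three passes:
+ * remaining_discard shrinks 12K -> 8K -> 4K, with each pass relaunching the same data_vio at the
+ * next LBN until the final block completes and the data_vio is released to the pool.
+ */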
+
+/** perform_cleanup_stage() - Perform the next step in the process of cleaning up a data_vio. */
+static void perform_cleanup_stage(struct data_vio *data_vio,
+ enum data_vio_cleanup_stage stage)
+{
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+
+ switch (stage) {
+ case VIO_RELEASE_HASH_LOCK:
+ if (data_vio->hash_lock != NULL) {
+ launch_data_vio_hash_zone_callback(data_vio, clean_hash_lock);
+ return;
+ }
+ fallthrough;
+
+ case VIO_RELEASE_ALLOCATED:
+ if (data_vio_has_allocation(data_vio)) {
+ launch_data_vio_allocated_zone_callback(data_vio,
+ release_allocated_lock);
+ return;
+ }
+ fallthrough;
+
+ case VIO_RELEASE_RECOVERY_LOCKS:
+ if ((data_vio->recovery_sequence_number > 0) &&
+ (READ_ONCE(vdo->read_only_notifier.read_only_error) == VDO_SUCCESS) &&
+ (data_vio->vio.completion.result != VDO_READ_ONLY))
+ vdo_log_warning("VDO not read-only when cleaning data_vio with RJ lock");
+ fallthrough;
+
+ case VIO_RELEASE_LOGICAL:
+ launch_data_vio_logical_callback(data_vio, release_logical_lock);
+ return;
+
+ default:
+ finish_cleanup(data_vio);
+ }
+}
+
+void complete_data_vio(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ completion->error_handler = NULL;
+ data_vio->last_async_operation = VIO_ASYNC_OP_CLEANUP;
+ perform_cleanup_stage(data_vio,
+ (data_vio->write ? VIO_CLEANUP_START : VIO_RELEASE_LOGICAL));
+}
+
+static void enter_read_only_mode(struct vdo_completion *completion)
+{
+ if (vdo_is_read_only(completion->vdo))
+ return;
+
+ if (completion->result != VDO_READ_ONLY) {
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ vdo_log_error_strerror(completion->result,
+ "Preparing to enter read-only mode: data_vio for LBN %llu (becoming mapped to %llu, previously mapped to %llu, allocated %llu) is completing with a fatal error after operation %s",
+ (unsigned long long) data_vio->logical.lbn,
+ (unsigned long long) data_vio->new_mapped.pbn,
+ (unsigned long long) data_vio->mapped.pbn,
+ (unsigned long long) data_vio->allocation.pbn,
+ get_data_vio_operation_name(data_vio));
+ }
+
+ vdo_enter_read_only_mode(completion->vdo, completion->result);
+}
+
+void handle_data_vio_error(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ if ((completion->result == VDO_READ_ONLY) || (data_vio->user_bio == NULL))
+ enter_read_only_mode(completion);
+
+ update_data_vio_error_stats(data_vio);
+ complete_data_vio(completion);
+}
+
+/**
+ * get_data_vio_operation_name() - Get the name of the last asynchronous operation performed on a
+ * data_vio.
+ */
+const char *get_data_vio_operation_name(struct data_vio *data_vio)
+{
+ BUILD_BUG_ON((MAX_VIO_ASYNC_OPERATION_NUMBER - MIN_VIO_ASYNC_OPERATION_NUMBER) !=
+ ARRAY_SIZE(ASYNC_OPERATION_NAMES));
+
+ return ((data_vio->last_async_operation < MAX_VIO_ASYNC_OPERATION_NUMBER) ?
+ ASYNC_OPERATION_NAMES[data_vio->last_async_operation] :
+ "unknown async operation");
+}
+
+/**
+ * data_vio_allocate_data_block() - Allocate a data block.
+ * @data_vio: The data_vio which needs an allocation.
+ * @write_lock_type: The type of write lock to obtain on the block.
+ * @callback: The callback which will attempt an allocation in the current zone and continue if it
+ * succeeds.
+ * @error_handler: The handler for errors while allocating.
+ */
+void data_vio_allocate_data_block(struct data_vio *data_vio,
+ enum pbn_lock_type write_lock_type,
+ vdo_action_fn callback, vdo_action_fn error_handler)
+{
+ struct allocation *allocation = &data_vio->allocation;
+
+ VDO_ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK),
+ "data_vio does not have an allocation");
+ allocation->write_lock_type = write_lock_type;
+ allocation->zone = vdo_get_next_allocation_zone(data_vio->logical.zone);
+ allocation->first_allocation_zone = allocation->zone->zone_number;
+
+ data_vio->vio.completion.error_handler = error_handler;
+ launch_data_vio_allocated_zone_callback(data_vio, callback);
+}
+
+/**
+ * release_data_vio_allocation_lock() - Release the PBN lock on a data_vio's allocated block.
+ * @data_vio: The lock holder.
+ * @reset: If true, the allocation will be reset (i.e. any allocated pbn will be forgotten).
+ *
+ * If the reference to the locked block is still provisional, it will be released as well.
+ */
+void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset)
+{
+ struct allocation *allocation = &data_vio->allocation;
+ physical_block_number_t locked_pbn = allocation->pbn;
+
+ assert_data_vio_in_allocated_zone(data_vio);
+
+ if (reset || vdo_pbn_lock_has_provisional_reference(allocation->lock))
+ allocation->pbn = VDO_ZERO_BLOCK;
+
+ vdo_release_physical_zone_pbn_lock(allocation->zone, locked_pbn,
+ vdo_forget(allocation->lock));
+}
+
+/**
+ * uncompress_data_vio() - Uncompress the data a data_vio has just read.
+ * @data_vio: The data_vio to uncompress.
+ * @mapping_state: The mapping state indicating which fragment to decompress.
+ * @buffer: The buffer to receive the uncompressed data.
+ */
+int uncompress_data_vio(struct data_vio *data_vio,
+ enum block_mapping_state mapping_state, char *buffer)
+{
+ int size;
+ u16 fragment_offset, fragment_size;
+ struct compressed_block *block = data_vio->compression.block;
+ int result = vdo_get_compressed_block_fragment(mapping_state, block,
+ &fragment_offset, &fragment_size);
+
+ if (result != VDO_SUCCESS) {
+ vdo_log_debug("%s: compressed fragment error %d", __func__, result);
+ return result;
+ }
+
+ size = LZ4_decompress_safe((block->data + fragment_offset), buffer,
+ fragment_size, VDO_BLOCK_SIZE);
+ if (size != VDO_BLOCK_SIZE) {
+ vdo_log_debug("%s: lz4 error", __func__);
+ return VDO_INVALID_FRAGMENT;
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * modify_for_partial_write() - Do the modify-write part of a read-modify-write cycle.
+ * @completion: The data_vio which has just finished its read.
+ *
+ * This callback is registered in read_block().
+ */
+static void modify_for_partial_write(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ char *data = data_vio->vio.data;
+ struct bio *bio = data_vio->user_bio;
+
+ assert_data_vio_on_cpu_thread(data_vio);
+
+ if (bio_op(bio) == REQ_OP_DISCARD) {
+ memset(data + data_vio->offset, '\0', min_t(u32,
+ data_vio->remaining_discard,
+ VDO_BLOCK_SIZE - data_vio->offset));
+ } else {
+ copy_from_bio(bio, data + data_vio->offset);
+ }
+
+ data_vio->is_zero = is_zero_block(data);
+ data_vio->read = false;
+ launch_data_vio_logical_callback(data_vio,
+ continue_data_vio_with_block_map_slot);
+}
+
+static void complete_read(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ char *data = data_vio->vio.data;
+ bool compressed = vdo_is_state_compressed(data_vio->mapped.state);
+
+ assert_data_vio_on_cpu_thread(data_vio);
+
+ if (compressed) {
+ int result = uncompress_data_vio(data_vio, data_vio->mapped.state, data);
+
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+ }
+
+ if (data_vio->write) {
+ modify_for_partial_write(completion);
+ return;
+ }
+
+ if (compressed || data_vio->is_partial)
+ copy_to_bio(data_vio->user_bio, data + data_vio->offset);
+
+ acknowledge_data_vio(data_vio);
+ complete_data_vio(completion);
+}
+
+static void read_endio(struct bio *bio)
+{
+ struct data_vio *data_vio = vio_as_data_vio(bio->bi_private);
+ int result = blk_status_to_errno(bio->bi_status);
+
+ vdo_count_completed_bios(bio);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ launch_data_vio_cpu_callback(data_vio, complete_read,
+ CPU_Q_COMPLETE_READ_PRIORITY);
+}
+
+static void complete_zero_read(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_on_cpu_thread(data_vio);
+
+ if (data_vio->is_partial) {
+ memset(data_vio->vio.data, 0, VDO_BLOCK_SIZE);
+ if (data_vio->write) {
+ modify_for_partial_write(completion);
+ return;
+ }
+ } else {
+ zero_fill_bio(data_vio->user_bio);
+ }
+
+ complete_read(completion);
+}
+
+/**
+ * read_block() - Read a block asynchronously.
+ *
+ * This is the callback registered in read_block_mapping().
+ */
+static void read_block(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct vio *vio = as_vio(completion);
+ int result = VDO_SUCCESS;
+
+ if (data_vio->mapped.pbn == VDO_ZERO_BLOCK) {
+ launch_data_vio_cpu_callback(data_vio, complete_zero_read,
+ CPU_Q_COMPLETE_VIO_PRIORITY);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_READ_DATA_VIO;
+ if (vdo_is_state_compressed(data_vio->mapped.state)) {
+ result = vio_reset_bio(vio, (char *) data_vio->compression.block,
+ read_endio, REQ_OP_READ, data_vio->mapped.pbn);
+ } else {
+ blk_opf_t opf = ((data_vio->user_bio->bi_opf & PASSTHROUGH_FLAGS) | REQ_OP_READ);
+
+ if (data_vio->is_partial) {
+ result = vio_reset_bio(vio, vio->data, read_endio, opf,
+ data_vio->mapped.pbn);
+ } else {
+ /* A full 4k read. Use the incoming bio to avoid having to copy the data */
+ bio_reset(vio->bio, vio->bio->bi_bdev, opf);
+ bio_init_clone(data_vio->user_bio->bi_bdev, vio->bio,
+ data_vio->user_bio, GFP_KERNEL);
+
+ /* Copy over the original bio iovec and opflags. */
+ vdo_set_bio_properties(vio->bio, vio, read_endio, opf,
+ data_vio->mapped.pbn);
+ }
+ }
+
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ vdo_submit_data_vio(data_vio);
+}
+
+static inline struct data_vio *
+reference_count_update_completion_as_data_vio(struct vdo_completion *completion)
+{
+ if (completion->type == VIO_COMPLETION)
+ return as_data_vio(completion);
+
+ return container_of(completion, struct data_vio, decrement_completion);
+}
+
+/**
+ * update_block_map() - Rendezvous of the data_vio and decrement completions after each has
+ * made its reference updates. Handle any error from either, or proceed
+ * to updating the block map.
+ * @completion: The completion of the write in progress.
+ */
+static void update_block_map(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = reference_count_update_completion_as_data_vio(completion);
+
+ assert_data_vio_in_logical_zone(data_vio);
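+	/*
+	 * Both the increment and decrement completions arrive here on the logical zone thread
+	 * (see increment_reference_count() and decrement_reference_count()), so the rendezvous
+	 * flag below needs no locking.
+	 */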
+
+ if (!data_vio->first_reference_operation_complete) {
+ /* Rendezvous, we're first */
+ data_vio->first_reference_operation_complete = true;
+ return;
+ }
+
+ completion = &data_vio->vio.completion;
+ vdo_set_completion_result(completion, data_vio->decrement_completion.result);
+ if (completion->result != VDO_SUCCESS) {
+ handle_data_vio_error(completion);
+ return;
+ }
+
+ completion->error_handler = handle_data_vio_error;
+ if (data_vio->hash_lock != NULL)
+ set_data_vio_hash_zone_callback(data_vio, vdo_continue_hash_lock);
+ else
+ completion->callback = complete_data_vio;
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_PUT_MAPPED_BLOCK;
+ vdo_put_mapped_block(data_vio);
+}
+
+static void decrement_reference_count(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = container_of(completion, struct data_vio,
+ decrement_completion);
+
+ assert_data_vio_in_mapped_zone(data_vio);
+
+ vdo_set_completion_callback(completion, update_block_map,
+ data_vio->logical.zone->thread_id);
+ completion->error_handler = update_block_map;
+ vdo_modify_reference_count(completion, &data_vio->decrement_updater);
+}
+
+static void increment_reference_count(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_new_mapped_zone(data_vio);
+
+ if (data_vio->downgrade_allocation_lock) {
+ /*
+ * Now that the data has been written, it's safe to deduplicate against the
+ * block. Downgrade the allocation lock to a read lock so it can be used later by
+ * the hash lock. This is done here since it needs to happen sometime before we
+ * return to the hash zone, and we are currently on the correct thread. For
+ * compressed blocks, the downgrade will have already been done.
+ */
+ vdo_downgrade_pbn_write_lock(data_vio->allocation.lock, false);
+ }
+
+ set_data_vio_logical_callback(data_vio, update_block_map);
+ completion->error_handler = update_block_map;
+ vdo_modify_reference_count(completion, &data_vio->increment_updater);
+}
+
+/** journal_remapping() - Add a recovery journal entry for a data remapping. */
+static void journal_remapping(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_journal_zone(data_vio);
+
+ data_vio->decrement_updater.operation = VDO_JOURNAL_DATA_REMAPPING;
+ data_vio->decrement_updater.zpbn = data_vio->mapped;
+ if (data_vio->new_mapped.pbn == VDO_ZERO_BLOCK) {
+ data_vio->first_reference_operation_complete = true;
+ if (data_vio->mapped.pbn == VDO_ZERO_BLOCK)
+ set_data_vio_logical_callback(data_vio, update_block_map);
+ } else {
+ set_data_vio_new_mapped_zone_callback(data_vio,
+ increment_reference_count);
+ }
+
+ if (data_vio->mapped.pbn == VDO_ZERO_BLOCK) {
+ data_vio->first_reference_operation_complete = true;
+ } else {
+ vdo_set_completion_callback(&data_vio->decrement_completion,
+ decrement_reference_count,
+ data_vio->mapped.zone->thread_id);
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_JOURNAL_REMAPPING;
+ vdo_add_recovery_journal_entry(completion->vdo->recovery_journal, data_vio);
+}
+
+/**
+ * read_old_block_mapping() - Get the previous PBN/LBN mapping of an in-progress write.
+ *
+ * Gets the previous PBN mapped to this LBN from the block map, so as to make an appropriate
+ * journal entry referencing the removal of this LBN->PBN mapping.
+ */
+static void read_old_block_mapping(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_logical_zone(data_vio);
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_WRITE;
+ set_data_vio_journal_callback(data_vio, journal_remapping);
+ vdo_get_mapped_block(data_vio);
+}
+
+void update_metadata_for_data_vio_write(struct data_vio *data_vio, struct pbn_lock *lock)
+{
+ data_vio->increment_updater = (struct reference_updater) {
+ .operation = VDO_JOURNAL_DATA_REMAPPING,
+ .increment = true,
+ .zpbn = data_vio->new_mapped,
+ .lock = lock,
+ };
+
+ launch_data_vio_logical_callback(data_vio, read_old_block_mapping);
+}
+
+/**
+ * pack_compressed_data() - Attempt to pack the compressed data_vio into a block.
+ *
+ * This is the callback registered in launch_compress_data_vio().
+ */
+static void pack_compressed_data(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_packer_zone(data_vio);
+
+ if (!vdo_get_compressing(vdo_from_data_vio(data_vio)) ||
+ get_data_vio_compression_status(data_vio).may_not_compress) {
+ write_data_vio(data_vio);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_ATTEMPT_PACKING;
+ vdo_attempt_packing(data_vio);
+}
+
+/**
+ * compress_data_vio() - Do the actual work of compressing the data on a CPU queue.
+ *
+ * This callback is registered in launch_compress_data_vio().
+ */
+static void compress_data_vio(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ int size;
+
+ assert_data_vio_on_cpu_thread(data_vio);
+
+ /*
+ * By putting the compressed data at the start of the compressed block data field, we won't
+ * need to copy it if this data_vio becomes a compressed write agent.
+ */
+ size = LZ4_compress_default(data_vio->vio.data,
+ data_vio->compression.block->data, VDO_BLOCK_SIZE,
+ VDO_MAX_COMPRESSED_FRAGMENT_SIZE,
+ (char *) vdo_get_work_queue_private_data());
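+	/*
+	 * LZ4_compress_default() returns 0 when the data cannot be compressed into the
+	 * VDO_MAX_COMPRESSED_FRAGMENT_SIZE budget.
+	 */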
+ if ((size > 0) && (size < VDO_COMPRESSED_BLOCK_DATA_SIZE)) {
+ data_vio->compression.size = size;
+ launch_data_vio_packer_callback(data_vio, pack_compressed_data);
+ return;
+ }
+
+ write_data_vio(data_vio);
+}
+
+/**
+ * launch_compress_data_vio() - Continue a write by attempting to compress the data.
+ *
+ * This is a re-entry point to vio_write used by hash locks.
+ */
+void launch_compress_data_vio(struct data_vio *data_vio)
+{
+ VDO_ASSERT_LOG_ONLY(!data_vio->is_duplicate, "compressing a non-duplicate block");
+ VDO_ASSERT_LOG_ONLY(data_vio->hash_lock != NULL,
+ "data_vio to compress has a hash_lock");
+ VDO_ASSERT_LOG_ONLY(data_vio_has_allocation(data_vio),
+ "data_vio to compress has an allocation");
+
+ /*
+ * There are 4 reasons why a data_vio which has reached this point will not be eligible for
+ * compression:
+ *
+ * 1) Since data_vios can block indefinitely in the packer, it would be bad to do so if the
+ * write request also requests FUA.
+ *
+ * 2) A data_vio should not be compressed when compression is disabled for the vdo.
+ *
+ * 3) A data_vio could be doing a partial write on behalf of a larger discard which has not
+ * yet been acknowledged and hence blocking in the packer would be bad.
+ *
+ * 4) Some other data_vio may be waiting on this data_vio in which case blocking in the
+ * packer would also be bad.
+ */
+ if (data_vio->fua ||
+ !vdo_get_compressing(vdo_from_data_vio(data_vio)) ||
+ ((data_vio->user_bio != NULL) && (bio_op(data_vio->user_bio) == REQ_OP_DISCARD)) ||
+ (advance_data_vio_compression_stage(data_vio).stage != DATA_VIO_COMPRESSING)) {
+ write_data_vio(data_vio);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_COMPRESS_DATA_VIO;
+ launch_data_vio_cpu_callback(data_vio, compress_data_vio,
+ CPU_Q_COMPRESS_BLOCK_PRIORITY);
+}
+
+/**
+ * hash_data_vio() - Hash the data in a data_vio and set the hash zone (which also flags the record
+ * name as set).
+ *
+ * This callback is registered in prepare_for_dedupe().
+ */
+static void hash_data_vio(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_on_cpu_thread(data_vio);
+ VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "zero blocks should not be hashed");
+
+ murmurhash3_128(data_vio->vio.data, VDO_BLOCK_SIZE, 0x62ea60be,
+ &data_vio->record_name);
+
+ data_vio->hash_zone = vdo_select_hash_zone(vdo_from_data_vio(data_vio)->hash_zones,
+ &data_vio->record_name);
+ data_vio->last_async_operation = VIO_ASYNC_OP_ACQUIRE_VDO_HASH_LOCK;
+ launch_data_vio_hash_zone_callback(data_vio, vdo_acquire_hash_lock);
+}
+
+/** prepare_for_dedupe() - Prepare for the dedupe path after attempting to get an allocation. */
+static void prepare_for_dedupe(struct data_vio *data_vio)
+{
+ /* We don't care what thread we are on. */
+ VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "must not prepare to dedupe zero blocks");
+
+ /*
+ * Before we can dedupe, we need to know the record name, so the first
+ * step is to hash the block data.
+ */
+ data_vio->last_async_operation = VIO_ASYNC_OP_HASH_DATA_VIO;
+ launch_data_vio_cpu_callback(data_vio, hash_data_vio, CPU_Q_HASH_BLOCK_PRIORITY);
+}
+
+/**
+ * write_bio_finished() - This is the bio_end_io function registered in write_data_vio() to be
+ *                        called when a data_vio's write to the underlying storage has completed.
+ */
+static void write_bio_finished(struct bio *bio)
+{
+ struct data_vio *data_vio = vio_as_data_vio((struct vio *) bio->bi_private);
+
+ vdo_count_completed_bios(bio);
+ vdo_set_completion_result(&data_vio->vio.completion,
+ blk_status_to_errno(bio->bi_status));
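+	/* Once the data is on disk, the allocation's PBN write lock may be downgraded to a read lock. */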
+ data_vio->downgrade_allocation_lock = true;
+ update_metadata_for_data_vio_write(data_vio, data_vio->allocation.lock);
+}
+
+/** write_data_vio() - Write a data block to storage without compression. */
+void write_data_vio(struct data_vio *data_vio)
+{
+ struct data_vio_compression_status status, new_status;
+ int result;
+
+ if (!data_vio_has_allocation(data_vio)) {
+ /*
+ * There was no space to write this block and we failed to deduplicate or compress
+ * it.
+ */
+ continue_data_vio_with_error(data_vio, VDO_NO_SPACE);
+ return;
+ }
+
+ new_status = (struct data_vio_compression_status) {
+ .stage = DATA_VIO_POST_PACKER,
+ .may_not_compress = true,
+ };
+
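+	/*
+	 * Use compare-and-swap to mark this data_vio as off the compression path, retrying until
+	 * the new status is set or the data_vio is seen to have already left the packer.
+	 */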
+ do {
+ status = get_data_vio_compression_status(data_vio);
+ } while ((status.stage != DATA_VIO_POST_PACKER) &&
+ !set_data_vio_compression_status(data_vio, status, new_status));
+
+ /* Write the data from the data block buffer. */
+ result = vio_reset_bio(&data_vio->vio, data_vio->vio.data,
+ write_bio_finished, REQ_OP_WRITE,
+ data_vio->allocation.pbn);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_WRITE_DATA_VIO;
+ vdo_submit_data_vio(data_vio);
+}
+
+/**
+ * acknowledge_write_callback() - Acknowledge a write to the requestor.
+ *
+ * This callback is registered in allocate_block() and continue_data_vio_with_block_map_slot().
+ */
+static void acknowledge_write_callback(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct vdo *vdo = completion->vdo;
+
+ VDO_ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) ||
+ (vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread)),
+ "%s() called on bio ack queue", __func__);
+ VDO_ASSERT_LOG_ONLY(data_vio_has_flush_generation_lock(data_vio),
+ "write VIO to be acknowledged has a flush generation lock");
+ acknowledge_data_vio(data_vio);
+ if (data_vio->new_mapped.pbn == VDO_ZERO_BLOCK) {
+ /* This is a zero write or discard */
+ update_metadata_for_data_vio_write(data_vio, NULL);
+ return;
+ }
+
+ prepare_for_dedupe(data_vio);
+}
+
+/**
+ * allocate_block() - Attempt to allocate a block in the current allocation zone.
+ *
+ * This callback is registered in continue_data_vio_with_block_map_slot().
+ */
+static void allocate_block(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_allocated_zone(data_vio);
+
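+	/* A false return means the allocation did not complete here and continues asynchronously. */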
+ if (!vdo_allocate_block_in_zone(data_vio))
+ return;
+
+ completion->error_handler = handle_data_vio_error;
+ WRITE_ONCE(data_vio->allocation_succeeded, true);
+ data_vio->new_mapped = (struct zoned_pbn) {
+ .zone = data_vio->allocation.zone,
+ .pbn = data_vio->allocation.pbn,
+ .state = VDO_MAPPING_STATE_UNCOMPRESSED,
+ };
+
+ if (data_vio->fua) {
+ prepare_for_dedupe(data_vio);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_ACKNOWLEDGE_WRITE;
+ launch_data_vio_on_bio_ack_queue(data_vio, acknowledge_write_callback);
+}
+
+/**
+ * handle_allocation_error() - Handle an error attempting to allocate a block.
+ *
+ * This error handler is registered in continue_data_vio_with_block_map_slot().
+ */
+static void handle_allocation_error(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ if (completion->result == VDO_NO_SPACE) {
+ /* We failed to get an allocation, but we can try to dedupe. */
+ vdo_reset_completion(completion);
+ completion->error_handler = handle_data_vio_error;
+ prepare_for_dedupe(data_vio);
+ return;
+ }
+
+ /* We got a "real" error, not just a failure to allocate, so fail the request. */
+ handle_data_vio_error(completion);
+}
+
+static int assert_is_discard(struct data_vio *data_vio)
+{
+ int result = VDO_ASSERT(data_vio->is_discard,
+ "data_vio with no block map page is a discard");
+
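+	/* A failed assertion yields VDO_READ_ONLY, which will force the vdo into read-only mode. */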
+ return ((result == VDO_SUCCESS) ? result : VDO_READ_ONLY);
+}
+
+/**
+ * continue_data_vio_with_block_map_slot() - Read the data_vio's mapping from the block map.
+ *
+ * This callback is registered in launch_read_data_vio().
+ */
+void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+
+ assert_data_vio_in_logical_zone(data_vio);
+ if (data_vio->read) {
+ set_data_vio_logical_callback(data_vio, read_block);
+ data_vio->last_async_operation = VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_READ;
+ vdo_get_mapped_block(data_vio);
+ return;
+ }
+
+ vdo_acquire_flush_generation_lock(data_vio);
+
+ if (data_vio->tree_lock.tree_slots[0].block_map_slot.pbn == VDO_ZERO_BLOCK) {
+ /*
+ * This is a discard for a block on a block map page which has not been allocated, so
+ * there's nothing more we need to do.
+ */
+ completion->callback = complete_data_vio;
+ continue_data_vio_with_error(data_vio, assert_is_discard(data_vio));
+ return;
+ }
+
+ /*
+ * We need an allocation if this is neither a full-block discard nor a
+ * full-block zero write.
+ */
+ if (!data_vio->is_zero && (!data_vio->is_discard || data_vio->is_partial)) {
+ data_vio_allocate_data_block(data_vio, VIO_WRITE_LOCK, allocate_block,
+ handle_allocation_error);
+ return;
+ }
+
+ /*
+ * We don't need to write any data, so skip allocation and just update the block map and
+ * reference counts (via the journal).
+ */
+ data_vio->new_mapped.pbn = VDO_ZERO_BLOCK;
+ if (data_vio->is_zero)
+ data_vio->new_mapped.state = VDO_MAPPING_STATE_UNCOMPRESSED;
+
+ if (data_vio->remaining_discard > VDO_BLOCK_SIZE) {
+ /* This is not the final block of a discard so we can't acknowledge it yet. */
+ update_metadata_for_data_vio_write(data_vio, NULL);
+ return;
+ }
+
+ data_vio->last_async_operation = VIO_ASYNC_OP_ACKNOWLEDGE_WRITE;
+ launch_data_vio_on_bio_ack_queue(data_vio, acknowledge_write_callback);
+}
diff --git a/drivers/md/dm-vdo/data-vio.h b/drivers/md/dm-vdo/data-vio.h
new file mode 100644
index 000000000000..25926b6cd98b
--- /dev/null
+++ b/drivers/md/dm-vdo/data-vio.h
@@ -0,0 +1,670 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef DATA_VIO_H
+#define DATA_VIO_H
+
+#include <linux/atomic.h>
+#include <linux/bio.h>
+#include <linux/list.h>
+
+#include "permassert.h"
+
+#include "indexer.h"
+
+#include "block-map.h"
+#include "completion.h"
+#include "constants.h"
+#include "dedupe.h"
+#include "encodings.h"
+#include "logical-zone.h"
+#include "physical-zone.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+/* Codes for describing the last asynchronous operation performed on a vio. */
+enum async_operation_number {
+ MIN_VIO_ASYNC_OPERATION_NUMBER,
+ VIO_ASYNC_OP_LAUNCH = MIN_VIO_ASYNC_OPERATION_NUMBER,
+ VIO_ASYNC_OP_ACKNOWLEDGE_WRITE,
+ VIO_ASYNC_OP_ACQUIRE_VDO_HASH_LOCK,
+ VIO_ASYNC_OP_ATTEMPT_LOGICAL_BLOCK_LOCK,
+ VIO_ASYNC_OP_LOCK_DUPLICATE_PBN,
+ VIO_ASYNC_OP_CHECK_FOR_DUPLICATION,
+ VIO_ASYNC_OP_CLEANUP,
+ VIO_ASYNC_OP_COMPRESS_DATA_VIO,
+ VIO_ASYNC_OP_FIND_BLOCK_MAP_SLOT,
+ VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_READ,
+ VIO_ASYNC_OP_GET_MAPPED_BLOCK_FOR_WRITE,
+ VIO_ASYNC_OP_HASH_DATA_VIO,
+ VIO_ASYNC_OP_JOURNAL_REMAPPING,
+ VIO_ASYNC_OP_ATTEMPT_PACKING,
+ VIO_ASYNC_OP_PUT_MAPPED_BLOCK,
+ VIO_ASYNC_OP_READ_DATA_VIO,
+ VIO_ASYNC_OP_UPDATE_DEDUPE_INDEX,
+ VIO_ASYNC_OP_UPDATE_REFERENCE_COUNTS,
+ VIO_ASYNC_OP_VERIFY_DUPLICATION,
+ VIO_ASYNC_OP_WRITE_DATA_VIO,
+ MAX_VIO_ASYNC_OPERATION_NUMBER,
+} __packed;
+
+struct lbn_lock {
+ logical_block_number_t lbn;
+ bool locked;
+ struct vdo_wait_queue waiters;
+ struct logical_zone *zone;
+};
+
+/* A position in the arboreal block map at a specific level. */
+struct block_map_tree_slot {
+ page_number_t page_index;
+ struct block_map_slot block_map_slot;
+};
+
+/* Fields for using the arboreal block map. */
+struct tree_lock {
+ /* The current height at which this data_vio is operating */
+ height_t height;
+ /* The block map tree for this LBN */
+ root_count_t root_index;
+ /* Whether we hold a page lock */
+ bool locked;
+ /* The key for the lock map */
+ u64 key;
+ /* The queue of waiters for the page this vio is allocating or loading */
+ struct vdo_wait_queue waiters;
+ /* The block map tree slots for this LBN */
+ struct block_map_tree_slot tree_slots[VDO_BLOCK_MAP_TREE_HEIGHT + 1];
+};
+
+struct zoned_pbn {
+ physical_block_number_t pbn;
+ enum block_mapping_state state;
+ struct physical_zone *zone;
+};
+
+/*
+ * Where a data_vio is on the compression path; advance_data_vio_compression_stage() depends on
+ * the order of this enum.
+ */
+enum data_vio_compression_stage {
+ /* A data_vio which has not yet entered the compression path */
+ DATA_VIO_PRE_COMPRESSOR,
+ /* A data_vio which is in the compressor */
+ DATA_VIO_COMPRESSING,
+ /* A data_vio which is blocked in the packer */
+ DATA_VIO_PACKING,
+ /* A data_vio which is no longer on the compression path (and never will be) */
+ DATA_VIO_POST_PACKER,
+};
+
+struct data_vio_compression_status {
+ enum data_vio_compression_stage stage;
+ bool may_not_compress;
+};
+
+struct compression_state {
+ /*
+ * The current compression status of this data_vio. This field contains a value which
+ * consists of a data_vio_compression_stage and a flag indicating whether a request has
+ * been made to cancel (or prevent) compression for this data_vio.
+ *
+ * This field should be accessed through the get_data_vio_compression_status() and
+ * set_data_vio_compression_status() methods. It should not be accessed directly.
+ */
+ atomic_t status;
+
+ /* The compressed size of this block */
+ u16 size;
+
+ /* The packer input or output bin slot which holds the enclosing data_vio */
+ slot_number_t slot;
+
+ /* The packer bin to which the enclosing data_vio has been assigned */
+ struct packer_bin *bin;
+
+ /* A link in the chain of data_vios which have been packed together */
+ struct data_vio *next_in_batch;
+
+ /* A vio which is blocked in the packer while holding a lock this vio needs. */
+ struct data_vio *lock_holder;
+
+ /*
+ * The compressed block used to hold the compressed form of this block and that of any
+ * other blocks for which this data_vio is the compressed write agent.
+ */
+ struct compressed_block *block;
+};
+
+/* Fields supporting allocation of data blocks. */
+struct allocation {
+ /* The physical zone in which to allocate a physical block */
+ struct physical_zone *zone;
+
+ /* The block allocated to this vio */
+ physical_block_number_t pbn;
+
+ /*
+ * If non-NULL, the pooled PBN lock held on the allocated block. Must be a write lock until
+ * the block has been written, after which it will become a read lock.
+ */
+ struct pbn_lock *lock;
+
+ /* The type of write lock to obtain on the allocated block */
+ enum pbn_lock_type write_lock_type;
+
+ /* The zone which was the start of the current allocation cycle */
+ zone_count_t first_allocation_zone;
+
+ /* Whether this vio should wait for a clean slab */
+ bool wait_for_clean_slab;
+};
+
+struct reference_updater {
+ enum journal_operation operation;
+ bool increment;
+ struct zoned_pbn zpbn;
+ struct pbn_lock *lock;
+ struct vdo_waiter waiter;
+};
+
+/* A vio for processing user data requests. */
+struct data_vio {
+ /* The vdo_wait_queue entry structure */
+ struct vdo_waiter waiter;
+
+ /* The logical block of this request */
+ struct lbn_lock logical;
+
+ /* The state for traversing the block map tree */
+ struct tree_lock tree_lock;
+
+ /* The current partition address of this block */
+ struct zoned_pbn mapped;
+
+ /* The hash of this vio (if not zero) */
+ struct uds_record_name record_name;
+
+ /* Used for logging and debugging */
+ enum async_operation_number last_async_operation;
+
+ /* The operations to record in the recovery and slab journals */
+ struct reference_updater increment_updater;
+ struct reference_updater decrement_updater;
+
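+	/* Single-bit flags describing the type and current state of this request */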
+ u16 read : 1;
+ u16 write : 1;
+ u16 fua : 1;
+ u16 is_zero : 1;
+ u16 is_discard : 1;
+ u16 is_partial : 1;
+ u16 is_duplicate : 1;
+ u16 first_reference_operation_complete : 1;
+ u16 downgrade_allocation_lock : 1;
+
+ struct allocation allocation;
+
+ /*
+ * Whether this vio has received an allocation. This field is examined from threads not in
+ * the allocation zone.
+ */
+ bool allocation_succeeded;
+
+ /* The new partition address of this block after the vio write completes */
+ struct zoned_pbn new_mapped;
+
+ /* The hash zone responsible for the name (NULL if is_zero_block) */
+ struct hash_zone *hash_zone;
+
+ /* The lock this vio holds or shares with other vios with the same data */
+ struct hash_lock *hash_lock;
+
+ /* All data_vios sharing a hash lock are kept in a list linking these list entries */
+ struct list_head hash_lock_entry;
+
+ /* The block number in the partition of the UDS deduplication advice */
+ struct zoned_pbn duplicate;
+
+ /*
+ * The sequence number of the recovery journal block containing the increment entry for
+ * this vio.
+ */
+ sequence_number_t recovery_sequence_number;
+
+ /* The point in the recovery journal where this write last made an entry */
+ struct journal_point recovery_journal_point;
+
+ /* The list of vios in user initiated write requests */
+ struct list_head write_entry;
+
+	/* The vdo's flush generation when this vio was launched */
+ sequence_number_t flush_generation;
+
+ /* The completion to use for fetching block map pages for this vio */
+ struct vdo_page_completion page_completion;
+
+ /* The user bio that initiated this VIO */
+ struct bio *user_bio;
+
+ /* partial block support */
+ block_size_t offset;
+
+ /*
+ * The number of bytes to be discarded. For discards, this field will always be positive,
+ * whereas for non-discards it will always be 0. Hence it can be used to determine whether
+ * a data_vio is processing a discard, even after the user_bio has been acknowledged.
+ */
+ u32 remaining_discard;
+
+ struct dedupe_context *dedupe_context;
+
+ /* Fields beyond this point will not be reset when a pooled data_vio is reused. */
+
+ struct vio vio;
+
+ /* The completion for making reference count decrements */
+ struct vdo_completion decrement_completion;
+
+ /* All of the fields necessary for the compression path */
+ struct compression_state compression;
+
+ /* A block used as output during compression or uncompression */
+ char *scratch_block;
+
+ struct list_head pool_entry;
+};
+
+static inline struct data_vio *vio_as_data_vio(struct vio *vio)
+{
+ VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
+ return container_of(vio, struct data_vio, vio);
+}
+
+static inline struct data_vio *as_data_vio(struct vdo_completion *completion)
+{
+ return vio_as_data_vio(as_vio(completion));
+}
+
+static inline struct data_vio *vdo_waiter_as_data_vio(struct vdo_waiter *waiter)
+{
+ if (waiter == NULL)
+ return NULL;
+
+ return container_of(waiter, struct data_vio, waiter);
+}
+
+static inline struct data_vio *data_vio_from_reference_updater(struct reference_updater *updater)
+{
+ if (updater->increment)
+ return container_of(updater, struct data_vio, increment_updater);
+
+ return container_of(updater, struct data_vio, decrement_updater);
+}
+
+static inline bool data_vio_has_flush_generation_lock(struct data_vio *data_vio)
+{
+ return !list_empty(&data_vio->write_entry);
+}
+
+static inline struct vdo *vdo_from_data_vio(struct data_vio *data_vio)
+{
+ return data_vio->vio.completion.vdo;
+}
+
+static inline bool data_vio_has_allocation(struct data_vio *data_vio)
+{
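+	/* VDO_ZERO_BLOCK serves as a sentinel meaning no physical block has been allocated. */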
+ return (data_vio->allocation.pbn != VDO_ZERO_BLOCK);
+}
+
+struct data_vio_compression_status __must_check
+advance_data_vio_compression_stage(struct data_vio *data_vio);
+struct data_vio_compression_status __must_check
+get_data_vio_compression_status(struct data_vio *data_vio);
+bool cancel_data_vio_compression(struct data_vio *data_vio);
+
+struct data_vio_pool;
+
+int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
+ data_vio_count_t discard_limit, struct data_vio_pool **pool_ptr);
+void free_data_vio_pool(struct data_vio_pool *pool);
+void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio);
+void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion);
+void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion);
+
+void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios);
+data_vio_count_t get_data_vio_pool_active_discards(struct data_vio_pool *pool);
+data_vio_count_t get_data_vio_pool_discard_limit(struct data_vio_pool *pool);
+data_vio_count_t get_data_vio_pool_maximum_discards(struct data_vio_pool *pool);
+int __must_check set_data_vio_pool_discard_limit(struct data_vio_pool *pool,
+ data_vio_count_t limit);
+data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool);
+data_vio_count_t get_data_vio_pool_request_limit(struct data_vio_pool *pool);
+data_vio_count_t get_data_vio_pool_maximum_requests(struct data_vio_pool *pool);
+
+void complete_data_vio(struct vdo_completion *completion);
+void handle_data_vio_error(struct vdo_completion *completion);
+
+static inline void continue_data_vio(struct data_vio *data_vio)
+{
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+/**
+ * continue_data_vio_with_error() - Set an error code and then continue processing a data_vio.
+ *
+ * This will not mask older errors. This function can be called with a success code, but it is more
+ * efficient to call continue_data_vio() if the caller knows the result was a success.
+ */
+static inline void continue_data_vio_with_error(struct data_vio *data_vio, int result)
+{
+ vdo_continue_completion(&data_vio->vio.completion, result);
+}
+
+const char * __must_check get_data_vio_operation_name(struct data_vio *data_vio);
+
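+/*
+ * Most of the zones and queues below have a set of helpers: one asserting that a data_vio is on
+ * the correct thread, one setting a callback to run on that thread, and one setting the callback
+ * and launching the completion immediately.
+ */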
+static inline void assert_data_vio_in_hash_zone(struct data_vio *data_vio)
+{
+ thread_id_t expected = data_vio->hash_zone->thread_id;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+ /*
+ * It's odd to use the LBN, but converting the record name to hex is a bit clunky for an
+	 * inline, and the LBN is better than nothing as an identifier.
+ */
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id, expected);
+}
+
+static inline void set_data_vio_hash_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ data_vio->hash_zone->thread_id);
+}
+
+/**
+ * launch_data_vio_hash_zone_callback() - Set a callback as a hash zone operation and invoke it
+ * immediately.
+ */
+static inline void launch_data_vio_hash_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_hash_zone_callback(data_vio, callback);
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+static inline void assert_data_vio_in_logical_zone(struct data_vio *data_vio)
+{
+ thread_id_t expected = data_vio->logical.zone->thread_id;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id, expected);
+}
+
+static inline void set_data_vio_logical_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ data_vio->logical.zone->thread_id);
+}
+
+/**
+ * launch_data_vio_logical_callback() - Set a callback as a logical block operation and invoke it
+ * immediately.
+ */
+static inline void launch_data_vio_logical_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_logical_callback(data_vio, callback);
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+static inline void assert_data_vio_in_allocated_zone(struct data_vio *data_vio)
+{
+ thread_id_t expected = data_vio->allocation.zone->thread_id;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->allocation.pbn, thread_id,
+ expected);
+}
+
+static inline void set_data_vio_allocated_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ data_vio->allocation.zone->thread_id);
+}
+
+/**
+ * launch_data_vio_allocated_zone_callback() - Set a callback as a physical block operation in a
+ *                                              data_vio's allocated zone and invoke it immediately.
+ */
+static inline void launch_data_vio_allocated_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_allocated_zone_callback(data_vio, callback);
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+static inline void assert_data_vio_in_duplicate_zone(struct data_vio *data_vio)
+{
+ thread_id_t expected = data_vio->duplicate.zone->thread_id;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->duplicate.pbn, thread_id,
+ expected);
+}
+
+static inline void set_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ data_vio->duplicate.zone->thread_id);
+}
+
+/**
+ * launch_data_vio_duplicate_zone_callback() - Set a callback as a physical block operation in a
+ *                                             data_vio's duplicate zone and invoke it immediately.
+ */
+static inline void launch_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_duplicate_zone_callback(data_vio, callback);
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+static inline void assert_data_vio_in_mapped_zone(struct data_vio *data_vio)
+{
+ thread_id_t expected = data_vio->mapped.zone->thread_id;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for mapped physical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->mapped.pbn, thread_id, expected);
+}
+
+static inline void set_data_vio_mapped_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ data_vio->mapped.zone->thread_id);
+}
+
+static inline void assert_data_vio_in_new_mapped_zone(struct data_vio *data_vio)
+{
+ thread_id_t expected = data_vio->new_mapped.zone->thread_id;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->new_mapped.pbn, thread_id,
+ expected);
+}
+
+static inline void set_data_vio_new_mapped_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ data_vio->new_mapped.zone->thread_id);
+}
+
+static inline void assert_data_vio_in_journal_zone(struct data_vio *data_vio)
+{
+ thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((journal_thread == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on journal thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id,
+ journal_thread);
+}
+
+static inline void set_data_vio_journal_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
+
+ vdo_set_completion_callback(&data_vio->vio.completion, callback, journal_thread);
+}
+
+/**
+ * launch_data_vio_journal_callback() - Set a callback as a journal operation and invoke it
+ * immediately.
+ */
+static inline void launch_data_vio_journal_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_journal_callback(data_vio, callback);
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+static inline void assert_data_vio_in_packer_zone(struct data_vio *data_vio)
+{
+ thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((packer_thread == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on packer thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id,
+ packer_thread);
+}
+
+static inline void set_data_vio_packer_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
+
+ vdo_set_completion_callback(&data_vio->vio.completion, callback, packer_thread);
+}
+
+/**
+ * launch_data_vio_packer_callback() - Set a callback as a packer operation and invoke it
+ * immediately.
+ */
+static inline void launch_data_vio_packer_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_packer_callback(data_vio, callback);
+ vdo_launch_completion(&data_vio->vio.completion);
+}
+
+static inline void assert_data_vio_on_cpu_thread(struct data_vio *data_vio)
+{
+ thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((cpu_thread == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on cpu thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id,
+ cpu_thread);
+}
+
+static inline void set_data_vio_cpu_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
+
+ vdo_set_completion_callback(&data_vio->vio.completion, callback, cpu_thread);
+}
+
+/**
+ * launch_data_vio_cpu_callback() - Set a callback to run on the CPU queues and invoke it
+ * immediately.
+ */
+static inline void launch_data_vio_cpu_callback(struct data_vio *data_vio,
+ vdo_action_fn callback,
+ enum vdo_completion_priority priority)
+{
+ set_data_vio_cpu_callback(data_vio, callback);
+ vdo_launch_completion_with_priority(&data_vio->vio.completion, priority);
+}
+
+static inline void set_data_vio_bio_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ vdo_set_completion_callback(&data_vio->vio.completion, callback,
+ get_vio_bio_zone_thread_id(&data_vio->vio));
+}
+
+/**
+ * launch_data_vio_bio_zone_callback() - Set a callback as a bio zone operation and invoke it
+ * immediately.
+ */
+static inline void launch_data_vio_bio_zone_callback(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ set_data_vio_bio_zone_callback(data_vio, callback);
+ vdo_launch_completion_with_priority(&data_vio->vio.completion,
+ BIO_Q_DATA_PRIORITY);
+}
+
+/**
+ * launch_data_vio_on_bio_ack_queue() - If the vdo uses a bio_ack queue, set a callback to run on
+ *                                      it and invoke it immediately; otherwise, just run the
+ * callback on the current thread.
+ */
+static inline void launch_data_vio_on_bio_ack_queue(struct data_vio *data_vio,
+ vdo_action_fn callback)
+{
+ struct vdo_completion *completion = &data_vio->vio.completion;
+ struct vdo *vdo = completion->vdo;
+
+ if (!vdo_uses_bio_ack_queue(vdo)) {
+ callback(completion);
+ return;
+ }
+
+ vdo_set_completion_callback(completion, callback,
+ vdo->thread_config.bio_ack_thread);
+ vdo_launch_completion_with_priority(completion, BIO_ACK_Q_ACK_PRIORITY);
+}
+
+void data_vio_allocate_data_block(struct data_vio *data_vio,
+ enum pbn_lock_type write_lock_type,
+ vdo_action_fn callback, vdo_action_fn error_handler);
+
+void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset);
+
+int __must_check uncompress_data_vio(struct data_vio *data_vio,
+ enum block_mapping_state mapping_state,
+ char *buffer);
+
+void update_metadata_for_data_vio_write(struct data_vio *data_vio,
+ struct pbn_lock *lock);
+void write_data_vio(struct data_vio *data_vio);
+void launch_compress_data_vio(struct data_vio *data_vio);
+void continue_data_vio_with_block_map_slot(struct vdo_completion *completion);
+
+#endif /* DATA_VIO_H */
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
new file mode 100644
index 000000000000..117266e1b3ae
--- /dev/null
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -0,0 +1,3003 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+/**
+ * DOC:
+ *
+ * Hash Locks:
+ *
+ * A hash_lock controls and coordinates writing, index access, and dedupe among groups of data_vios
+ * concurrently writing identical blocks, allowing them to deduplicate not only against advice but
+ * also against each other. This saves on index queries and allows those data_vios to concurrently
+ * deduplicate against a single block instead of being serialized through a PBN read lock. Only one
+ * index query is needed for each hash_lock, instead of one for every data_vio.
+ *
+ * Hash_locks are assigned to hash_zones by computing a modulus on the hash itself. Each hash_zone
+ * has a single dedicated queue and thread for performing all operations on the hash_locks assigned
+ * to that zone. The concurrency guarantees of this single-threaded model allow the code to omit
+ * more fine-grained locking for the hash_lock structures.
+ *
+ * A hash_lock acts more like a state machine than a lock. Other than the starting and
+ * ending states INITIALIZING and BYPASSING, every state represents and is held for the duration of
+ * an asynchronous operation. All state transitions are performed on the thread of the hash_zone
+ * containing the lock. An asynchronous operation is almost always performed upon entering a state,
+ * and the callback from that operation triggers exiting the state and entering a new state.
+ *
+ * In all states except DEDUPING, there is a single data_vio, called the lock agent, performing the
+ * asynchronous operations on behalf of the lock. The agent will change during the lifetime of the
+ * lock if the lock is shared by more than one data_vio. data_vios waiting to deduplicate are kept
+ * on a wait queue. Viewed a different way, the agent holds the lock exclusively until the lock
+ * enters the DEDUPING state, at which point it becomes a shared lock that all the waiters (and any
+ * new data_vios that arrive) use to share a PBN lock. In state DEDUPING, there is no agent. When
+ * the last data_vio in the lock calls back in DEDUPING, it becomes the agent and the lock becomes
+ * exclusive again. New data_vios that arrive in the lock will also go on the wait queue.
+ *
+ * The existence of lock waiters is a key factor controlling which state the lock transitions to
+ * next. When the lock is new or has waiters, it will always try to reach DEDUPING, and when it
+ * has no waiters, it will try to clean up and exit.
+ *
+ * Deduping requires holding a PBN lock on a block that is known to contain data identical to the
+ * data_vios in the lock, so the lock will send the agent to the duplicate zone to acquire the PBN
+ * lock (LOCKING), to the kernel I/O threads to read and verify the data (VERIFYING), or to write a
+ * new copy of the data to a full data block or a slot in a compressed block (WRITING).
+ *
+ * Cleaning up consists of updating the index when the data location is different from the initial
+ * index query (UPDATING, triggered by stale advice, compression, and rollover), releasing the PBN
+ * lock on the duplicate block (UNLOCKING), and if the agent is the last data_vio referencing the
+ * lock, releasing the hash_lock itself back to the hash zone (BYPASSING).
+ *
+ * The shortest sequence of states is for non-concurrent writes of new data:
+ * INITIALIZING -> QUERYING -> WRITING -> BYPASSING
+ * This sequence is short because no PBN read lock or index update is needed.
+ *
+ * A non-concurrent write that finds valid advice looks like this (endpoints elided):
+ * -> QUERYING -> LOCKING -> VERIFYING -> DEDUPING -> UNLOCKING ->
+ * Or with stale advice (endpoints elided):
+ * -> QUERYING -> LOCKING -> VERIFYING -> UNLOCKING -> WRITING -> UPDATING ->
+ *
+ * When there are not enough reference count increments available on a PBN for a data_vio
+ * to deduplicate, a new lock is forked and the excess waiters roll over to the new lock (which
+ * goes directly to WRITING). The new lock takes the place of the old lock in the lock map so new
+ * data_vios will be directed to it. The two locks will proceed independently, but only the new
+ * lock will have the right to update the index (unless it also forks).
+ *
+ * Since rollover happens in a lock instance, once a valid data location has been selected, it will
+ * not change. QUERYING and WRITING are only performed once per lock lifetime. All other
+ * non-endpoint states can be re-entered.
+ *
+ * The function names in this module follow a convention referencing the states and transitions in
+ * the state machine. For example, for the LOCKING state, there are start_locking() and
+ * finish_locking() functions. start_locking() is invoked by the finish function of the state (or
+ * states) that transition to LOCKING. It performs the actual lock state change and must be invoked
+ * on the hash zone thread. finish_locking() is called by (or continued via callback from) the
+ * code actually obtaining the lock. It does any bookkeeping or decision-making required and
+ * invokes the appropriate start function of the state being transitioned to after LOCKING.
+ *
+ * ----------------------------------------------------------------------
+ *
+ * Index Queries:
+ *
+ * A query to the UDS index is handled asynchronously by the index's threads. When the query is
+ * complete, a callback supplied with the query will be called from one of those threads. Under
+ * heavy system load, the index may be slower to respond than is desirable for reasonable I/O
+ * throughput. Since deduplication of writes is not necessary for correct operation of a VDO
+ * device, it is acceptable to time out slow index queries and proceed to fulfill a write
+ * request without deduplicating. However, because the uds_request struct itself is supplied by the
+ * caller, we cannot simply reuse a uds_request object which we have chosen to time out. Hence,
+ * each hash_zone maintains a pool of dedupe_contexts which each contain a uds_request along with a
+ * reference to the data_vio on behalf of which they are performing a query.
+ *
+ * When a hash_lock needs to query the index, it attempts to acquire an unused dedupe_context from
+ * its hash_zone's pool. If one is available, that context is prepared, associated with the
+ * hash_lock's agent, added to the list of pending contexts, and then sent to the index. The
+ * context's state will be transitioned from DEDUPE_CONTEXT_IDLE to DEDUPE_CONTEXT_PENDING. If all
+ * goes well, the dedupe callback will be called by the index which will change the context's state
+ * to DEDUPE_CONTEXT_COMPLETE, and the associated data_vio will be enqueued to run back in the hash
+ * zone where the query results will be processed and the context will be put back in the idle
+ * state and returned to the hash_zone's available list.
+ *
+ * The first time an index query is launched from a given hash_zone, a timer is started. When the
+ * timer fires, the hash_zone's completion is enqueued to run in the hash_zone where the zone's
+ * pending list will be searched for any contexts in the pending state which have been running for
+ * too long. Those contexts are transitioned to the DEDUPE_CONTEXT_TIMED_OUT state and moved to the
+ * zone's timed_out list where they won't be examined again if there is a subsequent time out. The
+ * data_vios associated with timed out contexts are sent to continue processing their write
+ * operation without deduplicating. The timer is also restarted.
+ *
+ * When the dedupe callback is run for a context which is in the timed out state, that context is
+ * moved to the DEDUPE_CONTEXT_TIMED_OUT_COMPLETE state. No other action need be taken as the
+ * associated data_vios have already been dispatched.
+ *
+ * If a hash_lock needs a dedupe context, and the available list is empty, the timed_out list will
+ * be searched for any contexts which are timed out and complete. One of these will be used
+ * immediately, and the rest will be returned to the available list and marked idle.
+ */
+
+#include "dedupe.h"
+
+#include <linux/atomic.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/ratelimit.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "indexer.h"
+
+#include "action-manager.h"
+#include "admin-state.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "int-map.h"
+#include "io-submitter.h"
+#include "packer.h"
+#include "physical-zone.h"
+#include "slab-depot.h"
+#include "statistics.h"
+#include "types.h"
+#include "vdo.h"
+#include "wait-queue.h"
+
+struct uds_attribute {
+ struct attribute attr;
+ const char *(*show_string)(struct hash_zones *hash_zones);
+};
+
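+/* Possible states of the per-zone timer used to detect timed-out index queries. */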
+#define DEDUPE_QUERY_TIMER_IDLE 0
+#define DEDUPE_QUERY_TIMER_RUNNING 1
+#define DEDUPE_QUERY_TIMER_FIRED 2
+
+enum dedupe_context_state {
+ DEDUPE_CONTEXT_IDLE,
+ DEDUPE_CONTEXT_PENDING,
+ DEDUPE_CONTEXT_TIMED_OUT,
+ DEDUPE_CONTEXT_COMPLETE,
+ DEDUPE_CONTEXT_TIMED_OUT_COMPLETE,
+};
+
+/* Possible index states: closed, opened, or transitioning between those two. */
+enum index_state {
+ IS_CLOSED,
+ IS_CHANGING,
+ IS_OPENED,
+};
+
+static const char *CLOSED = "closed";
+static const char *CLOSING = "closing";
+static const char *ERROR = "error";
+static const char *OFFLINE = "offline";
+static const char *ONLINE = "online";
+static const char *OPENING = "opening";
+static const char *SUSPENDED = "suspended";
+static const char *UNKNOWN = "unknown";
+
+/* Version 2 uses the kernel space UDS index and is limited to 16 bytes */
+#define UDS_ADVICE_VERSION 2
+/* version byte + state byte + 64-bit little-endian PBN */
+#define UDS_ADVICE_SIZE (1 + 1 + sizeof(u64))
+
+enum hash_lock_state {
+ /* State for locks that are not in use or are being initialized. */
+ VDO_HASH_LOCK_INITIALIZING,
+
+ /* This is the sequence of states typically used on the non-dedupe path. */
+ VDO_HASH_LOCK_QUERYING,
+ VDO_HASH_LOCK_WRITING,
+ VDO_HASH_LOCK_UPDATING,
+
+ /* The remaining states are typically used on the dedupe path in this order. */
+ VDO_HASH_LOCK_LOCKING,
+ VDO_HASH_LOCK_VERIFYING,
+ VDO_HASH_LOCK_DEDUPING,
+ VDO_HASH_LOCK_UNLOCKING,
+
+ /*
+ * Terminal state for locks returning to the pool. Must be last both because it's the final
+ * state, and also because it's used to count the states.
+ */
+ VDO_HASH_LOCK_BYPASSING,
+};
+
+static const char * const LOCK_STATE_NAMES[] = {
+ [VDO_HASH_LOCK_BYPASSING] = "BYPASSING",
+ [VDO_HASH_LOCK_DEDUPING] = "DEDUPING",
+ [VDO_HASH_LOCK_INITIALIZING] = "INITIALIZING",
+ [VDO_HASH_LOCK_LOCKING] = "LOCKING",
+ [VDO_HASH_LOCK_QUERYING] = "QUERYING",
+ [VDO_HASH_LOCK_UNLOCKING] = "UNLOCKING",
+ [VDO_HASH_LOCK_UPDATING] = "UPDATING",
+ [VDO_HASH_LOCK_VERIFYING] = "VERIFYING",
+ [VDO_HASH_LOCK_WRITING] = "WRITING",
+};
+
+struct hash_lock {
+ /* The block hash covered by this lock */
+ struct uds_record_name hash;
+
+ /* When the lock is unused, this list entry allows the lock to be pooled */
+ struct list_head pool_node;
+
+ /*
+ * A list containing the data VIOs sharing this lock, all having the same record name and
+ * data block contents, linked by their hash_lock_node fields.
+ */
+ struct list_head duplicate_ring;
+
+ /* The number of data_vios sharing this lock instance */
+ data_vio_count_t reference_count;
+
+ /* The maximum value of reference_count in the lifetime of this lock */
+ data_vio_count_t max_references;
+
+ /* The current state of this lock */
+ enum hash_lock_state state;
+
+ /* True if the UDS index should be updated with new advice */
+ bool update_advice;
+
+ /* True if the advice has been verified to be a true duplicate */
+ bool verified;
+
+ /* True if the lock has already accounted for an initial verification */
+ bool verify_counted;
+
+ /* True if this lock is registered in the lock map (cleared on rollover) */
+ bool registered;
+
+ /*
+ * If verified is false, this is the location of a possible duplicate. If verified is true,
+ * it is the verified location of a true duplicate.
+ */
+ struct zoned_pbn duplicate;
+
+ /* The PBN lock on the block containing the duplicate data */
+ struct pbn_lock *duplicate_lock;
+
+ /* The data_vio designated to act on behalf of the lock */
+ struct data_vio *agent;
+
+ /*
+	 * Other data_vios with data identical to the agent's, which are currently waiting for the agent
+ * to get the information they all need to deduplicate--either against each other, or
+ * against an existing duplicate on disk.
+ */
+ struct vdo_wait_queue waiters;
+};
+
+#define LOCK_POOL_CAPACITY MAXIMUM_VDO_USER_VIOS
+
+struct hash_zones {
+ struct action_manager *manager;
+ struct uds_parameters parameters;
+ struct uds_index_session *index_session;
+ struct ratelimit_state ratelimiter;
+ atomic64_t timeouts;
+ atomic64_t dedupe_context_busy;
+
+ /* This spinlock protects the state fields and the starting of dedupe requests. */
+ spinlock_t lock;
+
+ /* The fields in the next block are all protected by the lock */
+ struct vdo_completion completion;
+ enum index_state index_state;
+ enum index_state index_target;
+ struct admin_state state;
+ bool changing;
+ bool create_flag;
+ bool dedupe_flag;
+ bool error_flag;
+ u64 reported_timeouts;
+
+ /* The number of zones */
+ zone_count_t zone_count;
+ /* The hash zones themselves */
+ struct hash_zone zones[];
+};
+
+/* These are in milliseconds. */
+unsigned int vdo_dedupe_index_timeout_interval = 5000;
+unsigned int vdo_dedupe_index_min_timer_interval = 100;
+/* Same two variables, in jiffies for easier consumption. */
+static u64 vdo_dedupe_index_timeout_jiffies;
+static u64 vdo_dedupe_index_min_timer_jiffies;
+
+static inline struct hash_zone *as_hash_zone(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_HASH_ZONE_COMPLETION);
+ return container_of(completion, struct hash_zone, completion);
+}
+
+static inline struct hash_zones *as_hash_zones(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_HASH_ZONES_COMPLETION);
+ return container_of(completion, struct hash_zones, completion);
+}
+
+static inline void assert_in_hash_zone(struct hash_zone *zone, const char *name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
+ "%s called on hash zone thread", name);
+}
+
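+/*
+ * Atomic compare-and-swap helpers for the small state machines governing dedupe contexts and the
+ * per-zone query timer.
+ */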
+static inline bool change_context_state(struct dedupe_context *context, int old, int new)
+{
+ return (atomic_cmpxchg(&context->state, old, new) == old);
+}
+
+static inline bool change_timer_state(struct hash_zone *zone, int old, int new)
+{
+ return (atomic_cmpxchg(&zone->timer_state, old, new) == old);
+}
+
+/**
+ * return_hash_lock_to_pool() - (Re)initialize a hash lock and return it to its pool.
+ * @zone: The zone from which the lock was borrowed.
+ * @lock: The lock that is no longer in use.
+ */
+static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *lock)
+{
+ memset(lock, 0, sizeof(*lock));
+ INIT_LIST_HEAD(&lock->pool_node);
+ INIT_LIST_HEAD(&lock->duplicate_ring);
+ vdo_waitq_init(&lock->waiters);
+ list_add_tail(&lock->pool_node, &zone->lock_pool);
+}
+
+/**
+ * vdo_get_duplicate_lock() - Get the PBN lock on the duplicate data location for a data_vio from
+ * the hash_lock the data_vio holds (if there is one).
+ * @data_vio: The data_vio to query.
+ *
+ * Return: The PBN lock on the data_vio's duplicate location.
+ */
+struct pbn_lock *vdo_get_duplicate_lock(struct data_vio *data_vio)
+{
+ if (data_vio->hash_lock == NULL)
+ return NULL;
+
+ return data_vio->hash_lock->duplicate_lock;
+}
+
+/**
+ * hash_lock_key() - Return hash_lock's record name as a hash code.
+ * @lock: The hash lock.
+ *
+ * Return: The key to use for the int map.
+ */
+static inline u64 hash_lock_key(struct hash_lock *lock)
+{
+ return get_unaligned_le64(&lock->hash.name);
+}
+
+/**
+ * get_hash_lock_state_name() - Get the string representation of a hash lock state.
+ * @state: The hash lock state.
+ *
+ * Return: The short string representing the state
+ */
+static const char *get_hash_lock_state_name(enum hash_lock_state state)
+{
+ /* Catch if a state has been added without updating the name array. */
+ BUILD_BUG_ON((VDO_HASH_LOCK_BYPASSING + 1) != ARRAY_SIZE(LOCK_STATE_NAMES));
+ return (state < ARRAY_SIZE(LOCK_STATE_NAMES)) ? LOCK_STATE_NAMES[state] : "INVALID";
+}
+
+/**
+ * assert_hash_lock_agent() - Assert that a data_vio is the agent of its hash lock, and that this
+ * is being called in the hash zone.
+ * @data_vio: The data_vio expected to be the lock agent.
+ * @where: A string describing the function making the assertion.
+ */
+static void assert_hash_lock_agent(struct data_vio *data_vio, const char *where)
+{
+ /* Not safe to access the agent field except from the hash zone. */
+ assert_data_vio_in_hash_zone(data_vio);
+ VDO_ASSERT_LOG_ONLY(data_vio == data_vio->hash_lock->agent,
+ "%s must be for the hash lock agent", where);
+}
+
+/**
+ * set_duplicate_lock() - Set the duplicate lock held by a hash lock. May only be called in the
+ * physical zone of the PBN lock.
+ * @hash_lock: The hash lock to update.
+ * @pbn_lock: The PBN read lock to use as the duplicate lock.
+ */
+static void set_duplicate_lock(struct hash_lock *hash_lock, struct pbn_lock *pbn_lock)
+{
+ VDO_ASSERT_LOG_ONLY((hash_lock->duplicate_lock == NULL),
+ "hash lock must not already hold a duplicate lock");
+ pbn_lock->holder_count += 1;
+ hash_lock->duplicate_lock = pbn_lock;
+}
+
+/**
+ * dequeue_lock_waiter() - Remove the first data_vio from the lock's waitq and return it.
+ * @lock: The lock containing the wait queue.
+ *
+ * Return: The first (oldest) waiter in the queue, or NULL if the queue is empty.
+ */
+static inline struct data_vio *dequeue_lock_waiter(struct hash_lock *lock)
+{
+ return vdo_waiter_as_data_vio(vdo_waitq_dequeue_waiter(&lock->waiters));
+}
+
+/**
+ * set_hash_lock() - Set, change, or clear the hash lock a data_vio is using.
+ * @data_vio: The data_vio to update.
+ * @new_lock: The hash lock the data_vio is joining.
+ *
+ * Updates the hash lock (or locks) to reflect the change in membership.
+ */
+static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)
+{
+ struct hash_lock *old_lock = data_vio->hash_lock;
+
+ if (old_lock != NULL) {
+ VDO_ASSERT_LOG_ONLY(data_vio->hash_zone != NULL,
+ "must have a hash zone when holding a hash lock");
+ VDO_ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry),
+ "must be on a hash lock ring when holding a hash lock");
+ VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 0,
+ "hash lock reference must be counted");
+
+ if ((old_lock->state != VDO_HASH_LOCK_BYPASSING) &&
+ (old_lock->state != VDO_HASH_LOCK_UNLOCKING)) {
+ /*
+ * If the reference count goes to zero in a non-terminal state, we're most
+ * likely leaking this lock.
+ */
+ VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 1,
+ "hash locks should only become unreferenced in a terminal state, not state %s",
+ get_hash_lock_state_name(old_lock->state));
+ }
+
+ list_del_init(&data_vio->hash_lock_entry);
+ old_lock->reference_count -= 1;
+
+ data_vio->hash_lock = NULL;
+ }
+
+ if (new_lock != NULL) {
+ /*
+ * Keep all data_vios sharing the lock on a ring since they can complete in any
+ * order and we'll always need a pointer to one to compare data.
+ */
+ list_move_tail(&data_vio->hash_lock_entry, &new_lock->duplicate_ring);
+ new_lock->reference_count += 1;
+ if (new_lock->max_references < new_lock->reference_count)
+ new_lock->max_references = new_lock->reference_count;
+
+ data_vio->hash_lock = new_lock;
+ }
+}
+
+/* There are loops in the state diagram, so some forward decl's are needed. */
+static void start_deduping(struct hash_lock *lock, struct data_vio *agent,
+ bool agent_is_done);
+static void start_locking(struct hash_lock *lock, struct data_vio *agent);
+static void start_writing(struct hash_lock *lock, struct data_vio *agent);
+static void unlock_duplicate_pbn(struct vdo_completion *completion);
+static void transfer_allocation_lock(struct data_vio *data_vio);
+
+/**
+ * exit_hash_lock() - Bottleneck for data_vios that have written or deduplicated and that are no
+ * longer needed to be an agent for the hash lock.
+ * @data_vio: The data_vio to complete and send to be cleaned up.
+ */
+static void exit_hash_lock(struct data_vio *data_vio)
+{
+ /* Release the hash lock now, saving a thread transition in cleanup. */
+ vdo_release_hash_lock(data_vio);
+
+ /* Complete the data_vio and start the clean-up path to release any locks it still holds. */
+ data_vio->vio.completion.callback = complete_data_vio;
+
+ continue_data_vio(data_vio);
+}
+
+/**
+ * set_duplicate_location() - Set the location of the duplicate block for data_vio, updating the
+ * is_duplicate and duplicate fields from a zoned_pbn.
+ * @data_vio: The data_vio to modify.
+ * @source: The location of the duplicate.
+ */
+static void set_duplicate_location(struct data_vio *data_vio,
+ const struct zoned_pbn source)
+{
+ data_vio->is_duplicate = (source.pbn != VDO_ZERO_BLOCK);
+ data_vio->duplicate = source;
+}
+
+/**
+ * retire_lock_agent() - Retire the active lock agent, replacing it with the first lock waiter, and
+ * make the retired agent exit the hash lock.
+ * @lock: The hash lock to update.
+ *
+ * Return: The new lock agent (which will be NULL if there was no waiter)
+ */
+static struct data_vio *retire_lock_agent(struct hash_lock *lock)
+{
+ struct data_vio *old_agent = lock->agent;
+ struct data_vio *new_agent = dequeue_lock_waiter(lock);
+
+ lock->agent = new_agent;
+ exit_hash_lock(old_agent);
+ if (new_agent != NULL)
+ set_duplicate_location(new_agent, lock->duplicate);
+ return new_agent;
+}
+
+/**
+ * wait_on_hash_lock() - Add a data_vio to the lock's queue of waiters.
+ * @lock: The hash lock on which to wait.
+ * @data_vio: The data_vio to add to the queue.
+ */
+static void wait_on_hash_lock(struct hash_lock *lock, struct data_vio *data_vio)
+{
+ vdo_waitq_enqueue_waiter(&lock->waiters, &data_vio->waiter);
+
+ /*
+ * Make sure the agent doesn't block indefinitely in the packer since it now has at least
+ * one other data_vio waiting on it.
+ */
+ if ((lock->state != VDO_HASH_LOCK_WRITING) || !cancel_data_vio_compression(lock->agent))
+ return;
+
+ /*
+ * Even though we're waiting, we also have to send ourselves as a one-way message to the
+ * packer to ensure the agent continues executing. This is safe because
+	 * cancel_data_vio_compression() guarantees the agent won't continue executing until this
+ * message arrives in the packer, and because the wait queue link isn't used for sending
+ * the message.
+ */
+ data_vio->compression.lock_holder = lock->agent;
+ launch_data_vio_packer_callback(data_vio, vdo_remove_lock_holder_from_packer);
+}
+
+/**
+ * abort_waiter() - waiter_callback_fn function that shunts waiters to write their blocks without
+ * optimization.
+ * @waiter: The data_vio's waiter link.
+ * @context: Not used.
+ */
+static void abort_waiter(struct vdo_waiter *waiter, void *context __always_unused)
+{
+ write_data_vio(vdo_waiter_as_data_vio(waiter));
+}
+
+/**
+ * start_bypassing() - Stop using the hash lock.
+ * @lock: The hash lock.
+ * @agent: The data_vio acting as the agent for the lock.
+ *
+ * Stops using the hash lock. This is the final transition for hash locks which did not get an
+ * error.
+ */
+static void start_bypassing(struct hash_lock *lock, struct data_vio *agent)
+{
+ lock->state = VDO_HASH_LOCK_BYPASSING;
+ exit_hash_lock(agent);
+}
+
+void vdo_clean_failed_hash_lock(struct data_vio *data_vio)
+{
+ struct hash_lock *lock = data_vio->hash_lock;
+
+ if (lock->state == VDO_HASH_LOCK_BYPASSING) {
+ exit_hash_lock(data_vio);
+ return;
+ }
+
+ if (lock->agent == NULL) {
+ lock->agent = data_vio;
+ } else if (data_vio != lock->agent) {
+ exit_hash_lock(data_vio);
+ return;
+ }
+
+ lock->state = VDO_HASH_LOCK_BYPASSING;
+
+ /* Ensure we don't attempt to update advice when cleaning up. */
+ lock->update_advice = false;
+
+ vdo_waitq_notify_all_waiters(&lock->waiters, abort_waiter, NULL);
+
+ if (lock->duplicate_lock != NULL) {
+ /* The agent must reference the duplicate zone to launch it. */
+ data_vio->duplicate = lock->duplicate;
+ launch_data_vio_duplicate_zone_callback(data_vio, unlock_duplicate_pbn);
+ return;
+ }
+
+ lock->agent = NULL;
+ data_vio->is_duplicate = false;
+ exit_hash_lock(data_vio);
+}
+
+/**
+ * finish_unlocking() - Handle the result of the agent for the lock releasing a read lock on
+ * duplicate candidate.
+ * @completion: The completion of the data_vio acting as the lock's agent.
+ *
+ * This continuation is registered in unlock_duplicate_pbn().
+ */
+static void finish_unlocking(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct hash_lock *lock = agent->hash_lock;
+
+ assert_hash_lock_agent(agent, __func__);
+
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL,
+ "must have released the duplicate lock for the hash lock");
+
+ if (!lock->verified) {
+ /*
+ * UNLOCKING -> WRITING transition: The lock we released was on an unverified
+ * block, so it must have been a lock on advice we were verifying, not on a
+ * location that was used for deduplication. Go write (or compress) the block to
+ * get a location to dedupe against.
+ */
+ start_writing(lock, agent);
+ return;
+ }
+
+ /*
+ * With the lock released, the verified duplicate block may already have changed and will
+ * need to be re-verified if a waiter arrived.
+ */
+ lock->verified = false;
+
+ if (vdo_waitq_has_waiters(&lock->waiters)) {
+ /*
+ * UNLOCKING -> LOCKING transition: A new data_vio entered the hash lock while the
+ * agent was releasing the PBN lock. The current agent exits and the waiter has to
+ * re-lock and re-verify the duplicate location.
+ *
+ * TODO: If we used the current agent to re-acquire the PBN lock we wouldn't need
+ * to re-verify.
+ */
+ agent = retire_lock_agent(lock);
+ start_locking(lock, agent);
+ return;
+ }
+
+ /*
+ * UNLOCKING -> BYPASSING transition: The agent is done with the lock and no other
+ * data_vios reference it, so remove it from the lock map and return it to the pool.
+ */
+ start_bypassing(lock, agent);
+}
+
+/**
+ * unlock_duplicate_pbn() - Release a read lock on the PBN of the block that may or may not have
+ * contained duplicate data.
+ * @completion: The completion of the data_vio acting as the lock's agent.
+ *
+ * This continuation is launched by start_unlocking(), and calls back to finish_unlocking() on the
+ * hash zone thread.
+ */
+static void unlock_duplicate_pbn(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct hash_lock *lock = agent->hash_lock;
+
+ assert_data_vio_in_duplicate_zone(agent);
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock != NULL,
+ "must have a duplicate lock to release");
+
+ vdo_release_physical_zone_pbn_lock(agent->duplicate.zone, agent->duplicate.pbn,
+ vdo_forget(lock->duplicate_lock));
+ if (lock->state == VDO_HASH_LOCK_BYPASSING) {
+ complete_data_vio(completion);
+ return;
+ }
+
+ launch_data_vio_hash_zone_callback(agent, finish_unlocking);
+}
+
+/**
+ * start_unlocking() - Release a read lock on the PBN of the block that may or may not have
+ * contained duplicate data.
+ * @lock: The hash lock.
+ * @agent: The data_vio currently acting as the agent for the lock.
+ */
+static void start_unlocking(struct hash_lock *lock, struct data_vio *agent)
+{
+ lock->state = VDO_HASH_LOCK_UNLOCKING;
+ launch_data_vio_duplicate_zone_callback(agent, unlock_duplicate_pbn);
+}
+
+static void release_context(struct dedupe_context *context)
+{
+ struct hash_zone *zone = context->zone;
+
+ WRITE_ONCE(zone->active, zone->active - 1);
+ list_move(&context->list_entry, &zone->available);
+}
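+
+/*
+ * Hedged sketch (illustration only, not part of this patch) of the borrow
+ * operation that release_context() undoes: take an idle context off the
+ * zone's available list and publish the new active count with WRITE_ONCE(),
+ * mirroring the decrement above. The real acquire path is outside this hunk.
+ */
+static struct dedupe_context *acquire_context_sketch(struct hash_zone *zone)
+{
+ struct dedupe_context *context;
+
+ if (list_empty(&zone->available))
+ return NULL;
+
+ context = list_first_entry(&zone->available, struct dedupe_context,
+ list_entry);
+ list_del_init(&context->list_entry);
+ WRITE_ONCE(zone->active, zone->active + 1);
+ return context;
+}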
+
+static void process_update_result(struct data_vio *agent)
+{
+ struct dedupe_context *context = agent->dedupe_context;
+
+ if ((context == NULL) ||
+ !change_context_state(context, DEDUPE_CONTEXT_COMPLETE, DEDUPE_CONTEXT_IDLE))
+ return;
+
+ release_context(context);
+}
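+
+/*
+ * Hedged sketch of the atomic transition helper assumed by
+ * process_update_result() above (the real change_context_state() is outside
+ * this hunk): a compare-and-swap guarantees that exactly one thread wins any
+ * given transition, which is what makes the timeout/completion race handled
+ * in finish_index_operation() below safe.
+ */
+static bool change_context_state_sketch(struct dedupe_context *context, int old,
+ int new)
+{
+ return atomic_cmpxchg(&context->state, old, new) == old;
+}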
+
+/**
+ * finish_updating() - Process the result of a UDS update performed by the agent for the lock.
+ * @completion: The completion of the data_vio that performed the update.
+ *
+ * This continuation is registered in start_updating().
+ */
+static void finish_updating(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct hash_lock *lock = agent->hash_lock;
+
+ assert_hash_lock_agent(agent, __func__);
+
+ process_update_result(agent);
+
+ /*
+ * UDS was updated successfully, so don't update again unless the duplicate location
+ * changes due to rollover.
+ */
+ lock->update_advice = false;
+
+ if (vdo_waitq_has_waiters(&lock->waiters)) {
+ /*
+ * UPDATING -> DEDUPING transition: A new data_vio arrived during the UDS update.
+ * Send it on the verified dedupe path. The agent is done with the lock, but the
+ * lock may still need to use it to clean up after rollover.
+ */
+ start_deduping(lock, agent, true);
+ return;
+ }
+
+ if (lock->duplicate_lock != NULL) {
+ /*
+ * UPDATING -> UNLOCKING transition: No one is waiting to dedupe, but we hold a
+ * duplicate PBN lock, so go release it.
+ */
+ start_unlocking(lock, agent);
+ return;
+ }
+
+ /*
+ * UPDATING -> BYPASSING transition: No one is waiting to dedupe and there's no lock to
+ * release.
+ */
+ start_bypassing(lock, agent);
+}
+
+static void query_index(struct data_vio *data_vio, enum uds_request_type operation);
+
+/**
+ * start_updating() - Continue deduplication with the last step, updating UDS with the location of
+ * the duplicate that should be returned as advice in the future.
+ * @lock: The hash lock.
+ * @agent: The data_vio currently acting as the agent for the lock.
+ */
+static void start_updating(struct hash_lock *lock, struct data_vio *agent)
+{
+ lock->state = VDO_HASH_LOCK_UPDATING;
+
+ VDO_ASSERT_LOG_ONLY(lock->verified, "new advice should have been verified");
+ VDO_ASSERT_LOG_ONLY(lock->update_advice, "should only update advice if needed");
+
+ agent->last_async_operation = VIO_ASYNC_OP_UPDATE_DEDUPE_INDEX;
+ set_data_vio_hash_zone_callback(agent, finish_updating);
+ query_index(agent, UDS_UPDATE);
+}
+
+/**
+ * finish_deduping() - Handle a data_vio that has finished deduplicating against the block locked
+ * by the hash lock.
+ * @lock: The hash lock.
+ * @data_vio: The lock holder that has finished deduplicating.
+ *
+ * If there are other data_vios still sharing the lock, this will just release the data_vio's share
+ * of the lock and finish processing the data_vio. If this is the last data_vio holding the lock,
+ * this makes the data_vio the lock agent and uses it to advance the state of the lock so it can
+ * eventually be released.
+ */
+static void finish_deduping(struct hash_lock *lock, struct data_vio *data_vio)
+{
+ struct data_vio *agent = data_vio;
+
+ VDO_ASSERT_LOG_ONLY(lock->agent == NULL, "shouldn't have an agent in DEDUPING");
+ VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters),
+ "shouldn't have any lock waiters in DEDUPING");
+
+ /* Just release the lock reference if other data_vios are still deduping. */
+ if (lock->reference_count > 1) {
+ exit_hash_lock(data_vio);
+ return;
+ }
+
+ /* The hash lock must have an agent for all other lock states. */
+ lock->agent = agent;
+ if (lock->update_advice) {
+ /*
+ * DEDUPING -> UPDATING transition: The location of the duplicate block changed
+ * since the initial UDS query because of compression, rollover, or because the
+ * query agent didn't have an allocation. The UDS update was delayed in case there
+ * was another change in location, but with only this data_vio using the hash lock,
+ * it's time to update the advice.
+ */
+ start_updating(lock, agent);
+ } else {
+ /*
+ * DEDUPING -> UNLOCKING transition: Release the PBN read lock on the duplicate
+ * location so the hash lock itself can be released (contingent on no new data_vios
+ * arriving in the lock before the agent returns).
+ */
+ start_unlocking(lock, agent);
+ }
+}
+
+/**
+ * acquire_lock() - Get the lock for a record name.
+ * @zone: The zone responsible for the hash.
+ * @hash: The hash to lock.
+ * @replace_lock: If non-NULL, the lock already registered for the hash which should be replaced by
+ * the new lock.
+ * @lock_ptr: A pointer to receive the hash lock.
+ *
+ * Gets the lock for the hash (record name) of the data in a data_vio, or, if one does not exist
+ * (or if we are explicitly rolling over), initializes a new lock for the hash and registers it in
+ * the zone. This must only be called in the correct thread for the zone.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check acquire_lock(struct hash_zone *zone,
+ const struct uds_record_name *hash,
+ struct hash_lock *replace_lock,
+ struct hash_lock **lock_ptr)
+{
+ struct hash_lock *lock, *new_lock;
+ int result;
+
+ /*
+ * Borrow and prepare a lock from the pool so we don't have to do two int_map accesses
+ * in the common case of no lock contention.
+ */
+ result = VDO_ASSERT(!list_empty(&zone->lock_pool),
+ "never need to wait for a free hash lock");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ new_lock = list_entry(zone->lock_pool.prev, struct hash_lock, pool_node);
+ list_del_init(&new_lock->pool_node);
+
+ /*
+ * Fill in the hash of the new lock so we can map it, since we have to use the hash as the
+ * map key.
+ */
+ new_lock->hash = *hash;
+
+ result = vdo_int_map_put(zone->hash_lock_map, hash_lock_key(new_lock),
+ new_lock, (replace_lock != NULL), (void **) &lock);
+ if (result != VDO_SUCCESS) {
+ return_hash_lock_to_pool(zone, vdo_forget(new_lock));
+ return result;
+ }
+
+ if (replace_lock != NULL) {
+ /* On mismatch put the old lock back and return a severe error */
+ VDO_ASSERT_LOG_ONLY(lock == replace_lock,
+ "old lock must have been in the lock map");
+ /* TODO: Check earlier and bail out? */
+ VDO_ASSERT_LOG_ONLY(replace_lock->registered,
+ "old lock must have been marked registered");
+ replace_lock->registered = false;
+ }
+
+ if (lock == replace_lock) {
+ lock = new_lock;
+ lock->registered = true;
+ } else {
+ /* There's already a lock for the hash, so we don't need the borrowed lock. */
+ return_hash_lock_to_pool(zone, vdo_forget(new_lock));
+ }
+
+ *lock_ptr = lock;
+ return VDO_SUCCESS;
+}
+
+/**
+ * enter_forked_lock() - Bind the data_vio to a new hash lock.
+ *
+ * Implements waiter_callback_fn. Binds the data_vio that was waiting to a new hash lock and waits
+ * on that lock.
+ */
+static void enter_forked_lock(struct vdo_waiter *waiter, void *context)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+ struct hash_lock *new_lock = context;
+
+ set_hash_lock(data_vio, new_lock);
+ wait_on_hash_lock(new_lock, data_vio);
+}
+
+/**
+ * fork_hash_lock() - Fork a hash lock because it has run out of increments on the duplicate PBN.
+ * @old_lock: The hash lock to fork.
+ * @new_agent: The data_vio that will be the agent for the new lock.
+ *
+ * Transfers the new agent and any lock waiters to a new hash lock instance which takes the place
+ * of the old lock in the lock map. The old lock remains active, but will not update advice.
+ */
+static void fork_hash_lock(struct hash_lock *old_lock, struct data_vio *new_agent)
+{
+ struct hash_lock *new_lock;
+ int result;
+
+ result = acquire_lock(new_agent->hash_zone, &new_agent->record_name, old_lock,
+ &new_lock);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(new_agent, result);
+ return;
+ }
+
+ /*
+ * Only one of the two locks should update UDS. The old lock is out of references, so it
+ * would be poor dedupe advice in the short term.
+ */
+ old_lock->update_advice = false;
+ new_lock->update_advice = true;
+
+ set_hash_lock(new_agent, new_lock);
+ new_lock->agent = new_agent;
+
+ vdo_waitq_notify_all_waiters(&old_lock->waiters, enter_forked_lock, new_lock);
+
+ new_agent->is_duplicate = false;
+ start_writing(new_lock, new_agent);
+}
+
+/**
+ * launch_dedupe() - Reserve a reference count increment for a data_vio and launch it on the dedupe
+ * path.
+ * @lock: The hash lock.
+ * @data_vio: The data_vio to deduplicate using the hash lock.
+ * @has_claim: true if the data_vio has already claimed an increment from the duplicate lock.
+ *
+ * If no increments are available, this will roll over to a new hash lock and launch the data_vio
+ * as the writing agent for that lock.
+ */
+static void launch_dedupe(struct hash_lock *lock, struct data_vio *data_vio,
+ bool has_claim)
+{
+ if (!has_claim && !vdo_claim_pbn_lock_increment(lock->duplicate_lock)) {
+ /* Out of increments, so must roll over to a new lock. */
+ fork_hash_lock(lock, data_vio);
+ return;
+ }
+
+ /* Deduplicate against the lock's verified location. */
+ set_duplicate_location(data_vio, lock->duplicate);
+ data_vio->new_mapped = data_vio->duplicate;
+ update_metadata_for_data_vio_write(data_vio, lock->duplicate_lock);
+}
+
+/**
+ * start_deduping() - Enter the hash lock state where data_vios deduplicate in parallel against a
+ * true copy of their data on disk.
+ * @lock: The hash lock.
+ * @agent: The data_vio acting as the agent for the lock.
+ * @agent_is_done: true only if the agent has already written or deduplicated against its data.
+ *
+ * If the agent itself needs to deduplicate, an increment for it must already have been claimed
+ * from the duplicate lock, ensuring the hash lock will still have a data_vio holding it.
+ */
+static void start_deduping(struct hash_lock *lock, struct data_vio *agent,
+ bool agent_is_done)
+{
+ lock->state = VDO_HASH_LOCK_DEDUPING;
+
+ /*
+ * We don't take the downgraded allocation lock from the agent unless we actually need to
+ * deduplicate against it.
+ */
+ if (lock->duplicate_lock == NULL) {
+ VDO_ASSERT_LOG_ONLY(!vdo_is_state_compressed(agent->new_mapped.state),
+ "compression must have shared a lock");
+ VDO_ASSERT_LOG_ONLY(agent_is_done,
+ "agent must have written the new duplicate");
+ transfer_allocation_lock(agent);
+ }
+
+ VDO_ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(lock->duplicate_lock),
+ "duplicate_lock must be a PBN read lock");
+
+ /*
+ * This state is not like any of the other states. There is no designated agent--the agent
+ * transitioning to this state and all the waiters will be launched to deduplicate in
+ * parallel.
+ */
+ lock->agent = NULL;
+
+ /*
+ * Launch the agent (if not already deduplicated) and as many lock waiters as we have
+ * available increments for on the dedupe path. If we run out of increments, rollover will
+ * be triggered and the remaining waiters will be transferred to the new lock.
+ */
+ if (!agent_is_done) {
+ launch_dedupe(lock, agent, true);
+ agent = NULL;
+ }
+ while (vdo_waitq_has_waiters(&lock->waiters))
+ launch_dedupe(lock, dequeue_lock_waiter(lock), false);
+
+ if (agent_is_done) {
+ /*
+ * In the degenerate case where all the waiters rolled over to a new lock, this
+ * will continue to use the old agent to clean up this lock; otherwise it just
+ * lets the agent exit the lock.
+ */
+ finish_deduping(lock, agent);
+ }
+}
+
+/**
+ * increment_stat() - Increment a statistic counter in a non-atomic yet thread-safe manner.
+ * @stat: The statistic field to increment.
+ */
+static inline void increment_stat(u64 *stat)
+{
+ /*
+ * Must only be mutated on the hash zone thread. Prevents any compiler shenanigans from
+ * affecting other threads reading stats.
+ */
+ WRITE_ONCE(*stat, *stat + 1);
+}
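+
+/*
+ * Illustrative counterpart (not part of this patch): a reader on another
+ * thread would pair the WRITE_ONCE() in increment_stat() with READ_ONCE() to
+ * load an untorn value without locking; this is how the published counters
+ * are expected to be consumed.
+ */
+static inline u64 read_stat_sketch(const u64 *stat)
+{
+ return READ_ONCE(*stat);
+}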
+
+/**
+ * finish_verifying() - Handle the result of the agent for the lock comparing its data to the
+ * duplicate candidate.
+ * @completion: The completion of the data_vio used to verify dedupe
+ *
+ * This continuation is registered in start_verifying().
+ */
+static void finish_verifying(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct hash_lock *lock = agent->hash_lock;
+
+ assert_hash_lock_agent(agent, __func__);
+
+ lock->verified = agent->is_duplicate;
+
+ /*
+ * Only count the result of the initial verification of the advice as valid or stale, and
+ * not any re-verifications due to PBN lock releases.
+ */
+ if (!lock->verify_counted) {
+ lock->verify_counted = true;
+ if (lock->verified)
+ increment_stat(&agent->hash_zone->statistics.dedupe_advice_valid);
+ else
+ increment_stat(&agent->hash_zone->statistics.dedupe_advice_stale);
+ }
+
+ /*
+ * Even if the block is a verified duplicate, we can't start to deduplicate unless we can
+ * claim a reference count increment for the agent.
+ */
+ if (lock->verified && !vdo_claim_pbn_lock_increment(lock->duplicate_lock)) {
+ agent->is_duplicate = false;
+ lock->verified = false;
+ }
+
+ if (lock->verified) {
+ /*
+ * VERIFYING -> DEDUPING transition: The advice is for a true duplicate, so start
+ * deduplicating against it, if references are available.
+ */
+ start_deduping(lock, agent, false);
+ } else {
+ /*
+ * VERIFYING -> UNLOCKING transition: Either the verify failed or we'd try to
+ * dedupe and roll over immediately, which would fail because it would leave the
+ * lock without an agent to release the PBN lock. In both cases, the data will have
+ * to be written or compressed, but first the advice PBN must be unlocked by the
+ * VERIFYING agent.
+ */
+ lock->update_advice = true;
+ start_unlocking(lock, agent);
+ }
+}
+
+static bool blocks_equal(char *block1, char *block2)
+{
+ int i;
+
+ for (i = 0; i < VDO_BLOCK_SIZE; i += sizeof(u64)) {
+ if (*((u64 *) &block1[i]) != *((u64 *) &block2[i]))
+ return false;
+ }
+
+ return true;
+}
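+
+/*
+ * Equivalent semantics to blocks_equal() for illustration: one memcmp() over
+ * the whole block. The open-coded u64 loop above presumably lets the compiler
+ * compare a machine word at a time without a generic memcmp()'s byte-wise
+ * paths (an assumption about intent, not a claim from this patch).
+ */
+static bool blocks_equal_memcmp_sketch(char *block1, char *block2)
+{
+ return memcmp(block1, block2, VDO_BLOCK_SIZE) == 0;
+}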
+
+static void verify_callback(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+
+ agent->is_duplicate = blocks_equal(agent->vio.data, agent->scratch_block);
+ launch_data_vio_hash_zone_callback(agent, finish_verifying);
+}
+
+static void uncompress_and_verify(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ int result;
+
+ result = uncompress_data_vio(agent, agent->duplicate.state,
+ agent->scratch_block);
+ if (result == VDO_SUCCESS) {
+ verify_callback(completion);
+ return;
+ }
+
+ agent->is_duplicate = false;
+ launch_data_vio_hash_zone_callback(agent, finish_verifying);
+}
+
+static void verify_endio(struct bio *bio)
+{
+ struct data_vio *agent = vio_as_data_vio(bio->bi_private);
+ int result = blk_status_to_errno(bio->bi_status);
+
+ vdo_count_completed_bios(bio);
+ if (result != VDO_SUCCESS) {
+ agent->is_duplicate = false;
+ launch_data_vio_hash_zone_callback(agent, finish_verifying);
+ return;
+ }
+
+ if (vdo_is_state_compressed(agent->duplicate.state)) {
+ launch_data_vio_cpu_callback(agent, uncompress_and_verify,
+ CPU_Q_COMPRESS_BLOCK_PRIORITY);
+ return;
+ }
+
+ launch_data_vio_cpu_callback(agent, verify_callback,
+ CPU_Q_COMPLETE_READ_PRIORITY);
+}
+
+/**
+ * start_verifying() - Begin the data verification phase.
+ * @lock: The hash lock (must be LOCKING).
+ * @agent: The data_vio to use to read and compare candidate data.
+ *
+ * Continue the deduplication path for a hash lock by using the agent to read (and possibly
+ * decompress) the data at the candidate duplicate location, comparing it to the data in the agent
+ * to verify that the candidate is identical to all the data_vios sharing the hash. If so, it can
+ * be deduplicated against; otherwise, a data_vio allocation will have to be written to and used for
+ * dedupe.
+ */
+static void start_verifying(struct hash_lock *lock, struct data_vio *agent)
+{
+ int result;
+ struct vio *vio = &agent->vio;
+ char *buffer = (vdo_is_state_compressed(agent->duplicate.state) ?
+ (char *) agent->compression.block :
+ agent->scratch_block);
+
+ lock->state = VDO_HASH_LOCK_VERIFYING;
+ VDO_ASSERT_LOG_ONLY(!lock->verified, "hash lock only verifies advice once");
+
+ agent->last_async_operation = VIO_ASYNC_OP_VERIFY_DUPLICATION;
+ result = vio_reset_bio(vio, buffer, verify_endio, REQ_OP_READ,
+ agent->duplicate.pbn);
+ if (result != VDO_SUCCESS) {
+ set_data_vio_hash_zone_callback(agent, finish_verifying);
+ continue_data_vio_with_error(agent, result);
+ return;
+ }
+
+ set_data_vio_bio_zone_callback(agent, vdo_submit_vio);
+ vdo_launch_completion_with_priority(&vio->completion, BIO_Q_VERIFY_PRIORITY);
+}
+
+/**
+ * finish_locking() - Handle the result of the agent for the lock attempting to obtain a PBN read
+ * lock on the candidate duplicate block.
+ * @completion: The completion of the data_vio that attempted to get the read lock.
+ *
+ * This continuation is registered in lock_duplicate_pbn().
+ */
+static void finish_locking(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct hash_lock *lock = agent->hash_lock;
+
+ assert_hash_lock_agent(agent, __func__);
+
+ if (!agent->is_duplicate) {
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL,
+ "must not hold duplicate_lock if not flagged as a duplicate");
+ /*
+ * LOCKING -> WRITING transition: The advice block is being modified or has no
+ * available references, so try to write or compress the data, remembering to
+ * update UDS later with the new advice.
+ */
+ increment_stat(&agent->hash_zone->statistics.dedupe_advice_stale);
+ lock->update_advice = true;
+ start_writing(lock, agent);
+ return;
+ }
+
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock != NULL,
+ "must hold duplicate_lock if flagged as a duplicate");
+
+ if (!lock->verified) {
+ /*
+ * LOCKING -> VERIFYING transition: Continue on the unverified dedupe path, reading
+ * the candidate duplicate and comparing it to the agent's data to decide whether
+ * it is a true duplicate or stale advice.
+ */
+ start_verifying(lock, agent);
+ return;
+ }
+
+ if (!vdo_claim_pbn_lock_increment(lock->duplicate_lock)) {
+ /*
+ * LOCKING -> UNLOCKING transition: The verified block was re-locked, but has no
+ * available increments left. Must first release the useless PBN read lock before
+ * rolling over to a new copy of the block.
+ */
+ agent->is_duplicate = false;
+ lock->verified = false;
+ lock->update_advice = true;
+ start_unlocking(lock, agent);
+ return;
+ }
+
+ /*
+ * LOCKING -> DEDUPING transition: Continue on the verified dedupe path, deduplicating
+ * against a location that was previously verified or written to.
+ */
+ start_deduping(lock, agent, false);
+}
+
+static bool acquire_provisional_reference(struct data_vio *agent, struct pbn_lock *lock,
+ struct slab_depot *depot)
+{
+ /* Ensure that the newly-locked block is referenced. */
+ struct vdo_slab *slab = vdo_get_slab(depot, agent->duplicate.pbn);
+ int result = vdo_acquire_provisional_reference(slab, agent->duplicate.pbn, lock);
+
+ if (result == VDO_SUCCESS)
+ return true;
+
+ vdo_log_warning_strerror(result,
+ "Error acquiring provisional reference for dedupe candidate; aborting dedupe");
+ agent->is_duplicate = false;
+ vdo_release_physical_zone_pbn_lock(agent->duplicate.zone,
+ agent->duplicate.pbn, lock);
+ continue_data_vio_with_error(agent, result);
+ return false;
+}
+
+/**
+ * lock_duplicate_pbn() - Acquire a read lock on the PBN of the block containing candidate
+ * duplicate data (compressed or uncompressed).
+ * @completion: The completion of the data_vio attempting to acquire the physical block lock on
+ * behalf of its hash lock.
+ *
+ * If the PBN is already locked for writing, the lock attempt is abandoned and is_duplicate will be
+ * cleared before calling back. This continuation is launched from start_locking(), and calls back
+ * to finish_locking() on the hash zone thread.
+ */
+static void lock_duplicate_pbn(struct vdo_completion *completion)
+{
+ unsigned int increment_limit;
+ struct pbn_lock *lock;
+ int result;
+
+ struct data_vio *agent = as_data_vio(completion);
+ struct slab_depot *depot = vdo_from_data_vio(agent)->depot;
+ struct physical_zone *zone = agent->duplicate.zone;
+
+ assert_data_vio_in_duplicate_zone(agent);
+
+ set_data_vio_hash_zone_callback(agent, finish_locking);
+
+ /*
+ * While in the zone that owns it, find out how many additional references can be made to
+ * the block if it turns out to truly be a duplicate.
+ */
+ increment_limit = vdo_get_increment_limit(depot, agent->duplicate.pbn);
+ if (increment_limit == 0) {
+ /*
+ * We could deduplicate against it later if a reference happened to be released
+ * during verification, but it's probably better to bail out now.
+ */
+ agent->is_duplicate = false;
+ continue_data_vio(agent);
+ return;
+ }
+
+ result = vdo_attempt_physical_zone_pbn_lock(zone, agent->duplicate.pbn,
+ VIO_READ_LOCK, &lock);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(agent, result);
+ return;
+ }
+
+ if (!vdo_is_pbn_read_lock(lock)) {
+ /*
+ * There are three cases of write locks: uncompressed data block writes, compressed
+ * (packed) block writes, and block map page writes. In all three cases, we give up
+ * on trying to verify the advice and don't bother trying to deduplicate against the
+ * data in the write lock holder.
+ *
+ * 1) We don't ever want to try to deduplicate against a block map page.
+ *
+ * 2a) It's very unlikely we'd deduplicate against an entire packed block, both
+ * because of the chance of matching it, and because we don't record advice for it,
+ * but for the uncompressed representation of all the fragments it contains. The
+ * only way we'd be getting lock contention is if we've written the same
+ * representation coincidentally before, had it become unreferenced, and it just
+ * happened to be packed together from compressed writes when we go to verify the
+ * lucky advice. Giving up is a minuscule loss of potential dedupe.
+ *
+ * 2b) If the advice is for a slot of a compressed block, it's about to get
+ * smashed, and the write smashing it cannot contain our data--it would have to be
+ * writing on behalf of our hash lock, but that's impossible since we're the lock
+ * agent.
+ *
+ * 3a) If the lock is held by a data_vio with different data, the advice is already
+ * stale or is about to become stale.
+ *
+ * 3b) If the lock is held by a data_vio that matches us, we may as well
+ * write it ourselves (or reference the copy we already wrote) instead of
+ * potentially having many duplicates wait for the lock holder to write, journal,
+ * hash, and finally arrive in the hash lock. We lose a chance to avoid a UDS
+ * update in the very rare case of advice for a free block that just happened to be
+ * allocated to a data_vio with the same hash. There's also a chance to save on a
+ * block write, at the cost of a block verify. Saving on a full block compare in
+ * all stale advice cases almost certainly outweighs saving a UDS update and
+ * trading a write for a read in a lucky case where advice would have been saved
+ * from becoming stale.
+ */
+ agent->is_duplicate = false;
+ continue_data_vio(agent);
+ return;
+ }
+
+ if (lock->holder_count == 0) {
+ if (!acquire_provisional_reference(agent, lock, depot))
+ return;
+
+ /*
+ * The increment limit we grabbed earlier is still valid. The lock now holds the
+ * rights to acquire all those references. Those rights will be claimed by hash
+ * locks sharing this read lock.
+ */
+ lock->increment_limit = increment_limit;
+ }
+
+ /*
+ * We've successfully acquired a read lock on behalf of the hash lock, so mark it as such.
+ */
+ set_duplicate_lock(agent->hash_lock, lock);
+
+ /*
+ * TODO: Optimization: We could directly launch the block verify, then switch to a hash
+ * thread.
+ */
+ continue_data_vio(agent);
+}
+
+/**
+ * start_locking() - Continue deduplication for a hash lock that has obtained valid advice of a
+ * potential duplicate through its agent.
+ * @lock: The hash lock (currently must be QUERYING).
+ * @agent: The data_vio bearing the dedupe advice.
+ */
+static void start_locking(struct hash_lock *lock, struct data_vio *agent)
+{
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL,
+ "must not acquire a duplicate lock when already holding it");
+
+ lock->state = VDO_HASH_LOCK_LOCKING;
+
+ /*
+ * TODO: Optimization: If we arrange to continue on the duplicate zone thread when
+ * accepting the advice, and don't explicitly change lock states (or use an agent-local
+ * state, or an atomic), we can avoid a thread transition here.
+ */
+ agent->last_async_operation = VIO_ASYNC_OP_LOCK_DUPLICATE_PBN;
+ launch_data_vio_duplicate_zone_callback(agent, lock_duplicate_pbn);
+}
+
+/**
+ * finish_writing() - Re-entry point for the lock agent after it has finished writing or
+ * compressing its copy of the data block.
+ * @lock: The hash lock, which must be in state WRITING.
+ * @agent: The data_vio that wrote its data for the lock.
+ *
+ * The agent will never need to dedupe against anything, so it's done with the lock, but the lock
+ * may not be finished with it, as a UDS update might still be needed.
+ *
+ * If there are other lock holders, the agent will hand the job to one of them and exit, leaving
+ * the lock to deduplicate against the just-written block. If there are no other lock holders, the
+ * agent either exits (and later tears down the hash lock), or it remains the agent and updates
+ * UDS.
+ */
+static void finish_writing(struct hash_lock *lock, struct data_vio *agent)
+{
+ /*
+ * Dedupe against the data block or compressed block slot the agent wrote. Since we know
+ * the write succeeded, there's no need to verify it.
+ */
+ lock->duplicate = agent->new_mapped;
+ lock->verified = true;
+
+ if (vdo_is_state_compressed(lock->duplicate.state) && lock->registered) {
+ /*
+ * Compression means the location we gave in the UDS query is not the location
+ * we're using to deduplicate.
+ */
+ lock->update_advice = true;
+ }
+
+ /* If there are any waiters, we need to start deduping them. */
+ if (vdo_waitq_has_waiters(&lock->waiters)) {
+ /*
+ * WRITING -> DEDUPING transition: an asynchronously-written block failed to
+ * compress, so the PBN lock on the written copy was already transferred. The agent
+ * is done with the lock, but the lock may still need to use it to clean up after
+ * rollover.
+ */
+ start_deduping(lock, agent, true);
+ return;
+ }
+
+ /*
+ * There are no waiters and the agent has successfully written, so take a step towards
+ * being able to release the hash lock (or just release it).
+ */
+ if (lock->update_advice) {
+ /*
+ * WRITING -> UPDATING transition: There's no waiter and a UDS update is needed, so
+ * retain the WRITING agent and use it to launch the update. The happens on
+ * compression, rollover, or the QUERYING agent not having an allocation.
+ */
+ start_updating(lock, agent);
+ } else if (lock->duplicate_lock != NULL) {
+ /*
+ * WRITING -> UNLOCKING transition: There's no waiter and no update needed, but the
+ * compressed write gave us a shared duplicate lock that we must release.
+ */
+ set_duplicate_location(agent, lock->duplicate);
+ start_unlocking(lock, agent);
+ } else {
+ /*
+ * WRITING -> BYPASSING transition: There's no waiter, no update needed, and no
+ * duplicate lock held, so both the agent and lock have no more work to do. The
+ * agent will release its allocation lock in cleanup.
+ */
+ start_bypassing(lock, agent);
+ }
+}
+
+/**
+ * select_writing_agent() - Search through the lock waiters for a data_vio that has an allocation.
+ * @lock: The hash lock to modify.
+ *
+ * If an allocation is found, swap agents, put the old agent at the head of the wait queue, then
+ * return the new agent. Otherwise, just return the current agent.
+ */
+static struct data_vio *select_writing_agent(struct hash_lock *lock)
+{
+ struct vdo_wait_queue temp_queue;
+ struct data_vio *data_vio;
+
+ vdo_waitq_init(&temp_queue);
+
+ /*
+ * Move waiters to the temp queue one-by-one until we find an allocation. Not ideal to
+ * search, but it only happens when nearly out of space.
+ */
+ while (((data_vio = dequeue_lock_waiter(lock)) != NULL) &&
+ !data_vio_has_allocation(data_vio)) {
+ /* Use the lower-level enqueue since we're just moving waiters around. */
+ vdo_waitq_enqueue_waiter(&temp_queue, &data_vio->waiter);
+ }
+
+ if (data_vio != NULL) {
+ /*
+ * Move the rest of the waiters over to the temp queue, preserving the order they
+ * arrived at the lock.
+ */
+ vdo_waitq_transfer_all_waiters(&lock->waiters, &temp_queue);
+
+ /*
+ * The current agent is being replaced and will have to wait to dedupe; make it the
+ * first waiter since it was the first to reach the lock.
+ */
+ vdo_waitq_enqueue_waiter(&lock->waiters, &lock->agent->waiter);
+ lock->agent = data_vio;
+ } else {
+ /* No one has an allocation, so keep the current agent. */
+ data_vio = lock->agent;
+ }
+
+ /* Swap all the waiters back onto the lock's queue. */
+ vdo_waitq_transfer_all_waiters(&temp_queue, &lock->waiters);
+ return data_vio;
+}
+
+/**
+ * start_writing() - Begin the non-duplicate write path.
+ * @lock: The hash lock (currently must be QUERYING).
+ * @agent: The data_vio currently acting as the agent for the lock.
+ *
+ * Begins the non-duplicate write path for a hash lock that had no advice, selecting a data_vio
+ * with an allocation as a new agent, if necessary, then resuming the agent on the data_vio write
+ * path.
+ */
+static void start_writing(struct hash_lock *lock, struct data_vio *agent)
+{
+ lock->state = VDO_HASH_LOCK_WRITING;
+
+ /*
+ * The agent might not have received an allocation and so can't be used for writing, but
+ * it's entirely possible that one of the waiters did.
+ */
+ if (!data_vio_has_allocation(agent)) {
+ agent = select_writing_agent(lock);
+ /* If none of the waiters had an allocation, the writes all have to fail. */
+ if (!data_vio_has_allocation(agent)) {
+ /*
+ * TODO: Should we keep a variant of BYPASSING that causes new arrivals to
+ * fail immediately if they don't have an allocation? It might be possible
+ * that on some path there would be non-waiters still referencing the lock,
+ * so it would remain in the map as everything is currently spelled, even
+ * if the agent and all waiters release.
+ */
+ continue_data_vio_with_error(agent, VDO_NO_SPACE);
+ return;
+ }
+ }
+
+ /*
+ * If the agent compresses, it might wait indefinitely in the packer, which would be bad if
+ * there are any other data_vios waiting.
+ */
+ if (vdo_waitq_has_waiters(&lock->waiters))
+ cancel_data_vio_compression(agent);
+
+ /*
+ * Send the agent to the compress/pack/write path in vioWrite. If it succeeds, it will
+ * return to the hash lock via vdo_continue_hash_lock() and call finish_writing().
+ */
+ launch_compress_data_vio(agent);
+}
+
+/*
+ * Decode VDO duplicate advice from the old_metadata field of a UDS request.
+ * Returns true if valid advice was found and decoded.
+ */
+static bool decode_uds_advice(struct dedupe_context *context)
+{
+ const struct uds_request *request = &context->request;
+ struct data_vio *data_vio = context->requestor;
+ size_t offset = 0;
+ const struct uds_record_data *encoding = &request->old_metadata;
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+ struct zoned_pbn *advice = &data_vio->duplicate;
+ u8 version;
+ int result;
+
+ if ((request->status != UDS_SUCCESS) || !request->found)
+ return false;
+
+ version = encoding->data[offset++];
+ if (version != UDS_ADVICE_VERSION) {
+ vdo_log_error("invalid UDS advice version code %u", version);
+ return false;
+ }
+
+ advice->state = encoding->data[offset++];
+ advice->pbn = get_unaligned_le64(&encoding->data[offset]);
+ offset += sizeof(u64);
+ BUG_ON(offset != UDS_ADVICE_SIZE);
+
+ /* Don't use advice that's clearly meaningless. */
+ if ((advice->state == VDO_MAPPING_STATE_UNMAPPED) || (advice->pbn == VDO_ZERO_BLOCK)) {
+ vdo_log_debug("Invalid advice from deduplication server: pbn %llu, state %u. Giving up on deduplication of logical block %llu",
+ (unsigned long long) advice->pbn, advice->state,
+ (unsigned long long) data_vio->logical.lbn);
+ atomic64_inc(&vdo->stats.invalid_advice_pbn_count);
+ return false;
+ }
+
+ result = vdo_get_physical_zone(vdo, advice->pbn, &advice->zone);
+ if ((result != VDO_SUCCESS) || (advice->zone == NULL)) {
+ vdo_log_debug("Invalid physical block number from deduplication server: %llu, giving up on deduplication of logical block %llu",
+ (unsigned long long) advice->pbn,
+ (unsigned long long) data_vio->logical.lbn);
+ atomic64_inc(&vdo->stats.invalid_advice_pbn_count);
+ return false;
+ }
+
+ return true;
+}
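+
+/*
+ * Hypothetical inverse of decode_uds_advice() for illustration only: packing
+ * advice into the layout the decoder above expects (one version byte, one
+ * mapping-state byte, then a little-endian 64-bit PBN, UDS_ADVICE_SIZE bytes
+ * in total). Any real encoder is outside this hunk.
+ */
+static void encode_uds_advice_sketch(struct uds_record_data *encoding,
+ const struct zoned_pbn *advice)
+{
+ size_t offset = 0;
+
+ encoding->data[offset++] = UDS_ADVICE_VERSION;
+ encoding->data[offset++] = advice->state;
+ put_unaligned_le64(advice->pbn, &encoding->data[offset]);
+}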
+
+static void process_query_result(struct data_vio *agent)
+{
+ struct dedupe_context *context = agent->dedupe_context;
+
+ if (context == NULL)
+ return;
+
+ if (change_context_state(context, DEDUPE_CONTEXT_COMPLETE, DEDUPE_CONTEXT_IDLE)) {
+ agent->is_duplicate = decode_uds_advice(context);
+ release_context(context);
+ }
+}
+
+/**
+ * finish_querying() - Process the result of a UDS query performed by the agent for the lock.
+ * @completion: The completion of the data_vio that performed the query.
+ *
+ * This continuation is registered in start_querying().
+ */
+static void finish_querying(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct hash_lock *lock = agent->hash_lock;
+
+ assert_hash_lock_agent(agent, __func__);
+
+ process_query_result(agent);
+
+ if (agent->is_duplicate) {
+ lock->duplicate = agent->duplicate;
+ /*
+ * QUERYING -> LOCKING transition: Valid advice was obtained from UDS. Use the
+ * QUERYING agent to start the hash lock on the unverified dedupe path, verifying
+ * that the advice can be used.
+ */
+ start_locking(lock, agent);
+ } else {
+ /*
+ * The agent will be used as the duplicate if it has an allocation; if it does, that
+ * location was posted to UDS, so no update will be needed.
+ */
+ lock->update_advice = !data_vio_has_allocation(agent);
+ /*
+ * QUERYING -> WRITING transition: There was no advice or the advice wasn't valid,
+ * so try to write or compress the data.
+ */
+ start_writing(lock, agent);
+ }
+}
+
+/**
+ * start_querying() - Start deduplication for a hash lock.
+ * @lock: The initialized hash lock.
+ * @data_vio: The data_vio that has just obtained the new lock.
+ *
+ * Starts deduplication for a hash lock that has finished initializing by making the data_vio that
+ * requested it the agent, entering the QUERYING state, and using the agent to perform the UDS
+ * query on behalf of the lock.
+ */
+static void start_querying(struct hash_lock *lock, struct data_vio *data_vio)
+{
+ lock->agent = data_vio;
+ lock->state = VDO_HASH_LOCK_QUERYING;
+ data_vio->last_async_operation = VIO_ASYNC_OP_CHECK_FOR_DUPLICATION;
+ set_data_vio_hash_zone_callback(data_vio, finish_querying);
+ query_index(data_vio,
+ (data_vio_has_allocation(data_vio) ? UDS_POST : UDS_QUERY));
+}
+
+/**
+ * report_bogus_lock_state() - Complain that a data_vio has entered a hash_lock that is in an
+ * unimplemented or unusable state and continue the data_vio with an
+ * error.
+ * @lock: The hash lock.
+ * @data_vio: The data_vio attempting to enter the lock.
+ */
+static void report_bogus_lock_state(struct hash_lock *lock, struct data_vio *data_vio)
+{
+ VDO_ASSERT_LOG_ONLY(false, "hash lock must not be in unimplemented state %s",
+ get_hash_lock_state_name(lock->state));
+ continue_data_vio_with_error(data_vio, VDO_LOCK_ERROR);
+}
+
+/**
+ * vdo_continue_hash_lock() - Continue the processing state after writing, compressing, or
+ * deduplicating.
+ * @completion: The completion of the data_vio to continue processing in its hash lock.
+ *
+ * Asynchronously continue processing a data_vio in its hash lock after it has finished writing,
+ * compressing, or deduplicating, so it can share the result with any data_vios waiting in the hash
+ * lock, or update the UDS index, or simply release its share of the lock.
+ *
+ * Context: This must only be called in the correct thread for the hash zone.
+ */
+void vdo_continue_hash_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct hash_lock *lock = data_vio->hash_lock;
+
+ switch (lock->state) {
+ case VDO_HASH_LOCK_WRITING:
+ VDO_ASSERT_LOG_ONLY(data_vio == lock->agent,
+ "only the lock agent may continue the lock");
+ finish_writing(lock, data_vio);
+ break;
+
+ case VDO_HASH_LOCK_DEDUPING:
+ finish_deduping(lock, data_vio);
+ break;
+
+ case VDO_HASH_LOCK_BYPASSING:
+ /* This data_vio has finished the write path and the lock doesn't need it. */
+ exit_hash_lock(data_vio);
+ break;
+
+ case VDO_HASH_LOCK_INITIALIZING:
+ case VDO_HASH_LOCK_QUERYING:
+ case VDO_HASH_LOCK_UPDATING:
+ case VDO_HASH_LOCK_LOCKING:
+ case VDO_HASH_LOCK_VERIFYING:
+ case VDO_HASH_LOCK_UNLOCKING:
+ /* A lock in this state should never be re-entered. */
+ report_bogus_lock_state(lock, data_vio);
+ break;
+
+ default:
+ report_bogus_lock_state(lock, data_vio);
+ }
+}
+
+/**
+ * is_hash_collision() - Check to see if a hash collision has occurred.
+ * @lock: The lock to check.
+ * @candidate: The data_vio seeking to share the lock.
+ *
+ * Check whether the data in data_vios sharing a lock is different than in a data_vio seeking to
+ * share the lock, which should only be possible in the extremely unlikely case of a hash
+ * collision.
+ *
+ * Return: true if the given data_vio must not share the lock because it doesn't have the same data
+ * as the lock holders.
+ */
+static bool is_hash_collision(struct hash_lock *lock, struct data_vio *candidate)
+{
+ struct data_vio *lock_holder;
+ struct hash_zone *zone;
+ bool collides;
+
+ if (list_empty(&lock->duplicate_ring))
+ return false;
+
+ lock_holder = list_first_entry(&lock->duplicate_ring, struct data_vio,
+ hash_lock_entry);
+ zone = candidate->hash_zone;
+ collides = !blocks_equal(lock_holder->vio.data, candidate->vio.data);
+ if (collides)
+ increment_stat(&zone->statistics.concurrent_hash_collisions);
+ else
+ increment_stat(&zone->statistics.concurrent_data_matches);
+
+ return collides;
+}
+
+static inline int assert_hash_lock_preconditions(const struct data_vio *data_vio)
+{
+ int result;
+
+ /* FIXME: BUG_ON() and/or enter read-only mode? */
+ result = VDO_ASSERT(data_vio->hash_lock == NULL,
+ "must not already hold a hash lock");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(list_empty(&data_vio->hash_lock_entry),
+ "must not already be a member of a hash lock ring");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return VDO_ASSERT(data_vio->recovery_sequence_number == 0,
+ "must not hold a recovery lock when getting a hash lock");
+}
+
+/**
+ * vdo_acquire_hash_lock() - Acquire or share a lock on a record name.
+ * @completion: The completion of the data_vio acquiring a lock on its record name.
+ *
+ * Acquire or share a lock on the hash (record name) of the data in a data_vio, updating the
+ * data_vio to reference the lock. This must only be called in the correct thread for the zone. In
+ * the unlikely case of a hash collision, this function will succeed, but the data_vio will not get
+ * a lock reference.
+ */
+void vdo_acquire_hash_lock(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct hash_lock *lock;
+ int result;
+
+ assert_data_vio_in_hash_zone(data_vio);
+
+ result = assert_hash_lock_preconditions(data_vio);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ result = acquire_lock(data_vio->hash_zone, &data_vio->record_name, NULL, &lock);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ if (is_hash_collision(lock, data_vio)) {
+ /*
+ * Hash collisions are extremely unlikely, but the bogus dedupe would be a data
+ * corruption. Bypass optimization entirely. We can't compress a data_vio without
+ * a hash_lock as the compressed write depends on the hash_lock to manage the
+ * references for the compressed block.
+ */
+ write_data_vio(data_vio);
+ return;
+ }
+
+ set_hash_lock(data_vio, lock);
+ switch (lock->state) {
+ case VDO_HASH_LOCK_INITIALIZING:
+ start_querying(lock, data_vio);
+ return;
+
+ case VDO_HASH_LOCK_QUERYING:
+ case VDO_HASH_LOCK_WRITING:
+ case VDO_HASH_LOCK_UPDATING:
+ case VDO_HASH_LOCK_LOCKING:
+ case VDO_HASH_LOCK_VERIFYING:
+ case VDO_HASH_LOCK_UNLOCKING:
+ /* The lock is busy, and can't be shared yet. */
+ wait_on_hash_lock(lock, data_vio);
+ return;
+
+ case VDO_HASH_LOCK_BYPASSING:
+ /* We can't use this lock, so bypass optimization entirely. */
+ vdo_release_hash_lock(data_vio);
+ write_data_vio(data_vio);
+ return;
+
+ case VDO_HASH_LOCK_DEDUPING:
+ launch_dedupe(lock, data_vio, false);
+ return;
+
+ default:
+ /* A lock in this state should not be acquired by new VIOs. */
+ report_bogus_lock_state(lock, data_vio);
+ }
+}
+
+/**
+ * vdo_release_hash_lock() - Release a data_vio's share of a hash lock, if held, and null out the
+ * data_vio's reference to it.
+ * @data_vio: The data_vio releasing its hash lock.
+ *
+ * If the data_vio is the only one holding the lock, this also releases any resources or locks used
+ * by the hash lock (such as a PBN read lock on a block containing data with the same hash) and
+ * returns the lock to the hash zone's lock pool.
+ *
+ * Context: This must only be called in the correct thread for the hash zone.
+ */
+void vdo_release_hash_lock(struct data_vio *data_vio)
+{
+ u64 lock_key;
+ struct hash_lock *lock = data_vio->hash_lock;
+ struct hash_zone *zone = data_vio->hash_zone;
+
+ if (lock == NULL)
+ return;
+
+ set_hash_lock(data_vio, NULL);
+
+ if (lock->reference_count > 0) {
+ /* The lock is still in use by other data_vios. */
+ return;
+ }
+
+ lock_key = hash_lock_key(lock);
+ if (lock->registered) {
+ struct hash_lock *removed;
+
+ removed = vdo_int_map_remove(zone->hash_lock_map, lock_key);
+ VDO_ASSERT_LOG_ONLY(lock == removed,
+ "hash lock being released must have been mapped");
+ } else {
+ VDO_ASSERT_LOG_ONLY(lock != vdo_int_map_get(zone->hash_lock_map, lock_key),
+ "unregistered hash lock must not be in the lock map");
+ }
+
+ VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters),
+ "hash lock returned to zone must have no waiters");
+ VDO_ASSERT_LOG_ONLY((lock->duplicate_lock == NULL),
+ "hash lock returned to zone must not reference a PBN lock");
+ VDO_ASSERT_LOG_ONLY((lock->state == VDO_HASH_LOCK_BYPASSING),
+ "returned hash lock must not be in use with state %s",
+ get_hash_lock_state_name(lock->state));
+ VDO_ASSERT_LOG_ONLY(list_empty(&lock->pool_node),
+ "hash lock returned to zone must not be in a pool ring");
+ VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring),
+ "hash lock returned to zone must not reference DataVIOs");
+
+ return_hash_lock_to_pool(zone, lock);
+}
+
+/**
+ * transfer_allocation_lock() - Transfer a data_vio's downgraded allocation PBN lock to the
+ * data_vio's hash lock, converting it to a duplicate PBN lock.
+ * @data_vio: The data_vio holding the allocation lock to transfer.
+ */
+static void transfer_allocation_lock(struct data_vio *data_vio)
+{
+ struct allocation *allocation = &data_vio->allocation;
+ struct hash_lock *hash_lock = data_vio->hash_lock;
+
+ VDO_ASSERT_LOG_ONLY(data_vio->new_mapped.pbn == allocation->pbn,
+ "transferred lock must be for the block written");
+
+ allocation->pbn = VDO_ZERO_BLOCK;
+
+ VDO_ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(allocation->lock),
+ "must have downgraded the allocation lock before transfer");
+
+ hash_lock->duplicate = data_vio->new_mapped;
+ data_vio->duplicate = data_vio->new_mapped;
+
+ /*
+ * Since the lock is being transferred, the holder count doesn't change (and isn't even
+ * safe to examine on this thread).
+ */
+ hash_lock->duplicate_lock = vdo_forget(allocation->lock);
+}
+
+/**
+ * vdo_share_compressed_write_lock() - Make a data_vio's hash lock a shared holder of the PBN lock
+ * on the compressed block to which its data was just written.
+ * @data_vio: The data_vio which was just compressed.
+ * @pbn_lock: The PBN lock on the compressed block.
+ *
+ * If the lock is still a write lock (as it will be for the first share), it will be converted to a
+ * read lock. This also reserves a reference count increment for the data_vio.
+ */
+void vdo_share_compressed_write_lock(struct data_vio *data_vio,
+ struct pbn_lock *pbn_lock)
+{
+ bool claimed;
+
+ VDO_ASSERT_LOG_ONLY(vdo_get_duplicate_lock(data_vio) == NULL,
+ "a duplicate PBN lock should not exist when writing");
+ VDO_ASSERT_LOG_ONLY(vdo_is_state_compressed(data_vio->new_mapped.state),
+ "lock transfer must be for a compressed write");
+ assert_data_vio_in_new_mapped_zone(data_vio);
+
+ /* First sharer downgrades the lock. */
+ if (!vdo_is_pbn_read_lock(pbn_lock))
+ vdo_downgrade_pbn_write_lock(pbn_lock, true);
+
+ /*
+ * Get a share of the PBN lock, ensuring it cannot be released until after this data_vio
+ * has had a chance to journal a reference.
+ */
+ data_vio->duplicate = data_vio->new_mapped;
+ data_vio->hash_lock->duplicate = data_vio->new_mapped;
+ set_duplicate_lock(data_vio->hash_lock, pbn_lock);
+
+ /*
+ * Claim a reference for this data_vio. Necessary since another hash_lock might start
+ * deduplicating against it before our incRef.
+ */
+ claimed = vdo_claim_pbn_lock_increment(pbn_lock);
+ VDO_ASSERT_LOG_ONLY(claimed, "impossible to fail to claim an initial increment");
+}
+
+static void start_uds_queue(void *ptr)
+{
+ /*
+ * Allow the UDS dedupe worker thread to do memory allocations. It will only do allocations
+ * during the UDS calls that open or close an index, but those allocations can safely sleep
+ * while reserving a large amount of memory. We could use an allocations_allowed boolean
+ * (like the base threads do), but it would be an unnecessary embellishment.
+ */
+ struct vdo_thread *thread = vdo_get_work_queue_owner(vdo_get_current_work_queue());
+
+ vdo_register_allocating_thread(&thread->allocating_thread, NULL);
+}
+
+static void finish_uds_queue(void *ptr __always_unused)
+{
+ vdo_unregister_allocating_thread();
+}
+
+static void close_index(struct hash_zones *zones)
+ __must_hold(&zones->lock)
+{
+ int result;
+
+ /*
+ * Change the index state so that get_index_statistics() will not try to use the index
+ * session we are closing.
+ */
+ zones->index_state = IS_CHANGING;
+ /* Close the index session, while not holding the lock. */
+ spin_unlock(&zones->lock);
+ result = uds_close_index(zones->index_session);
+
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "Error closing index");
+ spin_lock(&zones->lock);
+ zones->index_state = IS_CLOSED;
+ zones->error_flag |= result != UDS_SUCCESS;
+ /* ASSERTION: We leave in IS_CLOSED state. */
+}
+
+static void open_index(struct hash_zones *zones)
+ __must_hold(&zones->lock)
+{
+ /* ASSERTION: We enter in IS_CLOSED state. */
+ int result;
+ bool create_flag = zones->create_flag;
+
+ zones->create_flag = false;
+ /*
+ * Change the index state so that it will be reported to the outside world as
+ * "opening".
+ */
+ zones->index_state = IS_CHANGING;
+ zones->error_flag = false;
+
+ /* Open the index session, while not holding the lock */
+ spin_unlock(&zones->lock);
+ result = uds_open_index(create_flag ? UDS_CREATE : UDS_LOAD,
+ &zones->parameters, zones->index_session);
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "Error opening index");
+
+ spin_lock(&zones->lock);
+ if (!create_flag) {
+ switch (result) {
+ case -ENOENT:
+ /*
+ * Either there is no index, or there is no way we can recover the index.
+ * We will be called again and try to create a new index.
+ */
+ zones->index_state = IS_CLOSED;
+ zones->create_flag = true;
+ return;
+ default:
+ break;
+ }
+ }
+ if (result == UDS_SUCCESS) {
+ zones->index_state = IS_OPENED;
+ } else {
+ zones->index_state = IS_CLOSED;
+ zones->index_target = IS_CLOSED;
+ zones->error_flag = true;
+ spin_unlock(&zones->lock);
+ vdo_log_info("Setting UDS index target state to error");
+ spin_lock(&zones->lock);
+ }
+ /*
+ * ASSERTION: On success, we leave in IS_OPENED state.
+ * ASSERTION: On failure, we leave in IS_CLOSED state.
+ */
+}
+
+static void change_dedupe_state(struct vdo_completion *completion)
+{
+ struct hash_zones *zones = as_hash_zones(completion);
+
+ spin_lock(&zones->lock);
+
+ /* Loop until the index is in the target state and the create flag is clear. */
+ while (vdo_is_state_normal(&zones->state) &&
+ ((zones->index_state != zones->index_target) || zones->create_flag)) {
+ if (zones->index_state == IS_OPENED)
+ close_index(zones);
+ else
+ open_index(zones);
+ }
+
+ zones->changing = false;
+ spin_unlock(&zones->lock);
+}
+
+static void start_expiration_timer(struct dedupe_context *context)
+{
+ u64 start_time = context->submission_jiffies;
+ u64 end_time;
+
+ if (!change_timer_state(context->zone, DEDUPE_QUERY_TIMER_IDLE,
+ DEDUPE_QUERY_TIMER_RUNNING))
+ return;
+
+ end_time = max(start_time + vdo_dedupe_index_timeout_jiffies,
+ jiffies + vdo_dedupe_index_min_timer_jiffies);
+ mod_timer(&context->zone->timer, end_time);
+}
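+
+/*
+ * Illustrative numbers (assumed values, not from this patch): if the index
+ * timeout were 5000ms and the minimum timer interval 100ms, a query submitted
+ * at time T arms the timer for max(T + 5000ms, now + 100ms). A burst of
+ * queries thus shares a single expiration, and the timer is never armed in
+ * the past.
+ */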
+
+/**
+ * report_dedupe_timeouts() - Record and eventually report that some dedupe requests reached their
+ * expiration time without getting answers, so we timed them out.
+ * @zones: the hash zones.
+ * @timeouts: the number of newly timed out requests.
+ */
+static void report_dedupe_timeouts(struct hash_zones *zones, unsigned int timeouts)
+{
+ atomic64_add(timeouts, &zones->timeouts);
+ spin_lock(&zones->lock);
+ if (__ratelimit(&zones->ratelimiter)) {
+ u64 unreported = atomic64_read(&zones->timeouts);
+
+ unreported -= zones->reported_timeouts;
+ vdo_log_debug("UDS index timeout on %llu requests",
+ (unsigned long long) unreported);
+ zones->reported_timeouts += unreported;
+ }
+ spin_unlock(&zones->lock);
+}
+
+static int initialize_index(struct vdo *vdo, struct hash_zones *zones)
+{
+ int result;
+ off_t uds_offset;
+ struct volume_geometry geometry = vdo->geometry;
+ static const struct vdo_work_queue_type uds_queue_type = {
+ .start = start_uds_queue,
+ .finish = finish_uds_queue,
+ .max_priority = UDS_Q_MAX_PRIORITY,
+ .default_priority = UDS_Q_PRIORITY,
+ };
+
+ vdo_set_dedupe_index_timeout_interval(vdo_dedupe_index_timeout_interval);
+ vdo_set_dedupe_index_min_timer_interval(vdo_dedupe_index_min_timer_interval);
+
+ /*
+ * Since we will save up the timeouts that would have been reported but were ratelimited,
+ * we don't need to report ratelimiting.
+ */
+ ratelimit_default_init(&zones->ratelimiter);
+ ratelimit_set_flags(&zones->ratelimiter, RATELIMIT_MSG_ON_RELEASE);
+ uds_offset = ((vdo_get_index_region_start(geometry) -
+ geometry.bio_offset) * VDO_BLOCK_SIZE);
+ zones->parameters = (struct uds_parameters) {
+ .bdev = vdo->device_config->owned_device->bdev,
+ .offset = uds_offset,
+ .size = (vdo_get_index_region_size(geometry) * VDO_BLOCK_SIZE),
+ .memory_size = geometry.index_config.mem,
+ .sparse = geometry.index_config.sparse,
+ .nonce = (u64) geometry.nonce,
+ };
+
+ result = uds_create_index_session(&zones->index_session);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = vdo_make_thread(vdo, vdo->thread_config.dedupe_thread, &uds_queue_type,
+ 1, NULL);
+ if (result != VDO_SUCCESS) {
+ uds_destroy_index_session(vdo_forget(zones->index_session));
+ vdo_log_error("UDS index queue initialization failed (%d)", result);
+ return result;
+ }
+
+ vdo_initialize_completion(&zones->completion, vdo, VDO_HASH_ZONES_COMPLETION);
+ vdo_set_completion_callback(&zones->completion, change_dedupe_state,
+ vdo->thread_config.dedupe_thread);
+ return VDO_SUCCESS;
+}
+
+/**
+ * finish_index_operation() - This is the UDS callback for index queries.
+ * @request: The uds request which has just completed.
+ */
+static void finish_index_operation(struct uds_request *request)
+{
+ struct dedupe_context *context = container_of(request, struct dedupe_context,
+ request);
+
+ if (change_context_state(context, DEDUPE_CONTEXT_PENDING,
+ DEDUPE_CONTEXT_COMPLETE)) {
+ /*
+ * This query has not timed out, so send its data_vio back to its hash zone to
+ * process the results.
+ */
+ continue_data_vio(context->requestor);
+ return;
+ }
+
+ /*
+ * This query has timed out, so try to mark it complete and hence eligible for reuse. Its
+ * data_vio has already moved on.
+ */
+ if (!change_context_state(context, DEDUPE_CONTEXT_TIMED_OUT,
+ DEDUPE_CONTEXT_TIMED_OUT_COMPLETE)) {
+ VDO_ASSERT_LOG_ONLY(false, "uds request was timed out (state %d)",
+ atomic_read(&context->state));
+ }
+
+ vdo_funnel_queue_put(context->zone->timed_out_complete, &context->queue_entry);
+}
+
+/**
+ * check_for_drain_complete() - Check whether this zone has drained.
+ * @zone: The zone to check.
+ */
+static void check_for_drain_complete(struct hash_zone *zone)
+{
+ data_vio_count_t recycled = 0;
+
+ if (!vdo_is_state_draining(&zone->state))
+ return;
+
+ if ((atomic_read(&zone->timer_state) == DEDUPE_QUERY_TIMER_IDLE) ||
+ change_timer_state(zone, DEDUPE_QUERY_TIMER_RUNNING,
+ DEDUPE_QUERY_TIMER_IDLE)) {
+ del_timer_sync(&zone->timer);
+ } else {
+ /*
+ * There is an in-flight timeout, which must be processed before we can continue.
+ */
+ return;
+ }
+
+ for (;;) {
+ struct dedupe_context *context;
+ struct funnel_queue_entry *entry;
+
+ entry = vdo_funnel_queue_poll(zone->timed_out_complete);
+ if (entry == NULL)
+ break;
+
+ context = container_of(entry, struct dedupe_context, queue_entry);
+ atomic_set(&context->state, DEDUPE_CONTEXT_IDLE);
+ list_add(&context->list_entry, &zone->available);
+ recycled++;
+ }
+
+ if (recycled > 0)
+ WRITE_ONCE(zone->active, zone->active - recycled);
+ VDO_ASSERT_LOG_ONLY(READ_ONCE(zone->active) == 0, "all contexts inactive");
+ vdo_finish_draining(&zone->state);
+}
+
+static void timeout_index_operations_callback(struct vdo_completion *completion)
+{
+ struct dedupe_context *context, *tmp;
+ struct hash_zone *zone = as_hash_zone(completion);
+ u64 timeout_jiffies = msecs_to_jiffies(vdo_dedupe_index_timeout_interval);
+ unsigned long cutoff = jiffies - timeout_jiffies;
+ unsigned int timed_out = 0;
+
+ atomic_set(&zone->timer_state, DEDUPE_QUERY_TIMER_IDLE);
+ list_for_each_entry_safe(context, tmp, &zone->pending, list_entry) {
+ if (cutoff <= context->submission_jiffies) {
+ /*
+ * We have reached the oldest query which has not timed out yet, so restart
+ * the timer.
+ */
+ start_expiration_timer(context);
+ break;
+ }
+
+ if (!change_context_state(context, DEDUPE_CONTEXT_PENDING,
+ DEDUPE_CONTEXT_TIMED_OUT)) {
+ /*
+ * This context completed between the time the timeout fired and now. We
+ * can treat it as a successful query; its requestor is already enqueued
+ * to process it.
+ */
+ continue;
+ }
+
+ /*
+ * Remove this context from the pending list so we won't look at it again on a
+ * subsequent timeout. Once the index completes it, it will be reused. Meanwhile,
+ * send its requestor on its way.
+ */
+ list_del_init(&context->list_entry);
+ continue_data_vio(context->requestor);
+ timed_out++;
+ }
+
+ if (timed_out > 0)
+ report_dedupe_timeouts(completion->vdo->hash_zones, timed_out);
+
+ check_for_drain_complete(zone);
+}
+
+static void timeout_index_operations(struct timer_list *t)
+{
+ struct hash_zone *zone = from_timer(zone, t, timer);
+
+ if (change_timer_state(zone, DEDUPE_QUERY_TIMER_RUNNING,
+ DEDUPE_QUERY_TIMER_FIRED))
+ vdo_launch_completion(&zone->completion);
+}
+
+static int __must_check initialize_zone(struct vdo *vdo, struct hash_zones *zones,
+ zone_count_t zone_number)
+{
+ int result;
+ data_vio_count_t i;
+ struct hash_zone *zone = &zones->zones[zone_number];
+
+ result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->hash_lock_map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ zone->zone_number = zone_number;
+ zone->thread_id = vdo->thread_config.hash_zone_threads[zone_number];
+ vdo_initialize_completion(&zone->completion, vdo, VDO_HASH_ZONE_COMPLETION);
+ vdo_set_completion_callback(&zone->completion, timeout_index_operations_callback,
+ zone->thread_id);
+ INIT_LIST_HEAD(&zone->lock_pool);
+ result = vdo_allocate(LOCK_POOL_CAPACITY, struct hash_lock, "hash_lock array",
+ &zone->lock_array);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (i = 0; i < LOCK_POOL_CAPACITY; i++)
+ return_hash_lock_to_pool(zone, &zone->lock_array[i]);
+
+ INIT_LIST_HEAD(&zone->available);
+ INIT_LIST_HEAD(&zone->pending);
+ result = vdo_make_funnel_queue(&zone->timed_out_complete);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ timer_setup(&zone->timer, timeout_index_operations, 0);
+
+ for (i = 0; i < MAXIMUM_VDO_USER_VIOS; i++) {
+ struct dedupe_context *context = &zone->contexts[i];
+
+ context->zone = zone;
+ context->request.callback = finish_index_operation;
+ context->request.session = zones->index_session;
+ list_add(&context->list_entry, &zone->available);
+ }
+
+ return vdo_make_default_thread(vdo, zone->thread_id);
+}
+
+/** get_thread_id_for_zone() - Implements vdo_zone_thread_getter_fn. */
+static thread_id_t get_thread_id_for_zone(void *context, zone_count_t zone_number)
+{
+ struct hash_zones *zones = context;
+
+ return zones->zones[zone_number].thread_id;
+}
+
+/**
+ * vdo_make_hash_zones() - Create the hash zones.
+ *
+ * @vdo: The vdo to which the zones will belong.
+ * @zones_ptr: A pointer to hold the zones.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_make_hash_zones(struct vdo *vdo, struct hash_zones **zones_ptr)
+{
+ int result;
+ struct hash_zones *zones;
+ zone_count_t z;
+ zone_count_t zone_count = vdo->thread_config.hash_zone_count;
+
+ if (zone_count == 0)
+ return VDO_SUCCESS;
+
+ result = vdo_allocate_extended(struct hash_zones, zone_count, struct hash_zone,
+ __func__, &zones);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = initialize_index(vdo, zones);
+ if (result != VDO_SUCCESS) {
+ vdo_free(zones);
+ return result;
+ }
+
+ vdo_set_admin_state_code(&zones->state, VDO_ADMIN_STATE_NEW);
+
+ zones->zone_count = zone_count;
+ for (z = 0; z < zone_count; z++) {
+ result = initialize_zone(vdo, zones, z);
+ if (result != VDO_SUCCESS) {
+ vdo_free_hash_zones(zones);
+ return result;
+ }
+ }
+
+ result = vdo_make_action_manager(zones->zone_count, get_thread_id_for_zone,
+ vdo->thread_config.admin_thread, zones, NULL,
+ vdo, &zones->manager);
+ if (result != VDO_SUCCESS) {
+ vdo_free_hash_zones(zones);
+ return result;
+ }
+
+ *zones_ptr = zones;
+ return VDO_SUCCESS;
+}
+
+void vdo_finish_dedupe_index(struct hash_zones *zones)
+{
+ if (zones == NULL)
+ return;
+
+ uds_destroy_index_session(vdo_forget(zones->index_session));
+}
+
+/**
+ * vdo_free_hash_zones() - Free the hash zones.
+ * @zones: The zones to free.
+ */
+void vdo_free_hash_zones(struct hash_zones *zones)
+{
+ zone_count_t i;
+
+ if (zones == NULL)
+ return;
+
+ vdo_free(vdo_forget(zones->manager));
+
+ for (i = 0; i < zones->zone_count; i++) {
+ struct hash_zone *zone = &zones->zones[i];
+
+ vdo_free_funnel_queue(vdo_forget(zone->timed_out_complete));
+ vdo_int_map_free(vdo_forget(zone->hash_lock_map));
+ vdo_free(vdo_forget(zone->lock_array));
+ }
+
+ if (zones->index_session != NULL)
+ vdo_finish_dedupe_index(zones);
+
+ ratelimit_state_exit(&zones->ratelimiter);
+ vdo_free(zones);
+}
+
+static void initiate_suspend_index(struct admin_state *state)
+{
+ struct hash_zones *zones = container_of(state, struct hash_zones, state);
+ enum index_state index_state;
+
+ spin_lock(&zones->lock);
+ index_state = zones->index_state;
+ spin_unlock(&zones->lock);
+
+ if (index_state != IS_CLOSED) {
+ bool save = vdo_is_state_saving(&zones->state);
+ int result;
+
+ result = uds_suspend_index_session(zones->index_session, save);
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "Error suspending dedupe index");
+ }
+
+ vdo_finish_draining(state);
+}
+
+/**
+ * suspend_index() - Suspend the UDS index prior to draining hash zones.
+ *
+ * Implements vdo_action_preamble_fn.
+ */
+static void suspend_index(void *context, struct vdo_completion *completion)
+{
+ struct hash_zones *zones = context;
+
+ vdo_start_draining(&zones->state,
+ vdo_get_current_manager_operation(zones->manager), completion,
+ initiate_suspend_index);
+}
+
+/**
+ * initiate_drain() - Initiate a drain.
+ *
+ * Implements vdo_admin_initiator_fn.
+ */
+static void initiate_drain(struct admin_state *state)
+{
+ check_for_drain_complete(container_of(state, struct hash_zone, state));
+}
+
+/**
+ * drain_hash_zone() - Drain a hash zone.
+ *
+ * Implements vdo_zone_action_fn.
+ */
+static void drain_hash_zone(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct hash_zones *zones = context;
+
+ vdo_start_draining(&zones->zones[zone_number].state,
+ vdo_get_current_manager_operation(zones->manager), parent,
+ initiate_drain);
+}
+
+/** vdo_drain_hash_zones() - Drain all hash zones. */
+void vdo_drain_hash_zones(struct hash_zones *zones, struct vdo_completion *parent)
+{
+ vdo_schedule_operation(zones->manager, parent->vdo->suspend_type, suspend_index,
+ drain_hash_zone, NULL, parent);
+}
+
+static void launch_dedupe_state_change(struct hash_zones *zones)
+ __must_hold(&zones->lock)
+{
+ /* ASSERTION: We enter with the lock held. */
+ if (zones->changing || !vdo_is_state_normal(&zones->state))
+ /* Either a change is already in progress, or changes are not allowed. */
+ return;
+
+ if (zones->create_flag || (zones->index_state != zones->index_target)) {
+ zones->changing = true;
+ vdo_launch_completion(&zones->completion);
+ return;
+ }
+
+ /* ASSERTION: We exit with the lock held. */
+}
+
+/**
+ * resume_index() - Resume the UDS index prior to resuming hash zones.
+ *
+ * Implements vdo_action_preamble_fn.
+ */
+static void resume_index(void *context, struct vdo_completion *parent)
+{
+ struct hash_zones *zones = context;
+ struct device_config *config = parent->vdo->device_config;
+ int result;
+
+ zones->parameters.bdev = config->owned_device->bdev;
+ result = uds_resume_index_session(zones->index_session, zones->parameters.bdev);
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "Error resuming dedupe index");
+
+ spin_lock(&zones->lock);
+ vdo_resume_if_quiescent(&zones->state);
+
+ if (config->deduplication) {
+ zones->index_target = IS_OPENED;
+ WRITE_ONCE(zones->dedupe_flag, true);
+ } else {
+ zones->index_target = IS_CLOSED;
+ }
+
+ launch_dedupe_state_change(zones);
+ spin_unlock(&zones->lock);
+
+ vdo_finish_completion(parent);
+}
+
+/**
+ * resume_hash_zone() - Resume a hash zone.
+ *
+ * Implements vdo_zone_action_fn.
+ */
+static void resume_hash_zone(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct hash_zone *zone = &(((struct hash_zones *) context)->zones[zone_number]);
+
+ vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state));
+}
+
+/**
+ * vdo_resume_hash_zones() - Resume a set of hash zones.
+ * @zones: The hash zones to resume.
+ * @parent: The object to notify when the zones have resumed.
+ */
+void vdo_resume_hash_zones(struct hash_zones *zones, struct vdo_completion *parent)
+{
+ if (vdo_is_read_only(parent->vdo)) {
+ vdo_launch_completion(parent);
+ return;
+ }
+
+ vdo_schedule_operation(zones->manager, VDO_ADMIN_STATE_RESUMING, resume_index,
+ resume_hash_zone, NULL, parent);
+}
+
+/**
+ * get_hash_zone_statistics() - Add the statistics for this hash zone to the tally for all zones.
+ * @zone: The hash zone to query.
+ * @tally: The tally to which to add this zone's statistics.
+ */
+static void get_hash_zone_statistics(const struct hash_zone *zone,
+ struct hash_lock_statistics *tally)
+{
+ const struct hash_lock_statistics *stats = &zone->statistics;
+
+ tally->dedupe_advice_valid += READ_ONCE(stats->dedupe_advice_valid);
+ tally->dedupe_advice_stale += READ_ONCE(stats->dedupe_advice_stale);
+ tally->concurrent_data_matches += READ_ONCE(stats->concurrent_data_matches);
+ tally->concurrent_hash_collisions += READ_ONCE(stats->concurrent_hash_collisions);
+ tally->curr_dedupe_queries += READ_ONCE(zone->active);
+}
+
+static void get_index_statistics(struct hash_zones *zones,
+ struct index_statistics *stats)
+{
+ enum index_state state;
+ struct uds_index_stats index_stats;
+ int result;
+
+ spin_lock(&zones->lock);
+ state = zones->index_state;
+ spin_unlock(&zones->lock);
+
+ if (state != IS_OPENED)
+ return;
+
+ result = uds_get_index_session_stats(zones->index_session, &index_stats);
+ if (result != UDS_SUCCESS) {
+ vdo_log_error_strerror(result, "Error reading index stats");
+ return;
+ }
+
+ stats->entries_indexed = index_stats.entries_indexed;
+ stats->posts_found = index_stats.posts_found;
+ stats->posts_not_found = index_stats.posts_not_found;
+ stats->queries_found = index_stats.queries_found;
+ stats->queries_not_found = index_stats.queries_not_found;
+ stats->updates_found = index_stats.updates_found;
+ stats->updates_not_found = index_stats.updates_not_found;
+ stats->entries_discarded = index_stats.entries_discarded;
+}
+
+/**
+ * vdo_get_dedupe_statistics() - Tally the statistics from all the hash zones and the UDS index.
+ * @zones: The hash zones to query.
+ * @stats: The statistics structure to fill with the sum of the hash lock statistics from all hash
+ *         zones plus the statistics from the UDS index.
+ */
+void vdo_get_dedupe_statistics(struct hash_zones *zones, struct vdo_statistics *stats)
+{
+ zone_count_t zone;
+
+ for (zone = 0; zone < zones->zone_count; zone++)
+ get_hash_zone_statistics(&zones->zones[zone], &stats->hash_lock);
+
+ get_index_statistics(zones, &stats->index);
+
+ /*
+ * zones->timeouts gives the number of timeouts, and dedupe_context_busy gives the number
+ * of queries not made because of earlier timeouts.
+ */
+ stats->dedupe_advice_timeouts =
+ (atomic64_read(&zones->timeouts) + atomic64_read(&zones->dedupe_context_busy));
+}
+
+/**
+ * vdo_select_hash_zone() - Select the hash zone responsible for locking a given record name.
+ * @zones: The hash_zones from which to select.
+ * @name: The record name.
+ *
+ * Return: The hash zone responsible for the record name.
+ */
+struct hash_zone *vdo_select_hash_zone(struct hash_zones *zones,
+ const struct uds_record_name *name)
+{
+ /*
+ * Use a fragment of the record name as a hash code. Eight bits of hash should suffice
+ * since the number of hash zones is small.
+ * TODO: Verify that the first byte is independent enough.
+ */
+ u32 hash = name->name[0];
+
+ /*
+ * Scale the 8-bit hash fragment to a zone index by treating it as a binary fraction and
+ * multiplying that by the zone count. If the hash is uniformly distributed over [0 ..
+ * 2^8-1], then (hash * count / 2^8) should be uniformly distributed over [0 .. count-1].
+ * The multiply and shift is much faster than a divide (modulus) on X86 CPUs.
+ */
+ hash = (hash * zones->zone_count) >> 8;
+ return &zones->zones[hash];
+}
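+
+/*
+ * As a worked example of the scaling above: with zone_count == 3, name bytes 0..85 select zone 0,
+ * 86..170 select zone 1, and 171..255 select zone 2, since (85 * 3) >> 8 == 0, (86 * 3) >> 8 == 1,
+ * (170 * 3) >> 8 == 1, and (171 * 3) >> 8 == 2.
+ */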
+
+/**
+ * dump_hash_lock() - Dump a compact description of hash_lock to the log if the lock is not on the
+ * free list.
+ * @lock: The hash lock to dump.
+ */
+static void dump_hash_lock(const struct hash_lock *lock)
+{
+ const char *state;
+
+ if (!list_empty(&lock->pool_node)) {
+ /* This lock is on the free list. */
+ return;
+ }
+
+ /*
+	 * Necessarily cryptic since we can log a lot of these. The first three chars of the state
+	 * are unambiguous. 'U' indicates a lock not registered in the map.
+ */
+ state = get_hash_lock_state_name(lock->state);
+ vdo_log_info(" hl %px: %3.3s %c%llu/%u rc=%u wc=%zu agt=%px",
+ lock, state, (lock->registered ? 'D' : 'U'),
+ (unsigned long long) lock->duplicate.pbn,
+ lock->duplicate.state, lock->reference_count,
+ vdo_waitq_num_waiters(&lock->waiters), lock->agent);
+}
+
+static const char *index_state_to_string(struct hash_zones *zones,
+ enum index_state state)
+{
+ if (!vdo_is_state_normal(&zones->state))
+ return SUSPENDED;
+
+ switch (state) {
+ case IS_CLOSED:
+ return zones->error_flag ? ERROR : CLOSED;
+ case IS_CHANGING:
+ return zones->index_target == IS_OPENED ? OPENING : CLOSING;
+ case IS_OPENED:
+ return READ_ONCE(zones->dedupe_flag) ? ONLINE : OFFLINE;
+ default:
+ return UNKNOWN;
+ }
+}
+
+/**
+ * dump_hash_zone() - Dump information about a hash zone to the log for debugging.
+ * @zone: The zone to dump.
+ */
+static void dump_hash_zone(const struct hash_zone *zone)
+{
+ data_vio_count_t i;
+
+ if (zone->hash_lock_map == NULL) {
+ vdo_log_info("struct hash_zone %u: NULL map", zone->zone_number);
+ return;
+ }
+
+ vdo_log_info("struct hash_zone %u: mapSize=%zu",
+ zone->zone_number, vdo_int_map_size(zone->hash_lock_map));
+ for (i = 0; i < LOCK_POOL_CAPACITY; i++)
+ dump_hash_lock(&zone->lock_array[i]);
+}
+
+/**
+ * vdo_dump_hash_zones() - Dump information about the hash zones to the log for debugging.
+ * @zones: The zones to dump.
+ */
+void vdo_dump_hash_zones(struct hash_zones *zones)
+{
+ const char *state, *target;
+ zone_count_t zone;
+
+ spin_lock(&zones->lock);
+ state = index_state_to_string(zones, zones->index_state);
+ target = (zones->changing ? index_state_to_string(zones, zones->index_target) : NULL);
+ spin_unlock(&zones->lock);
+
+ vdo_log_info("UDS index: state: %s", state);
+ if (target != NULL)
+ vdo_log_info("UDS index: changing to state: %s", target);
+
+ for (zone = 0; zone < zones->zone_count; zone++)
+ dump_hash_zone(&zones->zones[zone]);
+}
+
+void vdo_set_dedupe_index_timeout_interval(unsigned int value)
+{
+ u64 alb_jiffies;
+
+ /* Arbitrary maximum value is two minutes */
+ if (value > 120000)
+ value = 120000;
+ /* Arbitrary minimum value is 2 jiffies */
+ alb_jiffies = msecs_to_jiffies(value);
+
+ if (alb_jiffies < 2) {
+ alb_jiffies = 2;
+ value = jiffies_to_msecs(alb_jiffies);
+ }
+ vdo_dedupe_index_timeout_interval = value;
+ vdo_dedupe_index_timeout_jiffies = alb_jiffies;
+}
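+
+/*
+ * As an illustration of the clamping above (assuming HZ == 250, so one jiffy is 4 ms): a requested
+ * 150000 ms is capped at 120000 ms, while a requested 0 ms converts to 0 jiffies, is raised to the
+ * 2-jiffy floor, and is reported back as 8 ms.
+ */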
+
+void vdo_set_dedupe_index_min_timer_interval(unsigned int value)
+{
+ u64 min_jiffies;
+
+ /* Arbitrary maximum value is one second */
+ if (value > 1000)
+ value = 1000;
+
+ /* Arbitrary minimum value is 2 jiffies */
+ min_jiffies = msecs_to_jiffies(value);
+
+ if (min_jiffies < 2) {
+ min_jiffies = 2;
+ value = jiffies_to_msecs(min_jiffies);
+ }
+
+ vdo_dedupe_index_min_timer_interval = value;
+ vdo_dedupe_index_min_timer_jiffies = min_jiffies;
+}
+
+/**
+ * acquire_context() - Acquire a dedupe context from a hash_zone if any are available.
+ * @zone: The hash zone from which to acquire a context.
+ *
+ * Return: A dedupe_context, or NULL if none are available.
+ */
+static struct dedupe_context * __must_check acquire_context(struct hash_zone *zone)
+{
+ struct dedupe_context *context;
+ struct funnel_queue_entry *entry;
+
+ assert_in_hash_zone(zone, __func__);
+
+ if (!list_empty(&zone->available)) {
+ WRITE_ONCE(zone->active, zone->active + 1);
+ context = list_first_entry(&zone->available, struct dedupe_context,
+ list_entry);
+ list_del_init(&context->list_entry);
+ return context;
+ }
+
+ entry = vdo_funnel_queue_poll(zone->timed_out_complete);
+ return ((entry == NULL) ?
+ NULL : container_of(entry, struct dedupe_context, queue_entry));
+}
+
+static void prepare_uds_request(struct uds_request *request, struct data_vio *data_vio,
+ enum uds_request_type operation)
+{
+ request->record_name = data_vio->record_name;
+ request->type = operation;
+ if ((operation == UDS_POST) || (operation == UDS_UPDATE)) {
+ size_t offset = 0;
+ struct uds_record_data *encoding = &request->new_metadata;
+
+ encoding->data[offset++] = UDS_ADVICE_VERSION;
+ encoding->data[offset++] = data_vio->new_mapped.state;
+ put_unaligned_le64(data_vio->new_mapped.pbn, &encoding->data[offset]);
+ offset += sizeof(u64);
+ BUG_ON(offset != UDS_ADVICE_SIZE);
+ }
+}
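+
+/*
+ * The advice built above has a fixed layout: byte 0 is UDS_ADVICE_VERSION, byte 1 is the mapping
+ * state, and bytes 2-9 are the little-endian physical block number, so UDS_ADVICE_SIZE must be 10
+ * bytes, which the BUG_ON enforces.
+ */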
+
+/*
+ * The index operation will inquire about data_vio.record_name, providing (if the operation is
+ * appropriate) advice from the data_vio's new_mapped fields. The advice found in the index (or
+ * NULL if none) will be returned via receive_data_vio_dedupe_advice(). dedupe_context.status is
+ * set to the return status code of any asynchronous index processing.
+ */
+static void query_index(struct data_vio *data_vio, enum uds_request_type operation)
+{
+ int result;
+ struct dedupe_context *context;
+ struct vdo *vdo = vdo_from_data_vio(data_vio);
+ struct hash_zone *zone = data_vio->hash_zone;
+
+ assert_data_vio_in_hash_zone(data_vio);
+
+ if (!READ_ONCE(vdo->hash_zones->dedupe_flag)) {
+ continue_data_vio(data_vio);
+ return;
+ }
+
+ context = acquire_context(zone);
+ if (context == NULL) {
+ atomic64_inc(&vdo->hash_zones->dedupe_context_busy);
+ continue_data_vio(data_vio);
+ return;
+ }
+
+ data_vio->dedupe_context = context;
+ context->requestor = data_vio;
+ context->submission_jiffies = jiffies;
+ prepare_uds_request(&context->request, data_vio, operation);
+ atomic_set(&context->state, DEDUPE_CONTEXT_PENDING);
+ list_add_tail(&context->list_entry, &zone->pending);
+ start_expiration_timer(context);
+ result = uds_launch_request(&context->request);
+ if (result != UDS_SUCCESS) {
+ context->request.status = result;
+ finish_index_operation(&context->request);
+ }
+}
+
+static void set_target_state(struct hash_zones *zones, enum index_state target,
+ bool change_dedupe, bool dedupe, bool set_create)
+{
+ const char *old_state, *new_state;
+
+ spin_lock(&zones->lock);
+ old_state = index_state_to_string(zones, zones->index_target);
+ if (change_dedupe)
+ WRITE_ONCE(zones->dedupe_flag, dedupe);
+
+ if (set_create)
+ zones->create_flag = true;
+
+ zones->index_target = target;
+ launch_dedupe_state_change(zones);
+ new_state = index_state_to_string(zones, zones->index_target);
+ spin_unlock(&zones->lock);
+
+ if (old_state != new_state)
+ vdo_log_info("Setting UDS index target state to %s", new_state);
+}
+
+const char *vdo_get_dedupe_index_state_name(struct hash_zones *zones)
+{
+ const char *state;
+
+ spin_lock(&zones->lock);
+ state = index_state_to_string(zones, zones->index_state);
+ spin_unlock(&zones->lock);
+
+ return state;
+}
+
+/* Handle a dmsetup message relevant to the index. */
+int vdo_message_dedupe_index(struct hash_zones *zones, const char *name)
+{
+ if (strcasecmp(name, "index-close") == 0) {
+ set_target_state(zones, IS_CLOSED, false, false, false);
+ return 0;
+ } else if (strcasecmp(name, "index-create") == 0) {
+ set_target_state(zones, IS_OPENED, false, false, true);
+ return 0;
+ } else if (strcasecmp(name, "index-disable") == 0) {
+ set_target_state(zones, IS_OPENED, true, false, false);
+ return 0;
+ } else if (strcasecmp(name, "index-enable") == 0) {
+ set_target_state(zones, IS_OPENED, true, true, false);
+ return 0;
+ }
+
+ return -EINVAL;
+}
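+
+/*
+ * For example, for a hypothetical target named "vdo0", these messages would be sent as
+ * "dmsetup message vdo0 0 index-enable"; the sector argument ("0") is required by dmsetup but is
+ * not used by these messages.
+ */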
+
+void vdo_set_dedupe_state_normal(struct hash_zones *zones)
+{
+ vdo_set_admin_state_code(&zones->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+}
+
+/* If create_flag, create a new index without first attempting to load an existing index. */
+void vdo_start_dedupe_index(struct hash_zones *zones, bool create_flag)
+{
+ set_target_state(zones, IS_OPENED, true, true, create_flag);
+}
diff --git a/drivers/md/dm-vdo/dedupe.h b/drivers/md/dm-vdo/dedupe.h
new file mode 100644
index 000000000000..9000d6f3eece
--- /dev/null
+++ b/drivers/md/dm-vdo/dedupe.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_DEDUPE_H
+#define VDO_DEDUPE_H
+
+#include <linux/list.h>
+#include <linux/timer.h>
+
+#include "indexer.h"
+
+#include "admin-state.h"
+#include "constants.h"
+#include "statistics.h"
+#include "types.h"
+#include "wait-queue.h"
+
+struct dedupe_context {
+ struct hash_zone *zone;
+ struct uds_request request;
+ struct list_head list_entry;
+ struct funnel_queue_entry queue_entry;
+ u64 submission_jiffies;
+ struct data_vio *requestor;
+ atomic_t state;
+};
+
+struct hash_lock;
+
+struct hash_zone {
+ /* Which hash zone this is */
+ zone_count_t zone_number;
+
+ /* The administrative state of the zone */
+ struct admin_state state;
+
+ /* The thread ID for this zone */
+ thread_id_t thread_id;
+
+ /* Mapping from record name fields to hash_locks */
+ struct int_map *hash_lock_map;
+
+ /* List containing all unused hash_locks */
+ struct list_head lock_pool;
+
+ /*
+ * Statistics shared by all hash locks in this zone. Only modified on the hash zone thread,
+ * but queried by other threads.
+ */
+ struct hash_lock_statistics statistics;
+
+ /* Array of all hash_locks */
+ struct hash_lock *lock_array;
+
+ /* These fields are used to manage the dedupe contexts */
+ struct list_head available;
+ struct list_head pending;
+ struct funnel_queue *timed_out_complete;
+ struct timer_list timer;
+ struct vdo_completion completion;
+ unsigned int active;
+ atomic_t timer_state;
+
+ /* The dedupe contexts for querying the index from this zone */
+ struct dedupe_context contexts[MAXIMUM_VDO_USER_VIOS];
+};
+
+struct hash_zones;
+
+struct pbn_lock * __must_check vdo_get_duplicate_lock(struct data_vio *data_vio);
+
+void vdo_acquire_hash_lock(struct vdo_completion *completion);
+void vdo_continue_hash_lock(struct vdo_completion *completion);
+void vdo_release_hash_lock(struct data_vio *data_vio);
+void vdo_clean_failed_hash_lock(struct data_vio *data_vio);
+void vdo_share_compressed_write_lock(struct data_vio *data_vio,
+ struct pbn_lock *pbn_lock);
+
+int __must_check vdo_make_hash_zones(struct vdo *vdo, struct hash_zones **zones_ptr);
+
+void vdo_free_hash_zones(struct hash_zones *zones);
+
+void vdo_drain_hash_zones(struct hash_zones *zones, struct vdo_completion *parent);
+
+void vdo_get_dedupe_statistics(struct hash_zones *zones, struct vdo_statistics *stats);
+
+struct hash_zone * __must_check vdo_select_hash_zone(struct hash_zones *zones,
+ const struct uds_record_name *name);
+
+void vdo_dump_hash_zones(struct hash_zones *zones);
+
+const char *vdo_get_dedupe_index_state_name(struct hash_zones *zones);
+
+u64 vdo_get_dedupe_index_timeout_count(struct hash_zones *zones);
+
+int vdo_message_dedupe_index(struct hash_zones *zones, const char *name);
+
+void vdo_set_dedupe_state_normal(struct hash_zones *zones);
+
+void vdo_start_dedupe_index(struct hash_zones *zones, bool create_flag);
+
+void vdo_resume_hash_zones(struct hash_zones *zones, struct vdo_completion *parent);
+
+void vdo_finish_dedupe_index(struct hash_zones *zones);
+
+/* Interval (in milliseconds) from submission until switching to fast path and skipping UDS. */
+extern unsigned int vdo_dedupe_index_timeout_interval;
+
+/*
+ * Minimum time interval (in milliseconds) between timer invocations to check for requests waiting
+ * for UDS that should now time out.
+ */
+extern unsigned int vdo_dedupe_index_min_timer_interval;
+
+void vdo_set_dedupe_index_timeout_interval(unsigned int value);
+void vdo_set_dedupe_index_min_timer_interval(unsigned int value);
+
+#endif /* VDO_DEDUPE_H */
diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c
new file mode 100644
index 000000000000..5a4b0a927f56
--- /dev/null
+++ b/drivers/md/dm-vdo/dm-vdo-target.c
@@ -0,0 +1,2910 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device-mapper.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include "admin-state.h"
+#include "block-map.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "dedupe.h"
+#include "dump.h"
+#include "encodings.h"
+#include "errors.h"
+#include "flush.h"
+#include "io-submitter.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "message-stats.h"
+#include "recovery-journal.h"
+#include "repair.h"
+#include "slab-depot.h"
+#include "status-codes.h"
+#include "string-utils.h"
+#include "thread-device.h"
+#include "thread-registry.h"
+#include "thread-utils.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+
+enum admin_phases {
+ GROW_LOGICAL_PHASE_START,
+ GROW_LOGICAL_PHASE_GROW_BLOCK_MAP,
+ GROW_LOGICAL_PHASE_END,
+ GROW_LOGICAL_PHASE_ERROR,
+ GROW_PHYSICAL_PHASE_START,
+ GROW_PHYSICAL_PHASE_COPY_SUMMARY,
+ GROW_PHYSICAL_PHASE_UPDATE_COMPONENTS,
+ GROW_PHYSICAL_PHASE_USE_NEW_SLABS,
+ GROW_PHYSICAL_PHASE_END,
+ GROW_PHYSICAL_PHASE_ERROR,
+ LOAD_PHASE_START,
+ LOAD_PHASE_LOAD_DEPOT,
+ LOAD_PHASE_MAKE_DIRTY,
+ LOAD_PHASE_PREPARE_TO_ALLOCATE,
+ LOAD_PHASE_SCRUB_SLABS,
+ LOAD_PHASE_DATA_REDUCTION,
+ LOAD_PHASE_FINISHED,
+ LOAD_PHASE_DRAIN_JOURNAL,
+ LOAD_PHASE_WAIT_FOR_READ_ONLY,
+ PRE_LOAD_PHASE_START,
+ PRE_LOAD_PHASE_LOAD_COMPONENTS,
+ PRE_LOAD_PHASE_END,
+ PREPARE_GROW_PHYSICAL_PHASE_START,
+ RESUME_PHASE_START,
+ RESUME_PHASE_ALLOW_READ_ONLY_MODE,
+ RESUME_PHASE_DEDUPE,
+ RESUME_PHASE_DEPOT,
+ RESUME_PHASE_JOURNAL,
+ RESUME_PHASE_BLOCK_MAP,
+ RESUME_PHASE_LOGICAL_ZONES,
+ RESUME_PHASE_PACKER,
+ RESUME_PHASE_FLUSHER,
+ RESUME_PHASE_DATA_VIOS,
+ RESUME_PHASE_END,
+ SUSPEND_PHASE_START,
+ SUSPEND_PHASE_PACKER,
+ SUSPEND_PHASE_DATA_VIOS,
+ SUSPEND_PHASE_DEDUPE,
+ SUSPEND_PHASE_FLUSHES,
+ SUSPEND_PHASE_LOGICAL_ZONES,
+ SUSPEND_PHASE_BLOCK_MAP,
+ SUSPEND_PHASE_JOURNAL,
+ SUSPEND_PHASE_DEPOT,
+ SUSPEND_PHASE_READ_ONLY_WAIT,
+ SUSPEND_PHASE_WRITE_SUPER_BLOCK,
+ SUSPEND_PHASE_END,
+};
+
+static const char * const ADMIN_PHASE_NAMES[] = {
+ "GROW_LOGICAL_PHASE_START",
+ "GROW_LOGICAL_PHASE_GROW_BLOCK_MAP",
+ "GROW_LOGICAL_PHASE_END",
+ "GROW_LOGICAL_PHASE_ERROR",
+ "GROW_PHYSICAL_PHASE_START",
+ "GROW_PHYSICAL_PHASE_COPY_SUMMARY",
+ "GROW_PHYSICAL_PHASE_UPDATE_COMPONENTS",
+ "GROW_PHYSICAL_PHASE_USE_NEW_SLABS",
+ "GROW_PHYSICAL_PHASE_END",
+ "GROW_PHYSICAL_PHASE_ERROR",
+ "LOAD_PHASE_START",
+ "LOAD_PHASE_LOAD_DEPOT",
+ "LOAD_PHASE_MAKE_DIRTY",
+ "LOAD_PHASE_PREPARE_TO_ALLOCATE",
+ "LOAD_PHASE_SCRUB_SLABS",
+ "LOAD_PHASE_DATA_REDUCTION",
+ "LOAD_PHASE_FINISHED",
+ "LOAD_PHASE_DRAIN_JOURNAL",
+ "LOAD_PHASE_WAIT_FOR_READ_ONLY",
+ "PRE_LOAD_PHASE_START",
+ "PRE_LOAD_PHASE_LOAD_COMPONENTS",
+ "PRE_LOAD_PHASE_END",
+ "PREPARE_GROW_PHYSICAL_PHASE_START",
+ "RESUME_PHASE_START",
+ "RESUME_PHASE_ALLOW_READ_ONLY_MODE",
+ "RESUME_PHASE_DEDUPE",
+ "RESUME_PHASE_DEPOT",
+ "RESUME_PHASE_JOURNAL",
+ "RESUME_PHASE_BLOCK_MAP",
+ "RESUME_PHASE_LOGICAL_ZONES",
+ "RESUME_PHASE_PACKER",
+ "RESUME_PHASE_FLUSHER",
+ "RESUME_PHASE_DATA_VIOS",
+ "RESUME_PHASE_END",
+ "SUSPEND_PHASE_START",
+ "SUSPEND_PHASE_PACKER",
+ "SUSPEND_PHASE_DATA_VIOS",
+ "SUSPEND_PHASE_DEDUPE",
+ "SUSPEND_PHASE_FLUSHES",
+ "SUSPEND_PHASE_LOGICAL_ZONES",
+ "SUSPEND_PHASE_BLOCK_MAP",
+ "SUSPEND_PHASE_JOURNAL",
+ "SUSPEND_PHASE_DEPOT",
+ "SUSPEND_PHASE_READ_ONLY_WAIT",
+ "SUSPEND_PHASE_WRITE_SUPER_BLOCK",
+ "SUSPEND_PHASE_END",
+};
+
+/* If we bump this, update the arrays below */
+#define TABLE_VERSION 4
+
+/* arrays for handling different table versions */
+static const u8 REQUIRED_ARGC[] = { 10, 12, 9, 7, 6 };
+/* The pool name is no longer used; it is only here for verification of older versions. */
+static const u8 POOL_NAME_ARG_INDEX[] = { 8, 10, 8 };
+
+/*
+ * Track in-use instance numbers using a flat bit array.
+ *
+ * O(n) run time isn't ideal, but if we have 1000 VDO devices in use simultaneously we still only
+ * need to scan 16 words, so it's not likely to be a big deal compared to other resource usage.
+ */
+
+/*
+ * This minimum size for the bit array creates a numbering space of 0-999, which allows
+ * successive starts of the same volume to have different instance numbers in any
+ * reasonably-sized test. Changing instances on restart allows vdoMonReport to detect that
+ * the ephemeral stats have reset to zero.
+ */
+#define BIT_COUNT_MINIMUM 1000
+/* Grow the bit array by this many bits when needed */
+#define BIT_COUNT_INCREMENT 100
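+
+/*
+ * On a 64-bit machine the 1000-bit minimum occupies DIV_ROUND_UP(1000, 64) == 16 words, which is
+ * the 16-word scan bound mentioned above.
+ */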
+
+struct instance_tracker {
+ unsigned int bit_count;
+ unsigned long *words;
+ unsigned int count;
+ unsigned int next;
+};
+
+static DEFINE_MUTEX(instances_lock);
+static struct instance_tracker instances;
+
+/**
+ * free_device_config() - Free a device config created by parse_device_config().
+ * @config: The config to free.
+ */
+static void free_device_config(struct device_config *config)
+{
+ if (config == NULL)
+ return;
+
+ if (config->owned_device != NULL)
+ dm_put_device(config->owning_target, config->owned_device);
+
+ vdo_free(config->parent_device_name);
+ vdo_free(config->original_string);
+
+ /* Reduce the chance a use-after-free (as in BZ 1669960) happens to work. */
+ memset(config, 0, sizeof(*config));
+ vdo_free(config);
+}
+
+/**
+ * get_version_number() - Decide the version number from argv.
+ *
+ * @argc: The number of table values.
+ * @argv: The array of table values.
+ * @error_ptr: A pointer in which to return an error string.
+ * @version_ptr: A pointer to return the version.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int get_version_number(int argc, char **argv, char **error_ptr,
+ unsigned int *version_ptr)
+{
+	/* The version, if present, takes the form "V<n>". */
+ if (sscanf(argv[0], "V%u", version_ptr) == 1) {
+ if (*version_ptr < 1 || *version_ptr > TABLE_VERSION) {
+ *error_ptr = "Unknown version number detected";
+ return VDO_BAD_CONFIGURATION;
+ }
+ } else {
+ /* V0 actually has no version number in the table string */
+ *version_ptr = 0;
+ }
+
+ /*
+ * V0 and V1 have no optional parameters. There will always be a parameter for thread
+ * config, even if it's a "." to show it's an empty list.
+ */
+ if (*version_ptr <= 1) {
+ if (argc != REQUIRED_ARGC[*version_ptr]) {
+ *error_ptr = "Incorrect number of arguments for version";
+ return VDO_BAD_CONFIGURATION;
+ }
+ } else if (argc < REQUIRED_ARGC[*version_ptr]) {
+ *error_ptr = "Incorrect number of arguments for version";
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ if (*version_ptr != TABLE_VERSION) {
+		vdo_log_warning("Detected version mismatch between kernel module and tools: kernel: %d, tool: %d",
+ TABLE_VERSION, *version_ptr);
+ vdo_log_warning("Please consider upgrading management tools to match kernel.");
+ }
+ return VDO_SUCCESS;
+}
+
+/* Free a list of non-NULL string pointers, and then the list itself. */
+static void free_string_array(char **string_array)
+{
+ unsigned int offset;
+
+ for (offset = 0; string_array[offset] != NULL; offset++)
+ vdo_free(string_array[offset]);
+ vdo_free(string_array);
+}
+
+/*
+ * Split the input string into substrings, separated at occurrences of the indicated character,
+ * returning a null-terminated list of string pointers.
+ *
+ * The string pointers and the pointer array itself should both be freed with vdo_free() when no
+ * longer needed. This can be done with free_string_array() (below) if the pointers in the array
+ * are not changed. Since the array and copied strings are allocated by this function, it may only
+ * be used in contexts where allocation is permitted.
+ *
+ * Empty substrings are not ignored; that is, returned substrings may be empty strings if the
+ * separator occurs twice in a row.
+ */
+static int split_string(const char *string, char separator, char ***substring_array_ptr)
+{
+ unsigned int current_substring = 0, substring_count = 1;
+ const char *s;
+ char **substrings;
+ int result;
+ ptrdiff_t length;
+
+ for (s = string; *s != 0; s++) {
+ if (*s == separator)
+ substring_count++;
+ }
+
+ result = vdo_allocate(substring_count + 1, char *, "string-splitting array",
+ &substrings);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (s = string; *s != 0; s++) {
+ if (*s == separator) {
+			length = s - string;
+
+ result = vdo_allocate(length + 1, char, "split string",
+ &substrings[current_substring]);
+ if (result != VDO_SUCCESS) {
+ free_string_array(substrings);
+ return result;
+ }
+ /*
+ * Trailing NUL is already in place after allocation; deal with the zero or
+ * more non-NUL bytes in the string.
+ */
+ if (length > 0)
+ memcpy(substrings[current_substring], string, length);
+ string = s + 1;
+ current_substring++;
+ BUG_ON(current_substring >= substring_count);
+ }
+ }
+ /* Process final string, with no trailing separator. */
+ BUG_ON(current_substring != (substring_count - 1));
+ length = strlen(string);
+
+ result = vdo_allocate(length + 1, char, "split string",
+ &substrings[current_substring]);
+ if (result != VDO_SUCCESS) {
+ free_string_array(substrings);
+ return result;
+ }
+ memcpy(substrings[current_substring], string, length);
+ current_substring++;
+ /* substrings[current_substring] is NULL already */
+ *substring_array_ptr = substrings;
+ return VDO_SUCCESS;
+}
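+
+/*
+ * For example, split_string("a,,b", ',', &fields) yields the four-element array
+ * { "a", "", "b", NULL }, since empty substrings are preserved; the result would then be released
+ * with free_string_array(fields).
+ */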
+
+/*
+ * Join the input substrings into one string, joined with the indicated character, returning a
+ * string. array_length is a bound on the number of valid elements in substring_array, in case it
+ * is not NULL-terminated.
+ */
+static int join_strings(char **substring_array, size_t array_length, char separator,
+ char **string_ptr)
+{
+ size_t string_length = 0;
+ size_t i;
+ int result;
+ char *output, *current_position;
+
+ for (i = 0; (i < array_length) && (substring_array[i] != NULL); i++)
+ string_length += strlen(substring_array[i]) + 1;
+
+ result = vdo_allocate(string_length, char, __func__, &output);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ current_position = &output[0];
+
+ for (i = 0; (i < array_length) && (substring_array[i] != NULL); i++) {
+ current_position = vdo_append_to_buffer(current_position,
+ output + string_length, "%s",
+ substring_array[i]);
+ *current_position = separator;
+ current_position++;
+ }
+
+ /* We output one too many separators; replace the last with a zero byte. */
+ if (current_position != output)
+ *(current_position - 1) = '\0';
+
+ *string_ptr = output;
+ return VDO_SUCCESS;
+}
+
+/**
+ * parse_bool() - Parse a two-valued option into a bool.
+ * @bool_str: The string value to convert to a bool.
+ * @true_str: The string value which should be converted to true.
+ * @false_str: The string value which should be converted to false.
+ * @bool_ptr: A pointer to return the bool value in.
+ *
+ * Return: VDO_SUCCESS or an error if bool_str is neither true_str nor false_str.
+ */
+static inline int __must_check parse_bool(const char *bool_str, const char *true_str,
+ const char *false_str, bool *bool_ptr)
+{
+ bool value = false;
+
+ if (strcmp(bool_str, true_str) == 0)
+ value = true;
+ else if (strcmp(bool_str, false_str) == 0)
+ value = false;
+ else
+ return VDO_BAD_CONFIGURATION;
+
+ *bool_ptr = value;
+ return VDO_SUCCESS;
+}
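+
+/*
+ * For example, parse_bool("on", "on", "off", &flag) sets flag to true, while
+ * parse_bool("enabled", "on", "off", &flag) returns VDO_BAD_CONFIGURATION and leaves flag
+ * untouched.
+ */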
+
+/**
+ * process_one_thread_config_spec() - Process one component of a thread parameter configuration
+ * string and update the configuration data structure.
+ * @thread_param_type: The type of thread specified.
+ * @count: The thread count requested.
+ * @config: The configuration data structure to update.
+ *
+ * If the thread count requested is invalid, a message is logged and -EINVAL returned. If the
+ * thread name is unknown, a message is logged but no error is returned.
+ *
+ * Return: VDO_SUCCESS or -EINVAL
+ */
+static int process_one_thread_config_spec(const char *thread_param_type,
+ unsigned int count,
+ struct thread_count_config *config)
+{
+ /* Handle limited thread parameters */
+ if (strcmp(thread_param_type, "bioRotationInterval") == 0) {
+ if (count == 0) {
+ vdo_log_error("thread config string error: 'bioRotationInterval' of at least 1 is required");
+ return -EINVAL;
+ } else if (count > VDO_BIO_ROTATION_INTERVAL_LIMIT) {
+ vdo_log_error("thread config string error: 'bioRotationInterval' cannot be higher than %d",
+ VDO_BIO_ROTATION_INTERVAL_LIMIT);
+ return -EINVAL;
+ }
+ config->bio_rotation_interval = count;
+ return VDO_SUCCESS;
+ }
+ if (strcmp(thread_param_type, "logical") == 0) {
+ if (count > MAX_VDO_LOGICAL_ZONES) {
+ vdo_log_error("thread config string error: at most %d 'logical' threads are allowed",
+ MAX_VDO_LOGICAL_ZONES);
+ return -EINVAL;
+ }
+ config->logical_zones = count;
+ return VDO_SUCCESS;
+ }
+ if (strcmp(thread_param_type, "physical") == 0) {
+ if (count > MAX_VDO_PHYSICAL_ZONES) {
+ vdo_log_error("thread config string error: at most %d 'physical' threads are allowed",
+ MAX_VDO_PHYSICAL_ZONES);
+ return -EINVAL;
+ }
+ config->physical_zones = count;
+ return VDO_SUCCESS;
+ }
+ /* Handle other thread count parameters */
+ if (count > MAXIMUM_VDO_THREADS) {
+ vdo_log_error("thread config string error: at most %d '%s' threads are allowed",
+ MAXIMUM_VDO_THREADS, thread_param_type);
+ return -EINVAL;
+ }
+ if (strcmp(thread_param_type, "hash") == 0) {
+ config->hash_zones = count;
+ return VDO_SUCCESS;
+ }
+ if (strcmp(thread_param_type, "cpu") == 0) {
+ if (count == 0) {
+ vdo_log_error("thread config string error: at least one 'cpu' thread required");
+ return -EINVAL;
+ }
+ config->cpu_threads = count;
+ return VDO_SUCCESS;
+ }
+ if (strcmp(thread_param_type, "ack") == 0) {
+ config->bio_ack_threads = count;
+ return VDO_SUCCESS;
+ }
+ if (strcmp(thread_param_type, "bio") == 0) {
+ if (count == 0) {
+ vdo_log_error("thread config string error: at least one 'bio' thread required");
+ return -EINVAL;
+ }
+ config->bio_threads = count;
+ return VDO_SUCCESS;
+ }
+
+ /*
+ * Don't fail, just log. This will handle version mismatches between user mode tools and
+ * kernel.
+ */
+ vdo_log_info("unknown thread parameter type \"%s\"", thread_param_type);
+ return VDO_SUCCESS;
+}
+
+/**
+ * parse_one_thread_config_spec() - Parse one component of a thread parameter configuration string
+ * and update the configuration data structure.
+ * @spec: The thread parameter specification string.
+ * @config: The configuration data to be updated.
+ */
+static int parse_one_thread_config_spec(const char *spec,
+ struct thread_count_config *config)
+{
+ unsigned int count;
+ char **fields;
+ int result;
+
+ result = split_string(spec, '=', &fields);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if ((fields[0] == NULL) || (fields[1] == NULL) || (fields[2] != NULL)) {
+ vdo_log_error("thread config string error: expected thread parameter assignment, saw \"%s\"",
+ spec);
+ free_string_array(fields);
+ return -EINVAL;
+ }
+
+ result = kstrtouint(fields[1], 10, &count);
+ if (result) {
+ vdo_log_error("thread config string error: integer value needed, found \"%s\"",
+ fields[1]);
+ free_string_array(fields);
+ return result;
+ }
+
+ result = process_one_thread_config_spec(fields[0], count, config);
+ free_string_array(fields);
+ return result;
+}
+
+/**
+ * parse_thread_config_string() - Parse the configuration string passed and update the specified
+ * counts and other parameters of various types of threads to be
+ * created.
+ * @string: Thread parameter configuration string.
+ * @config: The thread configuration data to update.
+ *
+ * The configuration string should contain one or more comma-separated specs of the form
+ * "typename=number"; the supported type names are "cpu", "ack", "bio", "bioRotationInterval",
+ * "logical", "physical", and "hash".
+ *
+ * If an error occurs during parsing of a single key/value pair, we deem it serious enough to stop
+ * further parsing.
+ *
+ * This function can't set the "reason" value the caller wants to pass back, because we'd want to
+ * format it to say which field was invalid, and we can't allocate the "reason" strings
+ * dynamically. So if an error occurs, we'll log the details and pass back an error.
+ *
+ * Return: VDO_SUCCESS or -EINVAL or -ENOMEM
+ */
+static int parse_thread_config_string(const char *string,
+ struct thread_count_config *config)
+{
+ int result = VDO_SUCCESS;
+ char **specs;
+
+ if (strcmp(".", string) != 0) {
+ unsigned int i;
+
+ result = split_string(string, ',', &specs);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (i = 0; specs[i] != NULL; i++) {
+ result = parse_one_thread_config_spec(specs[i], config);
+ if (result != VDO_SUCCESS)
+ break;
+ }
+ free_string_array(specs);
+ }
+ return result;
+}
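+
+/*
+ * For example, the (hypothetical) string "cpu=2,bio=4,ack=1,logical=3,physical=2,hash=1" sets
+ * cpu_threads to 2, bio_threads to 4, bio_ack_threads to 1, and the logical, physical, and hash
+ * zone counts to 3, 2, and 1; the string "." leaves every count at its default.
+ */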
+
+/**
+ * process_one_key_value_pair() - Process one component of an optional parameter string and update
+ * the configuration data structure.
+ * @key: The optional parameter key name.
+ * @value: The optional parameter value.
+ * @config: The configuration data structure to update.
+ *
+ * If the value requested is invalid, a message is logged and -EINVAL returned. If the key is
+ * unknown, a message is logged but no error is returned.
+ *
+ * Return: VDO_SUCCESS or -EINVAL
+ */
+static int process_one_key_value_pair(const char *key, unsigned int value,
+ struct device_config *config)
+{
+ /* Non thread optional parameters */
+ if (strcmp(key, "maxDiscard") == 0) {
+ if (value == 0) {
+ vdo_log_error("optional parameter error: at least one max discard block required");
+ return -EINVAL;
+ }
+ /* Max discard sectors in blkdev_issue_discard is UINT_MAX >> 9 */
+ if (value > (UINT_MAX / VDO_BLOCK_SIZE)) {
+ vdo_log_error("optional parameter error: at most %d max discard blocks are allowed",
+ UINT_MAX / VDO_BLOCK_SIZE);
+ return -EINVAL;
+ }
+ config->max_discard_blocks = value;
+ return VDO_SUCCESS;
+ }
+ /* Handles unknown key names */
+ return process_one_thread_config_spec(key, value, &config->thread_counts);
+}
+
+/**
+ * parse_one_key_value_pair() - Parse one key/value pair and update the configuration data
+ * structure.
+ * @key: The optional key name.
+ * @value: The optional value.
+ * @config: The configuration data to be updated.
+ *
+ * Return: VDO_SUCCESS or error.
+ */
+static int parse_one_key_value_pair(const char *key, const char *value,
+ struct device_config *config)
+{
+ unsigned int count;
+ int result;
+
+ if (strcmp(key, "deduplication") == 0)
+ return parse_bool(value, "on", "off", &config->deduplication);
+
+ if (strcmp(key, "compression") == 0)
+ return parse_bool(value, "on", "off", &config->compression);
+
+ /* The remaining arguments must have integral values. */
+ result = kstrtouint(value, 10, &count);
+ if (result) {
+ vdo_log_error("optional config string error: integer value needed, found \"%s\"",
+ value);
+ return result;
+ }
+ return process_one_key_value_pair(key, count, config);
+}
+
+/**
+ * parse_key_value_pairs() - Parse all key/value pairs from a list of arguments.
+ * @argc: The total number of arguments in list.
+ * @argv: The list of key/value pairs.
+ * @config: The device configuration data to update.
+ *
+ * If an error occurs during parsing of a single key/value pair, we deem it serious enough to stop
+ * further parsing.
+ *
+ * This function can't set the "reason" value the caller wants to pass back, because we'd want to
+ * format it to say which field was invalid, and we can't allocate the "reason" strings
+ * dynamically. So if an error occurs, we'll log the details and return the error.
+ *
+ * Return: VDO_SUCCESS or error
+ */
+static int parse_key_value_pairs(int argc, char **argv, struct device_config *config)
+{
+ int result = VDO_SUCCESS;
+
+ while (argc) {
+ result = parse_one_key_value_pair(argv[0], argv[1], config);
+ if (result != VDO_SUCCESS)
+ break;
+
+ argc -= 2;
+ argv += 2;
+ }
+
+ return result;
+}
+
+/**
+ * parse_optional_arguments() - Parse the configuration string passed in for optional arguments.
+ * @arg_set: The structure holding the arguments to parse.
+ * @error_ptr: Pointer to a buffer to hold the error string.
+ * @config: Pointer to device configuration data to update.
+ *
+ * For V0/V1 configurations, there will only be one optional parameter; the thread configuration.
+ * The configuration string should contain one or more comma-separated specs of the form
+ * "typename=number"; the supported type names are "cpu", "ack", "bio", "bioRotationInterval",
+ * "logical", "physical", and "hash".
+ *
+ * For V2 configurations and beyond, there could be any number of arguments. They should contain
+ * one or more key/value pairs separated by a space.
+ *
+ * Return: VDO_SUCCESS or error
+ */
+static int parse_optional_arguments(struct dm_arg_set *arg_set, char **error_ptr,
+ struct device_config *config)
+{
+ int result = VDO_SUCCESS;
+
+ if (config->version == 0 || config->version == 1) {
+ result = parse_thread_config_string(arg_set->argv[0],
+ &config->thread_counts);
+ if (result != VDO_SUCCESS) {
+ *error_ptr = "Invalid thread-count configuration";
+ return VDO_BAD_CONFIGURATION;
+ }
+ } else {
+ if ((arg_set->argc % 2) != 0) {
+ *error_ptr = "Odd number of optional arguments given but they should be <key> <value> pairs";
+ return VDO_BAD_CONFIGURATION;
+ }
+ result = parse_key_value_pairs(arg_set->argc, arg_set->argv, config);
+ if (result != VDO_SUCCESS) {
+ *error_ptr = "Invalid optional argument configuration";
+ return VDO_BAD_CONFIGURATION;
+ }
+ }
+ return result;
+}
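+
+/*
+ * For a V2 or later table, the optional portion might read "maxDiscard 1500 deduplication on
+ * compression off" (illustrative values): whole key/value pairs, which is why an odd argument
+ * count is rejected above.
+ */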
+
+/**
+ * handle_parse_error() - Handle a parsing error.
+ * @config: The config to free.
+ * @error_ptr: A place to store a constant string about the error.
+ * @error_str: A constant string to store in error_ptr.
+ */
+static void handle_parse_error(struct device_config *config, char **error_ptr,
+ char *error_str)
+{
+ free_device_config(config);
+ *error_ptr = error_str;
+}
+
+/**
+ * parse_device_config() - Convert the dmsetup table into a struct device_config.
+ * @argc: The number of table values.
+ * @argv: The array of table values.
+ * @ti: The target structure for this table.
+ * @config_ptr: A pointer to return the allocated config.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int parse_device_config(int argc, char **argv, struct dm_target *ti,
+ struct device_config **config_ptr)
+{
+ bool enable_512e;
+ size_t logical_bytes = to_bytes(ti->len);
+ struct dm_arg_set arg_set;
+ char **error_ptr = &ti->error;
+ struct device_config *config = NULL;
+ int result;
+
+ if ((logical_bytes % VDO_BLOCK_SIZE) != 0) {
+ handle_parse_error(config, error_ptr,
+ "Logical size must be a multiple of 4096");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ if (argc == 0) {
+ handle_parse_error(config, error_ptr, "Incorrect number of arguments");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ result = vdo_allocate(1, struct device_config, "device_config", &config);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr,
+ "Could not allocate config structure");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ config->owning_target = ti;
+ config->logical_blocks = logical_bytes / VDO_BLOCK_SIZE;
+ INIT_LIST_HEAD(&config->config_list);
+
+ /* Save the original string. */
+ result = join_strings(argv, argc, ' ', &config->original_string);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr, "Could not populate string");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ vdo_log_info("table line: %s", config->original_string);
+
+ config->thread_counts = (struct thread_count_config) {
+ .bio_ack_threads = 1,
+ .bio_threads = DEFAULT_VDO_BIO_SUBMIT_QUEUE_COUNT,
+ .bio_rotation_interval = DEFAULT_VDO_BIO_SUBMIT_QUEUE_ROTATE_INTERVAL,
+ .cpu_threads = 1,
+ .logical_zones = 0,
+ .physical_zones = 0,
+ .hash_zones = 0,
+ };
+ config->max_discard_blocks = 1;
+ config->deduplication = true;
+ config->compression = false;
+
+ arg_set.argc = argc;
+ arg_set.argv = argv;
+
+ result = get_version_number(argc, argv, error_ptr, &config->version);
+ if (result != VDO_SUCCESS) {
+ /* get_version_number sets error_ptr itself. */
+ handle_parse_error(config, error_ptr, *error_ptr);
+ return result;
+ }
+ /* Move the arg pointer forward only if the argument was there. */
+ if (config->version >= 1)
+ dm_shift_arg(&arg_set);
+
+ result = vdo_duplicate_string(dm_shift_arg(&arg_set), "parent device name",
+ &config->parent_device_name);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr,
+ "Could not copy parent device name");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ /* Get the physical blocks, if known. */
+ if (config->version >= 1) {
+ result = kstrtoull(dm_shift_arg(&arg_set), 10, &config->physical_blocks);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr,
+ "Invalid physical block count");
+ return VDO_BAD_CONFIGURATION;
+ }
+ }
+
+ /* Get the logical block size and validate */
+ result = parse_bool(dm_shift_arg(&arg_set), "512", "4096", &enable_512e);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr, "Invalid logical block size");
+ return VDO_BAD_CONFIGURATION;
+ }
+ config->logical_block_size = (enable_512e ? 512 : 4096);
+
+ /* Skip past the two no longer used read cache options. */
+ if (config->version <= 1)
+ dm_consume_args(&arg_set, 2);
+
+ /* Get the page cache size. */
+ result = kstrtouint(dm_shift_arg(&arg_set), 10, &config->cache_size);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr,
+ "Invalid block map page cache size");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ /* Get the block map era length. */
+ result = kstrtouint(dm_shift_arg(&arg_set), 10, &config->block_map_maximum_age);
+ if (result != VDO_SUCCESS) {
+ handle_parse_error(config, error_ptr, "Invalid block map maximum age");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ /* Skip past the no longer used MD RAID5 optimization mode */
+ if (config->version <= 2)
+ dm_consume_args(&arg_set, 1);
+
+ /* Skip past the no longer used write policy setting */
+ if (config->version <= 3)
+ dm_consume_args(&arg_set, 1);
+
+ /* Skip past the no longer used pool name for older table lines */
+ if (config->version <= 2) {
+ /*
+		 * Make sure the index used to get the pool name from argv directly is still in sync
+		 * with the parsing of the table line.
+ */
+ if (&arg_set.argv[0] != &argv[POOL_NAME_ARG_INDEX[config->version]]) {
+ handle_parse_error(config, error_ptr,
+ "Pool name not in expected location");
+ return VDO_BAD_CONFIGURATION;
+ }
+ dm_shift_arg(&arg_set);
+ }
+
+ /* Get the optional arguments and validate. */
+ result = parse_optional_arguments(&arg_set, error_ptr, config);
+ if (result != VDO_SUCCESS) {
+ /* parse_optional_arguments sets error_ptr itself. */
+ handle_parse_error(config, error_ptr, *error_ptr);
+ return result;
+ }
+
+ /*
+ * Logical, physical, and hash zone counts can all be zero; then we get one thread doing
+	 * everything, as in our older configuration. If any zone count is non-zero, the others
+	 * must be as well.
+ */
+ if (((config->thread_counts.logical_zones == 0) !=
+ (config->thread_counts.physical_zones == 0)) ||
+ ((config->thread_counts.physical_zones == 0) !=
+ (config->thread_counts.hash_zones == 0))) {
+ handle_parse_error(config, error_ptr,
+ "Logical, physical, and hash zones counts must all be zero or all non-zero");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ if (config->cache_size <
+ (2 * MAXIMUM_VDO_USER_VIOS * config->thread_counts.logical_zones)) {
+ handle_parse_error(config, error_ptr,
+ "Insufficient block map cache for logical zones");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ result = dm_get_device(ti, config->parent_device_name,
+ dm_table_get_mode(ti->table), &config->owned_device);
+ if (result != 0) {
+ vdo_log_error("couldn't open device \"%s\": error %d",
+ config->parent_device_name, result);
+ handle_parse_error(config, error_ptr, "Unable to open storage device");
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ if (config->version == 0) {
+ u64 device_size = i_size_read(config->owned_device->bdev->bd_inode);
+
+ config->physical_blocks = device_size / VDO_BLOCK_SIZE;
+ }
+
+ *config_ptr = config;
+ return result;
+}
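+
+/*
+ * Putting the parsing above together, a purely illustrative V4 table suffix such as
+ * "V4 /dev/sdb1 5242880 4096 32768 16380 maxDiscard 1500" supplies, in order: the version, the
+ * parent device name, the physical block count, the logical block size, the block map cache size,
+ * the block map era length, and then optional key/value pairs.
+ */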
+
+static struct vdo *get_vdo_for_target(struct dm_target *ti)
+{
+ return ((struct device_config *) ti->private)->vdo;
+}
+
+static int vdo_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct vdo *vdo = get_vdo_for_target(ti);
+ struct vdo_work_queue *current_work_queue;
+ const struct admin_state_code *code = vdo_get_admin_state_code(&vdo->admin.state);
+
+ VDO_ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s",
+ code->name);
+
+ /* Count all incoming bios. */
+ vdo_count_bios(&vdo->stats.bios_in, bio);
+
+ /* Handle empty bios. Empty flush bios are not associated with a vio. */
+ if ((bio_op(bio) == REQ_OP_FLUSH) || ((bio->bi_opf & REQ_PREFLUSH) != 0)) {
+ vdo_launch_flush(vdo, bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+	/* Submitting a bio from one of this vdo's own threads could deadlock, so BUG in that case. */
+ current_work_queue = vdo_get_current_work_queue();
+ BUG_ON((current_work_queue != NULL) &&
+ (vdo == vdo_get_work_queue_owner(current_work_queue)->vdo));
+ vdo_launch_bio(vdo->data_vio_pool, bio);
+ return DM_MAPIO_SUBMITTED;
+}
+
+static void vdo_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct vdo *vdo = get_vdo_for_target(ti);
+
+ limits->logical_block_size = vdo->device_config->logical_block_size;
+ limits->physical_block_size = VDO_BLOCK_SIZE;
+
+ /* The minimum io size for random io */
+ blk_limits_io_min(limits, VDO_BLOCK_SIZE);
+ /* The optimal io size for streamed/sequential io */
+ blk_limits_io_opt(limits, VDO_BLOCK_SIZE);
+
+ /*
+ * Sets the maximum discard size that will be passed into VDO. This value comes from a
+ * table line value passed in during dmsetup create.
+ *
+ * The value 1024 is the largest usable value on HD systems. A 2048 sector discard on a
+ * busy HD system takes 31 seconds. We should use a value no higher than 1024, which takes
+ * 15 to 16 seconds on a busy HD system. However, using large values results in 120 second
+ * blocked task warnings in kernel logs. In order to avoid these warnings, we choose to
+ * use the smallest reasonable value.
+ *
+ * The value is used by dm-thin to determine whether to pass down discards. The block layer
+ * splits large discards on this boundary when this is set.
+ */
+ limits->max_discard_sectors =
+ (vdo->device_config->max_discard_blocks * VDO_SECTORS_PER_BLOCK);
+
+ /*
+ * Force discards to not begin or end with a partial block by stating the granularity is
+ * 4k.
+ */
+ limits->discard_granularity = VDO_BLOCK_SIZE;
+}
+
+static int vdo_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
+ void *data)
+{
+ struct device_config *config = get_vdo_for_target(ti)->device_config;
+
+ return fn(ti, config->owned_device, 0,
+ config->physical_blocks * VDO_SECTORS_PER_BLOCK, data);
+}
+
+/*
+ * Status line is:
+ * <device> <operating mode> <in recovery> <index state> <compression state>
+ * <used physical blocks> <total physical blocks>
+ */
+
+static void vdo_status(struct dm_target *ti, status_type_t status_type,
+ unsigned int status_flags, char *result, unsigned int maxlen)
+{
+ struct vdo *vdo = get_vdo_for_target(ti);
+ struct vdo_statistics *stats;
+ struct device_config *device_config;
+ /* N.B.: The DMEMIT macro uses the variables named "sz", "result", "maxlen". */
+ int sz = 0;
+
+ switch (status_type) {
+ case STATUSTYPE_INFO:
+ /* Report info for dmsetup status */
+ mutex_lock(&vdo->stats_mutex);
+ vdo_fetch_statistics(vdo, &vdo->stats_buffer);
+ stats = &vdo->stats_buffer;
+
+ DMEMIT("/dev/%pg %s %s %s %s %llu %llu",
+ vdo_get_backing_device(vdo), stats->mode,
+ stats->in_recovery_mode ? "recovering" : "-",
+ vdo_get_dedupe_index_state_name(vdo->hash_zones),
+ vdo_get_compressing(vdo) ? "online" : "offline",
+ stats->data_blocks_used + stats->overhead_blocks_used,
+ stats->physical_blocks);
+ mutex_unlock(&vdo->stats_mutex);
+ break;
+
+ case STATUSTYPE_TABLE:
+ /* Report the string actually specified in the beginning. */
+ device_config = (struct device_config *) ti->private;
+ DMEMIT("%s", device_config->original_string);
+ break;
+
+ case STATUSTYPE_IMA:
+ /* FIXME: We ought to be more detailed here, but this is what thin does. */
+ *result = '\0';
+ break;
+ }
+}
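+
+/*
+ * With illustrative values, a STATUSTYPE_INFO line produced above would look like
+ * "/dev/dm-3 normal - online online 1096632 5242880": the backing device, operating mode, not in
+ * recovery, index state, compression state, used blocks, and total physical blocks.
+ */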
+
+static block_count_t __must_check get_underlying_device_block_count(const struct vdo *vdo)
+{
+ return i_size_read(vdo_get_backing_device(vdo)->bd_inode) / VDO_BLOCK_SIZE;
+}
+
+static int __must_check process_vdo_message_locked(struct vdo *vdo, unsigned int argc,
+ char **argv)
+{
+ if ((argc == 2) && (strcasecmp(argv[0], "compression") == 0)) {
+ if (strcasecmp(argv[1], "on") == 0) {
+ vdo_set_compressing(vdo, true);
+ return 0;
+ }
+
+ if (strcasecmp(argv[1], "off") == 0) {
+ vdo_set_compressing(vdo, false);
+ return 0;
+ }
+
+ vdo_log_warning("invalid argument '%s' to dmsetup compression message",
+ argv[1]);
+ return -EINVAL;
+ }
+
+ vdo_log_warning("unrecognized dmsetup message '%s' received", argv[0]);
+ return -EINVAL;
+}
+
+/*
+ * If the message is a dump, just do it. Otherwise, proceed only if no other message is currently
+ * being processed. Returns -EBUSY if another message is being processed.
+ */
+static int __must_check process_vdo_message(struct vdo *vdo, unsigned int argc,
+ char **argv)
+{
+ int result;
+
+ /*
+ * All messages which may be processed in parallel with other messages should be handled
+ * here before the atomic check below. Messages which should be exclusive should be
+ * processed in process_vdo_message_locked().
+ */
+
+ /* Dump messages should always be processed */
+ if (strcasecmp(argv[0], "dump") == 0)
+ return vdo_dump(vdo, argc, argv, "dmsetup message");
+
+ if (argc == 1) {
+ if (strcasecmp(argv[0], "dump-on-shutdown") == 0) {
+ vdo->dump_on_shutdown = true;
+ return 0;
+ }
+
+ /* Index messages should always be processed */
+ if ((strcasecmp(argv[0], "index-close") == 0) ||
+ (strcasecmp(argv[0], "index-create") == 0) ||
+ (strcasecmp(argv[0], "index-disable") == 0) ||
+ (strcasecmp(argv[0], "index-enable") == 0))
+ return vdo_message_dedupe_index(vdo->hash_zones, argv[0]);
+ }
+
+ if (atomic_cmpxchg(&vdo->processing_message, 0, 1) != 0)
+ return -EBUSY;
+
+ result = process_vdo_message_locked(vdo, argc, argv);
+
+ /* Pairs with the implicit barrier in cmpxchg just above */
+ smp_wmb();
+ atomic_set(&vdo->processing_message, 0);
+ return result;
+}
+
+static int vdo_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result_buffer, unsigned int maxlen)
+{
+ struct registered_thread allocating_thread, instance_thread;
+ struct vdo *vdo;
+ int result;
+
+ if (argc == 0) {
+ vdo_log_warning("unspecified dmsetup message");
+ return -EINVAL;
+ }
+
+ vdo = get_vdo_for_target(ti);
+ vdo_register_allocating_thread(&allocating_thread, NULL);
+ vdo_register_thread_device_id(&instance_thread, &vdo->instance);
+
+ /*
+ * Must be done here so we don't map return codes. The code in dm-ioctl expects a 1 for a
+ * return code to look at the buffer and see if it is full or not.
+ */
+ if ((argc == 1) && (strcasecmp(argv[0], "stats") == 0)) {
+ vdo_write_stats(vdo, result_buffer, maxlen);
+ result = 1;
+ } else {
+ result = vdo_status_to_errno(process_vdo_message(vdo, argc, argv));
+ }
+
+ vdo_unregister_thread_device_id();
+ vdo_unregister_allocating_thread();
+ return result;
+}
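+
+/*
+ * For example (hypothetical device name), "dmsetup message vdo0 0 stats" takes the
+ * special path above: the statistics text is written directly into result_buffer and 1
+ * is returned so that dm-ioctl examines the buffer rather than mapping a return code.
+ */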
+
+static void configure_target_capabilities(struct dm_target *ti)
+{
+	ti->discards_supported = true;
+ ti->flush_supported = true;
+ ti->num_discard_bios = 1;
+ ti->num_flush_bios = 1;
+
+ /*
+ * If this value changes, please make sure to update the value for max_discard_sectors
+ * accordingly.
+ */
+ BUG_ON(dm_set_target_max_io_len(ti, VDO_SECTORS_PER_BLOCK) != 0);
+}
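+
+/*
+ * For context: VDO uses a 4096-byte block, so VDO_SECTORS_PER_BLOCK is 8 512-byte
+ * sectors, and the max_io_len set above makes device-mapper split incoming bios on
+ * 4 KB boundaries.
+ */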
+
+/*
+ * Implements vdo_filter_fn.
+ */
+static bool vdo_uses_device(struct vdo *vdo, const void *context)
+{
+ const struct device_config *config = context;
+
+ return vdo_get_backing_device(vdo)->bd_dev == config->owned_device->bdev->bd_dev;
+}
+
+/**
+ * get_thread_id_for_phase() - Get the thread id for the current phase of the admin operation in
+ * progress.
+ */
+static thread_id_t __must_check get_thread_id_for_phase(struct vdo *vdo)
+{
+ switch (vdo->admin.phase) {
+ case RESUME_PHASE_PACKER:
+ case RESUME_PHASE_FLUSHER:
+ case SUSPEND_PHASE_PACKER:
+ case SUSPEND_PHASE_FLUSHES:
+ return vdo->thread_config.packer_thread;
+
+ case RESUME_PHASE_DATA_VIOS:
+ case SUSPEND_PHASE_DATA_VIOS:
+ return vdo->thread_config.cpu_thread;
+
+ case LOAD_PHASE_DRAIN_JOURNAL:
+ case RESUME_PHASE_JOURNAL:
+ case SUSPEND_PHASE_JOURNAL:
+ return vdo->thread_config.journal_thread;
+
+ default:
+ return vdo->thread_config.admin_thread;
+ }
+}
+
+static struct vdo_completion *prepare_admin_completion(struct vdo *vdo,
+ vdo_action_fn callback,
+ vdo_action_fn error_handler)
+{
+ struct vdo_completion *completion = &vdo->admin.completion;
+
+ /*
+ * We can't use vdo_prepare_completion_for_requeue() here because we don't want to reset
+ * any error in the completion.
+ */
+ completion->callback = callback;
+ completion->error_handler = error_handler;
+ completion->callback_thread_id = get_thread_id_for_phase(vdo);
+ completion->requeue = true;
+ return completion;
+}
+
+/**
+ * advance_phase() - Increment the phase of the current admin operation and prepare the admin
+ * completion to run on the thread for the next phase.
+ * @vdo: The vdo on which an admin operation is being performed.
+ *
+ * Return: The current phase
+ */
+static u32 advance_phase(struct vdo *vdo)
+{
+ u32 phase = vdo->admin.phase++;
+
+ vdo->admin.completion.callback_thread_id = get_thread_id_for_phase(vdo);
+ vdo->admin.completion.requeue = true;
+ return phase;
+}
+
+/*
+ * Perform an administrative operation (load, suspend, grow logical, or grow physical). This method
+ * should not be called from vdo threads.
+ */
+static int perform_admin_operation(struct vdo *vdo, u32 starting_phase,
+ vdo_action_fn callback, vdo_action_fn error_handler,
+ const char *type)
+{
+ int result;
+ struct vdo_administrator *admin = &vdo->admin;
+
+ if (atomic_cmpxchg(&admin->busy, 0, 1) != 0) {
+ return vdo_log_error_strerror(VDO_COMPONENT_BUSY,
+ "Can't start %s operation, another operation is already in progress",
+ type);
+ }
+
+ admin->phase = starting_phase;
+ reinit_completion(&admin->callback_sync);
+ vdo_reset_completion(&admin->completion);
+ vdo_launch_completion(prepare_admin_completion(vdo, callback, error_handler));
+
+ /*
+ * Using the "interruptible" interface means that Linux will not log a message when we wait
+ * for more than 120 seconds.
+ */
+ while (wait_for_completion_interruptible(&admin->callback_sync)) {
+ /* However, if we get a signal in a user-mode process, we could spin... */
+ fsleep(1000);
+ }
+
+ result = admin->completion.result;
+ /* pairs with implicit barrier in cmpxchg above */
+ smp_wmb();
+ atomic_set(&admin->busy, 0);
+ return result;
+}
+
+/* Assert that we are operating on the correct thread for the current phase. */
+static void assert_admin_phase_thread(struct vdo *vdo, const char *what)
+{
+ VDO_ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo),
+ "%s on correct thread for %s", what,
+ ADMIN_PHASE_NAMES[vdo->admin.phase]);
+}
+
+/**
+ * finish_operation_callback() - Callback to finish an admin operation.
+ * @completion: The admin_completion.
+ */
+static void finish_operation_callback(struct vdo_completion *completion)
+{
+ struct vdo_administrator *admin = &completion->vdo->admin;
+
+ vdo_finish_operation(&admin->state, completion->result);
+ complete(&admin->callback_sync);
+}
+
+/**
+ * decode_from_super_block() - Decode the VDO state from the super block and validate that it is
+ * correct.
+ * @vdo: The vdo being loaded.
+ *
+ * On error from this method, the component states must be destroyed explicitly. If this method
+ * returns successfully, the component states must not be destroyed.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check decode_from_super_block(struct vdo *vdo)
+{
+ const struct device_config *config = vdo->device_config;
+ int result;
+
+ result = vdo_decode_component_states(vdo->super_block.buffer, &vdo->geometry,
+ &vdo->states);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_set_state(vdo, vdo->states.vdo.state);
+ vdo->load_state = vdo->states.vdo.state;
+
+ /*
+ * If the device config specifies a larger logical size than was recorded in the super
+ * block, just accept it.
+ */
+ if (vdo->states.vdo.config.logical_blocks < config->logical_blocks) {
+ vdo_log_warning("Growing logical size: a logical size of %llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block",
+ (unsigned long long) config->logical_blocks,
+ (unsigned long long) vdo->states.vdo.config.logical_blocks);
+ vdo->states.vdo.config.logical_blocks = config->logical_blocks;
+ }
+
+ result = vdo_validate_component_states(&vdo->states, vdo->geometry.nonce,
+ config->physical_blocks,
+ config->logical_blocks);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo->layout = vdo->states.layout;
+ return VDO_SUCCESS;
+}
+
+/**
+ * decode_vdo() - Decode the component data portion of a super block and fill in the corresponding
+ * portions of the vdo being loaded.
+ * @vdo: The vdo being loaded.
+ *
+ * This will also allocate the recovery journal and slab depot. If this method is called with an
+ * asynchronous layer (i.e. a thread config which specifies at least one base thread), the block
+ * map and packer will be constructed as well.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check decode_vdo(struct vdo *vdo)
+{
+ block_count_t maximum_age, journal_length;
+ struct partition *partition;
+ int result;
+
+ result = decode_from_super_block(vdo);
+ if (result != VDO_SUCCESS) {
+ vdo_destroy_component_states(&vdo->states);
+ return result;
+ }
+
+ maximum_age = vdo_convert_maximum_age(vdo->device_config->block_map_maximum_age);
+ journal_length =
+ vdo_get_recovery_journal_length(vdo->states.vdo.config.recovery_journal_size);
+ if (maximum_age > (journal_length / 2)) {
+ return vdo_log_error_strerror(VDO_BAD_CONFIGURATION,
+ "maximum age: %llu exceeds limit %llu",
+ (unsigned long long) maximum_age,
+ (unsigned long long) (journal_length / 2));
+ }
+
+ if (maximum_age == 0) {
+ return vdo_log_error_strerror(VDO_BAD_CONFIGURATION,
+ "maximum age must be greater than 0");
+ }
+
+ result = vdo_enable_read_only_entry(vdo);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ partition = vdo_get_known_partition(&vdo->layout,
+ VDO_RECOVERY_JOURNAL_PARTITION);
+ result = vdo_decode_recovery_journal(vdo->states.recovery_journal,
+ vdo->states.vdo.nonce, vdo, partition,
+ vdo->states.vdo.complete_recoveries,
+ vdo->states.vdo.config.recovery_journal_size,
+ &vdo->recovery_journal);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ partition = vdo_get_known_partition(&vdo->layout, VDO_SLAB_SUMMARY_PARTITION);
+ result = vdo_decode_slab_depot(vdo->states.slab_depot, vdo, partition,
+ &vdo->depot);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_decode_block_map(vdo->states.block_map,
+ vdo->states.vdo.config.logical_blocks, vdo,
+ vdo->recovery_journal, vdo->states.vdo.nonce,
+ vdo->device_config->cache_size, maximum_age,
+ &vdo->block_map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_make_physical_zones(vdo, &vdo->physical_zones);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* The logical zones depend on the physical zones already existing. */
+ result = vdo_make_logical_zones(vdo, &vdo->logical_zones);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return vdo_make_hash_zones(vdo, &vdo->hash_zones);
+}
+
+/**
+ * pre_load_callback() - Callback to initiate a pre-load, registered in vdo_initialize().
+ * @completion: The admin completion.
+ */
+static void pre_load_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ int result;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ switch (advance_phase(vdo)) {
+ case PRE_LOAD_PHASE_START:
+ result = vdo_start_operation(&vdo->admin.state,
+ VDO_ADMIN_STATE_PRE_LOADING);
+ if (result != VDO_SUCCESS) {
+ vdo_continue_completion(completion, result);
+ return;
+ }
+
+ vdo_load_super_block(vdo, completion);
+ return;
+
+ case PRE_LOAD_PHASE_LOAD_COMPONENTS:
+ vdo_continue_completion(completion, decode_vdo(vdo));
+ return;
+
+ case PRE_LOAD_PHASE_END:
+ break;
+
+ default:
+ vdo_set_completion_result(completion, UDS_BAD_STATE);
+ }
+
+ finish_operation_callback(completion);
+}
+
+static void release_instance(unsigned int instance)
+{
+ mutex_lock(&instances_lock);
+ if (instance >= instances.bit_count) {
+ VDO_ASSERT_LOG_ONLY(false,
+ "instance number %u must be less than bit count %u",
+ instance, instances.bit_count);
+ } else if (test_bit(instance, instances.words) == 0) {
+ VDO_ASSERT_LOG_ONLY(false, "instance number %u must be allocated", instance);
+ } else {
+ __clear_bit(instance, instances.words);
+ instances.count -= 1;
+ }
+ mutex_unlock(&instances_lock);
+}
+
+static void set_device_config(struct dm_target *ti, struct vdo *vdo,
+ struct device_config *config)
+{
+ list_del_init(&config->config_list);
+ list_add_tail(&config->config_list, &vdo->device_config_list);
+ config->vdo = vdo;
+ ti->private = config;
+ configure_target_capabilities(ti);
+}
+
+static int vdo_initialize(struct dm_target *ti, unsigned int instance,
+ struct device_config *config)
+{
+ struct vdo *vdo;
+ int result;
+ u64 block_size = VDO_BLOCK_SIZE;
+ u64 logical_size = to_bytes(ti->len);
+ block_count_t logical_blocks = logical_size / block_size;
+
+ vdo_log_info("loading device '%s'", vdo_get_device_name(ti));
+ vdo_log_debug("Logical block size = %llu", (u64) config->logical_block_size);
+ vdo_log_debug("Logical blocks = %llu", logical_blocks);
+ vdo_log_debug("Physical block size = %llu", (u64) block_size);
+ vdo_log_debug("Physical blocks = %llu", config->physical_blocks);
+ vdo_log_debug("Block map cache blocks = %u", config->cache_size);
+ vdo_log_debug("Block map maximum age = %u", config->block_map_maximum_age);
+ vdo_log_debug("Deduplication = %s", (config->deduplication ? "on" : "off"));
+ vdo_log_debug("Compression = %s", (config->compression ? "on" : "off"));
+
+ vdo = vdo_find_matching(vdo_uses_device, config);
+ if (vdo != NULL) {
+ vdo_log_error("Existing vdo already uses device %s",
+ vdo->device_config->parent_device_name);
+ ti->error = "Cannot share storage device with already-running VDO";
+ return VDO_BAD_CONFIGURATION;
+ }
+
+ result = vdo_make(instance, config, &ti->error, &vdo);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error("Could not create VDO device. (VDO error %d, message %s)",
+ result, ti->error);
+ vdo_destroy(vdo);
+ return result;
+ }
+
+ result = perform_admin_operation(vdo, PRE_LOAD_PHASE_START, pre_load_callback,
+ finish_operation_callback, "pre-load");
+ if (result != VDO_SUCCESS) {
+ ti->error = ((result == VDO_INVALID_ADMIN_STATE) ?
+ "Pre-load is only valid immediately after initialization" :
+ "Cannot load metadata from device");
+ vdo_log_error("Could not start VDO device. (VDO error %d, message %s)",
+ result, ti->error);
+ vdo_destroy(vdo);
+ return result;
+ }
+
+ set_device_config(ti, vdo, config);
+ vdo->device_config = config;
+ return VDO_SUCCESS;
+}
+
+/* Implements vdo_filter_fn. */
+static bool __must_check vdo_is_named(struct vdo *vdo, const void *context)
+{
+ struct dm_target *ti = vdo->device_config->owning_target;
+ const char *device_name = vdo_get_device_name(ti);
+
+ return strcmp(device_name, context) == 0;
+}
+
+/**
+ * get_bit_array_size() - Return the number of bytes needed to store a bit array of the specified
+ * capacity in an array of unsigned longs.
+ * @bit_count: The number of bits the array must hold.
+ *
+ * Return: the number of bytes needed for the array representation.
+ */
+static size_t get_bit_array_size(unsigned int bit_count)
+{
+ /* Round up to a multiple of the word size and convert to a byte count. */
+ return (BITS_TO_LONGS(bit_count) * sizeof(unsigned long));
+}
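+
+/*
+ * Worked example: on a 64-bit machine, a bit_count of 5 rounds up to one unsigned long
+ * (BITS_TO_LONGS(5) == 1), i.e. 8 bytes, while a bit_count of 65 needs two longs,
+ * i.e. 16 bytes.
+ */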
+
+/**
+ * grow_bit_array() - Re-allocate the bitmap word array so that more instance numbers can
+ * be allocated.
+ *
+ * Since the array is initially NULL, this also initializes the array the first time we allocate an
+ * instance number.
+ *
+ * Return: VDO_SUCCESS or an error code from the allocation
+ */
+static int grow_bit_array(void)
+{
+ unsigned int new_count = max(instances.bit_count + BIT_COUNT_INCREMENT,
+ (unsigned int) BIT_COUNT_MINIMUM);
+ unsigned long *new_words;
+ int result;
+
+ result = vdo_reallocate_memory(instances.words,
+ get_bit_array_size(instances.bit_count),
+ get_bit_array_size(new_count),
+ "instance number bit array", &new_words);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ instances.bit_count = new_count;
+ instances.words = new_words;
+ return VDO_SUCCESS;
+}
+
+/**
+ * allocate_instance() - Allocate an instance number.
+ * @instance_ptr: A pointer to hold the instance number.
+ *
+ * Return: VDO_SUCCESS or an error code
+ *
+ * This function must be called while holding the instances lock.
+ */
+static int allocate_instance(unsigned int *instance_ptr)
+{
+ unsigned int instance;
+ int result;
+
+ /* If there are no unallocated instances, grow the bit array. */
+ if (instances.count >= instances.bit_count) {
+ result = grow_bit_array();
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ /*
+ * There must be a zero bit somewhere now. Find it, starting just after the last instance
+ * allocated.
+ */
+ instance = find_next_zero_bit(instances.words, instances.bit_count,
+ instances.next);
+ if (instance >= instances.bit_count) {
+ /* Nothing free after next, so wrap around to instance zero. */
+ instance = find_first_zero_bit(instances.words, instances.bit_count);
+ result = VDO_ASSERT(instance < instances.bit_count,
+ "impossibly, no zero bit found");
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ __set_bit(instance, instances.words);
+ instances.count++;
+ instances.next = instance + 1;
+ *instance_ptr = instance;
+ return VDO_SUCCESS;
+}
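+
+/*
+ * Illustrative walk-through: with bits 0..2 set and instances.next == 3, bit 3 is
+ * allocated and next becomes 4. If no zero bit exists at or after next, the search
+ * wraps around via find_first_zero_bit(); the VDO_ASSERT can only fire if the count
+ * and the bitmap disagree.
+ */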
+
+static int construct_new_vdo_registered(struct dm_target *ti, unsigned int argc,
+ char **argv, unsigned int instance)
+{
+ int result;
+ struct device_config *config;
+
+ result = parse_device_config(argc, argv, ti, &config);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error_strerror(result, "parsing failed: %s", ti->error);
+ release_instance(instance);
+ return -EINVAL;
+ }
+
+	/* If initialization fails, we must still release the instance and free the config
+	 * ourselves; once it succeeds, vdo_dtr() handles that cleanup. */
+ result = vdo_initialize(ti, instance, config);
+ if (result != VDO_SUCCESS) {
+ release_instance(instance);
+ free_device_config(config);
+ return vdo_status_to_errno(result);
+ }
+
+ return VDO_SUCCESS;
+}
+
+static int construct_new_vdo(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ int result;
+ unsigned int instance;
+ struct registered_thread instance_thread;
+
+ mutex_lock(&instances_lock);
+ result = allocate_instance(&instance);
+ mutex_unlock(&instances_lock);
+ if (result != VDO_SUCCESS)
+ return -ENOMEM;
+
+ vdo_register_thread_device_id(&instance_thread, &instance);
+ result = construct_new_vdo_registered(ti, argc, argv, instance);
+ vdo_unregister_thread_device_id();
+ return result;
+}
+
+/**
+ * check_may_grow_physical() - Callback to check that we're not in recovery mode, used in
+ * vdo_prepare_to_grow_physical().
+ * @completion: The admin completion.
+ */
+static void check_may_grow_physical(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ /* These checks can only be done from a vdo thread. */
+ if (vdo_is_read_only(vdo))
+ vdo_set_completion_result(completion, VDO_READ_ONLY);
+
+ if (vdo_in_recovery_mode(vdo))
+ vdo_set_completion_result(completion, VDO_RETRY_AFTER_REBUILD);
+
+ finish_operation_callback(completion);
+}
+
+static block_count_t get_partition_size(struct layout *layout, enum partition_id id)
+{
+ return vdo_get_known_partition(layout, id)->count;
+}
+
+/**
+ * grow_layout() - Make the layout for growing a vdo.
+ * @vdo: The vdo preparing to grow.
+ * @old_size: The current size of the vdo.
+ * @new_size: The size to which the vdo will be grown.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int grow_layout(struct vdo *vdo, block_count_t old_size, block_count_t new_size)
+{
+ int result;
+ block_count_t min_new_size;
+
+ if (vdo->next_layout.size == new_size) {
+ /* We are already prepared to grow to the new size, so we're done. */
+ return VDO_SUCCESS;
+ }
+
+ /* Make a copy completion if there isn't one */
+ if (vdo->partition_copier == NULL) {
+ vdo->partition_copier = dm_kcopyd_client_create(NULL);
+ if (IS_ERR(vdo->partition_copier)) {
+ result = PTR_ERR(vdo->partition_copier);
+ vdo->partition_copier = NULL;
+ return result;
+ }
+ }
+
+ /* Free any unused preparation. */
+ vdo_uninitialize_layout(&vdo->next_layout);
+
+ /*
+ * Make a new layout with the existing partition sizes for everything but the slab depot
+ * partition.
+ */
+ result = vdo_initialize_layout(new_size, vdo->layout.start,
+ get_partition_size(&vdo->layout,
+ VDO_BLOCK_MAP_PARTITION),
+ get_partition_size(&vdo->layout,
+ VDO_RECOVERY_JOURNAL_PARTITION),
+ get_partition_size(&vdo->layout,
+ VDO_SLAB_SUMMARY_PARTITION),
+ &vdo->next_layout);
+ if (result != VDO_SUCCESS) {
+ dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier));
+ return result;
+ }
+
+ /* Ensure the new journal and summary are entirely within the added blocks. */
+ min_new_size = (old_size +
+ get_partition_size(&vdo->next_layout,
+ VDO_SLAB_SUMMARY_PARTITION) +
+ get_partition_size(&vdo->next_layout,
+ VDO_RECOVERY_JOURNAL_PARTITION));
+ if (min_new_size > new_size) {
+ /* Copying the journal and summary would destroy some old metadata. */
+ vdo_uninitialize_layout(&vdo->next_layout);
+ dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier));
+ return VDO_INCREMENT_TOO_SMALL;
+ }
+
+ return VDO_SUCCESS;
+}
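+
+/*
+ * Worked example (made-up sizes): growing from an old_size of 1000 blocks with a
+ * 32-block journal and a 64-block slab summary gives a min_new_size of 1096, so a
+ * requested new_size of 1050 fails with VDO_INCREMENT_TOO_SMALL, since copying those
+ * partitions into the new layout would overwrite old metadata.
+ */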
+
+static int prepare_to_grow_physical(struct vdo *vdo, block_count_t new_physical_blocks)
+{
+ int result;
+ block_count_t current_physical_blocks = vdo->states.vdo.config.physical_blocks;
+
+ vdo_log_info("Preparing to resize physical to %llu",
+ (unsigned long long) new_physical_blocks);
+ VDO_ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks),
+ "New physical size is larger than current physical size");
+ result = perform_admin_operation(vdo, PREPARE_GROW_PHYSICAL_PHASE_START,
+ check_may_grow_physical,
+ finish_operation_callback,
+ "prepare grow-physical");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = grow_layout(vdo, current_physical_blocks, new_physical_blocks);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_prepare_to_grow_slab_depot(vdo->depot,
+ vdo_get_known_partition(&vdo->next_layout,
+ VDO_SLAB_DEPOT_PARTITION));
+ if (result != VDO_SUCCESS) {
+ vdo_uninitialize_layout(&vdo->next_layout);
+ return result;
+ }
+
+ vdo_log_info("Done preparing to resize physical");
+ return VDO_SUCCESS;
+}
+
+/**
+ * validate_new_device_config() - Check whether a new device config represents a valid modification
+ * to an existing config.
+ * @to_validate: The new config to validate.
+ * @config: The existing config.
+ * @may_grow: Set to true if growing the logical and physical size of the vdo is currently
+ * permitted.
+ * @error_ptr: A pointer to hold the reason for any error.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int validate_new_device_config(struct device_config *to_validate,
+ struct device_config *config, bool may_grow,
+ char **error_ptr)
+{
+ if (to_validate->owning_target->begin != config->owning_target->begin) {
+ *error_ptr = "Starting sector cannot change";
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (to_validate->logical_block_size != config->logical_block_size) {
+ *error_ptr = "Logical block size cannot change";
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (to_validate->logical_blocks < config->logical_blocks) {
+ *error_ptr = "Can't shrink VDO logical size";
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (to_validate->cache_size != config->cache_size) {
+ *error_ptr = "Block map cache size cannot change";
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (to_validate->block_map_maximum_age != config->block_map_maximum_age) {
+ *error_ptr = "Block map maximum age cannot change";
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (memcmp(&to_validate->thread_counts, &config->thread_counts,
+ sizeof(struct thread_count_config)) != 0) {
+ *error_ptr = "Thread configuration cannot change";
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (to_validate->physical_blocks < config->physical_blocks) {
+ *error_ptr = "Removing physical storage from a VDO is not supported";
+ return VDO_NOT_IMPLEMENTED;
+ }
+
+ if (!may_grow && (to_validate->physical_blocks > config->physical_blocks)) {
+ *error_ptr = "VDO physical size may not grow in current state";
+ return VDO_NOT_IMPLEMENTED;
+ }
+
+ return VDO_SUCCESS;
+}
+
+static int prepare_to_modify(struct dm_target *ti, struct device_config *config,
+ struct vdo *vdo)
+{
+ int result;
+ bool may_grow = (vdo_get_admin_state(vdo) != VDO_ADMIN_STATE_PRE_LOADED);
+
+ result = validate_new_device_config(config, vdo->device_config, may_grow,
+ &ti->error);
+ if (result != VDO_SUCCESS)
+ return -EINVAL;
+
+ if (config->logical_blocks > vdo->device_config->logical_blocks) {
+ block_count_t logical_blocks = vdo->states.vdo.config.logical_blocks;
+
+ vdo_log_info("Preparing to resize logical to %llu",
+ (unsigned long long) config->logical_blocks);
+ VDO_ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks),
+ "New logical size is larger than current size");
+
+ result = vdo_prepare_to_grow_block_map(vdo->block_map,
+ config->logical_blocks);
+ if (result != VDO_SUCCESS) {
+ ti->error = "Device vdo_prepare_to_grow_logical failed";
+ return result;
+ }
+
+ vdo_log_info("Done preparing to resize logical");
+ }
+
+ if (config->physical_blocks > vdo->device_config->physical_blocks) {
+ result = prepare_to_grow_physical(vdo, config->physical_blocks);
+ if (result != VDO_SUCCESS) {
+ if (result == VDO_PARAMETER_MISMATCH) {
+ /*
+ * If we don't trap this case, vdo_status_to_errno() will remap
+ * it to -EIO, which is misleading and ahistorical.
+ */
+ result = -EINVAL;
+ }
+
+ if (result == VDO_TOO_MANY_SLABS)
+ ti->error = "Device vdo_prepare_to_grow_physical failed (specified physical size too big based on formatted slab size)";
+ else
+ ti->error = "Device vdo_prepare_to_grow_physical failed";
+
+ return result;
+ }
+ }
+
+ if (strcmp(config->parent_device_name, vdo->device_config->parent_device_name) != 0) {
+ const char *device_name = vdo_get_device_name(config->owning_target);
+
+ vdo_log_info("Updating backing device of %s from %s to %s", device_name,
+ vdo->device_config->parent_device_name,
+ config->parent_device_name);
+ }
+
+ return VDO_SUCCESS;
+}
+
+static int update_existing_vdo(const char *device_name, struct dm_target *ti,
+ unsigned int argc, char **argv, struct vdo *vdo)
+{
+ int result;
+ struct device_config *config;
+
+ result = parse_device_config(argc, argv, ti, &config);
+ if (result != VDO_SUCCESS)
+ return -EINVAL;
+
+ vdo_log_info("preparing to modify device '%s'", device_name);
+ result = prepare_to_modify(ti, config, vdo);
+ if (result != VDO_SUCCESS) {
+ free_device_config(config);
+ return vdo_status_to_errno(result);
+ }
+
+ set_device_config(ti, vdo, config);
+ return VDO_SUCCESS;
+}
+
+static int vdo_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ int result;
+ struct registered_thread allocating_thread, instance_thread;
+ const char *device_name;
+ struct vdo *vdo;
+
+ vdo_register_allocating_thread(&allocating_thread, NULL);
+ device_name = vdo_get_device_name(ti);
+ vdo = vdo_find_matching(vdo_is_named, device_name);
+ if (vdo == NULL) {
+ result = construct_new_vdo(ti, argc, argv);
+ } else {
+ vdo_register_thread_device_id(&instance_thread, &vdo->instance);
+ result = update_existing_vdo(device_name, ti, argc, argv, vdo);
+ vdo_unregister_thread_device_id();
+ }
+
+ vdo_unregister_allocating_thread();
+ return result;
+}
+
+static void vdo_dtr(struct dm_target *ti)
+{
+ struct device_config *config = ti->private;
+ struct vdo *vdo = vdo_forget(config->vdo);
+
+ list_del_init(&config->config_list);
+ if (list_empty(&vdo->device_config_list)) {
+ const char *device_name;
+
+ /* This was the last config referencing the VDO. Free it. */
+ unsigned int instance = vdo->instance;
+ struct registered_thread allocating_thread, instance_thread;
+
+ vdo_register_thread_device_id(&instance_thread, &instance);
+ vdo_register_allocating_thread(&allocating_thread, NULL);
+
+ device_name = vdo_get_device_name(ti);
+ vdo_log_info("stopping device '%s'", device_name);
+ if (vdo->dump_on_shutdown)
+ vdo_dump_all(vdo, "device shutdown");
+
+ vdo_destroy(vdo_forget(vdo));
+ vdo_log_info("device '%s' stopped", device_name);
+ vdo_unregister_thread_device_id();
+ vdo_unregister_allocating_thread();
+ release_instance(instance);
+ } else if (config == vdo->device_config) {
+ /*
+ * The VDO still references this config. Give it a reference to a config that isn't
+ * being destroyed.
+ */
+ vdo->device_config = list_first_entry(&vdo->device_config_list,
+ struct device_config, config_list);
+ }
+
+ free_device_config(config);
+ ti->private = NULL;
+}
+
+static void vdo_presuspend(struct dm_target *ti)
+{
+ get_vdo_for_target(ti)->suspend_type =
+ (dm_noflush_suspending(ti) ? VDO_ADMIN_STATE_SUSPENDING : VDO_ADMIN_STATE_SAVING);
+}
+
+/**
+ * write_super_block_for_suspend() - Update the VDO state and save the super block.
+ * @completion: The admin completion
+ */
+static void write_super_block_for_suspend(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+
+ switch (vdo_get_state(vdo)) {
+ case VDO_DIRTY:
+ case VDO_NEW:
+ vdo_set_state(vdo, VDO_CLEAN);
+ break;
+
+ case VDO_CLEAN:
+ case VDO_READ_ONLY_MODE:
+ case VDO_FORCE_REBUILD:
+ case VDO_RECOVERING:
+ case VDO_REBUILD_FOR_UPGRADE:
+ break;
+
+ case VDO_REPLAYING:
+ default:
+ vdo_continue_completion(completion, UDS_BAD_STATE);
+ return;
+ }
+
+ vdo_save_components(vdo, completion);
+}
+
+/**
+ * suspend_callback() - Callback to initiate a suspend, registered in vdo_postsuspend().
+ * @completion: The sub-task completion.
+ */
+static void suspend_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ struct admin_state *state = &vdo->admin.state;
+ int result;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ switch (advance_phase(vdo)) {
+ case SUSPEND_PHASE_START:
+ if (vdo_get_admin_state_code(state)->quiescent) {
+ /* Already suspended */
+ break;
+ }
+
+ vdo_continue_completion(completion,
+ vdo_start_operation(state, vdo->suspend_type));
+ return;
+
+ case SUSPEND_PHASE_PACKER:
+ /*
+ * If the VDO was already resumed from a prior suspend while read-only, some of the
+ * components may not have been resumed. By setting a read-only error here, we
+ * guarantee that the result of this suspend will be VDO_READ_ONLY and not
+ * VDO_INVALID_ADMIN_STATE in that case.
+ */
+ if (vdo_in_read_only_mode(vdo))
+ vdo_set_completion_result(completion, VDO_READ_ONLY);
+
+ vdo_drain_packer(vdo->packer, completion);
+ return;
+
+ case SUSPEND_PHASE_DATA_VIOS:
+ drain_data_vio_pool(vdo->data_vio_pool, completion);
+ return;
+
+ case SUSPEND_PHASE_DEDUPE:
+ vdo_drain_hash_zones(vdo->hash_zones, completion);
+ return;
+
+ case SUSPEND_PHASE_FLUSHES:
+ vdo_drain_flusher(vdo->flusher, completion);
+ return;
+
+ case SUSPEND_PHASE_LOGICAL_ZONES:
+ /*
+ * Attempt to flush all I/O before completing post suspend work. We believe a
+ * suspended device is expected to have persisted all data written before the
+ * suspend, even if it hasn't been flushed yet.
+ */
+ result = vdo_synchronous_flush(vdo);
+ if (result != VDO_SUCCESS)
+ vdo_enter_read_only_mode(vdo, result);
+
+ vdo_drain_logical_zones(vdo->logical_zones,
+ vdo_get_admin_state_code(state), completion);
+ return;
+
+ case SUSPEND_PHASE_BLOCK_MAP:
+ vdo_drain_block_map(vdo->block_map, vdo_get_admin_state_code(state),
+ completion);
+ return;
+
+ case SUSPEND_PHASE_JOURNAL:
+ vdo_drain_recovery_journal(vdo->recovery_journal,
+ vdo_get_admin_state_code(state), completion);
+ return;
+
+ case SUSPEND_PHASE_DEPOT:
+ vdo_drain_slab_depot(vdo->depot, vdo_get_admin_state_code(state),
+ completion);
+ return;
+
+ case SUSPEND_PHASE_READ_ONLY_WAIT:
+ vdo_wait_until_not_entering_read_only_mode(completion);
+ return;
+
+ case SUSPEND_PHASE_WRITE_SUPER_BLOCK:
+ if (vdo_is_state_suspending(state) || (completion->result != VDO_SUCCESS)) {
+ /* If we didn't save the VDO or there was an error, we're done. */
+ break;
+ }
+
+ write_super_block_for_suspend(completion);
+ return;
+
+ case SUSPEND_PHASE_END:
+ break;
+
+ default:
+ vdo_set_completion_result(completion, UDS_BAD_STATE);
+ }
+
+ finish_operation_callback(completion);
+}
+
+static void vdo_postsuspend(struct dm_target *ti)
+{
+ struct vdo *vdo = get_vdo_for_target(ti);
+ struct registered_thread instance_thread;
+ const char *device_name;
+ int result;
+
+ vdo_register_thread_device_id(&instance_thread, &vdo->instance);
+ device_name = vdo_get_device_name(vdo->device_config->owning_target);
+ vdo_log_info("suspending device '%s'", device_name);
+
+ /*
+ * It's important to note any error here does not actually stop device-mapper from
+ * suspending the device. All this work is done post suspend.
+ */
+ result = perform_admin_operation(vdo, SUSPEND_PHASE_START, suspend_callback,
+ suspend_callback, "suspend");
+
+ if ((result == VDO_SUCCESS) || (result == VDO_READ_ONLY)) {
+ /*
+ * Treat VDO_READ_ONLY as a success since a read-only suspension still leaves the
+ * VDO suspended.
+ */
+ vdo_log_info("device '%s' suspended", device_name);
+ } else if (result == VDO_INVALID_ADMIN_STATE) {
+ vdo_log_error("Suspend invoked while in unexpected state: %s",
+ vdo_get_admin_state(vdo)->name);
+ } else {
+ vdo_log_error_strerror(result, "Suspend of device '%s' failed",
+ device_name);
+ }
+
+ vdo_unregister_thread_device_id();
+}
+
+/**
+ * was_new() - Check whether the vdo was new when it was loaded.
+ * @vdo: The vdo to query.
+ *
+ * Return: true if the vdo was new.
+ */
+static bool was_new(const struct vdo *vdo)
+{
+ return (vdo->load_state == VDO_NEW);
+}
+
+/**
+ * requires_repair() - Check whether a vdo requires recovery or rebuild.
+ * @vdo: The vdo to query.
+ *
+ * Return: true if the vdo must be repaired.
+ */
+static bool __must_check requires_repair(const struct vdo *vdo)
+{
+ switch (vdo_get_state(vdo)) {
+ case VDO_DIRTY:
+ case VDO_FORCE_REBUILD:
+ case VDO_REPLAYING:
+ case VDO_REBUILD_FOR_UPGRADE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/**
+ * get_load_type() - Determine how the slab depot was loaded.
+ * @vdo: The vdo.
+ *
+ * Return: How the depot was loaded.
+ */
+static enum slab_depot_load_type get_load_type(struct vdo *vdo)
+{
+ if (vdo_state_requires_read_only_rebuild(vdo->load_state))
+ return VDO_SLAB_DEPOT_REBUILD_LOAD;
+
+ if (vdo_state_requires_recovery(vdo->load_state))
+ return VDO_SLAB_DEPOT_RECOVERY_LOAD;
+
+ return VDO_SLAB_DEPOT_NORMAL_LOAD;
+}
+
+/**
+ * load_callback() - Callback to do the destructive parts of loading a VDO.
+ * @completion: The sub-task completion.
+ */
+static void load_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ int result;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ switch (advance_phase(vdo)) {
+ case LOAD_PHASE_START:
+ result = vdo_start_operation(&vdo->admin.state, VDO_ADMIN_STATE_LOADING);
+ if (result != VDO_SUCCESS) {
+ vdo_continue_completion(completion, result);
+ return;
+ }
+
+ /* Prepare the recovery journal for new entries. */
+ vdo_open_recovery_journal(vdo->recovery_journal, vdo->depot,
+ vdo->block_map);
+ vdo_allow_read_only_mode_entry(completion);
+ return;
+
+ case LOAD_PHASE_LOAD_DEPOT:
+ vdo_set_dedupe_state_normal(vdo->hash_zones);
+ if (vdo_is_read_only(vdo)) {
+ /*
+ * In read-only mode we don't use the allocator and it may not even be
+ * readable, so don't bother trying to load it.
+ */
+ vdo_set_completion_result(completion, VDO_READ_ONLY);
+ break;
+ }
+
+ if (requires_repair(vdo)) {
+ vdo_repair(completion);
+ return;
+ }
+
+ vdo_load_slab_depot(vdo->depot,
+ (was_new(vdo) ? VDO_ADMIN_STATE_FORMATTING :
+ VDO_ADMIN_STATE_LOADING),
+ completion, NULL);
+ return;
+
+ case LOAD_PHASE_MAKE_DIRTY:
+ vdo_set_state(vdo, VDO_DIRTY);
+ vdo_save_components(vdo, completion);
+ return;
+
+ case LOAD_PHASE_PREPARE_TO_ALLOCATE:
+ vdo_initialize_block_map_from_journal(vdo->block_map,
+ vdo->recovery_journal);
+ vdo_prepare_slab_depot_to_allocate(vdo->depot, get_load_type(vdo),
+ completion);
+ return;
+
+ case LOAD_PHASE_SCRUB_SLABS:
+ if (vdo_state_requires_recovery(vdo->load_state))
+ vdo_enter_recovery_mode(vdo);
+
+ vdo_scrub_all_unrecovered_slabs(vdo->depot, completion);
+ return;
+
+ case LOAD_PHASE_DATA_REDUCTION:
+ WRITE_ONCE(vdo->compressing, vdo->device_config->compression);
+ if (vdo->device_config->deduplication) {
+ /*
+ * Don't try to load or rebuild the index first (and log scary error
+ * messages) if this is known to be a newly-formatted volume.
+ */
+ vdo_start_dedupe_index(vdo->hash_zones, was_new(vdo));
+ }
+
+ vdo->allocations_allowed = false;
+ fallthrough;
+
+ case LOAD_PHASE_FINISHED:
+ break;
+
+ case LOAD_PHASE_DRAIN_JOURNAL:
+ vdo_drain_recovery_journal(vdo->recovery_journal, VDO_ADMIN_STATE_SAVING,
+ completion);
+ return;
+
+ case LOAD_PHASE_WAIT_FOR_READ_ONLY:
+ /* Avoid an infinite loop */
+ completion->error_handler = NULL;
+ vdo->admin.phase = LOAD_PHASE_FINISHED;
+ vdo_wait_until_not_entering_read_only_mode(completion);
+ return;
+
+ default:
+ vdo_set_completion_result(completion, UDS_BAD_STATE);
+ }
+
+ finish_operation_callback(completion);
+}
+
+/**
+ * handle_load_error() - Handle an error during the load operation.
+ * @completion: The admin completion.
+ *
+ * If at all possible, brings the vdo online in read-only mode. This handler is registered in
+ * vdo_preresume_registered().
+ */
+static void handle_load_error(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+
+ if (vdo_requeue_completion_if_needed(completion,
+ vdo->thread_config.admin_thread))
+ return;
+
+ if (vdo_state_requires_read_only_rebuild(vdo->load_state) &&
+ (vdo->admin.phase == LOAD_PHASE_MAKE_DIRTY)) {
+ vdo_log_error_strerror(completion->result, "aborting load");
+ vdo->admin.phase = LOAD_PHASE_DRAIN_JOURNAL;
+ load_callback(vdo_forget(completion));
+ return;
+ }
+
+ vdo_log_error_strerror(completion->result,
+ "Entering read-only mode due to load error");
+ vdo->admin.phase = LOAD_PHASE_WAIT_FOR_READ_ONLY;
+ vdo_enter_read_only_mode(vdo, completion->result);
+ completion->result = VDO_READ_ONLY;
+ load_callback(completion);
+}
+
+/**
+ * write_super_block_for_resume() - Update the VDO state and save the super block.
+ * @completion: The admin completion
+ */
+static void write_super_block_for_resume(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+
+ switch (vdo_get_state(vdo)) {
+ case VDO_CLEAN:
+ case VDO_NEW:
+ vdo_set_state(vdo, VDO_DIRTY);
+ vdo_save_components(vdo, completion);
+ return;
+
+ case VDO_DIRTY:
+ case VDO_READ_ONLY_MODE:
+ case VDO_FORCE_REBUILD:
+ case VDO_RECOVERING:
+ case VDO_REBUILD_FOR_UPGRADE:
+ /* No need to write the super block in these cases */
+ vdo_launch_completion(completion);
+ return;
+
+ case VDO_REPLAYING:
+ default:
+ vdo_continue_completion(completion, UDS_BAD_STATE);
+ }
+}
+
+/**
+ * resume_callback() - Callback to resume a VDO.
+ * @completion: The admin completion.
+ */
+static void resume_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ int result;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ switch (advance_phase(vdo)) {
+ case RESUME_PHASE_START:
+ result = vdo_start_operation(&vdo->admin.state,
+ VDO_ADMIN_STATE_RESUMING);
+ if (result != VDO_SUCCESS) {
+ vdo_continue_completion(completion, result);
+ return;
+ }
+
+ write_super_block_for_resume(completion);
+ return;
+
+ case RESUME_PHASE_ALLOW_READ_ONLY_MODE:
+ vdo_allow_read_only_mode_entry(completion);
+ return;
+
+ case RESUME_PHASE_DEDUPE:
+ vdo_resume_hash_zones(vdo->hash_zones, completion);
+ return;
+
+ case RESUME_PHASE_DEPOT:
+ vdo_resume_slab_depot(vdo->depot, completion);
+ return;
+
+ case RESUME_PHASE_JOURNAL:
+ vdo_resume_recovery_journal(vdo->recovery_journal, completion);
+ return;
+
+ case RESUME_PHASE_BLOCK_MAP:
+ vdo_resume_block_map(vdo->block_map, completion);
+ return;
+
+ case RESUME_PHASE_LOGICAL_ZONES:
+ vdo_resume_logical_zones(vdo->logical_zones, completion);
+ return;
+
+ case RESUME_PHASE_PACKER:
+ {
+ bool was_enabled = vdo_get_compressing(vdo);
+ bool enable = vdo->device_config->compression;
+
+ if (enable != was_enabled)
+ WRITE_ONCE(vdo->compressing, enable);
+ vdo_log_info("compression is %s", (enable ? "enabled" : "disabled"));
+
+ vdo_resume_packer(vdo->packer, completion);
+ return;
+ }
+
+ case RESUME_PHASE_FLUSHER:
+ vdo_resume_flusher(vdo->flusher, completion);
+ return;
+
+ case RESUME_PHASE_DATA_VIOS:
+ resume_data_vio_pool(vdo->data_vio_pool, completion);
+ return;
+
+ case RESUME_PHASE_END:
+ break;
+
+ default:
+ vdo_set_completion_result(completion, UDS_BAD_STATE);
+ }
+
+ finish_operation_callback(completion);
+}
+
+/**
+ * grow_logical_callback() - Callback to initiate a grow logical.
+ * @completion: The admin completion.
+ *
+ * Registered in perform_grow_logical().
+ */
+static void grow_logical_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ int result;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ switch (advance_phase(vdo)) {
+ case GROW_LOGICAL_PHASE_START:
+ if (vdo_is_read_only(vdo)) {
+ vdo_log_error_strerror(VDO_READ_ONLY,
+ "Can't grow logical size of a read-only VDO");
+ vdo_set_completion_result(completion, VDO_READ_ONLY);
+ break;
+ }
+
+ result = vdo_start_operation(&vdo->admin.state,
+ VDO_ADMIN_STATE_SUSPENDED_OPERATION);
+ if (result != VDO_SUCCESS) {
+ vdo_continue_completion(completion, result);
+ return;
+ }
+
+ vdo->states.vdo.config.logical_blocks = vdo->block_map->next_entry_count;
+ vdo_save_components(vdo, completion);
+ return;
+
+ case GROW_LOGICAL_PHASE_GROW_BLOCK_MAP:
+ vdo_grow_block_map(vdo->block_map, completion);
+ return;
+
+ case GROW_LOGICAL_PHASE_END:
+ break;
+
+ case GROW_LOGICAL_PHASE_ERROR:
+ vdo_enter_read_only_mode(vdo, completion->result);
+ break;
+
+ default:
+ vdo_set_completion_result(completion, UDS_BAD_STATE);
+ }
+
+ finish_operation_callback(completion);
+}
+
+/**
+ * handle_logical_growth_error() - Handle an error during the grow logical process.
+ * @completion: The admin completion.
+ */
+static void handle_logical_growth_error(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+
+ if (vdo->admin.phase == GROW_LOGICAL_PHASE_GROW_BLOCK_MAP) {
+ /*
+		 * We've failed to write the new size in the super block, so set our in-memory
+		 * config back to the old size.
+ */
+ vdo->states.vdo.config.logical_blocks = vdo->block_map->entry_count;
+ vdo_abandon_block_map_growth(vdo->block_map);
+ }
+
+ vdo->admin.phase = GROW_LOGICAL_PHASE_ERROR;
+ grow_logical_callback(completion);
+}
+
+/**
+ * perform_grow_logical() - Grow the logical size of the vdo.
+ * @vdo: The vdo to grow.
+ * @new_logical_blocks: The size to which the vdo should be grown.
+ *
+ * Context: This method may only be called when the vdo has been suspended and must not be called
+ * from a base thread.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int perform_grow_logical(struct vdo *vdo, block_count_t new_logical_blocks)
+{
+ int result;
+
+ if (vdo->device_config->logical_blocks == new_logical_blocks) {
+ /*
+ * A table was loaded for which we prepared to grow, but a table without that
+ * growth was what we are resuming with.
+ */
+ vdo_abandon_block_map_growth(vdo->block_map);
+ return VDO_SUCCESS;
+ }
+
+ vdo_log_info("Resizing logical to %llu",
+ (unsigned long long) new_logical_blocks);
+ if (vdo->block_map->next_entry_count != new_logical_blocks)
+ return VDO_PARAMETER_MISMATCH;
+
+ result = perform_admin_operation(vdo, GROW_LOGICAL_PHASE_START,
+ grow_logical_callback,
+ handle_logical_growth_error, "grow logical");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_log_info("Logical blocks now %llu", (unsigned long long) new_logical_blocks);
+ return VDO_SUCCESS;
+}
+
+static void copy_callback(int read_err, unsigned long write_err, void *context)
+{
+ struct vdo_completion *completion = context;
+ int result = (((read_err == 0) && (write_err == 0)) ? VDO_SUCCESS : -EIO);
+
+ vdo_continue_completion(completion, result);
+}
+
+static void partition_to_region(struct partition *partition, struct vdo *vdo,
+ struct dm_io_region *region)
+{
+ physical_block_number_t pbn = partition->offset - vdo->geometry.bio_offset;
+
+ *region = (struct dm_io_region) {
+ .bdev = vdo_get_backing_device(vdo),
+ .sector = pbn * VDO_SECTORS_PER_BLOCK,
+ .count = partition->count * VDO_SECTORS_PER_BLOCK,
+ };
+}
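+
+/*
+ * For example (made-up numbers): a partition at block offset 100 on a vdo with a bio
+ * offset of 1 maps to pbn 99, i.e. a region starting at sector 792 given 8 sectors
+ * per block.
+ */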
+
+/**
+ * copy_partition() - Copy a partition from the location specified in the current layout to that in
+ * the next layout.
+ * @vdo: The vdo preparing to grow.
+ * @id: The ID of the partition to copy.
+ * @parent: The completion to notify when the copy is complete.
+ */
+static void copy_partition(struct vdo *vdo, enum partition_id id,
+ struct vdo_completion *parent)
+{
+ struct dm_io_region read_region, write_regions[1];
+ struct partition *from = vdo_get_known_partition(&vdo->layout, id);
+ struct partition *to = vdo_get_known_partition(&vdo->next_layout, id);
+
+ partition_to_region(from, vdo, &read_region);
+ partition_to_region(to, vdo, &write_regions[0]);
+ dm_kcopyd_copy(vdo->partition_copier, &read_region, 1, write_regions, 0,
+ copy_callback, parent);
+}
+
+/**
+ * grow_physical_callback() - Callback to initiate a grow physical.
+ * @completion: The admin completion.
+ *
+ * Registered in perform_grow_physical().
+ */
+static void grow_physical_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ int result;
+
+ assert_admin_phase_thread(vdo, __func__);
+
+ switch (advance_phase(vdo)) {
+ case GROW_PHYSICAL_PHASE_START:
+ if (vdo_is_read_only(vdo)) {
+ vdo_log_error_strerror(VDO_READ_ONLY,
+ "Can't grow physical size of a read-only VDO");
+ vdo_set_completion_result(completion, VDO_READ_ONLY);
+ break;
+ }
+
+ result = vdo_start_operation(&vdo->admin.state,
+ VDO_ADMIN_STATE_SUSPENDED_OPERATION);
+ if (result != VDO_SUCCESS) {
+ vdo_continue_completion(completion, result);
+ return;
+ }
+
+ /* Copy the journal into the new layout. */
+ copy_partition(vdo, VDO_RECOVERY_JOURNAL_PARTITION, completion);
+ return;
+
+ case GROW_PHYSICAL_PHASE_COPY_SUMMARY:
+ copy_partition(vdo, VDO_SLAB_SUMMARY_PARTITION, completion);
+ return;
+
+ case GROW_PHYSICAL_PHASE_UPDATE_COMPONENTS:
+ vdo_uninitialize_layout(&vdo->layout);
+ vdo->layout = vdo->next_layout;
+ vdo_forget(vdo->next_layout.head);
+ vdo->states.vdo.config.physical_blocks = vdo->layout.size;
+ vdo_update_slab_depot_size(vdo->depot);
+ vdo_save_components(vdo, completion);
+ return;
+
+ case GROW_PHYSICAL_PHASE_USE_NEW_SLABS:
+ vdo_use_new_slabs(vdo->depot, completion);
+ return;
+
+ case GROW_PHYSICAL_PHASE_END:
+ vdo->depot->summary_origin =
+ vdo_get_known_partition(&vdo->layout,
+ VDO_SLAB_SUMMARY_PARTITION)->offset;
+ vdo->recovery_journal->origin =
+ vdo_get_known_partition(&vdo->layout,
+ VDO_RECOVERY_JOURNAL_PARTITION)->offset;
+ break;
+
+ case GROW_PHYSICAL_PHASE_ERROR:
+ vdo_enter_read_only_mode(vdo, completion->result);
+ break;
+
+ default:
+ vdo_set_completion_result(completion, UDS_BAD_STATE);
+ }
+
+ vdo_uninitialize_layout(&vdo->next_layout);
+ finish_operation_callback(completion);
+}
+
+/**
+ * handle_physical_growth_error() - Handle an error during the grow physical process.
+ * @completion: The sub-task completion.
+ */
+static void handle_physical_growth_error(struct vdo_completion *completion)
+{
+ completion->vdo->admin.phase = GROW_PHYSICAL_PHASE_ERROR;
+ grow_physical_callback(completion);
+}
+
+/**
+ * perform_grow_physical() - Grow the physical size of the vdo.
+ * @vdo: The vdo to resize.
+ * @new_physical_blocks: The new physical size in blocks.
+ *
+ * Context: This method may only be called when the vdo has been suspended and must not be called
+ * from a base thread.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int perform_grow_physical(struct vdo *vdo, block_count_t new_physical_blocks)
+{
+ int result;
+ block_count_t new_depot_size, prepared_depot_size;
+ block_count_t old_physical_blocks = vdo->states.vdo.config.physical_blocks;
+
+ /* Skip any noop grows. */
+ if (old_physical_blocks == new_physical_blocks)
+ return VDO_SUCCESS;
+
+ if (new_physical_blocks != vdo->next_layout.size) {
+ /*
+ * Either the VDO isn't prepared to grow, or it was prepared to grow to a different
+ * size. Doing this check here relies on the fact that the call to this method is
+ * done under the dmsetup message lock.
+ */
+ vdo_uninitialize_layout(&vdo->next_layout);
+ vdo_abandon_new_slabs(vdo->depot);
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ /* Validate that we are prepared to grow appropriately. */
+ new_depot_size =
+ vdo_get_known_partition(&vdo->next_layout, VDO_SLAB_DEPOT_PARTITION)->count;
+ prepared_depot_size = (vdo->depot->new_slabs == NULL) ? 0 : vdo->depot->new_size;
+ if (prepared_depot_size != new_depot_size)
+ return VDO_PARAMETER_MISMATCH;
+
+ result = perform_admin_operation(vdo, GROW_PHYSICAL_PHASE_START,
+ grow_physical_callback,
+ handle_physical_growth_error, "grow physical");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_log_info("Physical block count was %llu, now %llu",
+ (unsigned long long) old_physical_blocks,
+ (unsigned long long) new_physical_blocks);
+ return VDO_SUCCESS;
+}
+
+/**
+ * apply_new_vdo_configuration() - Attempt to make any configuration changes from the table being
+ * resumed.
+ * @vdo: The vdo being resumed.
+ * @config: The new device configuration derived from the table with which the vdo is being
+ * resumed.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check apply_new_vdo_configuration(struct vdo *vdo,
+ struct device_config *config)
+{
+ int result;
+
+ result = perform_grow_logical(vdo, config->logical_blocks);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error("grow logical operation failed, result = %d", result);
+ return result;
+ }
+
+ result = perform_grow_physical(vdo, config->physical_blocks);
+ if (result != VDO_SUCCESS)
+ vdo_log_error("resize operation failed, result = %d", result);
+
+ return result;
+}
+
+static int vdo_preresume_registered(struct dm_target *ti, struct vdo *vdo)
+{
+ struct device_config *config = ti->private;
+ const char *device_name = vdo_get_device_name(ti);
+ block_count_t backing_blocks;
+ int result;
+
+ backing_blocks = get_underlying_device_block_count(vdo);
+ if (backing_blocks < config->physical_blocks) {
+ /* FIXME: can this still happen? */
+ vdo_log_error("resume of device '%s' failed: backing device has %llu blocks but VDO physical size is %llu blocks",
+ device_name, (unsigned long long) backing_blocks,
+ (unsigned long long) config->physical_blocks);
+ return -EINVAL;
+ }
+
+ if (vdo_get_admin_state(vdo) == VDO_ADMIN_STATE_PRE_LOADED) {
+ vdo_log_info("starting device '%s'", device_name);
+ result = perform_admin_operation(vdo, LOAD_PHASE_START, load_callback,
+ handle_load_error, "load");
+ if ((result != VDO_SUCCESS) && (result != VDO_READ_ONLY)) {
+ /*
+ * Something has gone very wrong. Make sure everything has drained and
+ * leave the device in an unresumable state.
+ */
+ vdo_log_error_strerror(result,
+ "Start failed, could not load VDO metadata");
+ vdo->suspend_type = VDO_ADMIN_STATE_STOPPING;
+ perform_admin_operation(vdo, SUSPEND_PHASE_START,
+ suspend_callback, suspend_callback,
+ "suspend");
+ return result;
+ }
+
+ /* Even if the VDO is read-only, it is now able to handle read requests. */
+ vdo_log_info("device '%s' started", device_name);
+ }
+
+ vdo_log_info("resuming device '%s'", device_name);
+
+ /* If this fails, the VDO was not in a state to be resumed. This should never happen. */
+ result = apply_new_vdo_configuration(vdo, config);
+ BUG_ON(result == VDO_INVALID_ADMIN_STATE);
+
+ /*
+ * Now that we've tried to modify the vdo, the new config *is* the config, whether the
+ * modifications worked or not.
+ */
+ vdo->device_config = config;
+
+ /*
+ * Any error here is highly unexpected and the state of the vdo is questionable, so we mark
+ * it read-only in memory. Because we are suspended, the read-only state will not be
+ * written to disk.
+ */
+ if (result != VDO_SUCCESS) {
+ vdo_log_error_strerror(result,
+ "Commit of modifications to device '%s' failed",
+ device_name);
+ vdo_enter_read_only_mode(vdo, result);
+ return result;
+ }
+
+ if (vdo_get_admin_state(vdo)->normal) {
+ /* The VDO was just started, so we don't need to resume it. */
+ return VDO_SUCCESS;
+ }
+
+ result = perform_admin_operation(vdo, RESUME_PHASE_START, resume_callback,
+ resume_callback, "resume");
+ BUG_ON(result == VDO_INVALID_ADMIN_STATE);
+ if (result == VDO_READ_ONLY) {
+ /* Even if the vdo is read-only, it has still resumed. */
+ result = VDO_SUCCESS;
+ }
+
+ if (result != VDO_SUCCESS)
+ vdo_log_error("resume of device '%s' failed with error: %d", device_name,
+ result);
+
+ return result;
+}
+
+static int vdo_preresume(struct dm_target *ti)
+{
+ struct registered_thread instance_thread;
+ struct vdo *vdo = get_vdo_for_target(ti);
+ int result;
+
+ vdo_register_thread_device_id(&instance_thread, &vdo->instance);
+ result = vdo_preresume_registered(ti, vdo);
+ if ((result == VDO_PARAMETER_MISMATCH) || (result == VDO_INVALID_ADMIN_STATE))
+ result = -EINVAL;
+ vdo_unregister_thread_device_id();
+ return vdo_status_to_errno(result);
+}
+
+static void vdo_resume(struct dm_target *ti)
+{
+ struct registered_thread instance_thread;
+
+ vdo_register_thread_device_id(&instance_thread,
+ &get_vdo_for_target(ti)->instance);
+ vdo_log_info("device '%s' resumed", vdo_get_device_name(ti));
+ vdo_unregister_thread_device_id();
+}
+
+/*
+ * If anything changes that affects how user tools will interact with vdo, update the version
+ * number and make sure documentation about the change is complete so tools can properly update
+ * their management code.
+ */
+static struct target_type vdo_target_bio = {
+ .features = DM_TARGET_SINGLETON,
+ .name = "vdo",
+ .version = { 9, 0, 0 },
+ .module = THIS_MODULE,
+ .ctr = vdo_ctr,
+ .dtr = vdo_dtr,
+ .io_hints = vdo_io_hints,
+ .iterate_devices = vdo_iterate_devices,
+ .map = vdo_map_bio,
+ .message = vdo_message,
+ .status = vdo_status,
+ .presuspend = vdo_presuspend,
+ .postsuspend = vdo_postsuspend,
+ .preresume = vdo_preresume,
+ .resume = vdo_resume,
+};
+
+static bool dm_registered;
+
+static void vdo_module_destroy(void)
+{
+ vdo_log_debug("unloading");
+
+ if (dm_registered)
+ dm_unregister_target(&vdo_target_bio);
+
+ VDO_ASSERT_LOG_ONLY(instances.count == 0,
+ "should have no instance numbers still in use, but have %u",
+ instances.count);
+ vdo_free(instances.words);
+ memset(&instances, 0, sizeof(struct instance_tracker));
+}
+
+static int __init vdo_init(void)
+{
+ int result = 0;
+
+ /* Memory tracking must be initialized first for accurate accounting. */
+ vdo_memory_init();
+ vdo_initialize_threads_mutex();
+ vdo_initialize_thread_device_registry();
+ vdo_initialize_device_registry_once();
+
+ /* Add VDO errors to the set of errors registered by the indexer. */
+ result = vdo_register_status_codes();
+ if (result != VDO_SUCCESS) {
+ vdo_log_error("vdo_register_status_codes failed %d", result);
+ vdo_module_destroy();
+ return result;
+ }
+
+ result = dm_register_target(&vdo_target_bio);
+ if (result < 0) {
+ vdo_log_error("dm_register_target failed %d", result);
+ vdo_module_destroy();
+ return result;
+ }
+ dm_registered = true;
+
+ return result;
+}
+
+static void __exit vdo_exit(void)
+{
+ vdo_module_destroy();
+ /* Memory tracking cleanup must be done last. */
+ vdo_memory_exit();
+}
+
+module_init(vdo_init);
+module_exit(vdo_exit);
+
+module_param_named(log_level, vdo_log_level, uint, 0644);
+MODULE_PARM_DESC(log_level, "Log level for log messages");
+
+MODULE_DESCRIPTION(DM_NAME " target for transparent deduplication");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-vdo/dump.c b/drivers/md/dm-vdo/dump.c
new file mode 100644
index 000000000000..00e575d7d773
--- /dev/null
+++ b/drivers/md/dm-vdo/dump.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "dump.h"
+
+#include <linux/module.h>
+
+#include "memory-alloc.h"
+#include "string-utils.h"
+
+#include "constants.h"
+#include "data-vio.h"
+#include "dedupe.h"
+#include "funnel-workqueue.h"
+#include "io-submitter.h"
+#include "logger.h"
+#include "types.h"
+#include "vdo.h"
+
+enum dump_options {
+ /* Work queues */
+ SHOW_QUEUES,
+ /* Memory pools */
+ SHOW_VIO_POOL,
+ /* Others */
+ SHOW_VDO_STATUS,
+ /* This one means an option overrides the "default" choices, instead of altering them. */
+ SKIP_DEFAULT
+};
+
+enum dump_option_flags {
+ /* Work queues */
+ FLAG_SHOW_QUEUES = (1 << SHOW_QUEUES),
+ /* Memory pools */
+ FLAG_SHOW_VIO_POOL = (1 << SHOW_VIO_POOL),
+ /* Others */
+ FLAG_SHOW_VDO_STATUS = (1 << SHOW_VDO_STATUS),
+ /* Special */
+ FLAG_SKIP_DEFAULT = (1 << SKIP_DEFAULT)
+};
+
+#define FLAGS_ALL_POOLS (FLAG_SHOW_VIO_POOL)
+#define DEFAULT_DUMP_FLAGS (FLAG_SHOW_QUEUES | FLAG_SHOW_VDO_STATUS)
+/* Another static buffer... log10(256) = 2.408+, round up: */
+#define DIGITS_PER_U64 (1 + sizeof(u64) * 2409 / 1000)
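+/*
+ * Checking the arithmetic: sizeof(u64) * 2409 / 1000 == 19, so DIGITS_PER_U64 is 20,
+ * matching the 20 decimal digits of U64_MAX (18446744073709551615).
+ */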
+
+static inline bool is_arg_string(const char *arg, const char *this_option)
+{
+ /* convention seems to be case-independent options */
+ return strncasecmp(arg, this_option, strlen(this_option)) == 0;
+}
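+
+/*
+ * Note that only strlen(this_option) characters are compared, so this is a
+ * case-insensitive prefix match: an argument such as "queuesfoo" would also match
+ * "queues".
+ */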
+
+static void do_dump(struct vdo *vdo, unsigned int dump_options_requested,
+ const char *why)
+{
+ u32 active, maximum;
+ s64 outstanding;
+
+ vdo_log_info("%s dump triggered via %s", VDO_LOGGING_MODULE_NAME, why);
+ active = get_data_vio_pool_active_requests(vdo->data_vio_pool);
+ maximum = get_data_vio_pool_maximum_requests(vdo->data_vio_pool);
+ outstanding = (atomic64_read(&vdo->stats.bios_submitted) -
+ atomic64_read(&vdo->stats.bios_completed));
+ vdo_log_info("%u device requests outstanding (max %u), %lld bio requests outstanding, device '%s'",
+ active, maximum, outstanding,
+ vdo_get_device_name(vdo->device_config->owning_target));
+ if (((dump_options_requested & FLAG_SHOW_QUEUES) != 0) && (vdo->threads != NULL)) {
+ thread_id_t id;
+
+ for (id = 0; id < vdo->thread_config.thread_count; id++)
+ vdo_dump_work_queue(vdo->threads[id].queue);
+ }
+
+ vdo_dump_hash_zones(vdo->hash_zones);
+ dump_data_vio_pool(vdo->data_vio_pool,
+ (dump_options_requested & FLAG_SHOW_VIO_POOL) != 0);
+ if ((dump_options_requested & FLAG_SHOW_VDO_STATUS) != 0)
+ vdo_dump_status(vdo);
+
+ vdo_report_memory_usage();
+ vdo_log_info("end of %s dump", VDO_LOGGING_MODULE_NAME);
+}
+
+static int parse_dump_options(unsigned int argc, char *const *argv,
+ unsigned int *dump_options_requested_ptr)
+{
+ unsigned int dump_options_requested = 0;
+
+ static const struct {
+ const char *name;
+ unsigned int flags;
+ } option_names[] = {
+ { "viopool", FLAG_SKIP_DEFAULT | FLAG_SHOW_VIO_POOL },
+ { "vdo", FLAG_SKIP_DEFAULT | FLAG_SHOW_VDO_STATUS },
+ { "pools", FLAG_SKIP_DEFAULT | FLAGS_ALL_POOLS },
+ { "queues", FLAG_SKIP_DEFAULT | FLAG_SHOW_QUEUES },
+ { "threads", FLAG_SKIP_DEFAULT | FLAG_SHOW_QUEUES },
+ { "default", FLAG_SKIP_DEFAULT | DEFAULT_DUMP_FLAGS },
+ { "all", ~0 },
+ };
+
+ bool options_okay = true;
+ unsigned int i;
+
+ for (i = 1; i < argc; i++) {
+ unsigned int j;
+
+ for (j = 0; j < ARRAY_SIZE(option_names); j++) {
+ if (is_arg_string(argv[i], option_names[j].name)) {
+ dump_options_requested |= option_names[j].flags;
+ break;
+ }
+ }
+ if (j == ARRAY_SIZE(option_names)) {
+ vdo_log_warning("dump option name '%s' unknown", argv[i]);
+ options_okay = false;
+ }
+ }
+ if (!options_okay)
+ return -EINVAL;
+ if ((dump_options_requested & FLAG_SKIP_DEFAULT) == 0)
+ dump_options_requested |= DEFAULT_DUMP_FLAGS;
+ *dump_options_requested_ptr = dump_options_requested;
+ return 0;
+}
+
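+/*
+ * An illustrative invocation (assuming the target wires this up as its "dump"
+ * message handler, as this series does):
+ *
+ *   dmsetup message <vdo-device> 0 dump queues pools
+ *
+ * Since argv[0] is expected to be the message name itself, option parsing
+ * starts at argv[1].
+ */
+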
+/* Dump as specified by zero or more string arguments. */
+int vdo_dump(struct vdo *vdo, unsigned int argc, char *const *argv, const char *why)
+{
+ unsigned int dump_options_requested = 0;
+ int result = parse_dump_options(argc, argv, &dump_options_requested);
+
+ if (result != 0)
+ return result;
+
+ do_dump(vdo, dump_options_requested, why);
+ return 0;
+}
+
+/* Dump everything we know how to dump */
+void vdo_dump_all(struct vdo *vdo, const char *why)
+{
+ do_dump(vdo, ~0, why);
+}
+
+/*
+ * Dump out the data_vio waiters on a waitq.
+ * wait_on is the label to print for the queue (e.g. logical or physical).
+ */
+static void dump_vio_waiters(struct vdo_wait_queue *waitq, char *wait_on)
+{
+ struct vdo_waiter *waiter, *first = vdo_waitq_get_first_waiter(waitq);
+ struct data_vio *data_vio;
+
+ if (first == NULL)
+ return;
+
+ data_vio = vdo_waiter_as_data_vio(first);
+
+ vdo_log_info(" %s is locked. Waited on by: vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s",
+ wait_on, data_vio, data_vio->allocation.pbn, data_vio->logical.lbn,
+ data_vio->duplicate.pbn, get_data_vio_operation_name(data_vio));
+
+ for (waiter = first->next_waiter; waiter != first; waiter = waiter->next_waiter) {
+ data_vio = vdo_waiter_as_data_vio(waiter);
+ vdo_log_info(" ... and : vio %px pbn %llu lbn %llu d-pbn %llu lastOp %s",
+ data_vio, data_vio->allocation.pbn, data_vio->logical.lbn,
+ data_vio->duplicate.pbn,
+ get_data_vio_operation_name(data_vio));
+ }
+}
+
+/*
+ * Encode various attributes of a data_vio as a string of one-character flags. This encoding is for
+ * logging brevity:
+ *
+ * R => vio completion result not VDO_SUCCESS
+ * W => vio is on a waitq
+ * D => vio is a duplicate
+ * p => vio is a partial block operation
+ * z => vio is a zero block
+ * d => vio is a discard
+ *
+ * The common case of no flags set will result in an empty, null-terminated buffer. If any flags
+ * are encoded, the first character in the string will be a space character.
+ */
+static void encode_vio_dump_flags(struct data_vio *data_vio, char buffer[8])
+{
+	char *p_flag = buffer;
+
+	*p_flag++ = ' ';
+ if (data_vio->vio.completion.result != VDO_SUCCESS)
+ *p_flag++ = 'R';
+ if (data_vio->waiter.next_waiter != NULL)
+ *p_flag++ = 'W';
+ if (data_vio->is_duplicate)
+ *p_flag++ = 'D';
+ if (data_vio->is_partial)
+ *p_flag++ = 'p';
+ if (data_vio->is_zero)
+ *p_flag++ = 'z';
+ if (data_vio->remaining_discard > 0)
+ *p_flag++ = 'd';
+ if (p_flag == &buffer[1]) {
+ /* No flags, so remove the blank space. */
+ p_flag = buffer;
+ }
+ *p_flag = '\0';
+}
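+
+/*
+ * For example, a partial-block write that has been matched to a duplicate
+ * encodes as " Dp", while a data_vio with no flags set encodes as "".
+ */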
+
+/* Implements buffer_dump_function. */
+void dump_data_vio(void *data)
+{
+ struct data_vio *data_vio = data;
+
+	/*
+	 * This just needs to be big enough to hold a queue (thread) name and a
+	 * function name (plus a separator character and NUL); the space allowed
+	 * for the function name is simply a generous guess.
+	 *
+	 * In making this static, we're assuming only one "dump" will run at a
+	 * time. If more than one does run, the log output will be garbled
+	 * anyway.
+	 */
+ static char vio_completion_dump_buffer[100 + MAX_VDO_WORK_QUEUE_NAME_LEN];
+ static char vio_block_number_dump_buffer[sizeof("P L D") + 3 * DIGITS_PER_U64];
+ static char vio_flush_generation_buffer[sizeof(" FG") + DIGITS_PER_U64];
+ static char flags_dump_buffer[8];
+
+ /*
+ * We're likely to be logging a couple thousand of these lines, and in some circumstances
+ * syslogd may have trouble keeping up, so keep it BRIEF rather than user-friendly.
+ */
+ vdo_dump_completion_to_buffer(&data_vio->vio.completion,
+ vio_completion_dump_buffer,
+ sizeof(vio_completion_dump_buffer));
+ if (data_vio->is_duplicate) {
+ snprintf(vio_block_number_dump_buffer,
+ sizeof(vio_block_number_dump_buffer), "P%llu L%llu D%llu",
+ data_vio->allocation.pbn, data_vio->logical.lbn,
+ data_vio->duplicate.pbn);
+ } else if (data_vio_has_allocation(data_vio)) {
+ snprintf(vio_block_number_dump_buffer,
+ sizeof(vio_block_number_dump_buffer), "P%llu L%llu",
+ data_vio->allocation.pbn, data_vio->logical.lbn);
+ } else {
+ snprintf(vio_block_number_dump_buffer,
+ sizeof(vio_block_number_dump_buffer), "L%llu",
+ data_vio->logical.lbn);
+ }
+
+ if (data_vio->flush_generation != 0) {
+ snprintf(vio_flush_generation_buffer,
+ sizeof(vio_flush_generation_buffer), " FG%llu",
+ data_vio->flush_generation);
+ } else {
+ vio_flush_generation_buffer[0] = 0;
+ }
+
+ encode_vio_dump_flags(data_vio, flags_dump_buffer);
+
+ vdo_log_info(" vio %px %s%s %s %s%s", data_vio,
+ vio_block_number_dump_buffer,
+ vio_flush_generation_buffer,
+ get_data_vio_operation_name(data_vio),
+ vio_completion_dump_buffer,
+ flags_dump_buffer);
+ /*
+ * might want info on: wantUDSAnswer / operation / status
+ * might want info on: bio / bios_merged
+ */
+
+ dump_vio_waiters(&data_vio->logical.waiters, "lbn");
+
+ /* might want to dump more info from vio here */
+}
diff --git a/drivers/md/dm-vdo/dump.h b/drivers/md/dm-vdo/dump.h
new file mode 100644
index 000000000000..ad47c70cca78
--- /dev/null
+++ b/drivers/md/dm-vdo/dump.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_DUMP_H
+#define VDO_DUMP_H
+
+#include "types.h"
+
+int vdo_dump(struct vdo *vdo, unsigned int argc, char *const *argv, const char *why);
+
+void vdo_dump_all(struct vdo *vdo, const char *why);
+
+void dump_data_vio(void *data);
+
+#endif /* VDO_DUMP_H */
diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c
new file mode 100644
index 000000000000..a34ea0229d53
--- /dev/null
+++ b/drivers/md/dm-vdo/encodings.c
@@ -0,0 +1,1483 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "encodings.h"
+
+#include <linux/log2.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "constants.h"
+#include "status-codes.h"
+#include "types.h"
+
+/** The maximum logical space is 4 petabytes, which is 1 terablock. */
+static const block_count_t MAXIMUM_VDO_LOGICAL_BLOCKS = 1024ULL * 1024 * 1024 * 1024;
+
+/** The maximum physical space is 256 terabytes, which is 64 gigablocks. */
+static const block_count_t MAXIMUM_VDO_PHYSICAL_BLOCKS = 1024ULL * 1024 * 1024 * 64;
+
+struct geometry_block {
+ char magic_number[VDO_GEOMETRY_MAGIC_NUMBER_SIZE];
+ struct packed_header header;
+ u32 checksum;
+} __packed;
+
+static const struct header GEOMETRY_BLOCK_HEADER_5_0 = {
+ .id = VDO_GEOMETRY_BLOCK,
+ .version = {
+ .major_version = 5,
+ .minor_version = 0,
+ },
+	/*
+	 * Note: unlike everywhere else in VDO, this size covers the entire
+	 * encoded geometry block, not just the payload following the header.
+	 */
+ .size = sizeof(struct geometry_block) + sizeof(struct volume_geometry),
+};
+
+static const struct header GEOMETRY_BLOCK_HEADER_4_0 = {
+ .id = VDO_GEOMETRY_BLOCK,
+ .version = {
+ .major_version = 4,
+ .minor_version = 0,
+ },
+	/*
+	 * Note: unlike everywhere else in VDO, this size covers the entire
+	 * encoded geometry block, not just the payload following the header.
+	 */
+ .size = sizeof(struct geometry_block) + sizeof(struct volume_geometry_4_0),
+};
+
+const u8 VDO_GEOMETRY_MAGIC_NUMBER[VDO_GEOMETRY_MAGIC_NUMBER_SIZE + 1] = "dmvdo001";
+
+#define PAGE_HEADER_4_1_SIZE (8 + 8 + 8 + 1 + 1 + 1 + 1)
+
+static const struct version_number BLOCK_MAP_4_1 = {
+ .major_version = 4,
+ .minor_version = 1,
+};
+
+const struct header VDO_BLOCK_MAP_HEADER_2_0 = {
+ .id = VDO_BLOCK_MAP,
+ .version = {
+ .major_version = 2,
+ .minor_version = 0,
+ },
+ .size = sizeof(struct block_map_state_2_0),
+};
+
+const struct header VDO_RECOVERY_JOURNAL_HEADER_7_0 = {
+ .id = VDO_RECOVERY_JOURNAL,
+ .version = {
+ .major_version = 7,
+ .minor_version = 0,
+ },
+ .size = sizeof(struct recovery_journal_state_7_0),
+};
+
+const struct header VDO_SLAB_DEPOT_HEADER_2_0 = {
+ .id = VDO_SLAB_DEPOT,
+ .version = {
+ .major_version = 2,
+ .minor_version = 0,
+ },
+ .size = sizeof(struct slab_depot_state_2_0),
+};
+
+static const struct header VDO_LAYOUT_HEADER_3_0 = {
+ .id = VDO_LAYOUT,
+ .version = {
+ .major_version = 3,
+ .minor_version = 0,
+ },
+ .size = sizeof(struct layout_3_0) + (sizeof(struct partition_3_0) * VDO_PARTITION_COUNT),
+};
+
+static const enum partition_id REQUIRED_PARTITIONS[] = {
+ VDO_BLOCK_MAP_PARTITION,
+ VDO_SLAB_DEPOT_PARTITION,
+ VDO_RECOVERY_JOURNAL_PARTITION,
+ VDO_SLAB_SUMMARY_PARTITION,
+};
+
+/*
+ * The current version for the data encoded in the super block. This must be changed any time there
+ * is a change to encoding of the component data of any VDO component.
+ */
+static const struct version_number VDO_COMPONENT_DATA_41_0 = {
+ .major_version = 41,
+ .minor_version = 0,
+};
+
+const struct version_number VDO_VOLUME_VERSION_67_0 = {
+ .major_version = 67,
+ .minor_version = 0,
+};
+
+static const struct header SUPER_BLOCK_HEADER_12_0 = {
+ .id = VDO_SUPER_BLOCK,
+ .version = {
+ .major_version = 12,
+ .minor_version = 0,
+ },
+
+ /* This is the minimum size, if the super block contains no components. */
+ .size = VDO_SUPER_BLOCK_FIXED_SIZE - VDO_ENCODED_HEADER_SIZE,
+};
+
+/**
+ * validate_version() - Check whether a version matches an expected version.
+ * @expected_version: The expected version.
+ * @actual_version: The version being validated.
+ * @component_name: The name of the component or the calling function (for error logging).
+ *
+ * Logs an error describing a mismatch.
+ *
+ * Return: VDO_SUCCESS if the versions are the same,
+ * VDO_UNSUPPORTED_VERSION if the versions don't match.
+ */
+static int __must_check validate_version(struct version_number expected_version,
+ struct version_number actual_version,
+ const char *component_name)
+{
+ if (!vdo_are_same_version(expected_version, actual_version)) {
+ return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
+ "%s version mismatch, expected %d.%d, got %d.%d",
+ component_name,
+ expected_version.major_version,
+ expected_version.minor_version,
+ actual_version.major_version,
+ actual_version.minor_version);
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_validate_header() - Check whether a header matches expectations.
+ * @expected_header: The expected header.
+ * @actual_header: The header being validated.
+ * @exact_size: If true, the size fields of the two headers must be the same, otherwise it is
+ * required that actual_header.size >= expected_header.size.
+ * @name: The name of the component or the calling function (for error logging).
+ *
+ * Logs an error describing the first mismatch found.
+ *
+ * Return: VDO_SUCCESS if the header meets expectations,
+ * VDO_INCORRECT_COMPONENT if the component ids don't match,
+ * VDO_UNSUPPORTED_VERSION if the versions or sizes don't match.
+ */
+int vdo_validate_header(const struct header *expected_header,
+ const struct header *actual_header, bool exact_size,
+ const char *name)
+{
+ int result;
+
+ if (expected_header->id != actual_header->id) {
+ return vdo_log_error_strerror(VDO_INCORRECT_COMPONENT,
+ "%s ID mismatch, expected %d, got %d",
+ name, expected_header->id,
+ actual_header->id);
+ }
+
+ result = validate_version(expected_header->version, actual_header->version,
+ name);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if ((expected_header->size > actual_header->size) ||
+ (exact_size && (expected_header->size < actual_header->size))) {
+ return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
+ "%s size mismatch, expected %zu, got %zu",
+ name, expected_header->size,
+ actual_header->size);
+ }
+
+ return VDO_SUCCESS;
+}
+
+static void encode_version_number(u8 *buffer, size_t *offset,
+ struct version_number version)
+{
+ struct packed_version_number packed = vdo_pack_version_number(version);
+
+ memcpy(buffer + *offset, &packed, sizeof(packed));
+ *offset += sizeof(packed);
+}
+
+void vdo_encode_header(u8 *buffer, size_t *offset, const struct header *header)
+{
+ struct packed_header packed = vdo_pack_header(header);
+
+ memcpy(buffer + *offset, &packed, sizeof(packed));
+ *offset += sizeof(packed);
+}
+
+static void decode_version_number(u8 *buffer, size_t *offset,
+ struct version_number *version)
+{
+ struct packed_version_number packed;
+
+ memcpy(&packed, buffer + *offset, sizeof(packed));
+ *offset += sizeof(packed);
+ *version = vdo_unpack_version_number(packed);
+}
+
+void vdo_decode_header(u8 *buffer, size_t *offset, struct header *header)
+{
+ struct packed_header packed;
+
+ memcpy(&packed, buffer + *offset, sizeof(packed));
+ *offset += sizeof(packed);
+
+ *header = vdo_unpack_header(&packed);
+}
+
+/**
+ * decode_volume_geometry() - Decode the on-disk representation of a volume geometry from a buffer.
+ * @buffer: A buffer to decode from.
+ * @offset: The offset in the buffer at which to decode.
+ * @geometry: The structure to receive the decoded fields.
+ * @version: The geometry block version to decode.
+ */
+static void decode_volume_geometry(u8 *buffer, size_t *offset,
+ struct volume_geometry *geometry, u32 version)
+{
+ u32 unused, mem;
+ enum volume_region_id id;
+ nonce_t nonce;
+ block_count_t bio_offset = 0;
+ bool sparse;
+
+ /* This is for backwards compatibility. */
+ decode_u32_le(buffer, offset, &unused);
+ geometry->unused = unused;
+
+ decode_u64_le(buffer, offset, &nonce);
+ geometry->nonce = nonce;
+
+ memcpy((unsigned char *) &geometry->uuid, buffer + *offset, sizeof(uuid_t));
+ *offset += sizeof(uuid_t);
+
+ if (version > 4)
+ decode_u64_le(buffer, offset, &bio_offset);
+ geometry->bio_offset = bio_offset;
+
+ for (id = 0; id < VDO_VOLUME_REGION_COUNT; id++) {
+ physical_block_number_t start_block;
+		u32 saved_id;
+
+ decode_u32_le(buffer, offset, &saved_id);
+ decode_u64_le(buffer, offset, &start_block);
+
+ geometry->regions[id] = (struct volume_region) {
+ .id = saved_id,
+ .start_block = start_block,
+ };
+ }
+
+ decode_u32_le(buffer, offset, &mem);
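+	/* Skip the unused u32 in the on-disk index config. */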
+ *offset += sizeof(u32);
+ sparse = buffer[(*offset)++];
+
+ geometry->index_config = (struct index_config) {
+ .mem = mem,
+ .sparse = sparse,
+ };
+}
+
+/**
+ * vdo_parse_geometry_block() - Decode and validate an encoded geometry block.
+ * @block: The encoded geometry block.
+ * @geometry: The structure to receive the decoded fields.
+ */
+int __must_check vdo_parse_geometry_block(u8 *block, struct volume_geometry *geometry)
+{
+ u32 checksum, saved_checksum;
+ struct header header;
+ size_t offset = 0;
+ int result;
+
+ if (memcmp(block, VDO_GEOMETRY_MAGIC_NUMBER, VDO_GEOMETRY_MAGIC_NUMBER_SIZE) != 0)
+ return VDO_BAD_MAGIC;
+ offset += VDO_GEOMETRY_MAGIC_NUMBER_SIZE;
+
+ vdo_decode_header(block, &offset, &header);
+ if (header.version.major_version <= 4) {
+ result = vdo_validate_header(&GEOMETRY_BLOCK_HEADER_4_0, &header,
+ true, __func__);
+ } else {
+ result = vdo_validate_header(&GEOMETRY_BLOCK_HEADER_5_0, &header,
+ true, __func__);
+ }
+ if (result != VDO_SUCCESS)
+ return result;
+
+ decode_volume_geometry(block, &offset, geometry, header.version.major_version);
+
+ result = VDO_ASSERT(header.size == offset + sizeof(u32),
+ "should have decoded up to the geometry checksum");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Decode and verify the checksum. */
+ checksum = vdo_crc32(block, offset);
+ decode_u32_le(block, &offset, &saved_checksum);
+
+ return ((checksum == saved_checksum) ? VDO_SUCCESS : VDO_CHECKSUM_MISMATCH);
+}
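+
+/*
+ * For reference, the encoded geometry block parsed above is laid out as:
+ *
+ *   8 bytes    the magic number "dmvdo001"
+ *   20 bytes   the packed header (id, version, size)
+ *   variable   the volume geometry (version 4.0 or 5.0)
+ *   4 bytes    a CRC-32 checksum of all preceding bytes
+ */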
+
+struct block_map_page *vdo_format_block_map_page(void *buffer, nonce_t nonce,
+ physical_block_number_t pbn,
+ bool initialized)
+{
+ struct block_map_page *page = buffer;
+
+ memset(buffer, 0, VDO_BLOCK_SIZE);
+ page->version = vdo_pack_version_number(BLOCK_MAP_4_1);
+ page->header.nonce = __cpu_to_le64(nonce);
+ page->header.pbn = __cpu_to_le64(pbn);
+ page->header.initialized = initialized;
+ return page;
+}
+
+enum block_map_page_validity vdo_validate_block_map_page(struct block_map_page *page,
+ nonce_t nonce,
+ physical_block_number_t pbn)
+{
+ BUILD_BUG_ON(sizeof(struct block_map_page_header) != PAGE_HEADER_4_1_SIZE);
+
+ if (!vdo_are_same_version(BLOCK_MAP_4_1,
+ vdo_unpack_version_number(page->version)) ||
+ !page->header.initialized || (nonce != __le64_to_cpu(page->header.nonce)))
+ return VDO_BLOCK_MAP_PAGE_INVALID;
+
+ if (pbn != vdo_get_block_map_page_pbn(page))
+ return VDO_BLOCK_MAP_PAGE_BAD;
+
+ return VDO_BLOCK_MAP_PAGE_VALID;
+}
+
+static int decode_block_map_state_2_0(u8 *buffer, size_t *offset,
+ struct block_map_state_2_0 *state)
+{
+ size_t initial_offset;
+ block_count_t flat_page_count, root_count;
+ physical_block_number_t flat_page_origin, root_origin;
+ struct header header;
+ int result;
+
+ vdo_decode_header(buffer, offset, &header);
+ result = vdo_validate_header(&VDO_BLOCK_MAP_HEADER_2_0, &header, true, __func__);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ initial_offset = *offset;
+
+ decode_u64_le(buffer, offset, &flat_page_origin);
+	result = VDO_ASSERT(flat_page_origin == VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN,
+			    "Flat page origin must be %u (recorded as %llu)",
+			    VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN,
+			    (unsigned long long) flat_page_origin);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ decode_u64_le(buffer, offset, &flat_page_count);
+	result = VDO_ASSERT(flat_page_count == 0,
+			    "Flat page count must be 0 (recorded as %llu)",
+			    (unsigned long long) flat_page_count);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ decode_u64_le(buffer, offset, &root_origin);
+ decode_u64_le(buffer, offset, &root_count);
+
+ result = VDO_ASSERT(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset,
+ "decoded block map component size must match header size");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *state = (struct block_map_state_2_0) {
+ .flat_page_origin = flat_page_origin,
+ .flat_page_count = flat_page_count,
+ .root_origin = root_origin,
+ .root_count = root_count,
+ };
+
+ return VDO_SUCCESS;
+}
+
+static void encode_block_map_state_2_0(u8 *buffer, size_t *offset,
+ struct block_map_state_2_0 state)
+{
+ size_t initial_offset;
+
+ vdo_encode_header(buffer, offset, &VDO_BLOCK_MAP_HEADER_2_0);
+
+ initial_offset = *offset;
+ encode_u64_le(buffer, offset, state.flat_page_origin);
+ encode_u64_le(buffer, offset, state.flat_page_count);
+ encode_u64_le(buffer, offset, state.root_origin);
+ encode_u64_le(buffer, offset, state.root_count);
+
+ VDO_ASSERT_LOG_ONLY(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset,
+ "encoded block map component size must match header size");
+}
+
+/**
+ * vdo_compute_new_forest_pages() - Compute the number of pages which must be allocated at each
+ *                                  level in order to grow the forest to a new number of entries.
+ * @root_count: The number of roots the block map has.
+ * @old_sizes: The current size of each level, or NULL if the forest has no pages yet.
+ * @entries: The new number of entries the block map must address.
+ * @new_sizes: The structure to receive the new size of each level.
+ *
+ * Return: The total number of non-leaf pages required.
+ */
+block_count_t vdo_compute_new_forest_pages(root_count_t root_count,
+ struct boundary *old_sizes,
+ block_count_t entries,
+ struct boundary *new_sizes)
+{
+ page_count_t leaf_pages = max(vdo_compute_block_map_page_count(entries), 1U);
+ page_count_t level_size = DIV_ROUND_UP(leaf_pages, root_count);
+ block_count_t total_pages = 0;
+ height_t height;
+
+ for (height = 0; height < VDO_BLOCK_MAP_TREE_HEIGHT; height++) {
+ block_count_t new_pages;
+
+ level_size = DIV_ROUND_UP(level_size, VDO_BLOCK_MAP_ENTRIES_PER_PAGE);
+ new_sizes->levels[height] = level_size;
+ new_pages = level_size;
+ if (old_sizes != NULL)
+ new_pages -= old_sizes->levels[height];
+ total_pages += (new_pages * root_count);
+ }
+
+ return total_pages;
+}
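+
+/*
+ * A worked example, assuming VDO_BLOCK_MAP_ENTRIES_PER_PAGE is 812 and
+ * VDO_BLOCK_MAP_TREE_HEIGHT is 5: growing a single-root map to 1,000,000
+ * entries requires ceil(1000000 / 812) = 1232 leaf pages, so the interior
+ * levels hold 2, 1, 1, 1, and 1 pages, or 6 non-leaf pages in total.
+ */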
+
+/**
+ * encode_recovery_journal_state_7_0() - Encode the state of a recovery journal into a buffer.
+ */
+static void encode_recovery_journal_state_7_0(u8 *buffer, size_t *offset,
+ struct recovery_journal_state_7_0 state)
+{
+ size_t initial_offset;
+
+ vdo_encode_header(buffer, offset, &VDO_RECOVERY_JOURNAL_HEADER_7_0);
+
+ initial_offset = *offset;
+ encode_u64_le(buffer, offset, state.journal_start);
+ encode_u64_le(buffer, offset, state.logical_blocks_used);
+ encode_u64_le(buffer, offset, state.block_map_data_blocks);
+
+ VDO_ASSERT_LOG_ONLY(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset,
+ "encoded recovery journal component size must match header size");
+}
+
+/**
+ * decode_recovery_journal_state_7_0() - Decode the state of a recovery journal saved in a buffer.
+ * @buffer: The buffer containing the saved state.
+ * @offset: The offset in the buffer at which to decode.
+ * @state: A pointer to a recovery journal state to hold the result of a successful decode.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check decode_recovery_journal_state_7_0(u8 *buffer, size_t *offset,
+ struct recovery_journal_state_7_0 *state)
+{
+ struct header header;
+ int result;
+ size_t initial_offset;
+ sequence_number_t journal_start;
+ block_count_t logical_blocks_used, block_map_data_blocks;
+
+ vdo_decode_header(buffer, offset, &header);
+ result = vdo_validate_header(&VDO_RECOVERY_JOURNAL_HEADER_7_0, &header, true,
+ __func__);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ initial_offset = *offset;
+ decode_u64_le(buffer, offset, &journal_start);
+ decode_u64_le(buffer, offset, &logical_blocks_used);
+ decode_u64_le(buffer, offset, &block_map_data_blocks);
+
+ result = VDO_ASSERT(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset,
+ "decoded recovery journal component size must match header size");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *state = (struct recovery_journal_state_7_0) {
+ .journal_start = journal_start,
+ .logical_blocks_used = logical_blocks_used,
+ .block_map_data_blocks = block_map_data_blocks,
+ };
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_get_journal_operation_name() - Get the name of a journal operation.
+ * @operation: The operation to name.
+ *
+ * Return: The name of the operation.
+ */
+const char *vdo_get_journal_operation_name(enum journal_operation operation)
+{
+ switch (operation) {
+ case VDO_JOURNAL_DATA_REMAPPING:
+ return "data remapping";
+
+ case VDO_JOURNAL_BLOCK_MAP_REMAPPING:
+ return "block map remapping";
+
+ default:
+ return "unknown journal operation";
+ }
+}
+
+/**
+ * encode_slab_depot_state_2_0() - Encode the state of a slab depot into a buffer.
+ */
+static void encode_slab_depot_state_2_0(u8 *buffer, size_t *offset,
+ struct slab_depot_state_2_0 state)
+{
+ size_t initial_offset;
+
+ vdo_encode_header(buffer, offset, &VDO_SLAB_DEPOT_HEADER_2_0);
+
+ initial_offset = *offset;
+ encode_u64_le(buffer, offset, state.slab_config.slab_blocks);
+ encode_u64_le(buffer, offset, state.slab_config.data_blocks);
+ encode_u64_le(buffer, offset, state.slab_config.reference_count_blocks);
+ encode_u64_le(buffer, offset, state.slab_config.slab_journal_blocks);
+ encode_u64_le(buffer, offset, state.slab_config.slab_journal_flushing_threshold);
+ encode_u64_le(buffer, offset, state.slab_config.slab_journal_blocking_threshold);
+ encode_u64_le(buffer, offset, state.slab_config.slab_journal_scrubbing_threshold);
+ encode_u64_le(buffer, offset, state.first_block);
+ encode_u64_le(buffer, offset, state.last_block);
+ buffer[(*offset)++] = state.zone_count;
+
+	VDO_ASSERT_LOG_ONLY(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset,
+			    "encoded slab depot component size must match header size");
+}
+
+/**
+ * decode_slab_depot_state_2_0() - Decode slab depot component state version 2.0 from a buffer.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int decode_slab_depot_state_2_0(u8 *buffer, size_t *offset,
+ struct slab_depot_state_2_0 *state)
+{
+ struct header header;
+ int result;
+ size_t initial_offset;
+ struct slab_config slab_config;
+ block_count_t count;
+ physical_block_number_t first_block, last_block;
+ zone_count_t zone_count;
+
+ vdo_decode_header(buffer, offset, &header);
+ result = vdo_validate_header(&VDO_SLAB_DEPOT_HEADER_2_0, &header, true,
+ __func__);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ initial_offset = *offset;
+ decode_u64_le(buffer, offset, &count);
+ slab_config.slab_blocks = count;
+
+ decode_u64_le(buffer, offset, &count);
+ slab_config.data_blocks = count;
+
+ decode_u64_le(buffer, offset, &count);
+ slab_config.reference_count_blocks = count;
+
+ decode_u64_le(buffer, offset, &count);
+ slab_config.slab_journal_blocks = count;
+
+ decode_u64_le(buffer, offset, &count);
+ slab_config.slab_journal_flushing_threshold = count;
+
+ decode_u64_le(buffer, offset, &count);
+ slab_config.slab_journal_blocking_threshold = count;
+
+ decode_u64_le(buffer, offset, &count);
+ slab_config.slab_journal_scrubbing_threshold = count;
+
+ decode_u64_le(buffer, offset, &first_block);
+ decode_u64_le(buffer, offset, &last_block);
+ zone_count = buffer[(*offset)++];
+
+ result = VDO_ASSERT(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset,
+ "decoded slab depot component size must match header size");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *state = (struct slab_depot_state_2_0) {
+ .slab_config = slab_config,
+ .first_block = first_block,
+ .last_block = last_block,
+ .zone_count = zone_count,
+ };
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_configure_slab_depot() - Configure the slab depot.
+ * @partition: The slab depot partition
+ * @slab_config: The configuration of a single slab.
+ * @zone_count: The number of zones the depot will use.
+ * @state: The state structure to be configured.
+ *
+ * Configures the slab_depot for the specified storage capacity, finding the number of data blocks
+ * that will fit while still leaving room for the depot metadata, and then returns the saved state
+ * for that configuration.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_configure_slab_depot(const struct partition *partition,
+ struct slab_config slab_config, zone_count_t zone_count,
+ struct slab_depot_state_2_0 *state)
+{
+ block_count_t total_slab_blocks, total_data_blocks;
+ size_t slab_count;
+ physical_block_number_t last_block;
+ block_count_t slab_size = slab_config.slab_blocks;
+
+ vdo_log_debug("slabDepot %s(block_count=%llu, first_block=%llu, slab_size=%llu, zone_count=%u)",
+ __func__, (unsigned long long) partition->count,
+ (unsigned long long) partition->offset,
+ (unsigned long long) slab_size, zone_count);
+
+ /* We do not allow runt slabs, so we waste up to a slab's worth. */
+ slab_count = (partition->count / slab_size);
+ if (slab_count == 0)
+ return VDO_NO_SPACE;
+
+ if (slab_count > MAX_VDO_SLABS)
+ return VDO_TOO_MANY_SLABS;
+
+ total_slab_blocks = slab_count * slab_config.slab_blocks;
+ total_data_blocks = slab_count * slab_config.data_blocks;
+ last_block = partition->offset + total_slab_blocks;
+
+ *state = (struct slab_depot_state_2_0) {
+ .slab_config = slab_config,
+ .first_block = partition->offset,
+ .last_block = last_block,
+ .zone_count = zone_count,
+ };
+
+ vdo_log_debug("slab_depot last_block=%llu, total_data_blocks=%llu, slab_count=%zu, left_over=%llu",
+ (unsigned long long) last_block,
+ (unsigned long long) total_data_blocks, slab_count,
+ (unsigned long long) (partition->count - (last_block - partition->offset)));
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_configure_slab() - Measure and initialize the configuration to use for each slab.
+ * @slab_size: The number of blocks per slab.
+ * @slab_journal_blocks: The number of blocks for the slab journal.
+ * @slab_config: The slab configuration to initialize.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_configure_slab(block_count_t slab_size, block_count_t slab_journal_blocks,
+ struct slab_config *slab_config)
+{
+ block_count_t ref_blocks, meta_blocks, data_blocks;
+ block_count_t flushing_threshold, remaining, blocking_threshold;
+ block_count_t minimal_extra_space, scrubbing_threshold;
+
+ if (slab_journal_blocks >= slab_size)
+ return VDO_BAD_CONFIGURATION;
+
+ /*
+ * This calculation should technically be a recurrence, but the total number of metadata
+ * blocks is currently less than a single block of ref_counts, so we'd gain at most one
+ * data block in each slab with more iteration.
+ */
+ ref_blocks = vdo_get_saved_reference_count_size(slab_size - slab_journal_blocks);
+ meta_blocks = (ref_blocks + slab_journal_blocks);
+
+ /* Make sure test code hasn't configured slabs to be too small. */
+ if (meta_blocks >= slab_size)
+ return VDO_BAD_CONFIGURATION;
+
+ /*
+ * If the slab size is very small, assume this must be a unit test and override the number
+ * of data blocks to be a power of two (wasting blocks in the slab). Many tests need their
+ * data_blocks fields to be the exact capacity of the configured volume, and that used to
+ * fall out since they use a power of two for the number of data blocks, the slab size was
+ * a power of two, and every block in a slab was a data block.
+ *
+ * TODO: Try to figure out some way of structuring testParameters and unit tests so this
+ * hack isn't needed without having to edit several unit tests every time the metadata size
+ * changes by one block.
+ */
+ data_blocks = slab_size - meta_blocks;
+ if ((slab_size < 1024) && !is_power_of_2(data_blocks))
+ data_blocks = ((block_count_t) 1 << ilog2(data_blocks));
+
+ /*
+ * Configure the slab journal thresholds. The flush threshold is 168 of 224 blocks in
+ * production, or 3/4ths, so we use this ratio for all sizes.
+ */
+ flushing_threshold = ((slab_journal_blocks * 3) + 3) / 4;
+ /*
+ * The blocking threshold should be far enough from the flushing threshold to not produce
+ * delays, but far enough from the end of the journal to allow multiple successive recovery
+ * failures.
+ */
+ remaining = slab_journal_blocks - flushing_threshold;
+ blocking_threshold = flushing_threshold + ((remaining * 5) / 7);
+ /* The scrubbing threshold should be at least 2048 entries before the end of the journal. */
+ minimal_extra_space = 1 + (MAXIMUM_VDO_USER_VIOS / VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK);
+ scrubbing_threshold = blocking_threshold;
+ if (slab_journal_blocks > minimal_extra_space)
+ scrubbing_threshold = slab_journal_blocks - minimal_extra_space;
+ if (blocking_threshold > scrubbing_threshold)
+ blocking_threshold = scrubbing_threshold;
+
+	*slab_config = (struct slab_config) {
+		.slab_blocks = slab_size,
+		.data_blocks = data_blocks,
+		.reference_count_blocks = ref_blocks,
+		.slab_journal_blocks = slab_journal_blocks,
+		.slab_journal_flushing_threshold = flushing_threshold,
+		.slab_journal_blocking_threshold = blocking_threshold,
+		.slab_journal_scrubbing_threshold = scrubbing_threshold,
+	};
+ return VDO_SUCCESS;
+}
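+
+/*
+ * With the production slab journal size of 224 blocks, the arithmetic above
+ * yields flushing_threshold = (224 * 3 + 3) / 4 = 168 and blocking_threshold =
+ * 168 + ((224 - 168) * 5) / 7 = 208; the scrubbing threshold is then clamped
+ * so that at least minimal_extra_space blocks remain before the journal's end.
+ */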
+
+/**
+ * vdo_decode_slab_journal_entry() - Decode a slab journal entry.
+ * @block: The journal block holding the entry.
+ * @entry_count: The number of the entry.
+ *
+ * Return: The decoded entry.
+ */
+struct slab_journal_entry vdo_decode_slab_journal_entry(struct packed_slab_journal_block *block,
+ journal_entry_count_t entry_count)
+{
+ struct slab_journal_entry entry =
+ vdo_unpack_slab_journal_entry(&block->payload.entries[entry_count]);
+
+ if (block->header.has_block_map_increments &&
+ ((block->payload.full_entries.entry_types[entry_count / 8] &
+ ((u8) 1 << (entry_count % 8))) != 0))
+ entry.operation = VDO_JOURNAL_BLOCK_MAP_REMAPPING;
+
+ return entry;
+}
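+
+/*
+ * In a block with block map increments, entry_types is a bit array with one
+ * bit per entry: entry N's type is bit N % 8 of byte N / 8, and a set bit
+ * marks a block map remapping. Entry 10, for example, is bit 2 of byte 1.
+ */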
+
+/**
+ * allocate_partition() - Allocate a partition and add it to a layout.
+ * @layout: The layout containing the partition.
+ * @id: The id of the partition.
+ * @offset: The offset into the layout at which the partition begins.
+ * @size: The size of the partition in blocks.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int allocate_partition(struct layout *layout, u8 id,
+ physical_block_number_t offset, block_count_t size)
+{
+ struct partition *partition;
+ int result;
+
+ result = vdo_allocate(1, struct partition, __func__, &partition);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ partition->id = id;
+ partition->offset = offset;
+ partition->count = size;
+ partition->next = layout->head;
+ layout->head = partition;
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * make_partition() - Create a new partition from the beginning or end of the unused space in a
+ * layout.
+ * @layout: The layout.
+ * @id: The id of the partition to make.
+ * @size: The number of blocks to carve out; if 0, all remaining space will be used.
+ * @beginning: True if the partition should start at the beginning of the unused space.
+ *
+ * Return: A success or error code, particularly VDO_NO_SPACE if there are fewer than size blocks
+ * remaining.
+ */
+static int __must_check make_partition(struct layout *layout, enum partition_id id,
+ block_count_t size, bool beginning)
+{
+ int result;
+ physical_block_number_t offset;
+ block_count_t free_blocks = layout->last_free - layout->first_free;
+
+ if (size == 0) {
+ if (free_blocks == 0)
+ return VDO_NO_SPACE;
+ size = free_blocks;
+ } else if (size > free_blocks) {
+ return VDO_NO_SPACE;
+ }
+
+ result = vdo_get_partition(layout, id, NULL);
+ if (result != VDO_UNKNOWN_PARTITION)
+ return VDO_PARTITION_EXISTS;
+
+ offset = beginning ? layout->first_free : (layout->last_free - size);
+
+ result = allocate_partition(layout, id, offset, size);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ layout->num_partitions++;
+ if (beginning)
+ layout->first_free += size;
+ else
+ layout->last_free = layout->last_free - size;
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_initialize_layout() - Lay out the partitions of a vdo.
+ * @size: The entire size of the vdo.
+ * @offset: The start of the layout on the underlying storage in blocks.
+ * @block_map_blocks: The size of the block map partition.
+ * @journal_blocks: The size of the journal partition.
+ * @summary_blocks: The size of the slab summary partition.
+ * @layout: The layout to initialize.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_initialize_layout(block_count_t size, physical_block_number_t offset,
+ block_count_t block_map_blocks, block_count_t journal_blocks,
+ block_count_t summary_blocks, struct layout *layout)
+{
+ int result;
+ block_count_t necessary_size =
+ (offset + block_map_blocks + journal_blocks + summary_blocks);
+
+ if (necessary_size > size)
+ return vdo_log_error_strerror(VDO_NO_SPACE,
+ "Not enough space to make a VDO");
+
+ *layout = (struct layout) {
+ .start = offset,
+ .size = size,
+ .first_free = offset,
+ .last_free = size,
+ .num_partitions = 0,
+ .head = NULL,
+ };
+
+ result = make_partition(layout, VDO_BLOCK_MAP_PARTITION, block_map_blocks, true);
+ if (result != VDO_SUCCESS) {
+ vdo_uninitialize_layout(layout);
+ return result;
+ }
+
+ result = make_partition(layout, VDO_SLAB_SUMMARY_PARTITION, summary_blocks,
+ false);
+ if (result != VDO_SUCCESS) {
+ vdo_uninitialize_layout(layout);
+ return result;
+ }
+
+ result = make_partition(layout, VDO_RECOVERY_JOURNAL_PARTITION, journal_blocks,
+ false);
+ if (result != VDO_SUCCESS) {
+ vdo_uninitialize_layout(layout);
+ return result;
+ }
+
+ result = make_partition(layout, VDO_SLAB_DEPOT_PARTITION, 0, true);
+ if (result != VDO_SUCCESS)
+ vdo_uninitialize_layout(layout);
+
+ return result;
+}
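+
+/*
+ * Given the carving order above, the resulting physical layout is: the block
+ * map at the start of the volume, the slab depot filling the middle, and the
+ * recovery journal followed by the slab summary at the end.
+ */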
+
+/**
+ * vdo_uninitialize_layout() - Clean up a layout.
+ * @layout: The layout to clean up.
+ *
+ * All pointers to partitions created by this layout become invalid.
+ */
+void vdo_uninitialize_layout(struct layout *layout)
+{
+ while (layout->head != NULL) {
+ struct partition *part = layout->head;
+
+ layout->head = part->next;
+ vdo_free(part);
+ }
+
+ memset(layout, 0, sizeof(struct layout));
+}
+
+/**
+ * vdo_get_partition() - Get a partition by id.
+ * @layout: The layout from which to get a partition.
+ * @id: The id of the partition.
+ * @partition_ptr: A pointer to hold the partition.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_get_partition(struct layout *layout, enum partition_id id,
+ struct partition **partition_ptr)
+{
+ struct partition *partition;
+
+ for (partition = layout->head; partition != NULL; partition = partition->next) {
+ if (partition->id == id) {
+ if (partition_ptr != NULL)
+ *partition_ptr = partition;
+ return VDO_SUCCESS;
+ }
+ }
+
+ return VDO_UNKNOWN_PARTITION;
+}
+
+/**
+ * vdo_get_known_partition() - Get a partition by id from a validated layout.
+ * @layout: The layout from which to get a partition.
+ * @id: The id of the partition.
+ *
+ * Return: The partition.
+ */
+struct partition *vdo_get_known_partition(struct layout *layout, enum partition_id id)
+{
+ struct partition *partition;
+ int result = vdo_get_partition(layout, id, &partition);
+
+ VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "layout has expected partition: %u", id);
+
+ return partition;
+}
+
+static void encode_layout(u8 *buffer, size_t *offset, const struct layout *layout)
+{
+ const struct partition *partition;
+ size_t initial_offset;
+ struct header header = VDO_LAYOUT_HEADER_3_0;
+
+ BUILD_BUG_ON(sizeof(enum partition_id) != sizeof(u8));
+ VDO_ASSERT_LOG_ONLY(layout->num_partitions <= U8_MAX,
+ "layout partition count must fit in a byte");
+
+ vdo_encode_header(buffer, offset, &header);
+
+ initial_offset = *offset;
+ encode_u64_le(buffer, offset, layout->first_free);
+ encode_u64_le(buffer, offset, layout->last_free);
+ buffer[(*offset)++] = layout->num_partitions;
+
+ VDO_ASSERT_LOG_ONLY(sizeof(struct layout_3_0) == *offset - initial_offset,
+ "encoded size of a layout header must match structure");
+
+ for (partition = layout->head; partition != NULL; partition = partition->next) {
+ buffer[(*offset)++] = partition->id;
+ encode_u64_le(buffer, offset, partition->offset);
+ /* This field only exists for backwards compatibility */
+ encode_u64_le(buffer, offset, 0);
+ encode_u64_le(buffer, offset, partition->count);
+ }
+
+ VDO_ASSERT_LOG_ONLY(header.size == *offset - initial_offset,
+ "encoded size of a layout must match header size");
+}
+
+static int decode_layout(u8 *buffer, size_t *offset, physical_block_number_t start,
+ block_count_t size, struct layout *layout)
+{
+ struct header header;
+ struct layout_3_0 layout_header;
+ struct partition *partition;
+ size_t initial_offset;
+ physical_block_number_t first_free, last_free;
+ u8 partition_count;
+ u8 i;
+ int result;
+
+ vdo_decode_header(buffer, offset, &header);
+ /* Layout is variable size, so only do a minimum size check here. */
+ result = vdo_validate_header(&VDO_LAYOUT_HEADER_3_0, &header, false, __func__);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ initial_offset = *offset;
+ decode_u64_le(buffer, offset, &first_free);
+ decode_u64_le(buffer, offset, &last_free);
+ partition_count = buffer[(*offset)++];
+ layout_header = (struct layout_3_0) {
+ .first_free = first_free,
+ .last_free = last_free,
+ .partition_count = partition_count,
+ };
+
+ result = VDO_ASSERT(sizeof(struct layout_3_0) == *offset - initial_offset,
+ "decoded size of a layout header must match structure");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ layout->start = start;
+ layout->size = size;
+ layout->first_free = layout_header.first_free;
+ layout->last_free = layout_header.last_free;
+ layout->num_partitions = layout_header.partition_count;
+
+ if (layout->num_partitions > VDO_PARTITION_COUNT) {
+ return vdo_log_error_strerror(VDO_UNKNOWN_PARTITION,
+ "layout has extra partitions");
+ }
+
+ for (i = 0; i < layout->num_partitions; i++) {
+ u8 id;
+ u64 partition_offset, count;
+
+ id = buffer[(*offset)++];
+ decode_u64_le(buffer, offset, &partition_offset);
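+		/* Skip a legacy field preserved only for backwards compatibility. */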
+ *offset += sizeof(u64);
+ decode_u64_le(buffer, offset, &count);
+
+ result = allocate_partition(layout, id, partition_offset, count);
+ if (result != VDO_SUCCESS) {
+ vdo_uninitialize_layout(layout);
+ return result;
+ }
+ }
+
+ /* Validate that the layout has all (and only) the required partitions */
+ for (i = 0; i < VDO_PARTITION_COUNT; i++) {
+ result = vdo_get_partition(layout, REQUIRED_PARTITIONS[i], &partition);
+ if (result != VDO_SUCCESS) {
+ vdo_uninitialize_layout(layout);
+ return vdo_log_error_strerror(result,
+ "layout is missing required partition %u",
+ REQUIRED_PARTITIONS[i]);
+ }
+
+ start += partition->count;
+ }
+
+ if (start != size) {
+ vdo_uninitialize_layout(layout);
+ return vdo_log_error_strerror(UDS_BAD_STATE,
+ "partitions do not cover the layout");
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * pack_vdo_config() - Convert a vdo_config to its packed on-disk representation.
+ * @config: The vdo config to convert.
+ *
+ * Return: The platform-independent representation of the config.
+ */
+static struct packed_vdo_config pack_vdo_config(struct vdo_config config)
+{
+ return (struct packed_vdo_config) {
+ .logical_blocks = __cpu_to_le64(config.logical_blocks),
+ .physical_blocks = __cpu_to_le64(config.physical_blocks),
+ .slab_size = __cpu_to_le64(config.slab_size),
+ .recovery_journal_size = __cpu_to_le64(config.recovery_journal_size),
+ .slab_journal_blocks = __cpu_to_le64(config.slab_journal_blocks),
+ };
+}
+
+/**
+ * pack_vdo_component() - Convert a vdo_component to its packed on-disk representation.
+ * @component: The VDO component data to convert.
+ *
+ * Return: The platform-independent representation of the component.
+ */
+static struct packed_vdo_component_41_0 pack_vdo_component(const struct vdo_component component)
+{
+ return (struct packed_vdo_component_41_0) {
+ .state = __cpu_to_le32(component.state),
+ .complete_recoveries = __cpu_to_le64(component.complete_recoveries),
+ .read_only_recoveries = __cpu_to_le64(component.read_only_recoveries),
+ .config = pack_vdo_config(component.config),
+ .nonce = __cpu_to_le64(component.nonce),
+ };
+}
+
+static void encode_vdo_component(u8 *buffer, size_t *offset,
+ struct vdo_component component)
+{
+ struct packed_vdo_component_41_0 packed;
+
+ encode_version_number(buffer, offset, VDO_COMPONENT_DATA_41_0);
+ packed = pack_vdo_component(component);
+ memcpy(buffer + *offset, &packed, sizeof(packed));
+ *offset += sizeof(packed);
+}
+
+/**
+ * unpack_vdo_config() - Convert a packed_vdo_config to its native in-memory representation.
+ * @config: The packed vdo config to convert.
+ *
+ * Return: The native in-memory representation of the vdo config.
+ */
+static struct vdo_config unpack_vdo_config(struct packed_vdo_config config)
+{
+ return (struct vdo_config) {
+ .logical_blocks = __le64_to_cpu(config.logical_blocks),
+ .physical_blocks = __le64_to_cpu(config.physical_blocks),
+ .slab_size = __le64_to_cpu(config.slab_size),
+ .recovery_journal_size = __le64_to_cpu(config.recovery_journal_size),
+ .slab_journal_blocks = __le64_to_cpu(config.slab_journal_blocks),
+ };
+}
+
+/**
+ * unpack_vdo_component_41_0() - Convert a packed_vdo_component_41_0 to its native in-memory
+ * representation.
+ * @component: The packed vdo component data to convert.
+ *
+ * Return: The native in-memory representation of the component.
+ */
+static struct vdo_component unpack_vdo_component_41_0(struct packed_vdo_component_41_0 component)
+{
+ return (struct vdo_component) {
+ .state = __le32_to_cpu(component.state),
+ .complete_recoveries = __le64_to_cpu(component.complete_recoveries),
+ .read_only_recoveries = __le64_to_cpu(component.read_only_recoveries),
+ .config = unpack_vdo_config(component.config),
+ .nonce = __le64_to_cpu(component.nonce),
+ };
+}
+
+/**
+ * decode_vdo_component() - Decode the component data for the vdo itself out of the super block.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int decode_vdo_component(u8 *buffer, size_t *offset, struct vdo_component *component)
+{
+ struct version_number version;
+ struct packed_vdo_component_41_0 packed;
+ int result;
+
+ decode_version_number(buffer, offset, &version);
+ result = validate_version(version, VDO_COMPONENT_DATA_41_0,
+ "VDO component data");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ memcpy(&packed, buffer + *offset, sizeof(packed));
+ *offset += sizeof(packed);
+ *component = unpack_vdo_component_41_0(packed);
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_validate_config() - Validate constraints on a VDO config.
+ * @config: The VDO config.
+ * @physical_block_count: The block count of the underlying storage, which must match the
+ *                        configured physical size.
+ * @logical_block_count: The expected logical size of the VDO, or 0 if the logical size may be
+ * unspecified.
+ *
+ * Return: A success or error code.
+ */
+int vdo_validate_config(const struct vdo_config *config,
+ block_count_t physical_block_count,
+ block_count_t logical_block_count)
+{
+ struct slab_config slab_config;
+ int result;
+
+ result = VDO_ASSERT(config->slab_size > 0, "slab size unspecified");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(is_power_of_2(config->slab_size),
+ "slab size must be a power of two");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(config->slab_size <= (1 << MAX_VDO_SLAB_BITS),
+ "slab size must be less than or equal to 2^%d",
+ MAX_VDO_SLAB_BITS);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(config->slab_journal_blocks >= MINIMUM_VDO_SLAB_JOURNAL_BLOCKS,
+ "slab journal size meets minimum size");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(config->slab_journal_blocks <= config->slab_size,
+ "slab journal size is within expected bound");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_configure_slab(config->slab_size, config->slab_journal_blocks,
+ &slab_config);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT((slab_config.data_blocks >= 1),
+ "slab must be able to hold at least one block");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(config->physical_blocks > 0, "physical blocks unspecified");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(config->physical_blocks <= MAXIMUM_VDO_PHYSICAL_BLOCKS,
+ "physical block count %llu exceeds maximum %llu",
+ (unsigned long long) config->physical_blocks,
+ (unsigned long long) MAXIMUM_VDO_PHYSICAL_BLOCKS);
+ if (result != VDO_SUCCESS)
+ return VDO_OUT_OF_RANGE;
+
+ if (physical_block_count != config->physical_blocks) {
+ vdo_log_error("A physical size of %llu blocks was specified, not the %llu blocks configured in the vdo super block",
+ (unsigned long long) physical_block_count,
+ (unsigned long long) config->physical_blocks);
+ return VDO_PARAMETER_MISMATCH;
+ }
+
+ if (logical_block_count > 0) {
+ result = VDO_ASSERT((config->logical_blocks > 0),
+ "logical blocks unspecified");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (logical_block_count != config->logical_blocks) {
+ vdo_log_error("A logical size of %llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block",
+ (unsigned long long) logical_block_count,
+ (unsigned long long) config->logical_blocks);
+ return VDO_PARAMETER_MISMATCH;
+ }
+ }
+
+ result = VDO_ASSERT(config->logical_blocks <= MAXIMUM_VDO_LOGICAL_BLOCKS,
+ "logical blocks too large");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(config->recovery_journal_size > 0,
+ "recovery journal size unspecified");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(is_power_of_2(config->recovery_journal_size),
+ "recovery journal size must be a power of two");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return result;
+}
+
+/**
+ * vdo_destroy_component_states() - Clean up any allocations in a vdo_component_states.
+ * @states: The component states to destroy.
+ */
+void vdo_destroy_component_states(struct vdo_component_states *states)
+{
+ if (states == NULL)
+ return;
+
+ vdo_uninitialize_layout(&states->layout);
+}
+
+/**
+ * decode_components() - Decode the components now that we know the component data is a version we
+ * understand.
+ * @buffer: The buffer being decoded.
+ * @offset: The offset to start decoding from.
+ * @geometry: The vdo geometry
+ * @states: An object to hold the successfully decoded state.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check decode_components(u8 *buffer, size_t *offset,
+ struct volume_geometry *geometry,
+ struct vdo_component_states *states)
+{
+ int result;
+
+	result = decode_vdo_component(buffer, offset, &states->vdo);
+	if (result != VDO_SUCCESS)
+		return result;
+
+ result = decode_layout(buffer, offset, vdo_get_data_region_start(*geometry) + 1,
+ states->vdo.config.physical_blocks, &states->layout);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = decode_recovery_journal_state_7_0(buffer, offset,
+ &states->recovery_journal);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = decode_slab_depot_state_2_0(buffer, offset, &states->slab_depot);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = decode_block_map_state_2_0(buffer, offset, &states->block_map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ VDO_ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE,
+ "All decoded component data was used");
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_decode_component_states() - Decode the payload of a super block.
+ * @buffer: The buffer containing the encoded super block contents.
+ * @geometry: The vdo geometry
+ * @states: A pointer to hold the decoded states.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_decode_component_states(u8 *buffer, struct volume_geometry *geometry,
+ struct vdo_component_states *states)
+{
+ int result;
+ size_t offset = VDO_COMPONENT_DATA_OFFSET;
+
+ /* This is for backwards compatibility. */
+ decode_u32_le(buffer, &offset, &states->unused);
+
+ /* Check the VDO volume version */
+ decode_version_number(buffer, &offset, &states->volume_version);
+ result = validate_version(VDO_VOLUME_VERSION_67_0, states->volume_version,
+ "volume");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = decode_components(buffer, &offset, geometry, states);
+ if (result != VDO_SUCCESS)
+ vdo_uninitialize_layout(&states->layout);
+
+ return result;
+}
+
+/**
+ * vdo_validate_component_states() - Validate the decoded super block configuration.
+ * @states: The state decoded from the super block.
+ * @geometry_nonce: The nonce from the geometry block.
+ * @physical_size: The block count of the underlying storage, which must match the configured
+ *                 physical size.
+ * @logical_size: The expected logical size of the VDO, or 0 if the logical size may be
+ * unspecified.
+ *
+ * Return: VDO_SUCCESS or an error if the configuration is invalid.
+ */
+int vdo_validate_component_states(struct vdo_component_states *states,
+ nonce_t geometry_nonce, block_count_t physical_size,
+ block_count_t logical_size)
+{
+ if (geometry_nonce != states->vdo.nonce) {
+ return vdo_log_error_strerror(VDO_BAD_NONCE,
+ "Geometry nonce %llu does not match superblock nonce %llu",
+ (unsigned long long) geometry_nonce,
+ (unsigned long long) states->vdo.nonce);
+ }
+
+ return vdo_validate_config(&states->vdo.config, physical_size, logical_size);
+}
+
+/**
+ * vdo_encode_component_states() - Encode the state of all vdo components in the super block.
+ */
+static void vdo_encode_component_states(u8 *buffer, size_t *offset,
+ const struct vdo_component_states *states)
+{
+ /* This is for backwards compatibility. */
+ encode_u32_le(buffer, offset, states->unused);
+ encode_version_number(buffer, offset, states->volume_version);
+ encode_vdo_component(buffer, offset, states->vdo);
+ encode_layout(buffer, offset, &states->layout);
+ encode_recovery_journal_state_7_0(buffer, offset, states->recovery_journal);
+ encode_slab_depot_state_2_0(buffer, offset, states->slab_depot);
+ encode_block_map_state_2_0(buffer, offset, states->block_map);
+
+ VDO_ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE,
+ "All super block component data was encoded");
+}
+
+/**
+ * vdo_encode_super_block() - Encode a super block into its on-disk representation.
+ */
+void vdo_encode_super_block(u8 *buffer, struct vdo_component_states *states)
+{
+ u32 checksum;
+ struct header header = SUPER_BLOCK_HEADER_12_0;
+ size_t offset = 0;
+
+ header.size += VDO_COMPONENT_DATA_SIZE;
+ vdo_encode_header(buffer, &offset, &header);
+ vdo_encode_component_states(buffer, &offset, states);
+
+ checksum = vdo_crc32(buffer, offset);
+ encode_u32_le(buffer, &offset, checksum);
+
+ /*
+	 * Even though the buffer is a full block, to avoid potential corruption from a torn
+ * write, the entire encoding must fit in the first sector.
+ */
+ VDO_ASSERT_LOG_ONLY(offset <= VDO_SECTOR_SIZE,
+ "entire superblock must fit in one sector");
+}
+
+/**
+ * vdo_decode_super_block() - Decode a super block from its on-disk representation.
+ */
+int vdo_decode_super_block(u8 *buffer)
+{
+ struct header header;
+ int result;
+ u32 checksum, saved_checksum;
+ size_t offset = 0;
+
+ /* Decode and validate the header. */
+ vdo_decode_header(buffer, &offset, &header);
+ result = vdo_validate_header(&SUPER_BLOCK_HEADER_12_0, &header, false, __func__);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (header.size > VDO_COMPONENT_DATA_SIZE + sizeof(u32)) {
+ /*
+ * We can't check release version or checksum until we know the content size, so we
+ * have to assume a version mismatch on unexpected values.
+ */
+ return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
+ "super block contents too large: %zu",
+ header.size);
+ }
+
+ /* Skip past the component data for now, to verify the checksum. */
+ offset += VDO_COMPONENT_DATA_SIZE;
+
+ checksum = vdo_crc32(buffer, offset);
+ decode_u32_le(buffer, &offset, &saved_checksum);
+
+ result = VDO_ASSERT(offset == VDO_SUPER_BLOCK_FIXED_SIZE + VDO_COMPONENT_DATA_SIZE,
+ "must have decoded entire superblock payload");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return ((checksum != saved_checksum) ? VDO_CHECKSUM_MISMATCH : VDO_SUCCESS);
+}
diff --git a/drivers/md/dm-vdo/encodings.h b/drivers/md/dm-vdo/encodings.h
new file mode 100644
index 000000000000..e5ff2b0aaa79
--- /dev/null
+++ b/drivers/md/dm-vdo/encodings.h
@@ -0,0 +1,1298 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_ENCODINGS_H
+#define VDO_ENCODINGS_H
+
+#include <linux/blk_types.h>
+#include <linux/crc32.h>
+#include <linux/limits.h>
+#include <linux/uuid.h>
+
+#include "numeric.h"
+
+#include "constants.h"
+#include "types.h"
+
+/*
+ * An in-memory representation of a version number for versioned structures on disk.
+ *
+ * A version number consists of two portions, a major version and a minor version. Any format
+ * change which does not require an explicit upgrade step from the previous version should
+ * increment the minor version. Any format change which either requires an explicit upgrade step,
+ * or is wholly incompatible (i.e. can not be upgraded to), should increment the major version, and
+ * set the minor version to 0.
+ */
+struct version_number {
+ u32 major_version;
+ u32 minor_version;
+};
+
+/*
+ * A packed, machine-independent, on-disk representation of a version_number. Both fields are
+ * stored in little-endian byte order.
+ */
+struct packed_version_number {
+ __le32 major_version;
+ __le32 minor_version;
+} __packed;
+
+/* The registry of component ids for use in headers */
+#define VDO_SUPER_BLOCK 0
+#define VDO_LAYOUT 1
+#define VDO_RECOVERY_JOURNAL 2
+#define VDO_SLAB_DEPOT 3
+#define VDO_BLOCK_MAP 4
+#define VDO_GEOMETRY_BLOCK 5
+
+/* The header for versioned data stored on disk. */
+struct header {
+ u32 id; /* The component this is a header for */
+ struct version_number version; /* The version of the data format */
+ size_t size; /* The size of the data following this header */
+};
+
+/* A packed, machine-independent, on-disk representation of a component header. */
+struct packed_header {
+ __le32 id;
+ struct packed_version_number version;
+ __le64 size;
+} __packed;
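+
+/*
+ * A packed_header occupies 20 bytes on disk: a 4-byte id, an 8-byte packed
+ * version, and an 8-byte size.
+ */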
+
+enum {
+ VDO_GEOMETRY_BLOCK_LOCATION = 0,
+ VDO_GEOMETRY_MAGIC_NUMBER_SIZE = 8,
+ VDO_DEFAULT_GEOMETRY_BLOCK_VERSION = 5,
+};
+
+struct index_config {
+ u32 mem;
+ u32 unused;
+ bool sparse;
+} __packed;
+
+enum volume_region_id {
+ VDO_INDEX_REGION = 0,
+ VDO_DATA_REGION = 1,
+ VDO_VOLUME_REGION_COUNT,
+};
+
+struct volume_region {
+ /* The ID of the region */
+ enum volume_region_id id;
+ /*
+ * The absolute starting offset on the device. The region continues until the next region
+ * begins.
+ */
+ physical_block_number_t start_block;
+} __packed;
+
+struct volume_geometry {
+ /* For backwards compatibility */
+ u32 unused;
+ /* The nonce of this volume */
+ nonce_t nonce;
+ /* The uuid of this volume */
+ uuid_t uuid;
+ /* The block offset to be applied to bios */
+ block_count_t bio_offset;
+ /* The regions in ID order */
+ struct volume_region regions[VDO_VOLUME_REGION_COUNT];
+ /* The index config */
+ struct index_config index_config;
+} __packed;
+
+/* This volume geometry struct is used for sizing only */
+struct volume_geometry_4_0 {
+ /* For backwards compatibility */
+ u32 unused;
+ /* The nonce of this volume */
+ nonce_t nonce;
+ /* The uuid of this volume */
+ uuid_t uuid;
+ /* The regions in ID order */
+ struct volume_region regions[VDO_VOLUME_REGION_COUNT];
+ /* The index config */
+ struct index_config index_config;
+} __packed;
+
+extern const u8 VDO_GEOMETRY_MAGIC_NUMBER[VDO_GEOMETRY_MAGIC_NUMBER_SIZE + 1];
+
+/**
+ * DOC: Block map entries
+ *
+ * The entry for each logical block in the block map is encoded into five bytes, which saves space
+ * in both the on-disk and in-memory layouts. It consists of the 36 low-order bits of a
+ * physical_block_number_t (addressing 256 terabytes with a 4KB block size) and a 4-bit encoding of
+ * a block_mapping_state.
+ *
+ * Of the 8 high bits of the 5-byte structure:
+ *
+ * Bits 7..4: The four highest bits of the 36-bit physical block number
+ * Bits 3..0: The 4-bit block_mapping_state
+ *
+ * The following 4 bytes are the low order bytes of the physical block number, in little-endian
+ * order.
+ *
+ * Conversion functions to and from a data location are provided.
+ */
+struct block_map_entry {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ unsigned mapping_state : 4;
+ unsigned pbn_high_nibble : 4;
+#else
+ unsigned pbn_high_nibble : 4;
+ unsigned mapping_state : 4;
+#endif
+
+ __le32 pbn_low_word;
+} __packed;
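+
+/*
+ * A worked example (ours, for illustration): packing pbn 0x123456789 with
+ * mapping state 2 yields the byte 0x12 (pbn_high_nibble 0x1, mapping_state
+ * 0x2) followed by 0x23456789 in little-endian order, i.e. the five bytes
+ * 0x12 0x89 0x67 0x45 0x23.
+ */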
+
+struct block_map_page_header {
+ __le64 nonce;
+ __le64 pbn;
+
+ /* May be non-zero on disk */
+ u8 unused_long_word[8];
+
+ /* Whether this page has been written twice to disk */
+ bool initialized;
+
+ /* Always zero on disk */
+ u8 unused_byte1;
+
+ /* May be non-zero on disk */
+ u8 unused_byte2;
+ u8 unused_byte3;
+} __packed;
+
+struct block_map_page {
+ struct packed_version_number version;
+ struct block_map_page_header header;
+ struct block_map_entry entries[];
+} __packed;
+
+enum block_map_page_validity {
+ VDO_BLOCK_MAP_PAGE_VALID,
+ VDO_BLOCK_MAP_PAGE_INVALID,
+ /* Valid page found in the wrong location on disk */
+ VDO_BLOCK_MAP_PAGE_BAD,
+};
+
+struct block_map_state_2_0 {
+ physical_block_number_t flat_page_origin;
+ block_count_t flat_page_count;
+ physical_block_number_t root_origin;
+ block_count_t root_count;
+} __packed;
+
+struct boundary {
+ page_number_t levels[VDO_BLOCK_MAP_TREE_HEIGHT];
+};
+
+extern const struct header VDO_BLOCK_MAP_HEADER_2_0;
+
+/* The state of the recovery journal as encoded in the VDO super block. */
+struct recovery_journal_state_7_0 {
+ /* Sequence number to start the journal */
+ sequence_number_t journal_start;
+ /* Number of logical blocks used by VDO */
+ block_count_t logical_blocks_used;
+ /* Number of block map pages allocated */
+ block_count_t block_map_data_blocks;
+} __packed;
+
+extern const struct header VDO_RECOVERY_JOURNAL_HEADER_7_0;
+
+typedef u16 journal_entry_count_t;
+
+/*
+ * A recovery journal entry stores three physical locations: a data location that is the value of a
+ * single mapping in the block map tree, and the two locations of the block map pages and slots
+ * that are acquiring and releasing a reference to the location. The journal entry also stores an
+ * operation code that says whether the mapping is for a logical block or for the block map tree
+ * itself.
+ */
+struct recovery_journal_entry {
+ struct block_map_slot slot;
+ struct data_location mapping;
+ struct data_location unmapping;
+ enum journal_operation operation;
+};
+
+/* The packed, on-disk representation of a recovery journal entry. */
+struct packed_recovery_journal_entry {
+ /*
+ * In little-endian bit order:
+ * Bits 15..12: The four highest bits of the 36-bit physical block number of the block map
+ * tree page
+ * Bits 11..2: The 10-bit block map page slot number
+ * Bits 1..0: The journal_operation of the entry (this actually only requires 1 bit, but
+ * it is convenient to keep the extra bit as part of this field)
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ unsigned operation : 2;
+ unsigned slot_low : 6;
+ unsigned slot_high : 4;
+ unsigned pbn_high_nibble : 4;
+#else
+ unsigned slot_low : 6;
+ unsigned operation : 2;
+ unsigned pbn_high_nibble : 4;
+ unsigned slot_high : 4;
+#endif
+
+ /*
+ * Bits 47..16: The 32 low-order bits of the block map page PBN, in little-endian byte
+ * order
+ */
+ __le32 pbn_low_word;
+
+ /*
+ * Bits 87..48: The five-byte block map entry encoding the location that will be stored in
+ * the block map page slot
+ */
+ struct block_map_entry mapping;
+
+ /*
+ * Bits 127..88: The five-byte block map entry encoding the location that was stored in the
+ * block map page slot
+ */
+ struct block_map_entry unmapping;
+} __packed;
+
+/* The packed, on-disk representation of an old format recovery journal entry. */
+struct packed_recovery_journal_entry_1 {
+ /*
+ * In little-endian bit order:
+ * Bits 15..12: The four highest bits of the 36-bit physical block number of the block map
+ * tree page
+ * Bits 11..2: The 10-bit block map page slot number
+ * Bits 1..0: The 2-bit journal_operation of the entry
+ *
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ unsigned operation : 2;
+ unsigned slot_low : 6;
+ unsigned slot_high : 4;
+ unsigned pbn_high_nibble : 4;
+#else
+ unsigned slot_low : 6;
+ unsigned operation : 2;
+ unsigned pbn_high_nibble : 4;
+ unsigned slot_high : 4;
+#endif
+
+ /*
+ * Bits 47..16: The 32 low-order bits of the block map page PBN, in little-endian byte
+ * order
+ */
+ __le32 pbn_low_word;
+
+ /*
+ * Bits 87..48: The five-byte block map entry encoding the location that was or will be
+ * stored in the block map page slot
+ */
+ struct block_map_entry block_map_entry;
+} __packed;
+
+enum journal_operation_1 {
+ VDO_JOURNAL_DATA_DECREMENT = 0,
+ VDO_JOURNAL_DATA_INCREMENT = 1,
+ VDO_JOURNAL_BLOCK_MAP_DECREMENT = 2,
+ VDO_JOURNAL_BLOCK_MAP_INCREMENT = 3,
+} __packed;
+
+struct recovery_block_header {
+ sequence_number_t block_map_head; /* Block map head sequence number */
+ sequence_number_t slab_journal_head; /* Slab journal head seq. number */
+ sequence_number_t sequence_number; /* Sequence number for this block */
+ nonce_t nonce; /* A given VDO instance's nonce */
+ block_count_t logical_blocks_used; /* Logical blocks in use */
+ block_count_t block_map_data_blocks; /* Allocated block map pages */
+ journal_entry_count_t entry_count; /* Number of entries written */
+ u8 check_byte; /* The protection check byte */
+ u8 recovery_count; /* Number of recoveries completed */
+ enum vdo_metadata_type metadata_type; /* Metadata type */
+};
+
+/*
+ * The packed, on-disk representation of a recovery journal block header. All fields are kept in
+ * little-endian byte order.
+ */
+struct packed_journal_header {
+ /* Block map head 64-bit sequence number */
+ __le64 block_map_head;
+
+ /* Slab journal head 64-bit sequence number */
+ __le64 slab_journal_head;
+
+ /* The 64-bit sequence number for this block */
+ __le64 sequence_number;
+
+ /* A given VDO instance's 64-bit nonce */
+ __le64 nonce;
+
+ /* 8-bit metadata type (should always be one for the recovery journal) */
+ u8 metadata_type;
+
+ /* 16-bit count of the entries encoded in the block */
+ __le16 entry_count;
+
+ /* 64-bit count of the logical blocks used when this block was opened */
+ __le64 logical_blocks_used;
+
+ /* 64-bit count of the block map blocks used when this block was opened */
+ __le64 block_map_data_blocks;
+
+ /* The protection check byte */
+ u8 check_byte;
+
+ /* The number of recoveries completed */
+ u8 recovery_count;
+} __packed;
+
+struct packed_journal_sector {
+ /* The protection check byte */
+ u8 check_byte;
+
+ /* The number of recoveries completed */
+ u8 recovery_count;
+
+ /* The number of entries in this sector */
+ u8 entry_count;
+
+ /* Journal entries for this sector */
+ struct packed_recovery_journal_entry entries[];
+} __packed;
+
+enum {
+ /* The number of entries in each sector (except the last) when filled */
+ RECOVERY_JOURNAL_ENTRIES_PER_SECTOR =
+ ((VDO_SECTOR_SIZE - sizeof(struct packed_journal_sector)) /
+ sizeof(struct packed_recovery_journal_entry)),
+ RECOVERY_JOURNAL_ENTRIES_PER_BLOCK = RECOVERY_JOURNAL_ENTRIES_PER_SECTOR * 7,
+ /* The number of entries in a v1 recovery journal block. */
+ RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK = 311,
+ /* The number of entries in each v1 sector (except the last) when filled */
+ RECOVERY_JOURNAL_1_ENTRIES_PER_SECTOR =
+ ((VDO_SECTOR_SIZE - sizeof(struct packed_journal_sector)) /
+ sizeof(struct packed_recovery_journal_entry_1)),
+ /* The number of entries in the last sector when a block is full */
+ RECOVERY_JOURNAL_1_ENTRIES_IN_LAST_SECTOR =
+ (RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK % RECOVERY_JOURNAL_1_ENTRIES_PER_SECTOR),
+};
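+
+/*
+ * A worked sizing example (ours; assumes VDO_SECTOR_SIZE is 512): a
+ * packed_recovery_journal_entry is 16 bytes and the sector header is 3
+ * bytes, so each sector holds (512 - 3) / 16 = 31 entries and the 7 entry
+ * sectors of a block hold 217. A v1 entry is 11 bytes, giving 46 entries
+ * per sector and 311 % 46 = 35 entries in the last sector of a full block.
+ */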
+
+/* A type representing a reference count of a block. */
+typedef u8 vdo_refcount_t;
+
+/* The absolute position of an entry in a recovery journal or slab journal. */
+struct journal_point {
+ sequence_number_t sequence_number;
+ journal_entry_count_t entry_count;
+};
+
+/* A packed, platform-independent encoding of a struct journal_point. */
+struct packed_journal_point {
+ /*
+ * The packed representation is the little-endian 64-bit representation of the low-order 48
+ * bits of the sequence number, shifted up 16 bits, or'ed with the 16-bit entry count.
+ *
+ * Very long-term, the top 16 bits of the sequence number may not always be zero, as
+ * this encoding assumes (see BZ 1523240).
+ */
+ __le64 encoded_point;
+} __packed;
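+
+/*
+ * For example (ours): sequence number 5 with entry count 3 encodes as
+ * (5 << 16) | 3 = 0x50003, stored as a little-endian u64.
+ */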
+
+/* Special vdo_refcount_t values. */
+#define EMPTY_REFERENCE_COUNT 0
+enum {
+ MAXIMUM_REFERENCE_COUNT = 254,
+ PROVISIONAL_REFERENCE_COUNT = 255,
+};
+
+enum {
+ COUNTS_PER_SECTOR =
+ ((VDO_SECTOR_SIZE - sizeof(struct packed_journal_point)) / sizeof(vdo_refcount_t)),
+ COUNTS_PER_BLOCK = COUNTS_PER_SECTOR * VDO_SECTORS_PER_BLOCK,
+};
+
+/* The format of each sector of a reference_block on disk. */
+struct packed_reference_sector {
+ struct packed_journal_point commit_point;
+ vdo_refcount_t counts[COUNTS_PER_SECTOR];
+} __packed;
+
+struct packed_reference_block {
+ struct packed_reference_sector sectors[VDO_SECTORS_PER_BLOCK];
+};
+
+struct slab_depot_state_2_0 {
+ struct slab_config slab_config;
+ physical_block_number_t first_block;
+ physical_block_number_t last_block;
+ zone_count_t zone_count;
+} __packed;
+
+extern const struct header VDO_SLAB_DEPOT_HEADER_2_0;
+
+/*
+ * vdo_slab journal blocks may have one of two formats, depending upon whether or not any of the
+ * entries in the block are block map increments. Since the steady state for a VDO is that all of
+ * the necessary block map pages will be allocated, most slab journal blocks will have only data
+ * entries. Such blocks can hold more entries, hence the two formats.
+ */
+
+/* A single slab journal entry */
+struct slab_journal_entry {
+ slab_block_number sbn;
+ enum journal_operation operation;
+ bool increment;
+};
+
+/* A single slab journal entry in its on-disk form */
+typedef struct {
+ u8 offset_low8;
+ u8 offset_mid8;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ unsigned offset_high7 : 7;
+ unsigned increment : 1;
+#else
+ unsigned increment : 1;
+ unsigned offset_high7 : 7;
+#endif
+} __packed packed_slab_journal_entry;
+
+/* The unpacked representation of the header of a slab journal block */
+struct slab_journal_block_header {
+ /* Sequence number for head of journal */
+ sequence_number_t head;
+ /* Sequence number for this block */
+ sequence_number_t sequence_number;
+ /* The nonce for a given VDO instance */
+ nonce_t nonce;
+ /* Recovery journal point for last entry */
+ struct journal_point recovery_point;
+ /* Metadata type */
+ enum vdo_metadata_type metadata_type;
+ /* Whether this block contains block map increments */
+ bool has_block_map_increments;
+ /* The number of entries in the block */
+ journal_entry_count_t entry_count;
+};
+
+/*
+ * The packed, on-disk representation of a slab journal block header. All fields are kept in
+ * little-endian byte order.
+ */
+struct packed_slab_journal_block_header {
+ /* 64-bit sequence number for head of journal */
+ __le64 head;
+ /* 64-bit sequence number for this block */
+ __le64 sequence_number;
+ /* Recovery journal point for the last entry, packed into 64 bits */
+ struct packed_journal_point recovery_point;
+ /* The 64-bit nonce for a given VDO instance */
+ __le64 nonce;
+ /* 8-bit metadata type (should always be two, for the slab journal) */
+ u8 metadata_type;
+ /* Whether this block contains block map increments */
+ bool has_block_map_increments;
+ /* 16-bit count of the entries encoded in the block */
+ __le16 entry_count;
+} __packed;
+
+enum {
+ VDO_SLAB_JOURNAL_PAYLOAD_SIZE =
+ VDO_BLOCK_SIZE - sizeof(struct packed_slab_journal_block_header),
+ VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK = (VDO_SLAB_JOURNAL_PAYLOAD_SIZE * 8) / 25,
+ VDO_SLAB_JOURNAL_ENTRY_TYPES_SIZE =
+ ((VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK - 1) / 8) + 1,
+ VDO_SLAB_JOURNAL_ENTRIES_PER_BLOCK =
+ (VDO_SLAB_JOURNAL_PAYLOAD_SIZE / sizeof(packed_slab_journal_entry)),
+};
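+
+/*
+ * A worked sizing example (ours; assumes VDO_BLOCK_SIZE is 4096): the
+ * packed header is 36 bytes, leaving a 4060-byte payload. A full-format
+ * entry costs 24 bits plus 1 bit in the type bitmap, or 25 bits in all, so
+ * a block holds (4060 * 8) / 25 = 1299 such entries plus a 163-byte bitmap.
+ * A data-only block holds 4060 / 3 = 1353 three-byte entries.
+ */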
+
+/* The payload of a slab journal block which has block map increments */
+struct full_slab_journal_entries {
+ /* The entries themselves */
+ packed_slab_journal_entry entries[VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK];
+ /* The bit map indicating which entries are block map increments */
+ u8 entry_types[VDO_SLAB_JOURNAL_ENTRY_TYPES_SIZE];
+} __packed;
+
+typedef union {
+ /* Entries which include block map increments */
+ struct full_slab_journal_entries full_entries;
+ /* Entries which are only data updates */
+ packed_slab_journal_entry entries[VDO_SLAB_JOURNAL_ENTRIES_PER_BLOCK];
+ /* Ensure the payload fills to the end of the block */
+ u8 space[VDO_SLAB_JOURNAL_PAYLOAD_SIZE];
+} __packed slab_journal_payload;
+
+struct packed_slab_journal_block {
+ struct packed_slab_journal_block_header header;
+ slab_journal_payload payload;
+} __packed;
+
+/* The offset of a slab journal tail block. */
+typedef u8 tail_block_offset_t;
+
+struct slab_summary_entry {
+ /* Bits 7..0: The offset of the tail block within the slab journal */
+ tail_block_offset_t tail_block_offset;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* Bits 13..8: A hint about the fullness of the slab */
+ unsigned int fullness_hint : 6;
+ /* Bit 14: Whether the ref_counts must be loaded from the layer */
+ unsigned int load_ref_counts : 1;
+ /* Bit 15: The believed cleanliness of this slab */
+ unsigned int is_dirty : 1;
+#else
+ /* Bit 15: The believed cleanliness of this slab */
+ unsigned int is_dirty : 1;
+ /* Bit 14: Whether the ref_counts must be loaded from the layer */
+ unsigned int load_ref_counts : 1;
+ /* Bits 13..8: A hint about the fullness of the slab */
+ unsigned int fullness_hint : 6;
+#endif
+} __packed;
+
+enum {
+ VDO_SLAB_SUMMARY_FULLNESS_HINT_BITS = 6,
+ VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK = VDO_BLOCK_SIZE / sizeof(struct slab_summary_entry),
+ VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE = MAX_VDO_SLABS / VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK,
+ VDO_SLAB_SUMMARY_BLOCKS = VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE * MAX_VDO_PHYSICAL_ZONES,
+};
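+
+/*
+ * Sizing note (ours; assumes VDO_BLOCK_SIZE is 4096): each
+ * slab_summary_entry is 2 bytes, so VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK is
+ * 4096 / 2 = 2048.
+ */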
+
+struct layout {
+ physical_block_number_t start;
+ block_count_t size;
+ physical_block_number_t first_free;
+ physical_block_number_t last_free;
+ size_t num_partitions;
+ struct partition *head;
+};
+
+struct partition {
+ enum partition_id id; /* The id of this partition */
+ physical_block_number_t offset; /* The offset into the layout of this partition */
+ block_count_t count; /* The number of blocks in the partition */
+ struct partition *next; /* A pointer to the next partition in the layout */
+};
+
+struct layout_3_0 {
+ physical_block_number_t first_free;
+ physical_block_number_t last_free;
+ u8 partition_count;
+} __packed;
+
+struct partition_3_0 {
+ enum partition_id id;
+ physical_block_number_t offset;
+ physical_block_number_t base; /* unused but retained for backwards compatibility */
+ block_count_t count;
+} __packed;
+
+/*
+ * The configuration of the VDO service.
+ */
+struct vdo_config {
+ block_count_t logical_blocks; /* number of logical blocks */
+ block_count_t physical_blocks; /* number of physical blocks */
+ block_count_t slab_size; /* number of blocks in a slab */
+ block_count_t recovery_journal_size; /* number of recovery journal blocks */
+ block_count_t slab_journal_blocks; /* number of slab journal blocks */
+};
+
+/* This is the structure that captures the vdo fields saved as a super block component. */
+struct vdo_component {
+ enum vdo_state state;
+ u64 complete_recoveries;
+ u64 read_only_recoveries;
+ struct vdo_config config;
+ nonce_t nonce;
+};
+
+/*
+ * A packed, machine-independent, on-disk representation of the vdo_config in the VDO component
+ * data in the super block.
+ */
+struct packed_vdo_config {
+ __le64 logical_blocks;
+ __le64 physical_blocks;
+ __le64 slab_size;
+ __le64 recovery_journal_size;
+ __le64 slab_journal_blocks;
+} __packed;
+
+/*
+ * A packed, machine-independent, on-disk representation of version 41.0 of the VDO component data
+ * in the super block.
+ */
+struct packed_vdo_component_41_0 {
+ __le32 state;
+ __le64 complete_recoveries;
+ __le64 read_only_recoveries;
+ struct packed_vdo_config config;
+ __le64 nonce;
+} __packed;
+
+/*
+ * The version of the on-disk format of a VDO volume. This should be incremented any time the
+ * on-disk representation of any VDO structure changes. Changes which require only online upgrade
+ * steps should increment the minor version. Changes which require an offline upgrade or which
+ * cannot be upgraded to at all should increment the major version and set the minor version to 0.
+ */
+extern const struct version_number VDO_VOLUME_VERSION_67_0;
+
+enum {
+ VDO_ENCODED_HEADER_SIZE = sizeof(struct packed_header),
+ BLOCK_MAP_COMPONENT_ENCODED_SIZE =
+ VDO_ENCODED_HEADER_SIZE + sizeof(struct block_map_state_2_0),
+ RECOVERY_JOURNAL_COMPONENT_ENCODED_SIZE =
+ VDO_ENCODED_HEADER_SIZE + sizeof(struct recovery_journal_state_7_0),
+ SLAB_DEPOT_COMPONENT_ENCODED_SIZE =
+ VDO_ENCODED_HEADER_SIZE + sizeof(struct slab_depot_state_2_0),
+ VDO_PARTITION_COUNT = 4,
+ VDO_LAYOUT_ENCODED_SIZE = (VDO_ENCODED_HEADER_SIZE +
+ sizeof(struct layout_3_0) +
+ (sizeof(struct partition_3_0) * VDO_PARTITION_COUNT)),
+ VDO_SUPER_BLOCK_FIXED_SIZE = VDO_ENCODED_HEADER_SIZE + sizeof(u32),
+ VDO_MAX_COMPONENT_DATA_SIZE = VDO_SECTOR_SIZE - VDO_SUPER_BLOCK_FIXED_SIZE,
+ VDO_COMPONENT_ENCODED_SIZE =
+ (sizeof(struct packed_version_number) + sizeof(struct packed_vdo_component_41_0)),
+ VDO_COMPONENT_DATA_OFFSET = VDO_ENCODED_HEADER_SIZE,
+ VDO_COMPONENT_DATA_SIZE = (sizeof(u32) +
+ sizeof(struct packed_version_number) +
+ VDO_COMPONENT_ENCODED_SIZE +
+ VDO_LAYOUT_ENCODED_SIZE +
+ RECOVERY_JOURNAL_COMPONENT_ENCODED_SIZE +
+ SLAB_DEPOT_COMPONENT_ENCODED_SIZE +
+ BLOCK_MAP_COMPONENT_ENCODED_SIZE),
+};
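+
+/*
+ * A worked example of the fixed sizes (ours; assumes VDO_SECTOR_SIZE is
+ * 512): a packed_header is 4 + 8 + 8 = 20 bytes, so the super block fixed
+ * size is 20 + 4 = 24 bytes, leaving at most 488 bytes of component data
+ * in the super block sector.
+ */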
+
+/* The entirety of the component data encoded in the VDO super block. */
+struct vdo_component_states {
+ /* For backwards compatibility */
+ u32 unused;
+
+ /* The VDO volume version */
+ struct version_number volume_version;
+
+ /* Components */
+ struct vdo_component vdo;
+ struct block_map_state_2_0 block_map;
+ struct recovery_journal_state_7_0 recovery_journal;
+ struct slab_depot_state_2_0 slab_depot;
+
+ /* Our partitioning of the underlying storage */
+ struct layout layout;
+};
+
+/**
+ * vdo_are_same_version() - Check whether two version numbers are the same.
+ * @version_a: The first version.
+ * @version_b: The second version.
+ *
+ * Return: true if the two versions are the same.
+ */
+static inline bool vdo_are_same_version(struct version_number version_a,
+ struct version_number version_b)
+{
+ return ((version_a.major_version == version_b.major_version) &&
+ (version_a.minor_version == version_b.minor_version));
+}
+
+/**
+ * vdo_is_upgradable_version() - Check whether an actual version is upgradable to an expected
+ * version.
+ * @expected_version: The expected version.
+ * @actual_version: The version being validated.
+ *
+ * An actual version is upgradable if it has the same major version as the expected version and
+ * a smaller minor version than the expected version.
+ *
+ * Return: true if the actual version is upgradable.
+ */
+static inline bool vdo_is_upgradable_version(struct version_number expected_version,
+ struct version_number actual_version)
+{
+ return ((expected_version.major_version == actual_version.major_version) &&
+ (expected_version.minor_version > actual_version.minor_version));
+}
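+
+/*
+ * For example (ours): actual version 67.0 is upgradable to expected
+ * version 67.1, but not to 68.0 (the major differs), and actual 67.1 is
+ * not upgradable to expected 67.0 (the minor would move backwards).
+ */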
+
+int __must_check vdo_validate_header(const struct header *expected_header,
+ const struct header *actual_header, bool exact_size,
+ const char *component_name);
+
+void vdo_encode_header(u8 *buffer, size_t *offset, const struct header *header);
+void vdo_decode_header(u8 *buffer, size_t *offset, struct header *header);
+
+/**
+ * vdo_pack_version_number() - Convert a version_number to its packed on-disk representation.
+ * @version: The version number to convert.
+ *
+ * Return: the platform-independent representation of the version
+ */
+static inline struct packed_version_number vdo_pack_version_number(struct version_number version)
+{
+ return (struct packed_version_number) {
+ .major_version = __cpu_to_le32(version.major_version),
+ .minor_version = __cpu_to_le32(version.minor_version),
+ };
+}
+
+/**
+ * vdo_unpack_version_number() - Convert a packed_version_number to its native in-memory
+ * representation.
+ * @version: The version number to convert.
+ *
+ * Return: The platform-independent representation of the version.
+ */
+static inline struct version_number vdo_unpack_version_number(struct packed_version_number version)
+{
+ return (struct version_number) {
+ .major_version = __le32_to_cpu(version.major_version),
+ .minor_version = __le32_to_cpu(version.minor_version),
+ };
+}
+
+/**
+ * vdo_pack_header() - Convert a component header to its packed on-disk representation.
+ * @header: The header to convert.
+ *
+ * Return: the platform-independent representation of the header
+ */
+static inline struct packed_header vdo_pack_header(const struct header *header)
+{
+ return (struct packed_header) {
+ .id = __cpu_to_le32(header->id),
+ .version = vdo_pack_version_number(header->version),
+ .size = __cpu_to_le64(header->size),
+ };
+}
+
+/**
+ * vdo_unpack_header() - Convert a packed_header to its native in-memory representation.
+ * @header: The header to convert.
+ *
+ * Return: The platform-independent representation of the version.
+ */
+static inline struct header vdo_unpack_header(const struct packed_header *header)
+{
+ return (struct header) {
+ .id = __le32_to_cpu(header->id),
+ .version = vdo_unpack_version_number(header->version),
+ .size = __le64_to_cpu(header->size),
+ };
+}
+
+/**
+ * vdo_get_index_region_start() - Get the start of the index region from a geometry.
+ * @geometry: The geometry.
+ *
+ * Return: The start of the index region.
+ */
+static inline physical_block_number_t __must_check
+vdo_get_index_region_start(struct volume_geometry geometry)
+{
+ return geometry.regions[VDO_INDEX_REGION].start_block;
+}
+
+/**
+ * vdo_get_data_region_start() - Get the start of the data region from a geometry.
+ * @geometry: The geometry.
+ *
+ * Return: The start of the data region.
+ */
+static inline physical_block_number_t __must_check
+vdo_get_data_region_start(struct volume_geometry geometry)
+{
+ return geometry.regions[VDO_DATA_REGION].start_block;
+}
+
+/**
+ * vdo_get_index_region_size() - Get the size of the index region from a geometry.
+ * @geometry: The geometry.
+ *
+ * Return: The size of the index region.
+ */
+static inline physical_block_number_t __must_check
+vdo_get_index_region_size(struct volume_geometry geometry)
+{
+ return vdo_get_data_region_start(geometry) -
+ vdo_get_index_region_start(geometry);
+}
+
+int __must_check vdo_parse_geometry_block(unsigned char *block,
+ struct volume_geometry *geometry);
+
+static inline bool vdo_is_state_compressed(const enum block_mapping_state mapping_state)
+{
+ return (mapping_state > VDO_MAPPING_STATE_UNCOMPRESSED);
+}
+
+static inline struct block_map_entry
+vdo_pack_block_map_entry(physical_block_number_t pbn, enum block_mapping_state mapping_state)
+{
+ return (struct block_map_entry) {
+ .mapping_state = (mapping_state & 0x0F),
+ .pbn_high_nibble = ((pbn >> 32) & 0x0F),
+ .pbn_low_word = __cpu_to_le32(pbn & UINT_MAX),
+ };
+}
+
+static inline struct data_location vdo_unpack_block_map_entry(const struct block_map_entry *entry)
+{
+ physical_block_number_t low32 = __le32_to_cpu(entry->pbn_low_word);
+ physical_block_number_t high4 = entry->pbn_high_nibble;
+
+ return (struct data_location) {
+ .pbn = ((high4 << 32) | low32),
+ .state = entry->mapping_state,
+ };
+}
+
+static inline bool vdo_is_mapped_location(const struct data_location *location)
+{
+ return (location->state != VDO_MAPPING_STATE_UNMAPPED);
+}
+
+static inline bool vdo_is_valid_location(const struct data_location *location)
+{
+ if (location->pbn == VDO_ZERO_BLOCK)
+ return !vdo_is_state_compressed(location->state);
+ else
+ return vdo_is_mapped_location(location);
+}
+
+static inline physical_block_number_t __must_check
+vdo_get_block_map_page_pbn(const struct block_map_page *page)
+{
+ return __le64_to_cpu(page->header.pbn);
+}
+
+struct block_map_page *vdo_format_block_map_page(void *buffer, nonce_t nonce,
+ physical_block_number_t pbn,
+ bool initialized);
+
+enum block_map_page_validity __must_check vdo_validate_block_map_page(struct block_map_page *page,
+ nonce_t nonce,
+ physical_block_number_t pbn);
+
+static inline page_count_t vdo_compute_block_map_page_count(block_count_t entries)
+{
+ return DIV_ROUND_UP(entries, VDO_BLOCK_MAP_ENTRIES_PER_PAGE);
+}
+
+block_count_t __must_check vdo_compute_new_forest_pages(root_count_t root_count,
+ struct boundary *old_sizes,
+ block_count_t entries,
+ struct boundary *new_sizes);
+
+/**
+ * vdo_pack_recovery_journal_entry() - Return the packed, on-disk representation of a recovery
+ * journal entry.
+ * @entry: The journal entry to pack.
+ *
+ * Return: The packed representation of the journal entry.
+ */
+static inline struct packed_recovery_journal_entry
+vdo_pack_recovery_journal_entry(const struct recovery_journal_entry *entry)
+{
+ return (struct packed_recovery_journal_entry) {
+ .operation = entry->operation,
+ .slot_low = entry->slot.slot & 0x3F,
+ .slot_high = (entry->slot.slot >> 6) & 0x0F,
+ .pbn_high_nibble = (entry->slot.pbn >> 32) & 0x0F,
+ .pbn_low_word = __cpu_to_le32(entry->slot.pbn & UINT_MAX),
+ .mapping = vdo_pack_block_map_entry(entry->mapping.pbn,
+ entry->mapping.state),
+ .unmapping = vdo_pack_block_map_entry(entry->unmapping.pbn,
+ entry->unmapping.state),
+ };
+}
+
+/**
+ * vdo_unpack_recovery_journal_entry() - Unpack the on-disk representation of a recovery journal
+ * entry.
+ * @entry: The recovery journal entry to unpack.
+ *
+ * Return: The unpacked entry.
+ */
+static inline struct recovery_journal_entry
+vdo_unpack_recovery_journal_entry(const struct packed_recovery_journal_entry *entry)
+{
+ physical_block_number_t low32 = __le32_to_cpu(entry->pbn_low_word);
+ physical_block_number_t high4 = entry->pbn_high_nibble;
+
+ return (struct recovery_journal_entry) {
+ .operation = entry->operation,
+ .slot = {
+ .pbn = ((high4 << 32) | low32),
+ .slot = (entry->slot_low | (entry->slot_high << 6)),
+ },
+ .mapping = vdo_unpack_block_map_entry(&entry->mapping),
+ .unmapping = vdo_unpack_block_map_entry(&entry->unmapping),
+ };
+}
+
+const char * __must_check vdo_get_journal_operation_name(enum journal_operation operation);
+
+/**
+ * vdo_is_valid_recovery_journal_sector() - Determine whether the header of the given sector could
+ * describe a valid sector for the given journal block
+ * header.
+ * @header: The unpacked block header to compare against.
+ * @sector: The packed sector to check.
+ * @sector_number: The number of the sector being checked.
+ *
+ * Return: true if the sector matches the block header.
+ */
+static inline bool __must_check
+vdo_is_valid_recovery_journal_sector(const struct recovery_block_header *header,
+ const struct packed_journal_sector *sector,
+ u8 sector_number)
+{
+ if ((header->check_byte != sector->check_byte) ||
+ (header->recovery_count != sector->recovery_count))
+ return false;
+
+ if (header->metadata_type == VDO_METADATA_RECOVERY_JOURNAL_2)
+ return sector->entry_count <= RECOVERY_JOURNAL_ENTRIES_PER_SECTOR;
+
+ if (sector_number == 7)
+ return sector->entry_count <= RECOVERY_JOURNAL_1_ENTRIES_IN_LAST_SECTOR;
+
+ return sector->entry_count <= RECOVERY_JOURNAL_1_ENTRIES_PER_SECTOR;
+}
+
+/**
+ * vdo_compute_recovery_journal_block_number() - Compute the physical block number of the recovery
+ * journal block which would have a given sequence
+ * number.
+ * @journal_size: The size of the journal.
+ * @sequence_number: The sequence number.
+ *
+ * Return: The pbn of the journal block which would have the specified sequence number.
+ */
+static inline physical_block_number_t __must_check
+vdo_compute_recovery_journal_block_number(block_count_t journal_size,
+ sequence_number_t sequence_number)
+{
+ /*
+ * Since journal size is a power of two, the block number modulus can just be extracted
+ * from the low-order bits of the sequence.
+ */
+ return (sequence_number & (journal_size - 1));
+}
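+
+/*
+ * For example (ours): with a 64-block journal, sequence number 200 maps to
+ * block 200 & 63 = 8.
+ */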
+
+/**
+ * vdo_get_journal_block_sector() - Find the recovery journal sector from the block header and
+ * sector number.
+ * @header: The header of the recovery journal block.
+ * @sector_number: The index of the sector (1-based).
+ *
+ * Return: A packed recovery journal sector.
+ */
+static inline struct packed_journal_sector * __must_check
+vdo_get_journal_block_sector(struct packed_journal_header *header, int sector_number)
+{
+ char *sector_data = ((char *) header) + (VDO_SECTOR_SIZE * sector_number);
+
+ return (struct packed_journal_sector *) sector_data;
+}
+
+/**
+ * vdo_pack_recovery_block_header() - Generate the packed representation of a recovery block
+ * header.
+ * @header: The header containing the values to encode.
+ * @packed: The header into which to pack the values.
+ */
+static inline void vdo_pack_recovery_block_header(const struct recovery_block_header *header,
+ struct packed_journal_header *packed)
+{
+ *packed = (struct packed_journal_header) {
+ .block_map_head = __cpu_to_le64(header->block_map_head),
+ .slab_journal_head = __cpu_to_le64(header->slab_journal_head),
+ .sequence_number = __cpu_to_le64(header->sequence_number),
+ .nonce = __cpu_to_le64(header->nonce),
+ .logical_blocks_used = __cpu_to_le64(header->logical_blocks_used),
+ .block_map_data_blocks = __cpu_to_le64(header->block_map_data_blocks),
+ .entry_count = __cpu_to_le16(header->entry_count),
+ .check_byte = header->check_byte,
+ .recovery_count = header->recovery_count,
+ .metadata_type = header->metadata_type,
+ };
+}
+
+/**
+ * vdo_unpack_recovery_block_header() - Decode the packed representation of a recovery block
+ * header.
+ * @packed: The packed header to decode.
+ *
+ * Return: The unpacked header.
+ */
+static inline struct recovery_block_header
+vdo_unpack_recovery_block_header(const struct packed_journal_header *packed)
+{
+ return (struct recovery_block_header) {
+ .block_map_head = __le64_to_cpu(packed->block_map_head),
+ .slab_journal_head = __le64_to_cpu(packed->slab_journal_head),
+ .sequence_number = __le64_to_cpu(packed->sequence_number),
+ .nonce = __le64_to_cpu(packed->nonce),
+ .logical_blocks_used = __le64_to_cpu(packed->logical_blocks_used),
+ .block_map_data_blocks = __le64_to_cpu(packed->block_map_data_blocks),
+ .entry_count = __le16_to_cpu(packed->entry_count),
+ .check_byte = packed->check_byte,
+ .recovery_count = packed->recovery_count,
+ .metadata_type = packed->metadata_type,
+ };
+}
+
+/**
+ * vdo_compute_slab_count() - Compute the number of slabs a depot with given parameters would have.
+ * @first_block: PBN of the first data block.
+ * @last_block: PBN of the last data block.
+ * @slab_size_shift: Exponent for the number of blocks per slab.
+ *
+ * Return: The number of slabs.
+ */
+static inline slab_count_t vdo_compute_slab_count(physical_block_number_t first_block,
+ physical_block_number_t last_block,
+ unsigned int slab_size_shift)
+{
+ return (slab_count_t) ((last_block - first_block) >> slab_size_shift);
+}
+
+int __must_check vdo_configure_slab_depot(const struct partition *partition,
+ struct slab_config slab_config,
+ zone_count_t zone_count,
+ struct slab_depot_state_2_0 *state);
+
+int __must_check vdo_configure_slab(block_count_t slab_size,
+ block_count_t slab_journal_blocks,
+ struct slab_config *slab_config);
+
+/**
+ * vdo_get_saved_reference_count_size() - Get the number of blocks required to save a reference
+ * counts state covering the specified number of data
+ * blocks.
+ * @block_count: The number of physical data blocks that can be referenced.
+ *
+ * Return: The number of blocks required to save reference counts with the given block count.
+ */
+static inline block_count_t vdo_get_saved_reference_count_size(block_count_t block_count)
+{
+ return DIV_ROUND_UP(block_count, COUNTS_PER_BLOCK);
+}
+
+/**
+ * vdo_get_slab_journal_start_block() - Get the physical block number of the start of the slab
+ * journal relative to the start of the block allocator partition.
+ * @slab_config: The slab configuration of the VDO.
+ * @origin: The first block of the slab.
+ */
+static inline physical_block_number_t __must_check
+vdo_get_slab_journal_start_block(const struct slab_config *slab_config,
+ physical_block_number_t origin)
+{
+ return origin + slab_config->data_blocks + slab_config->reference_count_blocks;
+}
+
+/**
+ * vdo_advance_journal_point() - Move the given journal point forward by one entry.
+ * @point: The journal point to adjust.
+ * @entries_per_block: The number of entries in one full block.
+ */
+static inline void vdo_advance_journal_point(struct journal_point *point,
+ journal_entry_count_t entries_per_block)
+{
+ point->entry_count++;
+ if (point->entry_count == entries_per_block) {
+ point->sequence_number++;
+ point->entry_count = 0;
+ }
+}
+
+/**
+ * vdo_before_journal_point() - Check whether the first point precedes the second point.
+ * @first: The first journal point.
+ * @second: The second journal point.
+ *
+ * Return: true if the first point precedes the second point.
+ */
+static inline bool vdo_before_journal_point(const struct journal_point *first,
+ const struct journal_point *second)
+{
+ return ((first->sequence_number < second->sequence_number) ||
+ ((first->sequence_number == second->sequence_number) &&
+ (first->entry_count < second->entry_count)));
+}
+
+/**
+ * vdo_pack_journal_point() - Encode the journal location represented by a
+ * journal_point into a packed_journal_point.
+ * @unpacked: The unpacked input point.
+ * @packed: The packed output point.
+ */
+static inline void vdo_pack_journal_point(const struct journal_point *unpacked,
+ struct packed_journal_point *packed)
+{
+ packed->encoded_point =
+ __cpu_to_le64((unpacked->sequence_number << 16) | unpacked->entry_count);
+}
+
+/**
+ * vdo_unpack_journal_point() - Decode the journal location represented by a packed_journal_point
+ * into a journal_point.
+ * @packed: The packed input point.
+ * @unpacked: The unpacked output point.
+ */
+static inline void vdo_unpack_journal_point(const struct packed_journal_point *packed,
+ struct journal_point *unpacked)
+{
+ u64 native = __le64_to_cpu(packed->encoded_point);
+
+ unpacked->sequence_number = (native >> 16);
+ unpacked->entry_count = (native & 0xffff);
+}
+
+/**
+ * vdo_pack_slab_journal_block_header() - Generate the packed representation of a slab block
+ * header.
+ * @header: The header containing the values to encode.
+ * @packed: The header into which to pack the values.
+ */
+static inline void
+vdo_pack_slab_journal_block_header(const struct slab_journal_block_header *header,
+ struct packed_slab_journal_block_header *packed)
+{
+ packed->head = __cpu_to_le64(header->head);
+ packed->sequence_number = __cpu_to_le64(header->sequence_number);
+ packed->nonce = __cpu_to_le64(header->nonce);
+ packed->entry_count = __cpu_to_le16(header->entry_count);
+ packed->metadata_type = header->metadata_type;
+ packed->has_block_map_increments = header->has_block_map_increments;
+
+ vdo_pack_journal_point(&header->recovery_point, &packed->recovery_point);
+}
+
+/**
+ * vdo_unpack_slab_journal_block_header() - Decode the packed representation of a slab block
+ * header.
+ * @packed: The packed header to decode.
+ * @header: The header into which to unpack the values.
+ */
+static inline void
+vdo_unpack_slab_journal_block_header(const struct packed_slab_journal_block_header *packed,
+ struct slab_journal_block_header *header)
+{
+ *header = (struct slab_journal_block_header) {
+ .head = __le64_to_cpu(packed->head),
+ .sequence_number = __le64_to_cpu(packed->sequence_number),
+ .nonce = __le64_to_cpu(packed->nonce),
+ .entry_count = __le16_to_cpu(packed->entry_count),
+ .metadata_type = packed->metadata_type,
+ .has_block_map_increments = packed->has_block_map_increments,
+ };
+ vdo_unpack_journal_point(&packed->recovery_point, &header->recovery_point);
+}
+
+/**
+ * vdo_pack_slab_journal_entry() - Generate the packed encoding of a slab journal entry.
+ * @packed: The entry into which to pack the values.
+ * @sbn: The slab block number of the entry to encode.
+ * @is_increment: The increment flag.
+ */
+static inline void vdo_pack_slab_journal_entry(packed_slab_journal_entry *packed,
+ slab_block_number sbn, bool is_increment)
+{
+ packed->offset_low8 = (sbn & 0x0000FF);
+ packed->offset_mid8 = (sbn & 0x00FF00) >> 8;
+ packed->offset_high7 = (sbn & 0x7F0000) >> 16;
+ packed->increment = is_increment ? 1 : 0;
+}
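+
+/*
+ * For example (ours): packing sbn 0x123456 as an increment sets
+ * offset_low8 = 0x56, offset_mid8 = 0x34, offset_high7 = 0x12, and
+ * increment = 1.
+ */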
+
+/**
+ * vdo_unpack_slab_journal_entry() - Decode the packed representation of a slab journal entry.
+ * @packed: The packed entry to decode.
+ *
+ * Return: The decoded slab journal entry.
+ */
+static inline struct slab_journal_entry __must_check
+vdo_unpack_slab_journal_entry(const packed_slab_journal_entry *packed)
+{
+ struct slab_journal_entry entry;
+
+ entry.sbn = packed->offset_high7;
+ entry.sbn <<= 8;
+ entry.sbn |= packed->offset_mid8;
+ entry.sbn <<= 8;
+ entry.sbn |= packed->offset_low8;
+ entry.operation = VDO_JOURNAL_DATA_REMAPPING;
+ entry.increment = packed->increment;
+ return entry;
+}
+
+struct slab_journal_entry __must_check
+vdo_decode_slab_journal_entry(struct packed_slab_journal_block *block,
+ journal_entry_count_t entry_count);
+
+/**
+ * vdo_get_slab_summary_hint_shift() - Compute the shift for slab summary hints.
+ * @slab_size_shift: Exponent for the number of blocks per slab.
+ *
+ * Return: The hint shift.
+ */
+static inline u8 __must_check vdo_get_slab_summary_hint_shift(unsigned int slab_size_shift)
+{
+ return ((slab_size_shift > VDO_SLAB_SUMMARY_FULLNESS_HINT_BITS) ?
+ (slab_size_shift - VDO_SLAB_SUMMARY_FULLNESS_HINT_BITS) :
+ 0);
+}
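+
+/*
+ * For example (ours): a slab_size_shift of 13 (8192 blocks per slab) gives
+ * a hint shift of 13 - 6 = 7, so fullness hints are recorded in units of
+ * 128 free blocks.
+ */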
+
+int __must_check vdo_initialize_layout(block_count_t size,
+ physical_block_number_t offset,
+ block_count_t block_map_blocks,
+ block_count_t journal_blocks,
+ block_count_t summary_blocks,
+ struct layout *layout);
+
+void vdo_uninitialize_layout(struct layout *layout);
+
+int __must_check vdo_get_partition(struct layout *layout, enum partition_id id,
+ struct partition **partition_ptr);
+
+struct partition * __must_check vdo_get_known_partition(struct layout *layout,
+ enum partition_id id);
+
+int vdo_validate_config(const struct vdo_config *config,
+ block_count_t physical_block_count,
+ block_count_t logical_block_count);
+
+void vdo_destroy_component_states(struct vdo_component_states *states);
+
+int __must_check vdo_decode_component_states(u8 *buffer,
+ struct volume_geometry *geometry,
+ struct vdo_component_states *states);
+
+int __must_check vdo_validate_component_states(struct vdo_component_states *states,
+ nonce_t geometry_nonce,
+ block_count_t physical_size,
+ block_count_t logical_size);
+
+void vdo_encode_super_block(u8 *buffer, struct vdo_component_states *states);
+int __must_check vdo_decode_super_block(u8 *buffer);
+
+/* We seed with 0L and XOR the result with ~0L to match our historical usage in userspace. */
+static inline u32 vdo_crc32(const void *buf, unsigned long len)
+{
+ return (crc32(0L, buf, len) ^ ~0L);
+}
+
+#endif /* VDO_ENCODINGS_H */
diff --git a/drivers/md/dm-vdo/errors.c b/drivers/md/dm-vdo/errors.c
new file mode 100644
index 000000000000..6f89eb1c63a3
--- /dev/null
+++ b/drivers/md/dm-vdo/errors.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "errors.h"
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+
+#include "logger.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+static const struct error_info successful = { "UDS_SUCCESS", "Success" };
+
+static const char *const message_table[] = {
+ [EPERM] = "Operation not permitted",
+ [ENOENT] = "No such file or directory",
+ [ESRCH] = "No such process",
+ [EINTR] = "Interrupted system call",
+ [EIO] = "Input/output error",
+ [ENXIO] = "No such device or address",
+ [E2BIG] = "Argument list too long",
+ [ENOEXEC] = "Exec format error",
+ [EBADF] = "Bad file descriptor",
+ [ECHILD] = "No child processes",
+ [EAGAIN] = "Resource temporarily unavailable",
+ [ENOMEM] = "Cannot allocate memory",
+ [EACCES] = "Permission denied",
+ [EFAULT] = "Bad address",
+ [ENOTBLK] = "Block device required",
+ [EBUSY] = "Device or resource busy",
+ [EEXIST] = "File exists",
+ [EXDEV] = "Invalid cross-device link",
+ [ENODEV] = "No such device",
+ [ENOTDIR] = "Not a directory",
+ [EISDIR] = "Is a directory",
+ [EINVAL] = "Invalid argument",
+ [ENFILE] = "Too many open files in system",
+ [EMFILE] = "Too many open files",
+ [ENOTTY] = "Inappropriate ioctl for device",
+ [ETXTBSY] = "Text file busy",
+ [EFBIG] = "File too large",
+ [ENOSPC] = "No space left on device",
+ [ESPIPE] = "Illegal seek",
+ [EROFS] = "Read-only file system",
+ [EMLINK] = "Too many links",
+ [EPIPE] = "Broken pipe",
+ [EDOM] = "Numerical argument out of domain",
+ [ERANGE] = "Numerical result out of range"
+};
+
+static const struct error_info error_list[] = {
+ { "UDS_OVERFLOW", "Index overflow" },
+ { "UDS_INVALID_ARGUMENT", "Invalid argument passed to internal routine" },
+ { "UDS_BAD_STATE", "UDS data structures are in an invalid state" },
+ { "UDS_DUPLICATE_NAME", "Attempt to enter the same name into a delta index twice" },
+ { "UDS_ASSERTION_FAILED", "Assertion failed" },
+ { "UDS_QUEUED", "Request queued" },
+ { "UDS_ALREADY_REGISTERED", "Error range already registered" },
+ { "UDS_OUT_OF_RANGE", "Cannot access data outside specified limits" },
+ { "UDS_DISABLED", "UDS library context is disabled" },
+ { "UDS_UNSUPPORTED_VERSION", "Unsupported version" },
+ { "UDS_CORRUPT_DATA", "Some index structure is corrupt" },
+ { "UDS_NO_INDEX", "No index found" },
+ { "UDS_INDEX_NOT_SAVED_CLEANLY", "Index not saved cleanly" },
+};
+
+struct error_block {
+ const char *name;
+ int base;
+ int last;
+ int max;
+ const struct error_info *infos;
+};
+
+#define MAX_ERROR_BLOCKS 6
+
+static struct {
+ int allocated;
+ int count;
+ struct error_block blocks[MAX_ERROR_BLOCKS];
+} registered_errors = {
+ .allocated = MAX_ERROR_BLOCKS,
+ .count = 1,
+ .blocks = { {
+ .name = "UDS Error",
+ .base = UDS_ERROR_CODE_BASE,
+ .last = UDS_ERROR_CODE_LAST,
+ .max = UDS_ERROR_CODE_BLOCK_END,
+ .infos = error_list,
+ } },
+};
+
+/* Get the error info for an error number. Also returns the name of the error block, if known. */
+static const char *get_error_info(int errnum, const struct error_info **info_ptr)
+{
+ struct error_block *block;
+
+ if (errnum == UDS_SUCCESS) {
+ *info_ptr = &successful;
+ return NULL;
+ }
+
+ for (block = registered_errors.blocks;
+ block < registered_errors.blocks + registered_errors.count;
+ block++) {
+ if ((errnum >= block->base) && (errnum < block->last)) {
+ *info_ptr = block->infos + (errnum - block->base);
+ return block->name;
+ } else if ((errnum >= block->last) && (errnum < block->max)) {
+ *info_ptr = NULL;
+ return block->name;
+ }
+ }
+
+ return NULL;
+}
+
+/* Return a string describing a system error message. */
+static const char *system_string_error(int errnum, char *buf, size_t buflen)
+{
+ size_t len;
+ const char *error_string = NULL;
+
+ if ((errnum > 0) && (errnum < ARRAY_SIZE(message_table)))
+ error_string = message_table[errnum];
+
+ len = ((error_string == NULL) ?
+ snprintf(buf, buflen, "Unknown error %d", errnum) :
+ snprintf(buf, buflen, "%s", error_string));
+ if (len < buflen)
+ return buf;
+
+ buf[0] = '\0';
+ return "System error";
+}
+
+/* Convert an error code to a descriptive string. */
+const char *uds_string_error(int errnum, char *buf, size_t buflen)
+{
+ char *buffer = buf;
+ char *buf_end = buf + buflen;
+ const struct error_info *info = NULL;
+ const char *block_name;
+
+ if (buf == NULL)
+ return NULL;
+
+ if (errnum < 0)
+ errnum = -errnum;
+
+ block_name = get_error_info(errnum, &info);
+ if (block_name != NULL) {
+ if (info != NULL) {
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s: %s",
+ block_name, info->message);
+ } else {
+ buffer = vdo_append_to_buffer(buffer, buf_end, "Unknown %s %d",
+ block_name, errnum);
+ }
+ } else if (info != NULL) {
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s", info->message);
+ } else {
+ const char *tmp = system_string_error(errnum, buffer, buf_end - buffer);
+
+ if (tmp != buffer)
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s", tmp);
+ else
+ buffer += strlen(tmp);
+ }
+
+ return buf;
+}
+
+/* Convert an error code to its name. */
+const char *uds_string_error_name(int errnum, char *buf, size_t buflen)
+{
+ char *buffer = buf;
+ char *buf_end = buf + buflen;
+ const struct error_info *info = NULL;
+ const char *block_name;
+
+ if (errnum < 0)
+ errnum = -errnum;
+
+ block_name = get_error_info(errnum, &info);
+ if (block_name != NULL) {
+ if (info != NULL) {
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s", info->name);
+ } else {
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s %d",
+ block_name, errnum);
+ }
+ } else if (info != NULL) {
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s", info->name);
+ } else {
+ const char *tmp;
+
+ tmp = system_string_error(errnum, buffer, buf_end - buffer);
+ if (tmp != buffer)
+ buffer = vdo_append_to_buffer(buffer, buf_end, "%s", tmp);
+ else
+ buffer += strlen(tmp);
+ }
+
+ return buf;
+}
+
+/*
+ * Translate an error code into a value acceptable to the kernel. The input error code may be a
+ * system-generated value (such as -EIO), or an internal UDS status code. The result will be a
+ * negative errno value.
+ */
+int uds_status_to_errno(int error)
+{
+ char error_name[VDO_MAX_ERROR_NAME_SIZE];
+ char error_message[VDO_MAX_ERROR_MESSAGE_SIZE];
+
+ /* 0 is success, and negative values are already system error codes. */
+ if (likely(error <= 0))
+ return error;
+
+ if (error < 1024) {
+ /* This is probably an errno from userspace. */
+ return -error;
+ }
+
+ /* Internal UDS errors */
+ switch (error) {
+ case UDS_NO_INDEX:
+ case UDS_CORRUPT_DATA:
+ /* The index doesn't exist or can't be recovered. */
+ return -ENOENT;
+
+ case UDS_INDEX_NOT_SAVED_CLEANLY:
+ case UDS_UNSUPPORTED_VERSION:
+ /*
+ * The index exists, but can't be loaded. Tell the client it exists so they don't
+ * destroy it inadvertently.
+ */
+ return -EEXIST;
+
+ case UDS_DISABLED:
+ /* The session is unusable; only returned by requests. */
+ return -EIO;
+
+ default:
+ /* Translate an unexpected error into something generic. */
+ vdo_log_info("%s: mapping status code %d (%s: %s) to -EIO",
+ __func__, error,
+ uds_string_error_name(error, error_name,
+ sizeof(error_name)),
+ uds_string_error(error, error_message,
+ sizeof(error_message)));
+ return -EIO;
+ }
+}
+
+/*
+ * Register a block of error codes.
+ *
+ * @block_name: the name of the block of error codes
+ * @first_error: the first error code in the block
+ * @next_free_error: one past the highest possible error in the block
+ * @infos: a pointer to the error info array for the block
+ * @info_size: the size of the error info array
+ */
+int uds_register_error_block(const char *block_name, int first_error,
+ int next_free_error, const struct error_info *infos,
+ size_t info_size)
+{
+ int result;
+ struct error_block *block;
+ struct error_block new_block = {
+ .name = block_name,
+ .base = first_error,
+ .last = first_error + (info_size / sizeof(struct error_info)),
+ .max = next_free_error,
+ .infos = infos,
+ };
+
+ result = VDO_ASSERT(first_error < next_free_error,
+ "well-defined error block range");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (registered_errors.count == registered_errors.allocated) {
+ /* This should never happen. */
+ return UDS_OVERFLOW;
+ }
+
+ for (block = registered_errors.blocks;
+ block < registered_errors.blocks + registered_errors.count;
+ block++) {
+ if (strcmp(block_name, block->name) == 0)
+ return UDS_DUPLICATE_NAME;
+
+ /* Ensure error ranges do not overlap. */
+ if ((first_error < block->max) && (next_free_error > block->base))
+ return UDS_ALREADY_REGISTERED;
+ }
+
+ registered_errors.blocks[registered_errors.count++] = new_block;
+ return UDS_SUCCESS;
+}
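+
+/*
+ * An illustrative call (ours; the names below are hypothetical):
+ *
+ *	static const struct error_info my_errors[] = {
+ *		{ "MY_FIRST_ERROR", "First error in the block" },
+ *	};
+ *
+ *	uds_register_error_block("My Errors", MY_ERROR_BASE,
+ *				 MY_ERROR_BASE + 100, my_errors,
+ *				 sizeof(my_errors));
+ */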
diff --git a/drivers/md/dm-vdo/errors.h b/drivers/md/dm-vdo/errors.h
new file mode 100644
index 000000000000..24e0e745fd5f
--- /dev/null
+++ b/drivers/md/dm-vdo/errors.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_ERRORS_H
+#define UDS_ERRORS_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/* Custom error codes and error-related utilities */
+#define VDO_SUCCESS 0
+
+/* Valid status codes for internal UDS functions. */
+enum uds_status_codes {
+ /* Successful return */
+ UDS_SUCCESS = VDO_SUCCESS,
+ /* Used as a base value for reporting internal errors */
+ UDS_ERROR_CODE_BASE = 1024,
+ /* Index overflow */
+ UDS_OVERFLOW = UDS_ERROR_CODE_BASE,
+ /* Invalid argument passed to internal routine */
+ UDS_INVALID_ARGUMENT,
+ /* UDS data structures are in an invalid state */
+ UDS_BAD_STATE,
+ /* Attempt to enter the same name into an internal structure twice */
+ UDS_DUPLICATE_NAME,
+ /* An assertion failed */
+ UDS_ASSERTION_FAILED,
+ /* A request has been queued for later processing (not an error) */
+ UDS_QUEUED,
+ /* This error range has already been registered */
+ UDS_ALREADY_REGISTERED,
+ /* Attempt to read or write data outside the valid range */
+ UDS_OUT_OF_RANGE,
+ /* The index session is disabled */
+ UDS_DISABLED,
+ /* The index configuration or volume format is no longer supported */
+ UDS_UNSUPPORTED_VERSION,
+ /* Some index structure is corrupt */
+ UDS_CORRUPT_DATA,
+ /* No index state found */
+ UDS_NO_INDEX,
+ /* Attempt to access incomplete index save data */
+ UDS_INDEX_NOT_SAVED_CLEANLY,
+ /* One more than the last UDS_INTERNAL error code */
+ UDS_ERROR_CODE_LAST,
+ /* One more than the last error this block will ever use */
+ UDS_ERROR_CODE_BLOCK_END = UDS_ERROR_CODE_BASE + 440,
+};
+
+enum {
+ VDO_MAX_ERROR_NAME_SIZE = 80,
+ VDO_MAX_ERROR_MESSAGE_SIZE = 128,
+};
+
+struct error_info {
+ const char *name;
+ const char *message;
+};
+
+const char * __must_check uds_string_error(int errnum, char *buf, size_t buflen);
+
+const char *uds_string_error_name(int errnum, char *buf, size_t buflen);
+
+int uds_status_to_errno(int error);
+
+int uds_register_error_block(const char *block_name, int first_error,
+			     int next_free_error, const struct error_info *infos,
+ size_t info_size);
+
+#endif /* UDS_ERRORS_H */
diff --git a/drivers/md/dm-vdo/flush.c b/drivers/md/dm-vdo/flush.c
new file mode 100644
index 000000000000..57e87f0d7069
--- /dev/null
+++ b/drivers/md/dm-vdo/flush.c
@@ -0,0 +1,560 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "flush.h"
+
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "admin-state.h"
+#include "completion.h"
+#include "io-submitter.h"
+#include "logical-zone.h"
+#include "slab-depot.h"
+#include "types.h"
+#include "vdo.h"
+
+struct flusher {
+ struct vdo_completion completion;
+ /* The vdo to which this flusher belongs */
+ struct vdo *vdo;
+ /* The administrative state of the flusher */
+ struct admin_state state;
+ /* The current flush generation of the vdo */
+ sequence_number_t flush_generation;
+ /* The first unacknowledged flush generation */
+ sequence_number_t first_unacknowledged_generation;
+ /* The queue of flush requests waiting to notify other threads */
+ struct vdo_wait_queue notifiers;
+ /* The queue of flush requests waiting for VIOs to complete */
+ struct vdo_wait_queue pending_flushes;
+ /* The flush generation for which notifications are being sent */
+ sequence_number_t notify_generation;
+ /* The logical zone to notify next */
+ struct logical_zone *logical_zone_to_notify;
+ /* The ID of the thread on which flush requests should be made */
+ thread_id_t thread_id;
+ /* The pool of flush requests */
+ mempool_t *flush_pool;
+ /* Bios waiting for a flush request to become available */
+ struct bio_list waiting_flush_bios;
+ /* The lock to protect the previous fields */
+ spinlock_t lock;
+ /* The rotor for selecting the bio queue for submitting flush bios */
+ zone_count_t bio_queue_rotor;
+ /* The number of flushes submitted to the current bio queue */
+ int flush_count;
+};
+
+/**
+ * assert_on_flusher_thread() - Check that we are on the flusher thread.
+ * @flusher: The flusher.
+ * @caller: The function which is asserting.
+ */
+static inline void assert_on_flusher_thread(struct flusher *flusher, const char *caller)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id),
+ "%s() called from flusher thread", caller);
+}
+
+/**
+ * as_flusher() - Convert a generic vdo_completion to a flusher.
+ * @completion: The completion to convert.
+ *
+ * Return: The completion as a flusher.
+ */
+static struct flusher *as_flusher(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_FLUSH_NOTIFICATION_COMPLETION);
+ return container_of(completion, struct flusher, completion);
+}
+
+/**
+ * completion_as_vdo_flush() - Convert a generic vdo_completion to a vdo_flush.
+ * @completion: The completion to convert.
+ *
+ * Return: The completion as a vdo_flush.
+ */
+static inline struct vdo_flush *completion_as_vdo_flush(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_FLUSH_COMPLETION);
+ return container_of(completion, struct vdo_flush, completion);
+}
+
+/**
+ * vdo_waiter_as_flush() - Convert a vdo_flush's generic wait queue entry back to the vdo_flush.
+ * @waiter: The wait queue entry to convert.
+ *
+ * Return: The wait queue entry as a vdo_flush.
+ */
+static struct vdo_flush *vdo_waiter_as_flush(struct vdo_waiter *waiter)
+{
+ return container_of(waiter, struct vdo_flush, waiter);
+}
+
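+/*
+ * Mempool allocation callback (comment ours): requests made with
+ * GFP_NOWAIT must not block, so use the nowait allocator and tolerate
+ * failure; otherwise allocate normally and log any failure.
+ */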
+static void *allocate_flush(gfp_t gfp_mask, void *pool_data)
+{
+ struct vdo_flush *flush = NULL;
+
+ if ((gfp_mask & GFP_NOWAIT) == GFP_NOWAIT) {
+ flush = vdo_allocate_memory_nowait(sizeof(struct vdo_flush), __func__);
+ } else {
+ int result = vdo_allocate(1, struct vdo_flush, __func__, &flush);
+
+ if (result != VDO_SUCCESS)
+ vdo_log_error_strerror(result, "failed to allocate spare flush");
+ }
+
+ if (flush != NULL) {
+ struct flusher *flusher = pool_data;
+
+ vdo_initialize_completion(&flush->completion, flusher->vdo,
+ VDO_FLUSH_COMPLETION);
+ }
+
+ return flush;
+}
+
+static void free_flush(void *element, void *pool_data __always_unused)
+{
+ vdo_free(element);
+}
+
+/**
+ * vdo_make_flusher() - Make a flusher for a vdo.
+ * @vdo: The vdo which owns the flusher.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_make_flusher(struct vdo *vdo)
+{
+ int result = vdo_allocate(1, struct flusher, __func__, &vdo->flusher);
+
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo->flusher->vdo = vdo;
+ vdo->flusher->thread_id = vdo->thread_config.packer_thread;
+ vdo_set_admin_state_code(&vdo->flusher->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ vdo_initialize_completion(&vdo->flusher->completion, vdo,
+ VDO_FLUSH_NOTIFICATION_COMPLETION);
+
+ spin_lock_init(&vdo->flusher->lock);
+ bio_list_init(&vdo->flusher->waiting_flush_bios);
+ vdo->flusher->flush_pool = mempool_create(1, allocate_flush, free_flush,
+ vdo->flusher);
+ return ((vdo->flusher->flush_pool == NULL) ? -ENOMEM : VDO_SUCCESS);
+}
+
+/**
+ * vdo_free_flusher() - Free a flusher.
+ * @flusher: The flusher to free.
+ */
+void vdo_free_flusher(struct flusher *flusher)
+{
+ if (flusher == NULL)
+ return;
+
+ if (flusher->flush_pool != NULL)
+ mempool_destroy(vdo_forget(flusher->flush_pool));
+ vdo_free(flusher);
+}
+
+/**
+ * vdo_get_flusher_thread_id() - Get the ID of the thread on which flusher functions should be
+ * called.
+ * @flusher: The flusher to query.
+ *
+ * Return: The ID of the thread which handles the flusher.
+ */
+thread_id_t vdo_get_flusher_thread_id(struct flusher *flusher)
+{
+ return flusher->thread_id;
+}
+
+static void notify_flush(struct flusher *flusher);
+static void vdo_complete_flush(struct vdo_flush *flush);
+
+/**
+ * finish_notification() - Finish the notification process.
+ * @completion: The flusher completion.
+ *
+ * Finishes the notification process by checking if any flushes have completed and then starting
+ * the notification of the next flush request if one came in while the current notification was in
+ * progress. This callback is registered in flush_packer_callback().
+ */
+static void finish_notification(struct vdo_completion *completion)
+{
+ struct flusher *flusher = as_flusher(completion);
+
+ assert_on_flusher_thread(flusher, __func__);
+
+ vdo_waitq_enqueue_waiter(&flusher->pending_flushes,
+ vdo_waitq_dequeue_waiter(&flusher->notifiers));
+ vdo_complete_flushes(flusher);
+ if (vdo_waitq_has_waiters(&flusher->notifiers))
+ notify_flush(flusher);
+}
+
+/**
+ * flush_packer_callback() - Flush the packer.
+ * @completion: The flusher completion.
+ *
+ * Flushes the packer now that all of the logical zones have been notified of the new flush
+ * request. This callback is registered in increment_generation().
+ */
+static void flush_packer_callback(struct vdo_completion *completion)
+{
+ struct flusher *flusher = as_flusher(completion);
+
+ vdo_increment_packer_flush_generation(flusher->vdo->packer);
+ vdo_launch_completion_callback(completion, finish_notification,
+ flusher->thread_id);
+}
+
+/**
+ * increment_generation() - Increment the flush generation in a logical zone.
+ * @completion: The flusher as a completion.
+ *
+ * If there are more logical zones, go on to the next one; otherwise, flush the packer.
+ * This callback is registered both in notify_flush() and in itself.
+ */
+static void increment_generation(struct vdo_completion *completion)
+{
+ struct flusher *flusher = as_flusher(completion);
+ struct logical_zone *zone = flusher->logical_zone_to_notify;
+
+ vdo_increment_logical_zone_flush_generation(zone, flusher->notify_generation);
+ if (zone->next == NULL) {
+ vdo_launch_completion_callback(completion, flush_packer_callback,
+ flusher->thread_id);
+ return;
+ }
+
+ flusher->logical_zone_to_notify = zone->next;
+ vdo_launch_completion_callback(completion, increment_generation,
+ flusher->logical_zone_to_notify->thread_id);
+}
+
+/**
+ * notify_flush() - Launch a flush notification.
+ * @flusher: The flusher doing the notification.
+ */
+static void notify_flush(struct flusher *flusher)
+{
+ struct vdo_flush *flush =
+ vdo_waiter_as_flush(vdo_waitq_get_first_waiter(&flusher->notifiers));
+
+ flusher->notify_generation = flush->flush_generation;
+ flusher->logical_zone_to_notify = &flusher->vdo->logical_zones->zones[0];
+ flusher->completion.requeue = true;
+ vdo_launch_completion_callback(&flusher->completion, increment_generation,
+ flusher->logical_zone_to_notify->thread_id);
+}
+
+/**
+ * flush_vdo() - Start processing a flush request.
+ * @completion: A flush request (as a vdo_completion)
+ *
+ * This callback is registered in launch_flush().
+ */
+static void flush_vdo(struct vdo_completion *completion)
+{
+ struct vdo_flush *flush = completion_as_vdo_flush(completion);
+ struct flusher *flusher = completion->vdo->flusher;
+ bool may_notify;
+ int result;
+
+ assert_on_flusher_thread(flusher, __func__);
+ result = VDO_ASSERT(vdo_is_state_normal(&flusher->state),
+ "flusher is in normal operation");
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(flusher->vdo, result);
+ vdo_complete_flush(flush);
+ return;
+ }
+
+ flush->flush_generation = flusher->flush_generation++;
+ may_notify = !vdo_waitq_has_waiters(&flusher->notifiers);
+ vdo_waitq_enqueue_waiter(&flusher->notifiers, &flush->waiter);
+ if (may_notify)
+ notify_flush(flusher);
+}
+
+/**
+ * check_for_drain_complete() - Check whether the flusher has drained.
+ * @flusher: The flusher.
+ */
+static void check_for_drain_complete(struct flusher *flusher)
+{
+ bool drained;
+
+ if (!vdo_is_state_draining(&flusher->state) ||
+ vdo_waitq_has_waiters(&flusher->pending_flushes))
+ return;
+
+ spin_lock(&flusher->lock);
+ drained = bio_list_empty(&flusher->waiting_flush_bios);
+ spin_unlock(&flusher->lock);
+
+ if (drained)
+ vdo_finish_draining(&flusher->state);
+}
+
+/**
+ * vdo_complete_flushes() - Attempt to complete any flushes which might have finished.
+ * @flusher: The flusher.
+ */
+void vdo_complete_flushes(struct flusher *flusher)
+{
+ sequence_number_t oldest_active_generation = U64_MAX;
+ struct logical_zone *zone;
+
+ assert_on_flusher_thread(flusher, __func__);
+
+ for (zone = &flusher->vdo->logical_zones->zones[0]; zone != NULL; zone = zone->next)
+ oldest_active_generation =
+ min(oldest_active_generation,
+ READ_ONCE(zone->oldest_active_generation));
+
+ while (vdo_waitq_has_waiters(&flusher->pending_flushes)) {
+ struct vdo_flush *flush =
+ vdo_waiter_as_flush(vdo_waitq_get_first_waiter(&flusher->pending_flushes));
+
+ if (flush->flush_generation >= oldest_active_generation)
+ return;
+
+ VDO_ASSERT_LOG_ONLY((flush->flush_generation ==
+ flusher->first_unacknowledged_generation),
+ "acknowledged next expected flush, %llu, was: %llu",
+ (unsigned long long) flusher->first_unacknowledged_generation,
+ (unsigned long long) flush->flush_generation);
+ vdo_waitq_dequeue_waiter(&flusher->pending_flushes);
+ vdo_complete_flush(flush);
+ flusher->first_unacknowledged_generation++;
+ }
+
+ check_for_drain_complete(flusher);
+}
+
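+/*
+ * An illustrative trace (hypothetical numbers, not part of the driver): if zone A's
+ * oldest_active_generation is 4 and zone B's is 6, the minimum is 4, so the pending
+ * flushes for generations 0 through 3 are acknowledged in order, while the flush for
+ * generation 4 remains queued until every zone's oldest active generation exceeds 4.
+ */
+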
+/**
+ * vdo_dump_flusher() - Dump the flusher, in a thread-unsafe fashion.
+ * @flusher: The flusher.
+ */
+void vdo_dump_flusher(const struct flusher *flusher)
+{
+ vdo_log_info("struct flusher");
+ vdo_log_info(" flush_generation=%llu first_unacknowledged_generation=%llu",
+ (unsigned long long) flusher->flush_generation,
+ (unsigned long long) flusher->first_unacknowledged_generation);
+ vdo_log_info(" notifiers queue is %s; pending_flushes queue is %s",
+ (vdo_waitq_has_waiters(&flusher->notifiers) ? "not empty" : "empty"),
+ (vdo_waitq_has_waiters(&flusher->pending_flushes) ? "not empty" : "empty"));
+}
+
+/**
+ * initialize_flush() - Initialize a vdo_flush structure.
+ * @flush: The flush to initialize.
+ * @vdo: The vdo being flushed.
+ *
+ * Initializes a vdo_flush structure, transferring all the bios in the flusher's waiting_flush_bios
+ * list to it. The caller MUST already hold the lock.
+ */
+static void initialize_flush(struct vdo_flush *flush, struct vdo *vdo)
+{
+ bio_list_init(&flush->bios);
+ bio_list_merge(&flush->bios, &vdo->flusher->waiting_flush_bios);
+ bio_list_init(&vdo->flusher->waiting_flush_bios);
+}
+
+static void launch_flush(struct vdo_flush *flush)
+{
+ struct vdo_completion *completion = &flush->completion;
+
+ vdo_prepare_completion(completion, flush_vdo, flush_vdo,
+ completion->vdo->thread_config.packer_thread, NULL);
+ vdo_enqueue_completion(completion, VDO_DEFAULT_Q_FLUSH_PRIORITY);
+}
+
+/**
+ * vdo_launch_flush() - Function called to start processing a flush request.
+ * @vdo: The vdo.
+ * @bio: The bio containing an empty flush request.
+ *
+ * This is called when we receive an empty flush bio from the block layer, and before acknowledging
+ * a non-empty bio with the FUA flag set.
+ */
+void vdo_launch_flush(struct vdo *vdo, struct bio *bio)
+{
+ /*
+ * Try to allocate a vdo_flush to represent the flush request. If the allocation fails,
+ * we'll deal with it later.
+ */
+ struct vdo_flush *flush = mempool_alloc(vdo->flusher->flush_pool, GFP_NOWAIT);
+ struct flusher *flusher = vdo->flusher;
+ const struct admin_state_code *code = vdo_get_admin_state_code(&flusher->state);
+
+ VDO_ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s",
+ code->name);
+
+ spin_lock(&flusher->lock);
+
+ /* We have a new bio to start. Add it to the list. */
+ bio_list_add(&flusher->waiting_flush_bios, bio);
+
+ if (flush == NULL) {
+ spin_unlock(&flusher->lock);
+ return;
+ }
+
+ /* We have flushes to start. Capture them in the vdo_flush structure. */
+ initialize_flush(flush, vdo);
+ spin_unlock(&flusher->lock);
+
+ /* Finish launching the flushes. */
+ launch_flush(flush);
+}
+
+/**
+ * release_flush() - Release a vdo_flush structure that has completed its work.
+ * @flush: The completed flush structure to re-use or free.
+ *
+ * If there are any pending flush requests whose vdo_flush allocation failed, they will be launched
+ * by immediately re-using the released vdo_flush. If there is no spare vdo_flush, the released
+ * structure will become the spare. Otherwise, the vdo_flush will be freed.
+ */
+static void release_flush(struct vdo_flush *flush)
+{
+ bool relaunch_flush;
+ struct flusher *flusher = flush->completion.vdo->flusher;
+
+ spin_lock(&flusher->lock);
+ if (bio_list_empty(&flusher->waiting_flush_bios)) {
+ relaunch_flush = false;
+ } else {
+ /* We have flushes to start. Capture them in a flush request. */
+ initialize_flush(flush, flusher->vdo);
+ relaunch_flush = true;
+ }
+ spin_unlock(&flusher->lock);
+
+ if (relaunch_flush) {
+ /* Finish launching the flushes. */
+ launch_flush(flush);
+ return;
+ }
+
+ mempool_free(flush, flusher->flush_pool);
+}
+
+/**
+ * vdo_complete_flush_callback() - Function called to complete and free a flush request, registered
+ * in vdo_complete_flush().
+ * @completion: The flush request.
+ */
+static void vdo_complete_flush_callback(struct vdo_completion *completion)
+{
+ struct vdo_flush *flush = completion_as_vdo_flush(completion);
+ struct vdo *vdo = completion->vdo;
+ struct bio *bio;
+
+ while ((bio = bio_list_pop(&flush->bios)) != NULL) {
+ /*
+ * We're not acknowledging this bio now, but we'll never touch it again, so this is
+ * the last chance to account for it.
+ */
+ vdo_count_bios(&vdo->stats.bios_acknowledged, bio);
+
+ /* Update the device, and send it on down... */
+ bio_set_dev(bio, vdo_get_backing_device(vdo));
+ atomic64_inc(&vdo->stats.flush_out);
+ submit_bio_noacct(bio);
+ }
+
+ /*
+ * Release the flush structure, freeing it, re-using it as the spare, or using it to launch
+ * any flushes that had to wait when allocations failed.
+ */
+ release_flush(flush);
+}
+
+/**
+ * select_bio_queue() - Select the bio queue on which to finish a flush request.
+ * @flusher: The flusher finishing the request.
+ */
+static thread_id_t select_bio_queue(struct flusher *flusher)
+{
+ struct vdo *vdo = flusher->vdo;
+ zone_count_t bio_threads = flusher->vdo->thread_config.bio_thread_count;
+ int interval;
+
+ if (bio_threads == 1)
+ return vdo->thread_config.bio_threads[0];
+
+ interval = vdo->device_config->thread_counts.bio_rotation_interval;
+ if (flusher->flush_count == interval) {
+ flusher->flush_count = 1;
+ flusher->bio_queue_rotor = ((flusher->bio_queue_rotor + 1) % bio_threads);
+ } else {
+ flusher->flush_count++;
+ }
+
+ return vdo->thread_config.bio_threads[flusher->bio_queue_rotor];
+}
+
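+/*
+ * An illustrative rotation (hypothetical numbers, not part of the driver): with
+ * bio_rotation_interval = 3 and four bio threads, successive flushes complete on bio
+ * queues 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 0, ... - each queue handles
+ * bio_rotation_interval flushes before the rotor advances to the next one.
+ */
+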
+/**
+ * vdo_complete_flush() - Complete and free a vdo flush request.
+ * @flush: The flush request.
+ */
+static void vdo_complete_flush(struct vdo_flush *flush)
+{
+ struct vdo_completion *completion = &flush->completion;
+
+ vdo_prepare_completion(completion, vdo_complete_flush_callback,
+ vdo_complete_flush_callback,
+ select_bio_queue(completion->vdo->flusher), NULL);
+ vdo_enqueue_completion(completion, BIO_Q_FLUSH_PRIORITY);
+}
+
+/**
+ * initiate_drain() - Initiate a drain.
+ *
+ * Implements vdo_admin_initiator_fn.
+ */
+static void initiate_drain(struct admin_state *state)
+{
+ check_for_drain_complete(container_of(state, struct flusher, state));
+}
+
+/**
+ * vdo_drain_flusher() - Drain the flusher.
+ * @flusher: The flusher to drain.
+ * @completion: The completion to finish when the flusher has drained.
+ *
+ * Drains the flusher by preventing any more VIOs from entering the flusher and then flushing. The
+ * flusher will be left in the suspended state.
+ */
+void vdo_drain_flusher(struct flusher *flusher, struct vdo_completion *completion)
+{
+ assert_on_flusher_thread(flusher, __func__);
+ vdo_start_draining(&flusher->state, VDO_ADMIN_STATE_SUSPENDING, completion,
+ initiate_drain);
+}
+
+/**
+ * vdo_resume_flusher() - Resume a flusher which has been suspended.
+ * @flusher: The flusher to resume.
+ * @parent: The completion to finish when the flusher has resumed.
+ */
+void vdo_resume_flusher(struct flusher *flusher, struct vdo_completion *parent)
+{
+ assert_on_flusher_thread(flusher, __func__);
+ vdo_continue_completion(parent, vdo_resume_if_quiescent(&flusher->state));
+}
diff --git a/drivers/md/dm-vdo/flush.h b/drivers/md/dm-vdo/flush.h
new file mode 100644
index 000000000000..97252d6656e0
--- /dev/null
+++ b/drivers/md/dm-vdo/flush.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_FLUSH_H
+#define VDO_FLUSH_H
+
+#include "funnel-workqueue.h"
+#include "types.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+/* A marker for tracking which journal entries are affected by a flush request. */
+struct vdo_flush {
+ /* The completion for enqueueing this flush request. */
+ struct vdo_completion completion;
+ /* The flush bios covered by this request */
+ struct bio_list bios;
+ /* The wait queue entry for this flush */
+ struct vdo_waiter waiter;
+ /* Which flush this struct represents */
+ sequence_number_t flush_generation;
+};
+
+struct flusher;
+
+int __must_check vdo_make_flusher(struct vdo *vdo);
+
+void vdo_free_flusher(struct flusher *flusher);
+
+thread_id_t __must_check vdo_get_flusher_thread_id(struct flusher *flusher);
+
+void vdo_complete_flushes(struct flusher *flusher);
+
+void vdo_dump_flusher(const struct flusher *flusher);
+
+void vdo_launch_flush(struct vdo *vdo, struct bio *bio);
+
+void vdo_drain_flusher(struct flusher *flusher, struct vdo_completion *completion);
+
+void vdo_resume_flusher(struct flusher *flusher, struct vdo_completion *parent);
+
+#endif /* VDO_FLUSH_H */
diff --git a/drivers/md/dm-vdo/funnel-queue.c b/drivers/md/dm-vdo/funnel-queue.c
new file mode 100644
index 000000000000..a63b2f2bfd7d
--- /dev/null
+++ b/drivers/md/dm-vdo/funnel-queue.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "funnel-queue.h"
+
+#include "cpu.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+int vdo_make_funnel_queue(struct funnel_queue **queue_ptr)
+{
+ int result;
+ struct funnel_queue *queue;
+
+ result = vdo_allocate(1, struct funnel_queue, "funnel queue", &queue);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /*
+ * Initialize the stub entry and put it in the queue, establishing the invariant that
+ * queue->newest and queue->oldest are never null.
+ */
+ queue->stub.next = NULL;
+ queue->newest = &queue->stub;
+ queue->oldest = &queue->stub;
+
+ *queue_ptr = queue;
+ return VDO_SUCCESS;
+}
+
+void vdo_free_funnel_queue(struct funnel_queue *queue)
+{
+ vdo_free(queue);
+}
+
+static struct funnel_queue_entry *get_oldest(struct funnel_queue *queue)
+{
+ /*
+ * Barrier requirements: We need a read barrier between reading a "next" field pointer
+ * value and reading anything it points to. There's an accompanying barrier in
+ * vdo_funnel_queue_put() between its caller setting up the entry and making it visible.
+ */
+ struct funnel_queue_entry *oldest = queue->oldest;
+ struct funnel_queue_entry *next = READ_ONCE(oldest->next);
+
+ if (oldest == &queue->stub) {
+ /*
+ * When the oldest entry is the stub and it has no successor, the queue is
+ * logically empty.
+ */
+ if (next == NULL)
+ return NULL;
+ /*
+ * The stub entry has a successor, so the stub can be dequeued and ignored without
+ * breaking the queue invariants.
+ */
+ oldest = next;
+ queue->oldest = oldest;
+ next = READ_ONCE(oldest->next);
+ }
+
+ /*
+ * We have a non-stub candidate to dequeue. If it lacks a successor, we'll need to put the
+ * stub entry back on the queue first.
+ */
+ if (next == NULL) {
+ struct funnel_queue_entry *newest = READ_ONCE(queue->newest);
+
+ if (oldest != newest) {
+ /*
+ * Another thread has already swung queue->newest atomically, but not yet
+ * assigned previous->next. The queue is really still empty.
+ */
+ return NULL;
+ }
+
+ /*
+ * Put the stub entry back on the queue, ensuring a successor will eventually be
+ * seen.
+ */
+ vdo_funnel_queue_put(queue, &queue->stub);
+
+ /* Check again for a successor. */
+ next = READ_ONCE(oldest->next);
+ if (next == NULL) {
+ /*
+ * We lost a race with a producer who swapped queue->newest before we did,
+ * but who hasn't yet updated previous->next. Try again later.
+ */
+ return NULL;
+ }
+ }
+
+ return oldest;
+}
+
+/*
+ * Poll a queue, removing the oldest entry if the queue is not empty. This function must only be
+ * called from a single consumer thread.
+ */
+struct funnel_queue_entry *vdo_funnel_queue_poll(struct funnel_queue *queue)
+{
+ struct funnel_queue_entry *oldest = get_oldest(queue);
+
+ if (oldest == NULL)
+ return oldest;
+
+ /*
+ * Dequeue the oldest entry and return it. Only one consumer thread may call this function,
+ * so no locking, atomic operations, or fences are needed; queue->oldest is owned by the
+ * consumer and oldest->next is never used by a producer thread after it is swung from NULL
+ * to non-NULL.
+ */
+ queue->oldest = READ_ONCE(oldest->next);
+ /*
+ * Make sure the caller sees the proper stored data for this entry. Since we've already
+ * fetched the entry pointer we stored in "queue->oldest", this also ensures that on entry
+ * to the next call we'll properly see the dependent data.
+ */
+ smp_rmb();
+ /*
+ * If "oldest" is a very light-weight work item, we'll be looking for the next one very
+ * soon, so prefetch it now.
+ */
+ uds_prefetch_address(queue->oldest, true);
+ WRITE_ONCE(oldest->next, NULL);
+ return oldest;
+}
+
+/*
+ * Check whether the funnel queue is empty or not. If the queue is in a transition state with one
+ * or more entries being added such that the list view is incomplete, this function will report the
+ * queue as empty.
+ */
+bool vdo_is_funnel_queue_empty(struct funnel_queue *queue)
+{
+ return get_oldest(queue) == NULL;
+}
+
+/*
+ * Check whether the funnel queue is idle or not. If the queue has entries available to be
+ * retrieved, it is not idle. If the queue is in a transition state with one or more entries being
+ * added such that the list view is incomplete, it may not be possible to retrieve an entry with
+ * the vdo_funnel_queue_poll() function, but the queue will not be considered idle.
+ */
+bool vdo_is_funnel_queue_idle(struct funnel_queue *queue)
+{
+ /*
+ * Oldest is not the stub, so there's another entry, though if next is NULL we can't
+ * retrieve it yet.
+ */
+ if (queue->oldest != &queue->stub)
+ return false;
+
+ /*
+ * Oldest is the stub, but newest has been updated by _put(); either there's another,
+ * retrievable entry in the list, or the list is officially empty but in the intermediate
+ * state of having an entry added.
+ *
+ * Whether anything is retrievable depends on whether stub.next has been updated and become
+ * visible to us, but for idleness we don't care. And due to memory ordering in _put(), the
+ * update to newest would be visible to us at the same time or sooner.
+ */
+ if (READ_ONCE(queue->newest) != &queue->stub)
+ return false;
+
+ return true;
+}
diff --git a/drivers/md/dm-vdo/funnel-queue.h b/drivers/md/dm-vdo/funnel-queue.h
new file mode 100644
index 000000000000..bde0f1deff98
--- /dev/null
+++ b/drivers/md/dm-vdo/funnel-queue.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_FUNNEL_QUEUE_H
+#define VDO_FUNNEL_QUEUE_H
+
+#include <linux/atomic.h>
+#include <linux/cache.h>
+
+/*
+ * A funnel queue is a simple (almost) lock-free queue that accepts entries from multiple threads
+ * (multi-producer) and delivers them to a single thread (single-consumer). "Funnel" is an attempt
+ * to evoke the image of requests from more than one producer being "funneled down" to a single
+ * consumer.
+ *
+ * This is an unsynchronized but thread-safe data structure when used as intended. There is no
+ * mechanism to ensure that only one thread is consuming from the queue. If more than one thread
+ * attempts to consume from the queue, the resulting behavior is undefined. Clients must not
+ * directly access or manipulate the internals of the queue, which are only exposed for the purpose
+ * of allowing the very simple enqueue operation to be inlined.
+ *
+ * The implementation requires that a funnel_queue_entry structure (a link pointer) is embedded in
+ * the queue entries, and pointers to those structures are used exclusively by the queue. No macros
+ * are defined to template the queue, so the offset of the funnel_queue_entry in the records placed
+ * in the queue must all be the same so the client can derive their structure pointer from the
+ * entry pointer returned by vdo_funnel_queue_poll().
+ *
+ * Callers are wholly responsible for allocating and freeing the entries. Entries may be freed as
+ * soon as they are returned since this queue is not susceptible to the "ABA problem" present in
+ * many lock-free data structures. The queue is dynamically allocated to ensure cache-line
+ * alignment, but no other dynamic allocation is used.
+ *
+ * The algorithm is not actually 100% lock-free. There is a single point in vdo_funnel_queue_put()
+ * at which a preempted producer will prevent the consumers from seeing items added to the queue by
+ * later producers, and only if the queue is short enough or the consumer fast enough for it to
+ * reach what was the end of the queue at the time of the preemption.
+ *
+ * The consumer function, vdo_funnel_queue_poll(), will return NULL when the queue is empty. To
+ * wait for data to consume, spin (if safe) or combine the queue with a struct event_count to
+ * signal the presence of new entries.
+ */
+
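+/*
+ * A minimal usage sketch (illustrative only; my_item and process_item are not part of
+ * this driver). A client embeds the link structure in its record and recovers the
+ * record with container_of() after polling:
+ *
+ *	struct my_item {
+ *		int payload;
+ *		struct funnel_queue_entry entry;
+ *	};
+ *
+ *	Any producer thread:
+ *		vdo_funnel_queue_put(queue, &item->entry);
+ *
+ *	The single consumer thread:
+ *		struct funnel_queue_entry *link = vdo_funnel_queue_poll(queue);
+ *
+ *		if (link != NULL)
+ *			process_item(container_of(link, struct my_item, entry));
+ */
+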
+/* This queue link structure must be embedded in client entries. */
+struct funnel_queue_entry {
+ /* The next (newer) entry in the queue. */
+ struct funnel_queue_entry *next;
+};
+
+/*
+ * The dynamically allocated queue structure, which is allocated on a cache line boundary so the
+ * producer and consumer fields in the structure will land on separate cache lines. This should be
+ * considered opaque, but it is exposed here so vdo_funnel_queue_put() can be inlined.
+ */
+struct __aligned(L1_CACHE_BYTES) funnel_queue {
+ /*
+ * The producers' end of the queue, an atomically exchanged pointer that will never be
+ * NULL.
+ */
+ struct funnel_queue_entry *newest;
+
+ /* The consumer's end of the queue, which is owned by the consumer and never NULL. */
+ struct funnel_queue_entry *oldest __aligned(L1_CACHE_BYTES);
+
+ /* A dummy entry used to provide the non-NULL invariants above. */
+ struct funnel_queue_entry stub;
+};
+
+int __must_check vdo_make_funnel_queue(struct funnel_queue **queue_ptr);
+
+void vdo_free_funnel_queue(struct funnel_queue *queue);
+
+/*
+ * Put an entry on the end of the queue.
+ *
+ * The entry pointer must be to the struct funnel_queue_entry embedded in the caller's data
+ * structure. The caller must be able to derive the address of the start of their data structure
+ * from the pointer passed in here, so every entry in the queue must have the struct
+ * funnel_queue_entry at the same offset within the client's structure.
+ */
+static inline void vdo_funnel_queue_put(struct funnel_queue *queue,
+ struct funnel_queue_entry *entry)
+{
+ struct funnel_queue_entry *previous;
+
+ /*
+ * Barrier requirements: All stores relating to the entry ("next" pointer, containing data
+ * structure fields) must happen before the previous->next store making it visible to the
+ * consumer. Also, the entry's "next" field initialization to NULL must happen before any
+ * other producer threads can see the entry (the xchg) and try to update the "next" field.
+ *
+ * xchg implements a full barrier.
+ */
+ WRITE_ONCE(entry->next, NULL);
+ previous = xchg(&queue->newest, entry);
+ /*
+ * Preemptions between these two statements hide the rest of the queue from the consumer,
+ * preventing consumption until the following assignment runs.
+ */
+ WRITE_ONCE(previous->next, entry);
+}
+
+struct funnel_queue_entry *__must_check vdo_funnel_queue_poll(struct funnel_queue *queue);
+
+bool __must_check vdo_is_funnel_queue_empty(struct funnel_queue *queue);
+
+bool __must_check vdo_is_funnel_queue_idle(struct funnel_queue *queue);
+
+#endif /* VDO_FUNNEL_QUEUE_H */
diff --git a/drivers/md/dm-vdo/funnel-workqueue.c b/drivers/md/dm-vdo/funnel-workqueue.c
new file mode 100644
index 000000000000..ae11941c90a9
--- /dev/null
+++ b/drivers/md/dm-vdo/funnel-workqueue.c
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "funnel-workqueue.h"
+
+#include <linux/atomic.h>
+#include <linux/cache.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/kthread.h>
+#include <linux/percpu.h>
+
+#include "funnel-queue.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "completion.h"
+#include "status-codes.h"
+
+static DEFINE_PER_CPU(unsigned int, service_queue_rotor);
+
+/**
+ * DOC: Work queue definition.
+ *
+ * There are two types of work queues: simple, with one worker thread, and round-robin, which uses
+ * a group of the former to do the work, and assigns work to them in round-robin fashion (roughly).
+ * Externally, both are represented via the same common sub-structure, though there's actually not
+ * a great deal of overlap between the two types internally.
+ */
+struct vdo_work_queue {
+ /* Name of just the work queue (e.g., "cpuQ12") */
+ char *name;
+ bool round_robin_mode;
+ struct vdo_thread *owner;
+ /* Life cycle functions, etc */
+ const struct vdo_work_queue_type *type;
+};
+
+struct simple_work_queue {
+ struct vdo_work_queue common;
+ struct funnel_queue *priority_lists[VDO_WORK_Q_MAX_PRIORITY + 1];
+ void *private;
+
+ /*
+ * The fields above are unchanged after setup but often read, and are good candidates for
+ * caching -- and if the max priority is 2, they just fit in one x86-64 cache line if aligned.
+ * The fields below are often modified as we sleep and wake, so we want a separate cache
+ * line for performance.
+ */
+
+ /* Any (0 or 1) worker threads waiting for new work to do */
+ wait_queue_head_t waiting_worker_threads ____cacheline_aligned;
+ /* Hack to reduce wakeup calls if the worker thread is running */
+ atomic_t idle;
+
+ /* These are infrequently used so in terms of performance we don't care where they land. */
+ struct task_struct *thread;
+ /* Notify creator once worker has initialized */
+ struct completion *started;
+};
+
+struct round_robin_work_queue {
+ struct vdo_work_queue common;
+ struct simple_work_queue **service_queues;
+ unsigned int num_service_queues;
+};
+
+static inline struct simple_work_queue *as_simple_work_queue(struct vdo_work_queue *queue)
+{
+ return ((queue == NULL) ?
+ NULL : container_of(queue, struct simple_work_queue, common));
+}
+
+static inline struct round_robin_work_queue *as_round_robin_work_queue(struct vdo_work_queue *queue)
+{
+ return ((queue == NULL) ?
+ NULL :
+ container_of(queue, struct round_robin_work_queue, common));
+}
+
+/* Processing normal completions. */
+
+/*
+ * Dequeue and return the next waiting completion, if any.
+ *
+ * We scan the funnel queues from highest priority to lowest, once; there is therefore a race
+ * condition where a high-priority completion can be enqueued followed by a lower-priority one, and
+ * we'll grab the latter (but we'll catch the high-priority item on the next call). If strict
+ * enforcement of priorities becomes necessary, this function will need fixing.
+ */
+static struct vdo_completion *poll_for_completion(struct simple_work_queue *queue)
+{
+ int i;
+
+ for (i = queue->common.type->max_priority; i >= 0; i--) {
+ struct funnel_queue_entry *link = vdo_funnel_queue_poll(queue->priority_lists[i]);
+
+ if (link != NULL)
+ return container_of(link, struct vdo_completion, work_queue_entry_link);
+ }
+
+ return NULL;
+}
+
+static void enqueue_work_queue_completion(struct simple_work_queue *queue,
+ struct vdo_completion *completion)
+{
+ VDO_ASSERT_LOG_ONLY(completion->my_queue == NULL,
+ "completion %px (fn %px) to enqueue (%px) is not already queued (%px)",
+ completion, completion->callback, queue, completion->my_queue);
+ if (completion->priority == VDO_WORK_Q_DEFAULT_PRIORITY)
+ completion->priority = queue->common.type->default_priority;
+
+ if (VDO_ASSERT(completion->priority <= queue->common.type->max_priority,
+ "priority is in range for queue") != VDO_SUCCESS)
+ completion->priority = 0;
+
+ completion->my_queue = &queue->common;
+
+ /* Funnel queue handles the synchronization for the put. */
+ vdo_funnel_queue_put(queue->priority_lists[completion->priority],
+ &completion->work_queue_entry_link);
+
+ /*
+ * Due to how funnel queue synchronization is handled (just atomic operations), the
+ * simplest safe implementation here would be to wake up any waiting threads after
+ * enqueueing each item. Even if the funnel queue is not empty at the time of adding an
+ * item to the queue, the consumer thread may not see this since it is not guaranteed to
+ * have the same view of the queue as a producer thread.
+ *
+ * However, the above is wasteful so instead we attempt to minimize the number of thread
+ * wakeups. Using an idle flag, and careful ordering using memory barriers, we should be
+ * able to determine when the worker thread might be asleep or going to sleep. We use
+ * cmpxchg to try to take ownership (vs other producer threads) of the responsibility for
+ * waking the worker thread, so multiple wakeups aren't tried at once.
+ *
+ * This was tuned for some x86 boxes that were handy; it's untested whether doing the read
+ * first is any better or worse for other platforms, even other x86 configurations.
+ */
+ smp_mb();
+ if ((atomic_read(&queue->idle) != 1) || (atomic_cmpxchg(&queue->idle, 1, 0) != 1))
+ return;
+
+ /* There's a maximum of one thread in this list. */
+ wake_up(&queue->waiting_worker_threads);
+}
+
+static void run_start_hook(struct simple_work_queue *queue)
+{
+ if (queue->common.type->start != NULL)
+ queue->common.type->start(queue->private);
+}
+
+static void run_finish_hook(struct simple_work_queue *queue)
+{
+ if (queue->common.type->finish != NULL)
+ queue->common.type->finish(queue->private);
+}
+
+/*
+ * Wait for the next completion to process, or until kthread_should_stop indicates that it's time
+ * for us to shut down.
+ *
+ * If kthread_should_stop says it's time to stop but we have pending completions, we return a
+ * completion.
+ */
+static struct vdo_completion *wait_for_next_completion(struct simple_work_queue *queue)
+{
+ struct vdo_completion *completion;
+ DEFINE_WAIT(wait);
+
+ while (true) {
+ prepare_to_wait(&queue->waiting_worker_threads, &wait,
+ TASK_INTERRUPTIBLE);
+ /*
+ * Don't set the idle flag until a wakeup will not be lost.
+ *
+ * Force synchronization between setting the idle flag and checking the funnel
+ * queue; the producer side will do them in the reverse order. (There's still a
+ * race condition we've chosen to allow, because a subsequent enqueue's wakeup
+ * will unwedge us if we hit it, but this may narrow the window a little.)
+ */
+ atomic_set(&queue->idle, 1);
+ smp_mb(); /* store-load barrier between "idle" and funnel queue */
+
+ completion = poll_for_completion(queue);
+ if (completion != NULL)
+ break;
+
+ /*
+ * We need to check for thread-stop after setting TASK_INTERRUPTIBLE state up
+ * above. Otherwise, schedule() will put the thread to sleep and might miss a
+ * wakeup from kthread_stop() call in vdo_finish_work_queue().
+ */
+ if (kthread_should_stop())
+ break;
+
+ schedule();
+
+ /*
+ * Most of the time when we wake, it should be because there's work to do. If it
+ * was a spurious wakeup, continue looping.
+ */
+ completion = poll_for_completion(queue);
+ if (completion != NULL)
+ break;
+ }
+
+ finish_wait(&queue->waiting_worker_threads, &wait);
+ atomic_set(&queue->idle, 0);
+
+ return completion;
+}
+
+static void process_completion(struct simple_work_queue *queue,
+ struct vdo_completion *completion)
+{
+ if (VDO_ASSERT(completion->my_queue == &queue->common,
+ "completion %px from queue %px marked as being in this queue (%px)",
+ completion, queue, completion->my_queue) == VDO_SUCCESS)
+ completion->my_queue = NULL;
+
+ vdo_run_completion(completion);
+}
+
+static void service_work_queue(struct simple_work_queue *queue)
+{
+ run_start_hook(queue);
+
+ while (true) {
+ struct vdo_completion *completion = poll_for_completion(queue);
+
+ if (completion == NULL)
+ completion = wait_for_next_completion(queue);
+
+ if (completion == NULL) {
+ /* No completions but kthread_should_stop() was triggered. */
+ break;
+ }
+
+ process_completion(queue, completion);
+
+ /*
+ * Be friendly to a CPU that has other work to do, if the kernel has told us to.
+ * This speeds up some performance tests; that "other work" might include other VDO
+ * threads.
+ */
+ if (need_resched())
+ cond_resched();
+ }
+
+ run_finish_hook(queue);
+}
+
+static int work_queue_runner(void *ptr)
+{
+ struct simple_work_queue *queue = ptr;
+
+ complete(queue->started);
+ service_work_queue(queue);
+ return 0;
+}
+
+/* Creation & teardown */
+
+static void free_simple_work_queue(struct simple_work_queue *queue)
+{
+ unsigned int i;
+
+ for (i = 0; i <= VDO_WORK_Q_MAX_PRIORITY; i++)
+ vdo_free_funnel_queue(queue->priority_lists[i]);
+ vdo_free(queue->common.name);
+ vdo_free(queue);
+}
+
+static void free_round_robin_work_queue(struct round_robin_work_queue *queue)
+{
+ struct simple_work_queue **queue_table = queue->service_queues;
+ unsigned int count = queue->num_service_queues;
+ unsigned int i;
+
+ queue->service_queues = NULL;
+
+ for (i = 0; i < count; i++)
+ free_simple_work_queue(queue_table[i]);
+ vdo_free(queue_table);
+ vdo_free(queue->common.name);
+ vdo_free(queue);
+}
+
+void vdo_free_work_queue(struct vdo_work_queue *queue)
+{
+ if (queue == NULL)
+ return;
+
+ vdo_finish_work_queue(queue);
+
+ if (queue->round_robin_mode)
+ free_round_robin_work_queue(as_round_robin_work_queue(queue));
+ else
+ free_simple_work_queue(as_simple_work_queue(queue));
+}
+
+static int make_simple_work_queue(const char *thread_name_prefix, const char *name,
+ struct vdo_thread *owner, void *private,
+ const struct vdo_work_queue_type *type,
+ struct simple_work_queue **queue_ptr)
+{
+ DECLARE_COMPLETION_ONSTACK(started);
+ struct simple_work_queue *queue;
+ int i;
+ struct task_struct *thread = NULL;
+ int result;
+
+ VDO_ASSERT_LOG_ONLY((type->max_priority <= VDO_WORK_Q_MAX_PRIORITY),
+ "queue priority count %u within limit %u", type->max_priority,
+ VDO_WORK_Q_MAX_PRIORITY);
+
+ result = vdo_allocate(1, struct simple_work_queue, "simple work queue", &queue);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ queue->private = private;
+ queue->started = &started;
+ queue->common.type = type;
+ queue->common.owner = owner;
+ init_waitqueue_head(&queue->waiting_worker_threads);
+
+ result = vdo_duplicate_string(name, "queue name", &queue->common.name);
+ if (result != VDO_SUCCESS) {
+ vdo_free(queue);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i <= type->max_priority; i++) {
+ result = vdo_make_funnel_queue(&queue->priority_lists[i]);
+ if (result != VDO_SUCCESS) {
+ free_simple_work_queue(queue);
+ return result;
+ }
+ }
+
+ thread = kthread_run(work_queue_runner, queue, "%s:%s", thread_name_prefix,
+ queue->common.name);
+ if (IS_ERR(thread)) {
+ free_simple_work_queue(queue);
+ return (int) PTR_ERR(thread);
+ }
+
+ queue->thread = thread;
+
+ /*
+ * If we don't wait to ensure the thread is running VDO code, a quick kthread_stop (due to
+ * errors elsewhere) could cause it to never get as far as running VDO, skipping the
+ * cleanup code.
+ *
+ * Eventually we should just make that path safe too, and then we won't need this
+ * synchronization.
+ */
+ wait_for_completion(&started);
+
+ *queue_ptr = queue;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_make_work_queue() - Create a work queue; if multiple threads are requested, completions will
+ * be distributed to them in round-robin fashion.
+ *
+ * Each queue is associated with a struct vdo_thread which has a single vdo thread id. Regardless
+ * of the actual number of queues and threads allocated here, code outside of the queue
+ * implementation will treat this as a single zone.
+ */
+int vdo_make_work_queue(const char *thread_name_prefix, const char *name,
+ struct vdo_thread *owner, const struct vdo_work_queue_type *type,
+ unsigned int thread_count, void *thread_privates[],
+ struct vdo_work_queue **queue_ptr)
+{
+ struct round_robin_work_queue *queue;
+ int result;
+ char thread_name[TASK_COMM_LEN];
+ unsigned int i;
+
+ if (thread_count == 1) {
+ struct simple_work_queue *simple_queue;
+ void *context = ((thread_privates != NULL) ? thread_privates[0] : NULL);
+
+ result = make_simple_work_queue(thread_name_prefix, name, owner, context,
+ type, &simple_queue);
+ if (result == VDO_SUCCESS)
+ *queue_ptr = &simple_queue->common;
+ return result;
+ }
+
+ result = vdo_allocate(1, struct round_robin_work_queue, "round-robin work queue",
+ &queue);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(thread_count, struct simple_work_queue *,
+ "subordinate work queues", &queue->service_queues);
+ if (result != VDO_SUCCESS) {
+ vdo_free(queue);
+ return result;
+ }
+
+ queue->num_service_queues = thread_count;
+ queue->common.round_robin_mode = true;
+ queue->common.owner = owner;
+
+ result = vdo_duplicate_string(name, "queue name", &queue->common.name);
+ if (result != VDO_SUCCESS) {
+ vdo_free(queue->service_queues);
+ vdo_free(queue);
+ return -ENOMEM;
+ }
+
+ *queue_ptr = &queue->common;
+
+ for (i = 0; i < thread_count; i++) {
+ void *context = ((thread_privates != NULL) ? thread_privates[i] : NULL);
+
+ snprintf(thread_name, sizeof(thread_name), "%s%u", name, i);
+ result = make_simple_work_queue(thread_name_prefix, thread_name, owner,
+ context, type, &queue->service_queues[i]);
+ if (result != VDO_SUCCESS) {
+ queue->num_service_queues = i;
+ /* Destroy previously created subordinates. */
+ vdo_free_work_queue(vdo_forget(*queue_ptr));
+ return result;
+ }
+ }
+
+ return VDO_SUCCESS;
+}
+
+static void finish_simple_work_queue(struct simple_work_queue *queue)
+{
+ if (queue->thread == NULL)
+ return;
+
+ /* Tells the worker thread to shut down and waits for it to exit. */
+ kthread_stop(queue->thread);
+ queue->thread = NULL;
+}
+
+static void finish_round_robin_work_queue(struct round_robin_work_queue *queue)
+{
+ struct simple_work_queue **queue_table = queue->service_queues;
+ unsigned int count = queue->num_service_queues;
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ finish_simple_work_queue(queue_table[i]);
+}
+
+/* No enqueueing of completions should be done once this function is called. */
+void vdo_finish_work_queue(struct vdo_work_queue *queue)
+{
+ if (queue == NULL)
+ return;
+
+ if (queue->round_robin_mode)
+ finish_round_robin_work_queue(as_round_robin_work_queue(queue));
+ else
+ finish_simple_work_queue(as_simple_work_queue(queue));
+}
+
+/* Debugging dumps */
+
+static void dump_simple_work_queue(struct simple_work_queue *queue)
+{
+ const char *thread_status = "no threads";
+ char task_state_report = '-';
+
+ if (queue->thread != NULL) {
+ task_state_report = task_state_to_char(queue->thread);
+ thread_status = atomic_read(&queue->idle) ? "idle" : "running";
+ }
+
+ vdo_log_info("workQ %px (%s) %s (%c)", &queue->common, queue->common.name,
+ thread_status, task_state_report);
+
+ /* ->waiting_worker_threads wait queue status? anyone waiting? */
+}
+
+void vdo_dump_work_queue(struct vdo_work_queue *queue)
+{
+ if (queue->round_robin_mode) {
+ struct round_robin_work_queue *round_robin = as_round_robin_work_queue(queue);
+ unsigned int i;
+
+ for (i = 0; i < round_robin->num_service_queues; i++)
+ dump_simple_work_queue(round_robin->service_queues[i]);
+ } else {
+ dump_simple_work_queue(as_simple_work_queue(queue));
+ }
+}
+
+static void get_function_name(void *pointer, char *buffer, size_t buffer_length)
+{
+ if (pointer == NULL) {
+ /*
+ * Format "%ps" logs a null pointer as "(null)" with a bunch of leading spaces. We
+ * sometimes use this when logging lots of data; don't be so verbose.
+ */
+ strscpy(buffer, "-", buffer_length);
+ } else {
+ /*
+ * Use a pragma to defeat gcc's format checking, which doesn't understand that
+ * "%ps" actually does support a precision spec in Linux kernel code.
+ */
+ char *space;
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat"
+ snprintf(buffer, buffer_length, "%.*ps", buffer_length - 1, pointer);
+#pragma GCC diagnostic pop
+
+ space = strchr(buffer, ' ');
+ if (space != NULL)
+ *space = '\0';
+ }
+}
+
+/*
+ * Write to the buffer some info about the completion, for logging. Since the common use case is
+ * dumping info about a lot of completions to syslog all at once, the format favors brevity over
+ * readability.
+ */
+void vdo_dump_completion_to_buffer(struct vdo_completion *completion, char *buffer,
+ size_t length)
+{
+ size_t current_length =
+ scnprintf(buffer, length, "%.*s/", TASK_COMM_LEN,
+ (completion->my_queue == NULL ? "-" : completion->my_queue->name));
+
+ if (current_length < length - 1) {
+ get_function_name((void *) completion->callback, buffer + current_length,
+ length - current_length);
+ }
+}
+
+/* Completion submission */
+/*
+ * If the completion has a timeout that has already passed, the timeout handler function may be
+ * invoked by this function.
+ */
+void vdo_enqueue_work_queue(struct vdo_work_queue *queue,
+ struct vdo_completion *completion)
+{
+ /*
+ * Convert the provided generic vdo_work_queue to the simple_work_queue to actually queue
+ * on.
+ */
+ struct simple_work_queue *simple_queue = NULL;
+
+ if (!queue->round_robin_mode) {
+ simple_queue = as_simple_work_queue(queue);
+ } else {
+ struct round_robin_work_queue *round_robin = as_round_robin_work_queue(queue);
+
+ /*
+ * It shouldn't be a big deal if the same rotor gets used for multiple work queues.
+ * Any patterns that might develop are likely to be disrupted by random ordering of
+ * multiple completions and migration between cores, unless the load is so light as
+ * to be regular in ordering of tasks and the threads are confined to individual
+ * cores; with a load that light we won't care.
+ */
+ unsigned int rotor = this_cpu_inc_return(service_queue_rotor);
+ unsigned int index = rotor % round_robin->num_service_queues;
+
+ simple_queue = round_robin->service_queues[index];
+ }
+
+ enqueue_work_queue_completion(simple_queue, completion);
+}
+
+/* Misc */
+
+/*
+ * Return the work queue pointer recorded at initialization time in the work-queue stack handle
+ * initialized on the stack of the current thread, if any.
+ */
+static struct simple_work_queue *get_current_thread_work_queue(void)
+{
+ /*
+ * In interrupt context, if a vdo thread is what got interrupted, the calls below will find
+ * the queue for the thread which was interrupted. However, the interrupted thread may have
+ * been processing a completion, in which case starting to process another would violate
+ * our concurrency assumptions.
+ */
+ if (in_interrupt())
+ return NULL;
+
+ if (kthread_func(current) != work_queue_runner)
+ /* Not a VDO work queue thread. */
+ return NULL;
+
+ return kthread_data(current);
+}
+
+struct vdo_work_queue *vdo_get_current_work_queue(void)
+{
+ struct simple_work_queue *queue = get_current_thread_work_queue();
+
+ return (queue == NULL) ? NULL : &queue->common;
+}
+
+struct vdo_thread *vdo_get_work_queue_owner(struct vdo_work_queue *queue)
+{
+ return queue->owner;
+}
+
+/**
+ * vdo_get_work_queue_private_data() - Returns the private data for the current thread's work
+ * queue, or NULL if none or if the current thread is not a
+ * work queue thread.
+ */
+void *vdo_get_work_queue_private_data(void)
+{
+ struct simple_work_queue *queue = get_current_thread_work_queue();
+
+ return (queue != NULL) ? queue->private : NULL;
+}
+
+bool vdo_work_queue_type_is(struct vdo_work_queue *queue,
+ const struct vdo_work_queue_type *type)
+{
+ return (queue->type == type);
+}
diff --git a/drivers/md/dm-vdo/funnel-workqueue.h b/drivers/md/dm-vdo/funnel-workqueue.h
new file mode 100644
index 000000000000..b5be6e9e83bc
--- /dev/null
+++ b/drivers/md/dm-vdo/funnel-workqueue.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_WORK_QUEUE_H
+#define VDO_WORK_QUEUE_H
+
+#include <linux/sched.h> /* for TASK_COMM_LEN */
+
+#include "types.h"
+
+enum {
+ MAX_VDO_WORK_QUEUE_NAME_LEN = TASK_COMM_LEN,
+};
+
+struct vdo_work_queue_type {
+ void (*start)(void *context);
+ void (*finish)(void *context);
+ enum vdo_completion_priority max_priority;
+ enum vdo_completion_priority default_priority;
+};
+
+struct vdo_completion;
+struct vdo_thread;
+struct vdo_work_queue;
+
+int vdo_make_work_queue(const char *thread_name_prefix, const char *name,
+ struct vdo_thread *owner, const struct vdo_work_queue_type *type,
+ unsigned int thread_count, void *thread_privates[],
+ struct vdo_work_queue **queue_ptr);
+
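+/*
+ * A minimal creation sketch (illustrative only; the type name, priority values, and
+ * name strings are hypothetical). A caller defines a queue type and then makes a
+ * single-threaded queue:
+ *
+ *	static const struct vdo_work_queue_type example_queue_type = {
+ *		.max_priority = 2,
+ *		.default_priority = 1,
+ *	};
+ *
+ *	struct vdo_work_queue *queue;
+ *	int result = vdo_make_work_queue("vdo0", "exampleQ", owner,
+ *					 &example_queue_type, 1, NULL, &queue);
+ */
+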
+void vdo_enqueue_work_queue(struct vdo_work_queue *queue, struct vdo_completion *completion);
+
+void vdo_finish_work_queue(struct vdo_work_queue *queue);
+
+void vdo_free_work_queue(struct vdo_work_queue *queue);
+
+void vdo_dump_work_queue(struct vdo_work_queue *queue);
+
+void vdo_dump_completion_to_buffer(struct vdo_completion *completion, char *buffer,
+ size_t length);
+
+void *vdo_get_work_queue_private_data(void);
+struct vdo_work_queue *vdo_get_current_work_queue(void);
+struct vdo_thread *vdo_get_work_queue_owner(struct vdo_work_queue *queue);
+
+bool __must_check vdo_work_queue_type_is(struct vdo_work_queue *queue,
+ const struct vdo_work_queue_type *type);
+
+#endif /* VDO_WORK_QUEUE_H */
diff --git a/drivers/md/dm-vdo/indexer/chapter-index.c b/drivers/md/dm-vdo/indexer/chapter-index.c
new file mode 100644
index 000000000000..7e32a25d3f2f
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/chapter-index.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "chapter-index.h"
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "hash-utils.h"
+#include "indexer.h"
+
+int uds_make_open_chapter_index(struct open_chapter_index **chapter_index,
+ const struct index_geometry *geometry, u64 volume_nonce)
+{
+ int result;
+ size_t memory_size;
+ struct open_chapter_index *index;
+
+ result = vdo_allocate(1, struct open_chapter_index, "open chapter index", &index);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /*
+ * The delta index will rebalance delta lists when memory gets tight,
+ * so give the chapter index one extra page.
+ */
+ memory_size = ((geometry->index_pages_per_chapter + 1) * geometry->bytes_per_page);
+ index->geometry = geometry;
+ index->volume_nonce = volume_nonce;
+ result = uds_initialize_delta_index(&index->delta_index, 1,
+ geometry->delta_lists_per_chapter,
+ geometry->chapter_mean_delta,
+ geometry->chapter_payload_bits,
+ memory_size, 'm');
+ if (result != UDS_SUCCESS) {
+ vdo_free(index);
+ return result;
+ }
+
+ index->memory_size = index->delta_index.memory_size + sizeof(struct open_chapter_index);
+ *chapter_index = index;
+ return UDS_SUCCESS;
+}
+
+void uds_free_open_chapter_index(struct open_chapter_index *chapter_index)
+{
+ if (chapter_index == NULL)
+ return;
+
+ uds_uninitialize_delta_index(&chapter_index->delta_index);
+ vdo_free(chapter_index);
+}
+
+/* Re-initialize an open chapter index for a new chapter. */
+void uds_empty_open_chapter_index(struct open_chapter_index *chapter_index,
+ u64 virtual_chapter_number)
+{
+ uds_reset_delta_index(&chapter_index->delta_index);
+ chapter_index->virtual_chapter_number = virtual_chapter_number;
+}
+
+static inline bool was_entry_found(const struct delta_index_entry *entry, u32 address)
+{
+ return (!entry->at_end) && (entry->key == address);
+}
+
+/* Associate a record name with the record page containing its metadata. */
+int uds_put_open_chapter_index_record(struct open_chapter_index *chapter_index,
+ const struct uds_record_name *name,
+ u32 page_number)
+{
+ int result;
+ struct delta_index_entry entry;
+ u32 address;
+ u32 list_number;
+ const u8 *found_name;
+ bool found;
+ const struct index_geometry *geometry = chapter_index->geometry;
+ u64 chapter_number = chapter_index->virtual_chapter_number;
+ u32 record_pages = geometry->record_pages_per_chapter;
+
+ result = VDO_ASSERT(page_number < record_pages,
+ "Page number within chapter (%u) exceeds the maximum value %u",
+ page_number, record_pages);
+ if (result != VDO_SUCCESS)
+ return UDS_INVALID_ARGUMENT;
+
+ address = uds_hash_to_chapter_delta_address(name, geometry);
+ list_number = uds_hash_to_chapter_delta_list(name, geometry);
+ result = uds_get_delta_index_entry(&chapter_index->delta_index, list_number,
+ address, name->name, &entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ found = was_entry_found(&entry, address);
+ result = VDO_ASSERT(!(found && entry.is_collision),
+ "Chunk appears more than once in chapter %llu",
+ (unsigned long long) chapter_number);
+ if (result != VDO_SUCCESS)
+ return UDS_BAD_STATE;
+
+ found_name = (found ? name->name : NULL);
+ return uds_put_delta_index_entry(&entry, address, page_number, found_name);
+}
+
+/*
+ * Pack a section of an open chapter index into a chapter index page. A range of delta lists
+ * (starting with a specified list index) is copied from the open chapter index into a memory page.
+ * The number of lists copied onto the page is returned to the caller on success.
+ *
+ * @chapter_index: The open chapter index
+ * @memory: The memory page to use
+ * @first_list: The first delta list number to be copied
+ * @last_page: If true, this is the last page of the chapter index and all the remaining lists must
+ * be packed onto this page
+ * @lists_packed: The number of delta lists that were packed onto this page
+ */
+int uds_pack_open_chapter_index_page(struct open_chapter_index *chapter_index,
+ u8 *memory, u32 first_list, bool last_page,
+ u32 *lists_packed)
+{
+ int result;
+ struct delta_index *delta_index = &chapter_index->delta_index;
+ struct delta_index_stats stats;
+ u64 nonce = chapter_index->volume_nonce;
+ u64 chapter_number = chapter_index->virtual_chapter_number;
+ const struct index_geometry *geometry = chapter_index->geometry;
+ u32 list_count = geometry->delta_lists_per_chapter;
+ unsigned int removals = 0;
+ struct delta_index_entry entry;
+ u32 next_list;
+ s32 list_number;
+
+ for (;;) {
+ result = uds_pack_delta_index_page(delta_index, nonce, memory,
+ geometry->bytes_per_page,
+ chapter_number, first_list,
+ lists_packed);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if ((first_list + *lists_packed) == list_count) {
+ /* All lists are packed. */
+ break;
+ } else if (*lists_packed == 0) {
+ /*
+ * The next delta list does not fit on a page. This delta list will be
+ * removed.
+ */
+ } else if (last_page) {
+ /*
+ * This is the last page and there are lists left unpacked, but all of the
+ * remaining lists must fit on the page. Find a list that contains entries
+ * and remove the entire list. Try the first list that does not fit. If it
+ * is empty, we will select the last list that already fits and has any
+ * entries.
+ */
+ } else {
+ /* This page is done. */
+ break;
+ }
+
+ if (removals == 0) {
+ uds_get_delta_index_stats(delta_index, &stats);
+ vdo_log_warning("The chapter index for chapter %llu contains %llu entries with %llu collisions",
+ (unsigned long long) chapter_number,
+ (unsigned long long) stats.record_count,
+ (unsigned long long) stats.collision_count);
+ }
+
+ list_number = *lists_packed;
+ do {
+ if (list_number < 0)
+ return UDS_OVERFLOW;
+
+ next_list = first_list + list_number--;
+ result = uds_start_delta_index_search(delta_index, next_list, 0,
+ &entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_next_delta_index_entry(&entry);
+ if (result != UDS_SUCCESS)
+ return result;
+ } while (entry.at_end);
+
+ do {
+ result = uds_remove_delta_index_entry(&entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ removals++;
+ } while (!entry.at_end);
+ }
+
+ if (removals > 0) {
+ vdo_log_warning("To avoid chapter index page overflow in chapter %llu, %u entries were removed from the chapter index",
+ (unsigned long long) chapter_number, removals);
+ }
+
+ return UDS_SUCCESS;
+}
+
+/* Make a new chapter index page, initializing it with the data from a given index_page buffer. */
+int uds_initialize_chapter_index_page(struct delta_index_page *index_page,
+ const struct index_geometry *geometry,
+ u8 *page_buffer, u64 volume_nonce)
+{
+ return uds_initialize_delta_index_page(index_page, volume_nonce,
+ geometry->chapter_mean_delta,
+ geometry->chapter_payload_bits,
+ page_buffer, geometry->bytes_per_page);
+}
+
+/* Validate a chapter index page read during rebuild. */
+int uds_validate_chapter_index_page(const struct delta_index_page *index_page,
+ const struct index_geometry *geometry)
+{
+ int result;
+ const struct delta_index *delta_index = &index_page->delta_index;
+ u32 first = index_page->lowest_list_number;
+ u32 last = index_page->highest_list_number;
+ u32 list_number;
+
+ /* We walk every delta list from start to finish. */
+ for (list_number = first; list_number <= last; list_number++) {
+ struct delta_index_entry entry;
+
+ result = uds_start_delta_index_search(delta_index, list_number - first,
+ 0, &entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ for (;;) {
+ result = uds_next_delta_index_entry(&entry);
+ if (result != UDS_SUCCESS) {
+ /*
+ * A random bit stream is highly likely to arrive here when we go
+ * past the end of the delta list.
+ */
+ return result;
+ }
+
+ if (entry.at_end)
+ break;
+
+ /* Also make sure that the record page field contains a plausible value. */
+ if (uds_get_delta_entry_value(&entry) >=
+ geometry->record_pages_per_chapter) {
+ /*
+ * Do not log this as an error. It happens in normal operation when
+ * we are doing a rebuild but haven't written the entire volume
+ * once.
+ */
+ return UDS_CORRUPT_DATA;
+ }
+ }
+ }
+ return UDS_SUCCESS;
+}
+
+/*
+ * Search a chapter index page for a record name, returning the record page number that may contain
+ * the name.
+ */
+int uds_search_chapter_index_page(struct delta_index_page *index_page,
+ const struct index_geometry *geometry,
+ const struct uds_record_name *name,
+ u16 *record_page_ptr)
+{
+ int result;
+ struct delta_index *delta_index = &index_page->delta_index;
+ u32 address = uds_hash_to_chapter_delta_address(name, geometry);
+ u32 delta_list_number = uds_hash_to_chapter_delta_list(name, geometry);
+ u32 sub_list_number = delta_list_number - index_page->lowest_list_number;
+ struct delta_index_entry entry;
+
+ result = uds_get_delta_index_entry(delta_index, sub_list_number, address,
+ name->name, &entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (was_entry_found(&entry, address))
+ *record_page_ptr = uds_get_delta_entry_value(&entry);
+ else
+ *record_page_ptr = NO_CHAPTER_INDEX_ENTRY;
+
+ return UDS_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/indexer/chapter-index.h b/drivers/md/dm-vdo/indexer/chapter-index.h
new file mode 100644
index 000000000000..be8bf2b675b1
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/chapter-index.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_CHAPTER_INDEX_H
+#define UDS_CHAPTER_INDEX_H
+
+#include <linux/limits.h>
+
+#include "delta-index.h"
+#include "geometry.h"
+
+/*
+ * A chapter index for an open chapter is a mutable structure that tracks all the records that have
+ * been added to the chapter. A chapter index for a closed chapter is similar except that it is
+ * immutable because the contents of a closed chapter can never change, and the immutable structure
+ * is more efficient. Both types of chapter index are implemented with a delta index.
+ */
+
+/* The value returned when no entry is found in the chapter index. */
+#define NO_CHAPTER_INDEX_ENTRY U16_MAX
+
+struct open_chapter_index {
+ const struct index_geometry *geometry;
+ struct delta_index delta_index;
+ u64 virtual_chapter_number;
+ u64 volume_nonce;
+ size_t memory_size;
+};
+
+int __must_check uds_make_open_chapter_index(struct open_chapter_index **chapter_index,
+ const struct index_geometry *geometry,
+ u64 volume_nonce);
+
+void uds_free_open_chapter_index(struct open_chapter_index *chapter_index);
+
+void uds_empty_open_chapter_index(struct open_chapter_index *chapter_index,
+ u64 virtual_chapter_number);
+
+int __must_check uds_put_open_chapter_index_record(struct open_chapter_index *chapter_index,
+ const struct uds_record_name *name,
+ u32 page_number);
+
+int __must_check uds_pack_open_chapter_index_page(struct open_chapter_index *chapter_index,
+ u8 *memory, u32 first_list,
+ bool last_page, u32 *lists_packed);
+
+int __must_check uds_initialize_chapter_index_page(struct delta_index_page *index_page,
+ const struct index_geometry *geometry,
+ u8 *page_buffer, u64 volume_nonce);
+
+int __must_check uds_validate_chapter_index_page(const struct delta_index_page *index_page,
+ const struct index_geometry *geometry);
+
+int __must_check uds_search_chapter_index_page(struct delta_index_page *index_page,
+ const struct index_geometry *geometry,
+ const struct uds_record_name *name,
+ u16 *record_page_ptr);
+
+#endif /* UDS_CHAPTER_INDEX_H */
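
To see how these declarations fit together, here is a minimal sketch of the open-chapter
lifecycle (illustrative only, not code from this patch): error handling is abbreviated, the
geometry and volume_nonce are assumed to come from the caller, and records_per_page is
assumed to be a field of struct index_geometry.

	static int example_build_chapter_index(const struct index_geometry *geometry,
					       u64 volume_nonce, u64 chapter_number,
					       const struct uds_record_name *names,
					       u32 name_count, u8 *page_buffer)
	{
		struct open_chapter_index *chapter_index;
		u32 lists_packed;
		u32 i;
		int result;

		result = uds_make_open_chapter_index(&chapter_index, geometry, volume_nonce);
		if (result != UDS_SUCCESS)
			return result;

		uds_empty_open_chapter_index(chapter_index, chapter_number);
		for (i = 0; i < name_count; i++) {
			/* Record i lands on record page (i / records_per_page). */
			result = uds_put_open_chapter_index_record(chapter_index, &names[i],
								   i / geometry->records_per_page);
			if (result != UDS_SUCCESS)
				break;
		}

		if (result == UDS_SUCCESS)
			result = uds_pack_open_chapter_index_page(chapter_index, page_buffer,
								  0, true, &lists_packed);

		uds_free_open_chapter_index(chapter_index);
		return result;
	}
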
diff --git a/drivers/md/dm-vdo/indexer/config.c b/drivers/md/dm-vdo/indexer/config.c
new file mode 100644
index 000000000000..5532371b952f
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/config.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "config.h"
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "string-utils.h"
+#include "thread-utils.h"
+
+static const u8 INDEX_CONFIG_MAGIC[] = "ALBIC";
+static const u8 INDEX_CONFIG_VERSION_6_02[] = "06.02";
+static const u8 INDEX_CONFIG_VERSION_8_02[] = "08.02";
+
+#define DEFAULT_VOLUME_READ_THREADS 2
+#define MAX_VOLUME_READ_THREADS 16
+#define INDEX_CONFIG_MAGIC_LENGTH (sizeof(INDEX_CONFIG_MAGIC) - 1)
+#define INDEX_CONFIG_VERSION_LENGTH ((int)(sizeof(INDEX_CONFIG_VERSION_6_02) - 1))
+
+static bool is_version(const u8 *version, u8 *buffer)
+{
+ return memcmp(version, buffer, INDEX_CONFIG_VERSION_LENGTH) == 0;
+}
+
+static bool are_matching_configurations(struct uds_configuration *saved_config,
+ struct index_geometry *saved_geometry,
+ struct uds_configuration *user)
+{
+ struct index_geometry *geometry = user->geometry;
+ bool result = true;
+
+ if (saved_geometry->record_pages_per_chapter != geometry->record_pages_per_chapter) {
+ vdo_log_error("Record pages per chapter (%u) does not match (%u)",
+ saved_geometry->record_pages_per_chapter,
+ geometry->record_pages_per_chapter);
+ result = false;
+ }
+
+ if (saved_geometry->chapters_per_volume != geometry->chapters_per_volume) {
+ vdo_log_error("Chapter count (%u) does not match (%u)",
+ saved_geometry->chapters_per_volume,
+ geometry->chapters_per_volume);
+ result = false;
+ }
+
+ if (saved_geometry->sparse_chapters_per_volume != geometry->sparse_chapters_per_volume) {
+ vdo_log_error("Sparse chapter count (%u) does not match (%u)",
+ saved_geometry->sparse_chapters_per_volume,
+ geometry->sparse_chapters_per_volume);
+ result = false;
+ }
+
+ if (saved_config->cache_chapters != user->cache_chapters) {
+ vdo_log_error("Cache size (%u) does not match (%u)",
+ saved_config->cache_chapters, user->cache_chapters);
+ result = false;
+ }
+
+ if (saved_config->volume_index_mean_delta != user->volume_index_mean_delta) {
+ vdo_log_error("Volume index mean delta (%u) does not match (%u)",
+ saved_config->volume_index_mean_delta,
+ user->volume_index_mean_delta);
+ result = false;
+ }
+
+ if (saved_geometry->bytes_per_page != geometry->bytes_per_page) {
+ vdo_log_error("Bytes per page value (%zu) does not match (%zu)",
+ saved_geometry->bytes_per_page, geometry->bytes_per_page);
+ result = false;
+ }
+
+ if (saved_config->sparse_sample_rate != user->sparse_sample_rate) {
+ vdo_log_error("Sparse sample rate (%u) does not match (%u)",
+ saved_config->sparse_sample_rate,
+ user->sparse_sample_rate);
+ result = false;
+ }
+
+ if (saved_config->nonce != user->nonce) {
+ vdo_log_error("Nonce (%llu) does not match (%llu)",
+ (unsigned long long) saved_config->nonce,
+ (unsigned long long) user->nonce);
+ result = false;
+ }
+
+ return result;
+}
+
+/* Read the configuration and validate it against the provided one. */
+int uds_validate_config_contents(struct buffered_reader *reader,
+ struct uds_configuration *user_config)
+{
+ int result;
+ struct uds_configuration config;
+ struct index_geometry geometry;
+ u8 version_buffer[INDEX_CONFIG_VERSION_LENGTH];
+ u32 bytes_per_page;
+ u8 buffer[sizeof(struct uds_configuration_6_02)];
+ size_t offset = 0;
+
+ result = uds_verify_buffered_data(reader, INDEX_CONFIG_MAGIC,
+ INDEX_CONFIG_MAGIC_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_read_from_buffered_reader(reader, version_buffer,
+ INDEX_CONFIG_VERSION_LENGTH);
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "cannot read index config version");
+
+ if (!is_version(INDEX_CONFIG_VERSION_6_02, version_buffer) &&
+ !is_version(INDEX_CONFIG_VERSION_8_02, version_buffer)) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "unsupported configuration version: '%.*s'",
+ INDEX_CONFIG_VERSION_LENGTH,
+ version_buffer);
+ }
+
+ result = uds_read_from_buffered_reader(reader, buffer, sizeof(buffer));
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "cannot read config data");
+
+ decode_u32_le(buffer, &offset, &geometry.record_pages_per_chapter);
+ decode_u32_le(buffer, &offset, &geometry.chapters_per_volume);
+ decode_u32_le(buffer, &offset, &geometry.sparse_chapters_per_volume);
+ decode_u32_le(buffer, &offset, &config.cache_chapters);
+	offset += sizeof(u32); /* Skip the unused field. */
+ decode_u32_le(buffer, &offset, &config.volume_index_mean_delta);
+ decode_u32_le(buffer, &offset, &bytes_per_page);
+ geometry.bytes_per_page = bytes_per_page;
+ decode_u32_le(buffer, &offset, &config.sparse_sample_rate);
+ decode_u64_le(buffer, &offset, &config.nonce);
+
+ result = VDO_ASSERT(offset == sizeof(struct uds_configuration_6_02),
+ "%zu bytes read but not decoded",
+ sizeof(struct uds_configuration_6_02) - offset);
+ if (result != VDO_SUCCESS)
+ return UDS_CORRUPT_DATA;
+
+ if (is_version(INDEX_CONFIG_VERSION_6_02, version_buffer)) {
+ user_config->geometry->remapped_virtual = 0;
+ user_config->geometry->remapped_physical = 0;
+ } else {
+ u8 remapping[sizeof(u64) + sizeof(u64)];
+
+ result = uds_read_from_buffered_reader(reader, remapping,
+ sizeof(remapping));
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "cannot read converted config");
+
+ offset = 0;
+ decode_u64_le(remapping, &offset,
+ &user_config->geometry->remapped_virtual);
+ decode_u64_le(remapping, &offset,
+ &user_config->geometry->remapped_physical);
+ }
+
+ if (!are_matching_configurations(&config, &geometry, user_config)) {
+ vdo_log_warning("Supplied configuration does not match save");
+ return UDS_NO_INDEX;
+ }
+
+ return UDS_SUCCESS;
+}
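
For reference, the decode sequence above fixes the little-endian on-disk layout of the
version 6.02 configuration block at 40 bytes (matching struct uds_configuration_6_02 in
config.h); the 8.02 form appends the two u64 remapping fields for 56 bytes total:

	offset  size  field
	     0     4  record_pages_per_chapter
	     4     4  chapters_per_volume
	     8     4  sparse_chapters_per_volume
	    12     4  cache_chapters
	    16     4  (unused)
	    20     4  volume_index_mean_delta
	    24     4  bytes_per_page
	    28     4  sparse_sample_rate
	    32     8  nonce
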
+
+/*
+ * Write the configuration to stable storage. If the superblock version is < 4, write the 6.02
+ * version; otherwise write the 8.02 version, which additionally records the remapping fields used
+ * when an index has been reduced by one chapter.
+ */
+int uds_write_config_contents(struct buffered_writer *writer,
+ struct uds_configuration *config, u32 version)
+{
+ int result;
+ struct index_geometry *geometry = config->geometry;
+ u8 buffer[sizeof(struct uds_configuration_8_02)];
+ size_t offset = 0;
+
+ result = uds_write_to_buffered_writer(writer, INDEX_CONFIG_MAGIC,
+ INDEX_CONFIG_MAGIC_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ /*
+ * If version is < 4, the index has not been reduced by a chapter so it must be written out
+ * as version 6.02 so that it is still compatible with older versions of UDS.
+ */
+ if (version >= 4) {
+ result = uds_write_to_buffered_writer(writer, INDEX_CONFIG_VERSION_8_02,
+ INDEX_CONFIG_VERSION_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+ } else {
+ result = uds_write_to_buffered_writer(writer, INDEX_CONFIG_VERSION_6_02,
+ INDEX_CONFIG_VERSION_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ encode_u32_le(buffer, &offset, geometry->record_pages_per_chapter);
+ encode_u32_le(buffer, &offset, geometry->chapters_per_volume);
+ encode_u32_le(buffer, &offset, geometry->sparse_chapters_per_volume);
+ encode_u32_le(buffer, &offset, config->cache_chapters);
+	encode_u32_le(buffer, &offset, 0); /* Zero for the unused field. */
+ encode_u32_le(buffer, &offset, config->volume_index_mean_delta);
+ encode_u32_le(buffer, &offset, geometry->bytes_per_page);
+ encode_u32_le(buffer, &offset, config->sparse_sample_rate);
+ encode_u64_le(buffer, &offset, config->nonce);
+
+ result = VDO_ASSERT(offset == sizeof(struct uds_configuration_6_02),
+ "%zu bytes encoded, of %zu expected", offset,
+ sizeof(struct uds_configuration_6_02));
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (version >= 4) {
+ encode_u64_le(buffer, &offset, geometry->remapped_virtual);
+ encode_u64_le(buffer, &offset, geometry->remapped_physical);
+ }
+
+ return uds_write_to_buffered_writer(writer, buffer, offset);
+}
+
+/* Compute configuration parameters that depend on memory size. */
+static int compute_memory_sizes(uds_memory_config_size_t mem_gb, bool sparse,
+ u32 *chapters_per_volume, u32 *record_pages_per_chapter,
+ u32 *sparse_chapters_per_volume)
+{
+ u32 reduced_chapters = 0;
+ u32 base_chapters;
+
+ if (mem_gb == UDS_MEMORY_CONFIG_256MB) {
+ base_chapters = DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = SMALL_RECORD_PAGES_PER_CHAPTER;
+ } else if (mem_gb == UDS_MEMORY_CONFIG_512MB) {
+ base_chapters = DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = 2 * SMALL_RECORD_PAGES_PER_CHAPTER;
+ } else if (mem_gb == UDS_MEMORY_CONFIG_768MB) {
+ base_chapters = DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = 3 * SMALL_RECORD_PAGES_PER_CHAPTER;
+ } else if ((mem_gb >= 1) && (mem_gb <= UDS_MEMORY_CONFIG_MAX)) {
+ base_chapters = mem_gb * DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = DEFAULT_RECORD_PAGES_PER_CHAPTER;
+ } else if (mem_gb == UDS_MEMORY_CONFIG_REDUCED_256MB) {
+ reduced_chapters = 1;
+ base_chapters = DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = SMALL_RECORD_PAGES_PER_CHAPTER;
+ } else if (mem_gb == UDS_MEMORY_CONFIG_REDUCED_512MB) {
+ reduced_chapters = 1;
+ base_chapters = DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = 2 * SMALL_RECORD_PAGES_PER_CHAPTER;
+ } else if (mem_gb == UDS_MEMORY_CONFIG_REDUCED_768MB) {
+ reduced_chapters = 1;
+ base_chapters = DEFAULT_CHAPTERS_PER_VOLUME;
+ *record_pages_per_chapter = 3 * SMALL_RECORD_PAGES_PER_CHAPTER;
+ } else if ((mem_gb >= 1 + UDS_MEMORY_CONFIG_REDUCED) &&
+ (mem_gb <= UDS_MEMORY_CONFIG_REDUCED_MAX)) {
+ reduced_chapters = 1;
+ base_chapters = ((mem_gb - UDS_MEMORY_CONFIG_REDUCED) *
+ DEFAULT_CHAPTERS_PER_VOLUME);
+ *record_pages_per_chapter = DEFAULT_RECORD_PAGES_PER_CHAPTER;
+ } else {
+ vdo_log_error("received invalid memory size");
+ return -EINVAL;
+ }
+
+ if (sparse) {
+ /* Make 95% of chapters sparse, allowing 10x more records. */
+ *sparse_chapters_per_volume = (19 * base_chapters) / 2;
+ base_chapters *= 10;
+ } else {
+ *sparse_chapters_per_volume = 0;
+ }
+
+ *chapters_per_volume = base_chapters - reduced_chapters;
+ return UDS_SUCCESS;
+}
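
A worked example, assuming the geometry defaults from geometry.h (not shown in this patch;
DEFAULT_CHAPTERS_PER_VOLUME is assumed to be 1024 here): a dense 1 GB index gets
base_chapters = 1024 with DEFAULT_RECORD_PAGES_PER_CHAPTER record pages per chapter. With
sparse enabled, sparse_chapters_per_volume = (19 * 1024) / 2 = 9728 and base_chapters
becomes 10240, so 9728 of the 10240 chapters (95%) are sparse while the index spans ten
times as many chapters' worth of records.
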
+
+static unsigned int __must_check normalize_zone_count(unsigned int requested)
+{
+ unsigned int zone_count = requested;
+
+ if (zone_count == 0)
+ zone_count = num_online_cpus() / 2;
+
+ if (zone_count < 1)
+ zone_count = 1;
+
+ if (zone_count > MAX_ZONES)
+ zone_count = MAX_ZONES;
+
+ vdo_log_info("Using %u indexing zone%s for concurrency.",
+ zone_count, zone_count == 1 ? "" : "s");
+ return zone_count;
+}
+
+static unsigned int __must_check normalize_read_threads(unsigned int requested)
+{
+ unsigned int read_threads = requested;
+
+ if (read_threads < 1)
+ read_threads = DEFAULT_VOLUME_READ_THREADS;
+
+ if (read_threads > MAX_VOLUME_READ_THREADS)
+ read_threads = MAX_VOLUME_READ_THREADS;
+
+ return read_threads;
+}
+
+int uds_make_configuration(const struct uds_parameters *params,
+ struct uds_configuration **config_ptr)
+{
+ struct uds_configuration *config;
+ u32 chapters_per_volume = 0;
+ u32 record_pages_per_chapter = 0;
+ u32 sparse_chapters_per_volume = 0;
+ int result;
+
+ result = compute_memory_sizes(params->memory_size, params->sparse,
+ &chapters_per_volume, &record_pages_per_chapter,
+ &sparse_chapters_per_volume);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = vdo_allocate(1, struct uds_configuration, __func__, &config);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_make_index_geometry(DEFAULT_BYTES_PER_PAGE, record_pages_per_chapter,
+ chapters_per_volume, sparse_chapters_per_volume,
+ 0, 0, &config->geometry);
+ if (result != UDS_SUCCESS) {
+ uds_free_configuration(config);
+ return result;
+ }
+
+ config->zone_count = normalize_zone_count(params->zone_count);
+ config->read_threads = normalize_read_threads(params->read_threads);
+
+ config->cache_chapters = DEFAULT_CACHE_CHAPTERS;
+ config->volume_index_mean_delta = DEFAULT_VOLUME_INDEX_MEAN_DELTA;
+ config->sparse_sample_rate = (params->sparse ? DEFAULT_SPARSE_SAMPLE_RATE : 0);
+ config->nonce = params->nonce;
+ config->bdev = params->bdev;
+ config->offset = params->offset;
+ config->size = params->size;
+
+ *config_ptr = config;
+ return UDS_SUCCESS;
+}
+
+void uds_free_configuration(struct uds_configuration *config)
+{
+ if (config != NULL) {
+ uds_free_index_geometry(config->geometry);
+ vdo_free(config);
+ }
+}
+
+void uds_log_configuration(struct uds_configuration *config)
+{
+ struct index_geometry *geometry = config->geometry;
+
+ vdo_log_debug("Configuration:");
+ vdo_log_debug(" Record pages per chapter: %10u", geometry->record_pages_per_chapter);
+ vdo_log_debug(" Chapters per volume: %10u", geometry->chapters_per_volume);
+ vdo_log_debug(" Sparse chapters per volume: %10u", geometry->sparse_chapters_per_volume);
+ vdo_log_debug(" Cache size (chapters): %10u", config->cache_chapters);
+ vdo_log_debug(" Volume index mean delta: %10u", config->volume_index_mean_delta);
+ vdo_log_debug(" Bytes per page: %10zu", geometry->bytes_per_page);
+ vdo_log_debug(" Sparse sample rate: %10u", config->sparse_sample_rate);
+ vdo_log_debug(" Nonce: %llu", (unsigned long long) config->nonce);
+}
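
A minimal sketch of creating and logging a configuration with the functions above (the
parameter values are illustrative, not recommendations):

	static int example_make_config(struct block_device *bdev)
	{
		struct uds_configuration *config;
		struct uds_parameters params = {
			.memory_size = UDS_MEMORY_CONFIG_256MB,
			.sparse = false,
			.zone_count = 0,	/* let normalize_zone_count() choose */
			.read_threads = 0,	/* defaults to DEFAULT_VOLUME_READ_THREADS */
			.nonce = 0x0123456789abcdefULL,
			.bdev = bdev,
		};
		int result;

		result = uds_make_configuration(&params, &config);
		if (result != UDS_SUCCESS)
			return result;

		uds_log_configuration(config);
		uds_free_configuration(config);
		return UDS_SUCCESS;
	}
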
diff --git a/drivers/md/dm-vdo/indexer/config.h b/drivers/md/dm-vdo/indexer/config.h
new file mode 100644
index 000000000000..08507dc2f7a1
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/config.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_CONFIG_H
+#define UDS_CONFIG_H
+
+#include "geometry.h"
+#include "indexer.h"
+#include "io-factory.h"
+
+/*
+ * The uds_configuration records a variety of parameters used to configure a new UDS index. Some
+ * parameters are provided by the client, while others are fixed or derived from user-supplied
+ * values. It is created when an index is created, and it is recorded in the index metadata.
+ */
+
+enum {
+ DEFAULT_VOLUME_INDEX_MEAN_DELTA = 4096,
+ DEFAULT_CACHE_CHAPTERS = 7,
+ DEFAULT_SPARSE_SAMPLE_RATE = 32,
+ MAX_ZONES = 16,
+};
+
+/* A set of configuration parameters for the indexer. */
+struct uds_configuration {
+ /* Storage device for the index */
+ struct block_device *bdev;
+
+ /* The maximum allowable size of the index */
+ size_t size;
+
+ /* The offset where the index should start */
+ off_t offset;
+
+ /* Parameters for the volume */
+
+ /* The volume layout */
+ struct index_geometry *geometry;
+
+ /* Index owner's nonce */
+ u64 nonce;
+
+ /* The number of threads used to process index requests */
+ unsigned int zone_count;
+
+ /* The number of threads used to read volume pages */
+ unsigned int read_threads;
+
+ /* Size of the page cache and sparse chapter index cache in chapters */
+ u32 cache_chapters;
+
+ /* Parameters for the volume index */
+
+ /* The mean delta for the volume index */
+ u32 volume_index_mean_delta;
+
+ /* Sampling rate for sparse indexing */
+ u32 sparse_sample_rate;
+};
+
+/* On-disk structure of data for a version 8.02 index. */
+struct uds_configuration_8_02 {
+	/* Smaller (16), small (64), or large (256) indices */
+ u32 record_pages_per_chapter;
+ /* Total number of chapters per volume */
+ u32 chapters_per_volume;
+ /* Number of sparse chapters per volume */
+ u32 sparse_chapters_per_volume;
+ /* Size of the page cache, in chapters */
+ u32 cache_chapters;
+ /* Unused field */
+ u32 unused;
+ /* The volume index mean delta to use */
+ u32 volume_index_mean_delta;
+ /* Size of a page, used for both record pages and index pages */
+ u32 bytes_per_page;
+ /* Sampling rate for sparse indexing */
+ u32 sparse_sample_rate;
+ /* Index owner's nonce */
+ u64 nonce;
+ /* Virtual chapter remapped from physical chapter 0 */
+ u64 remapped_virtual;
+ /* New physical chapter which remapped chapter was moved to */
+ u64 remapped_physical;
+} __packed;
+
+/* On-disk structure of data for a version 6.02 index. */
+struct uds_configuration_6_02 {
+	/* Smaller (16), small (64), or large (256) indices */
+ u32 record_pages_per_chapter;
+ /* Total number of chapters per volume */
+ u32 chapters_per_volume;
+ /* Number of sparse chapters per volume */
+ u32 sparse_chapters_per_volume;
+ /* Size of the page cache, in chapters */
+ u32 cache_chapters;
+ /* Unused field */
+ u32 unused;
+ /* The volume index mean delta to use */
+ u32 volume_index_mean_delta;
+ /* Size of a page, used for both record pages and index pages */
+ u32 bytes_per_page;
+ /* Sampling rate for sparse indexing */
+ u32 sparse_sample_rate;
+ /* Index owner's nonce */
+ u64 nonce;
+} __packed;
+
+int __must_check uds_make_configuration(const struct uds_parameters *params,
+ struct uds_configuration **config_ptr);
+
+void uds_free_configuration(struct uds_configuration *config);
+
+int __must_check uds_validate_config_contents(struct buffered_reader *reader,
+ struct uds_configuration *config);
+
+int __must_check uds_write_config_contents(struct buffered_writer *writer,
+ struct uds_configuration *config, u32 version);
+
+void uds_log_configuration(struct uds_configuration *config);
+
+#endif /* UDS_CONFIG_H */
diff --git a/drivers/md/dm-vdo/indexer/delta-index.c b/drivers/md/dm-vdo/indexer/delta-index.c
new file mode 100644
index 000000000000..0ac2443f0df3
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/delta-index.c
@@ -0,0 +1,1970 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+#include "delta-index.h"
+
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/limits.h>
+#include <linux/log2.h>
+
+#include "cpu.h"
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+#include "string-utils.h"
+#include "time-utils.h"
+
+#include "config.h"
+#include "indexer.h"
+
+/*
+ * The entries in a delta index could be stored in a single delta list, but to reduce search times
+ * and update costs it uses multiple delta lists. These lists are stored in a single chunk of
+ * memory managed by the delta_zone structure. The delta_zone can move the data around within its
+ * memory, so the location of each delta list is recorded as a bit offset into the memory. Because
+ * the volume index can contain over a million delta lists, we want to be efficient with the size
+ * of the delta list header information. This information is encoded into 16 bytes per list. The
+ * volume index delta list memory can easily exceed 4 gigabits, so a 64 bit value is needed to
+ * address the memory. The volume index delta lists average around 6 kilobits, so 16 bits are
+ * sufficient to store the size of a delta list.
+ *
+ * Each delta list is stored as a bit stream. Within the delta list encoding, bits and bytes are
+ * numbered in little endian order. Within a byte, bit 0 is the least significant bit (0x1), and
+ * bit 7 is the most significant bit (0x80). Within a bit stream, bit 7 is the most significant bit
+ * of byte 0, and bit 8 is the least significant bit of byte 1. Within a byte array, a byte's
+ * number corresponds to its index in the array.
+ *
+ * A standard delta list entry is stored as a fixed length payload (the value) followed by a
+ * variable length key (the delta). A collision entry is used when two block names have the same
+ * delta list address. A collision entry always follows a standard entry for the hash with which it
+ * collides, and is encoded with DELTA == 0 and an additional 256-bit field at the end,
+ * containing the full block name. An entry with a delta of 0 at the beginning of a delta list
+ * indicates a normal entry.
+ *
+ * The delta in each entry is encoded with a variable-length Huffman code to minimize the memory
+ * used by small deltas. The Huffman code is specified by three parameters, which can be computed
+ * from the desired mean delta when the index is full. (See compute_coding_constants() for
+ * details.)
+ *
+ * The bit field utilities used to read and write delta entries assume that it is possible to read
+ * some bytes beyond the end of the bit field, so a delta_zone memory allocation is guarded by two
+ * invalid delta lists to prevent reading outside the delta_zone memory. The valid delta lists are
+ * numbered 1 to N, and the guard lists are numbered 0 and N+1. The functions that decode the bit
+ * stream include a step that skips over bits set to 0 until the first 1 bit is found. A corrupted
+ * delta list could cause this step to run off the end of the delta_zone memory, so as extra
+ * protection against this happening, the tail guard list is set to all ones.
+ *
+ * The delta_index supports two different forms. The mutable form is created by
+ * uds_initialize_delta_index(), and is used for the volume index and for open chapter indexes. The
+ * immutable form is created by uds_initialize_delta_index_page(), and is used for closed (and
+ * cached) chapter index pages. The immutable form does not allocate delta list headers or
+ * temporary offsets, and thus is somewhat more memory efficient.
+ */
+
+/*
+ * This is the largest field size supported by get_field() and set_field(). Any field that is
+ * larger is not guaranteed to fit in a single byte-aligned u32.
+ */
+#define MAX_FIELD_BITS ((sizeof(u32) - 1) * BITS_PER_BYTE + 1)
+
+/*
+ * This is the largest field size supported by get_big_field() and set_big_field(). Any field that
+ * is larger is not guaranteed to fit in a single byte-aligned u64.
+ */
+#define MAX_BIG_FIELD_BITS ((sizeof(u64) - 1) * BITS_PER_BYTE + 1)
+
+/*
+ * This is the number of guard bytes needed at the end of the memory byte array when using the bit
+ * utilities. These utilities call get_big_field() and set_big_field(), which can access up to 7
+ * bytes beyond the end of the desired field. The definition is written to make it clear how this
+ * value is derived.
+ */
+#define POST_FIELD_GUARD_BYTES (sizeof(u64) - 1)
+
+/* The number of guard bits that are needed in the tail guard list */
+#define GUARD_BITS (POST_FIELD_GUARD_BYTES * BITS_PER_BYTE)
+
+/*
+ * The maximum size of a single delta list in bytes. We count guard bytes in this value because a
+ * buffer of this size can be used with move_bits().
+ */
+#define DELTA_LIST_MAX_BYTE_COUNT \
+ ((U16_MAX + BITS_PER_BYTE) / BITS_PER_BYTE + POST_FIELD_GUARD_BYTES)
+
+/* The number of extra bytes and bits needed to store a collision entry */
+#define COLLISION_BYTES UDS_RECORD_NAME_SIZE
+#define COLLISION_BITS (COLLISION_BYTES * BITS_PER_BYTE)
+
+/*
+ * Immutable delta lists are packed into pages containing a header that encodes the delta list
+ * information into 19 bits per list (64KB bit offset).
+ */
+#define IMMUTABLE_HEADER_SIZE 19
+
+/*
+ * Constants and structures for the saved delta index. "DI" is for delta_index, and -##### is a
+ * number to increment when the format of the data changes.
+ */
+#define MAGIC_SIZE 8
+
+static const char DELTA_INDEX_MAGIC[] = "DI-00002";
+
+struct delta_index_header {
+ char magic[MAGIC_SIZE];
+ u32 zone_number;
+ u32 zone_count;
+ u32 first_list;
+ u32 list_count;
+ u64 record_count;
+ u64 collision_count;
+};
+
+/*
+ * Header data used for immutable delta index pages. This data is followed by the delta list offset
+ * table.
+ */
+struct delta_page_header {
+ /* Externally-defined nonce */
+ u64 nonce;
+ /* The virtual chapter number */
+ u64 virtual_chapter_number;
+ /* Index of the first delta list on the page */
+ u16 first_list;
+ /* Number of delta lists on the page */
+ u16 list_count;
+} __packed;
+
+static inline u64 get_delta_list_byte_start(const struct delta_list *delta_list)
+{
+ return delta_list->start / BITS_PER_BYTE;
+}
+
+static inline u16 get_delta_list_byte_size(const struct delta_list *delta_list)
+{
+ unsigned int bit_offset = delta_list->start % BITS_PER_BYTE;
+
+ return BITS_TO_BYTES(bit_offset + delta_list->size);
+}
+
+static void rebalance_delta_zone(const struct delta_zone *delta_zone, u32 first,
+ u32 last)
+{
+ struct delta_list *delta_list;
+ u64 new_start;
+
+ if (first == last) {
+ /* Only one list is moving, and we know there is space. */
+ delta_list = &delta_zone->delta_lists[first];
+ new_start = delta_zone->new_offsets[first];
+ if (delta_list->start != new_start) {
+ u64 source;
+ u64 destination;
+
+ source = get_delta_list_byte_start(delta_list);
+ delta_list->start = new_start;
+ destination = get_delta_list_byte_start(delta_list);
+ memmove(delta_zone->memory + destination,
+ delta_zone->memory + source,
+ get_delta_list_byte_size(delta_list));
+ }
+ } else {
+ /*
+ * There is more than one list. Divide the problem in half, and use recursive calls
+ * to process each half. Note that after this computation, first <= middle, and
+ * middle < last.
+ */
+ u32 middle = (first + last) / 2;
+
+ delta_list = &delta_zone->delta_lists[middle];
+ new_start = delta_zone->new_offsets[middle];
+
+ /*
+ * The direction that our middle list is moving determines which half of the
+ * problem must be processed first.
+ */
+ if (new_start > delta_list->start) {
+ rebalance_delta_zone(delta_zone, middle + 1, last);
+ rebalance_delta_zone(delta_zone, first, middle);
+ } else {
+ rebalance_delta_zone(delta_zone, first, middle);
+ rebalance_delta_zone(delta_zone, middle + 1, last);
+ }
+ }
+}
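
For example, if three lists A < B < C must all shift toward higher addresses, the recursion
moves the upper half (containing C) before the middle list B, and B before A, so no list is
ever copied over bytes that have not yet been vacated; the opposite order is used when the
middle list is shifting toward lower addresses.
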
+
+static inline size_t get_zone_memory_size(unsigned int zone_count, size_t memory_size)
+{
+ /* Round up so that each zone is a multiple of 64K in size. */
+ size_t ALLOC_BOUNDARY = 64 * 1024;
+
+ return (memory_size / zone_count + ALLOC_BOUNDARY - 1) & -ALLOC_BOUNDARY;
+}
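
The expression (memory_size / zone_count + ALLOC_BOUNDARY - 1) & -ALLOC_BOUNDARY is the
usual power-of-two round-up: add boundary - 1, then mask off the low bits. A standalone
userspace check of the idiom (not part of the patch):

	#include <assert.h>
	#include <stddef.h>

	int main(void)
	{
		size_t boundary = 64 * 1024;

		/* Aligned values are unchanged; everything else rounds up. */
		assert(((65536 + boundary - 1) & -boundary) == 65536);
		assert(((65537 + boundary - 1) & -boundary) == 131072);
		assert(((1 + boundary - 1) & -boundary) == 65536);
		return 0;
	}
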
+
+void uds_reset_delta_index(const struct delta_index *delta_index)
+{
+ unsigned int z;
+
+ /*
+ * Initialize all delta lists to be empty. We keep 2 extra delta list descriptors, one
+ * before the first real entry and one after so that we don't need to bounds check the
+ * array access when calculating preceding and following gap sizes.
+ */
+ for (z = 0; z < delta_index->zone_count; z++) {
+ u64 list_bits;
+ u64 spacing;
+ u64 offset;
+ unsigned int i;
+ struct delta_zone *zone = &delta_index->delta_zones[z];
+ struct delta_list *delta_lists = zone->delta_lists;
+
+ /* Zeroing the delta list headers initializes the head guard list correctly. */
+ memset(delta_lists, 0,
+ (zone->list_count + 2) * sizeof(struct delta_list));
+
+ /* Set all the bits in the end guard list. */
+ list_bits = (u64) zone->size * BITS_PER_BYTE - GUARD_BITS;
+ delta_lists[zone->list_count + 1].start = list_bits;
+ delta_lists[zone->list_count + 1].size = GUARD_BITS;
+ memset(zone->memory + (list_bits / BITS_PER_BYTE), ~0,
+ POST_FIELD_GUARD_BYTES);
+
+ /* Evenly space out the real delta lists by setting regular offsets. */
+ spacing = list_bits / zone->list_count;
+ offset = spacing / 2;
+ for (i = 1; i <= zone->list_count; i++) {
+ delta_lists[i].start = offset;
+ offset += spacing;
+ }
+
+ /* Update the statistics. */
+ zone->discard_count += zone->record_count;
+ zone->record_count = 0;
+ zone->collision_count = 0;
+ }
+}
+
+/*
+ * Compute the Huffman coding parameters for the given mean delta. The Huffman code is specified by
+ * three parameters:
+ *
+ * MINBITS The number of bits in the smallest code
+ * BASE The number of values coded using a code of length MINBITS
+ * INCR The number of values coded by using one additional bit
+ *
+ * These parameters are related by this equation:
+ *
+ * BASE + INCR == 1 << MINBITS
+ *
+ * The math for the Huffman code of an exponential distribution says that
+ *
+ * INCR = log(2) * MEAN_DELTA
+ *
+ * Then use the smallest MINBITS value so that
+ *
+ * (1 << MINBITS) > INCR
+ *
+ * And then
+ *
+ * BASE = (1 << MINBITS) - INCR
+ *
+ * Now the index can generate a code such that
+ * - The first BASE values code using MINBITS bits.
+ * - The next INCR values code using MINBITS+1 bits.
+ * - The next INCR values code using MINBITS+2 bits.
+ * - (and so on).
+ */
+static void compute_coding_constants(u32 mean_delta, u16 *min_bits, u32 *min_keys, u32 *incr_keys)
+{
+ /*
+ * We want to compute the rounded value of log(2) * mean_delta. Since we cannot always use
+ * floating point, use a really good integer approximation.
+ */
+ *incr_keys = (836158UL * mean_delta + 603160UL) / 1206321UL;
+ *min_bits = bits_per(*incr_keys + 1);
+ *min_keys = (1 << *min_bits) - *incr_keys;
+}
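
A worked example for DEFAULT_VOLUME_INDEX_MEAN_DELTA (4096): incr_keys =
(836158 * 4096 + 603160) / 1206321 = 2839, the rounded value of log(2) * 4096 ~= 2839.3;
the smallest min_bits with (1 << min_bits) > 2839 is 12; and min_keys = 4096 - 2839 = 1257.
So the 1257 smallest deltas are coded in 12 bits, the next 2839 in 13 bits, the next 2839
in 14 bits, and so on, with BASE + INCR = 1257 + 2839 = 4096 = 1 << 12 as required.
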
+
+void uds_uninitialize_delta_index(struct delta_index *delta_index)
+{
+ unsigned int z;
+
+ if (delta_index->delta_zones == NULL)
+ return;
+
+ for (z = 0; z < delta_index->zone_count; z++) {
+ vdo_free(vdo_forget(delta_index->delta_zones[z].new_offsets));
+ vdo_free(vdo_forget(delta_index->delta_zones[z].delta_lists));
+ vdo_free(vdo_forget(delta_index->delta_zones[z].memory));
+ }
+
+ vdo_free(delta_index->delta_zones);
+ memset(delta_index, 0, sizeof(struct delta_index));
+}
+
+static int initialize_delta_zone(struct delta_zone *delta_zone, size_t size,
+ u32 first_list, u32 list_count, u32 mean_delta,
+ u32 payload_bits, u8 tag)
+{
+ int result;
+
+ result = vdo_allocate(size, u8, "delta list", &delta_zone->memory);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(list_count + 2, u64, "delta list temp",
+ &delta_zone->new_offsets);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Allocate the delta lists. */
+ result = vdo_allocate(list_count + 2, struct delta_list, "delta lists",
+ &delta_zone->delta_lists);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ compute_coding_constants(mean_delta, &delta_zone->min_bits,
+ &delta_zone->min_keys, &delta_zone->incr_keys);
+ delta_zone->value_bits = payload_bits;
+ delta_zone->buffered_writer = NULL;
+ delta_zone->size = size;
+ delta_zone->rebalance_time = 0;
+ delta_zone->rebalance_count = 0;
+ delta_zone->record_count = 0;
+ delta_zone->collision_count = 0;
+ delta_zone->discard_count = 0;
+ delta_zone->overflow_count = 0;
+ delta_zone->first_list = first_list;
+ delta_zone->list_count = list_count;
+ delta_zone->tag = tag;
+
+ return UDS_SUCCESS;
+}
+
+int uds_initialize_delta_index(struct delta_index *delta_index, unsigned int zone_count,
+ u32 list_count, u32 mean_delta, u32 payload_bits,
+ size_t memory_size, u8 tag)
+{
+ int result;
+ unsigned int z;
+ size_t zone_memory;
+
+ result = vdo_allocate(zone_count, struct delta_zone, "Delta Index Zones",
+ &delta_index->delta_zones);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ delta_index->zone_count = zone_count;
+ delta_index->list_count = list_count;
+ delta_index->lists_per_zone = DIV_ROUND_UP(list_count, zone_count);
+ delta_index->memory_size = 0;
+ delta_index->mutable = true;
+ delta_index->tag = tag;
+
+ for (z = 0; z < zone_count; z++) {
+ u32 lists_in_zone = delta_index->lists_per_zone;
+ u32 first_list_in_zone = z * lists_in_zone;
+
+ if (z == zone_count - 1) {
+ /*
+ * The last zone gets fewer lists if zone_count doesn't evenly divide
+ * list_count. We'll have an underflow if the assertion below doesn't hold.
+ */
+ if (delta_index->list_count <= first_list_in_zone) {
+ uds_uninitialize_delta_index(delta_index);
+ return vdo_log_error_strerror(UDS_INVALID_ARGUMENT,
+ "%u delta lists not enough for %u zones",
+ list_count, zone_count);
+ }
+ lists_in_zone = delta_index->list_count - first_list_in_zone;
+ }
+
+ zone_memory = get_zone_memory_size(zone_count, memory_size);
+ result = initialize_delta_zone(&delta_index->delta_zones[z], zone_memory,
+ first_list_in_zone, lists_in_zone,
+ mean_delta, payload_bits, tag);
+ if (result != UDS_SUCCESS) {
+ uds_uninitialize_delta_index(delta_index);
+ return result;
+ }
+
+ delta_index->memory_size +=
+ (sizeof(struct delta_zone) + zone_memory +
+ (lists_in_zone + 2) * (sizeof(struct delta_list) + sizeof(u64)));
+ }
+
+ uds_reset_delta_index(delta_index);
+ return UDS_SUCCESS;
+}
+
+/* Read a bit field from an arbitrary bit boundary. */
+static inline u32 get_field(const u8 *memory, u64 offset, u8 size)
+{
+ const void *addr = memory + offset / BITS_PER_BYTE;
+
+ return (get_unaligned_le32(addr) >> (offset % BITS_PER_BYTE)) & ((1 << size) - 1);
+}
+
+/* Write a bit field to an arbitrary bit boundary. */
+static inline void set_field(u32 value, u8 *memory, u64 offset, u8 size)
+{
+ void *addr = memory + offset / BITS_PER_BYTE;
+ int shift = offset % BITS_PER_BYTE;
+ u32 data = get_unaligned_le32(addr);
+
+ data &= ~(((1 << size) - 1) << shift);
+ data |= value << shift;
+ put_unaligned_le32(data, addr);
+}
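
These two helpers underpin the whole bit-stream encoding, so here is a self-contained
userspace sketch of the same logic (an illustration, not the kernel code: memcpy stands in
for get_unaligned_le32()/put_unaligned_le32(), and a little-endian host is assumed):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	static uint32_t load_le32(const uint8_t *p)
	{
		uint32_t v;

		memcpy(&v, p, sizeof(v)); /* assumes a little-endian host */
		return v;
	}

	static void store_le32(uint8_t *p, uint32_t v)
	{
		memcpy(p, &v, sizeof(v));
	}

	static uint32_t get_field(const uint8_t *memory, uint64_t offset, uint8_t size)
	{
		return (load_le32(memory + offset / 8) >> (offset % 8)) & ((1U << size) - 1);
	}

	static void set_field(uint32_t value, uint8_t *memory, uint64_t offset, uint8_t size)
	{
		uint8_t *addr = memory + offset / 8;
		int shift = offset % 8;
		uint32_t data = load_le32(addr);

		data &= ~(((1U << size) - 1) << shift);
		data |= value << shift;
		store_le32(addr, data);
	}

	int main(void)
	{
		uint8_t memory[16] = { 0 };

		/* A 19-bit field (IMMUTABLE_HEADER_SIZE) spanning byte boundaries. */
		set_field(0x5A5A5, memory, 13, 19);
		assert(get_field(memory, 13, 19) == 0x5A5A5);
		return 0;
	}
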
+
+/* Get the bit offset to the immutable delta list header. */
+static inline u32 get_immutable_header_offset(u32 list_number)
+{
+ return sizeof(struct delta_page_header) * BITS_PER_BYTE +
+ list_number * IMMUTABLE_HEADER_SIZE;
+}
+
+/* Get the bit offset to the start of the immutable delta list bit stream. */
+static inline u32 get_immutable_start(const u8 *memory, u32 list_number)
+{
+ return get_field(memory, get_immutable_header_offset(list_number),
+ IMMUTABLE_HEADER_SIZE);
+}
+
+/* Set the bit offset to the start of the immutable delta list bit stream. */
+static inline void set_immutable_start(u8 *memory, u32 list_number, u32 start)
+{
+ set_field(start, memory, get_immutable_header_offset(list_number),
+ IMMUTABLE_HEADER_SIZE);
+}
+
+static bool verify_delta_index_page(u64 nonce, u16 list_count, u64 expected_nonce,
+ u8 *memory, size_t memory_size)
+{
+ unsigned int i;
+
+ /*
+ * Verify the nonce. A mismatch can happen here during rebuild if we haven't written the
+ * entire volume at least once.
+ */
+ if (nonce != expected_nonce)
+ return false;
+
+ /* Verify that the number of delta lists can fit in the page. */
+ if (list_count > ((memory_size - sizeof(struct delta_page_header)) *
+ BITS_PER_BYTE / IMMUTABLE_HEADER_SIZE))
+ return false;
+
+ /*
+ * Verify that the first delta list is immediately after the last delta
+ * list header.
+ */
+ if (get_immutable_start(memory, 0) != get_immutable_header_offset(list_count + 1))
+ return false;
+
+ /* Verify that the lists are in the correct order. */
+ for (i = 0; i < list_count; i++) {
+ if (get_immutable_start(memory, i) > get_immutable_start(memory, i + 1))
+ return false;
+ }
+
+ /*
+ * Verify that the last list ends on the page, and that there is room
+ * for the post-field guard bits.
+ */
+ if (get_immutable_start(memory, list_count) >
+ (memory_size - POST_FIELD_GUARD_BYTES) * BITS_PER_BYTE)
+ return false;
+
+ /* Verify that the guard bytes are correctly set to all ones. */
+ for (i = 0; i < POST_FIELD_GUARD_BYTES; i++) {
+ if (memory[memory_size - POST_FIELD_GUARD_BYTES + i] != (u8) ~0)
+ return false;
+ }
+
+ /* All verifications passed. */
+ return true;
+}
+
+/* Initialize a delta index page to refer to a supplied page. */
+int uds_initialize_delta_index_page(struct delta_index_page *delta_index_page,
+ u64 expected_nonce, u32 mean_delta, u32 payload_bits,
+ u8 *memory, size_t memory_size)
+{
+ u64 nonce;
+ u64 vcn;
+ u64 first_list;
+ u64 list_count;
+ struct delta_page_header *header = (struct delta_page_header *) memory;
+ struct delta_zone *delta_zone = &delta_index_page->delta_zone;
+ const u8 *nonce_addr = (const u8 *) &header->nonce;
+ const u8 *vcn_addr = (const u8 *) &header->virtual_chapter_number;
+ const u8 *first_list_addr = (const u8 *) &header->first_list;
+ const u8 *list_count_addr = (const u8 *) &header->list_count;
+
+ /* First assume that the header is little endian. */
+ nonce = get_unaligned_le64(nonce_addr);
+ vcn = get_unaligned_le64(vcn_addr);
+ first_list = get_unaligned_le16(first_list_addr);
+ list_count = get_unaligned_le16(list_count_addr);
+ if (!verify_delta_index_page(nonce, list_count, expected_nonce, memory,
+ memory_size)) {
+ /* If that fails, try big endian. */
+ nonce = get_unaligned_be64(nonce_addr);
+ vcn = get_unaligned_be64(vcn_addr);
+ first_list = get_unaligned_be16(first_list_addr);
+ list_count = get_unaligned_be16(list_count_addr);
+ if (!verify_delta_index_page(nonce, list_count, expected_nonce, memory,
+ memory_size)) {
+ /*
+ * Both attempts failed. Do not log this as an error, because it can happen
+ * during a rebuild if we haven't written the entire volume at least once.
+ */
+ return UDS_CORRUPT_DATA;
+ }
+ }
+
+ delta_index_page->delta_index.delta_zones = delta_zone;
+ delta_index_page->delta_index.zone_count = 1;
+ delta_index_page->delta_index.list_count = list_count;
+ delta_index_page->delta_index.lists_per_zone = list_count;
+ delta_index_page->delta_index.mutable = false;
+ delta_index_page->delta_index.tag = 'p';
+ delta_index_page->virtual_chapter_number = vcn;
+ delta_index_page->lowest_list_number = first_list;
+ delta_index_page->highest_list_number = first_list + list_count - 1;
+
+ compute_coding_constants(mean_delta, &delta_zone->min_bits,
+ &delta_zone->min_keys, &delta_zone->incr_keys);
+ delta_zone->value_bits = payload_bits;
+ delta_zone->memory = memory;
+ delta_zone->delta_lists = NULL;
+ delta_zone->new_offsets = NULL;
+ delta_zone->buffered_writer = NULL;
+ delta_zone->size = memory_size;
+ delta_zone->rebalance_time = 0;
+ delta_zone->rebalance_count = 0;
+ delta_zone->record_count = 0;
+ delta_zone->collision_count = 0;
+ delta_zone->discard_count = 0;
+ delta_zone->overflow_count = 0;
+ delta_zone->first_list = 0;
+ delta_zone->list_count = list_count;
+ delta_zone->tag = 'p';
+
+ return UDS_SUCCESS;
+}
+
+/* Read a large bit field from an arbitrary bit boundary. */
+static inline u64 get_big_field(const u8 *memory, u64 offset, u8 size)
+{
+ const void *addr = memory + offset / BITS_PER_BYTE;
+
+ return (get_unaligned_le64(addr) >> (offset % BITS_PER_BYTE)) & ((1UL << size) - 1);
+}
+
+/* Write a large bit field to an arbitrary bit boundary. */
+static inline void set_big_field(u64 value, u8 *memory, u64 offset, u8 size)
+{
+ void *addr = memory + offset / BITS_PER_BYTE;
+ u8 shift = offset % BITS_PER_BYTE;
+ u64 data = get_unaligned_le64(addr);
+
+ data &= ~(((1UL << size) - 1) << shift);
+ data |= value << shift;
+ put_unaligned_le64(data, addr);
+}
+
+/* Set a sequence of bits to all zeros. */
+static inline void set_zero(u8 *memory, u64 offset, u32 size)
+{
+ if (size > 0) {
+ u8 *addr = memory + offset / BITS_PER_BYTE;
+ u8 shift = offset % BITS_PER_BYTE;
+ u32 count = size + shift > BITS_PER_BYTE ? (u32) BITS_PER_BYTE - shift : size;
+
+ *addr++ &= ~(((1 << count) - 1) << shift);
+ for (size -= count; size > BITS_PER_BYTE; size -= BITS_PER_BYTE)
+ *addr++ = 0;
+
+ if (size > 0)
+ *addr &= 0xFF << size;
+ }
+}
+
+/*
+ * Move several bits from a higher to a lower address, moving the lower addressed bits first. The
+ * size and memory offsets are measured in bits.
+ */
+static void move_bits_down(const u8 *from, u64 from_offset, u8 *to, u64 to_offset, u32 size)
+{
+ const u8 *source;
+ u8 *destination;
+ u8 offset;
+ u8 count;
+ u64 field;
+
+	/* Start by moving one field that ends on a destination int boundary. */
+ count = (MAX_BIG_FIELD_BITS - ((to_offset + MAX_BIG_FIELD_BITS) % BITS_PER_TYPE(u32)));
+ field = get_big_field(from, from_offset, count);
+ set_big_field(field, to, to_offset, count);
+ from_offset += count;
+ to_offset += count;
+ size -= count;
+
+ /* Now do the main loop to copy 32 bit chunks that are int-aligned at the destination. */
+ offset = from_offset % BITS_PER_TYPE(u32);
+ source = from + (from_offset - offset) / BITS_PER_BYTE;
+ destination = to + to_offset / BITS_PER_BYTE;
+ while (size > MAX_BIG_FIELD_BITS) {
+ put_unaligned_le32(get_unaligned_le64(source) >> offset, destination);
+ source += sizeof(u32);
+ destination += sizeof(u32);
+ from_offset += BITS_PER_TYPE(u32);
+ to_offset += BITS_PER_TYPE(u32);
+ size -= BITS_PER_TYPE(u32);
+ }
+
+ /* Finish up by moving any remaining bits. */
+ if (size > 0) {
+ field = get_big_field(from, from_offset, size);
+ set_big_field(field, to, to_offset, size);
+ }
+}
+
+/*
+ * Move several bits from a lower to a higher address, moving the higher addressed bits first. The
+ * size and memory offsets are measured in bits.
+ */
+static void move_bits_up(const u8 *from, u64 from_offset, u8 *to, u64 to_offset, u32 size)
+{
+ const u8 *source;
+ u8 *destination;
+ u8 offset;
+ u8 count;
+ u64 field;
+
+ /* Start by moving one field that begins on a destination int boundary. */
+ count = (to_offset + size) % BITS_PER_TYPE(u32);
+ if (count > 0) {
+ size -= count;
+ field = get_big_field(from, from_offset + size, count);
+ set_big_field(field, to, to_offset + size, count);
+ }
+
+ /* Now do the main loop to copy 32 bit chunks that are int-aligned at the destination. */
+ offset = (from_offset + size) % BITS_PER_TYPE(u32);
+ source = from + (from_offset + size - offset) / BITS_PER_BYTE;
+ destination = to + (to_offset + size) / BITS_PER_BYTE;
+ while (size > MAX_BIG_FIELD_BITS) {
+ source -= sizeof(u32);
+ destination -= sizeof(u32);
+ size -= BITS_PER_TYPE(u32);
+ put_unaligned_le32(get_unaligned_le64(source) >> offset, destination);
+ }
+
+ /* Finish up by moving any remaining bits. */
+ if (size > 0) {
+ field = get_big_field(from, from_offset, size);
+ set_big_field(field, to, to_offset, size);
+ }
+}
+
+/*
+ * Move bits from one field to another. When the fields overlap, behave as if we first move all the
+ * bits from the source to a temporary value, and then move all the bits from the temporary value
+ * to the destination. The size and memory offsets are measured in bits.
+ */
+static void move_bits(const u8 *from, u64 from_offset, u8 *to, u64 to_offset, u32 size)
+{
+ u64 field;
+
+ /* A small move doesn't require special handling. */
+ if (size <= MAX_BIG_FIELD_BITS) {
+ if (size > 0) {
+ field = get_big_field(from, from_offset, size);
+ set_big_field(field, to, to_offset, size);
+ }
+
+ return;
+ }
+
+ if (from_offset > to_offset)
+ move_bits_down(from, from_offset, to, to_offset, size);
+ else
+ move_bits_up(from, from_offset, to, to_offset, size);
+}
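
A minimal userspace reference model for the memmove-like semantics of move_bits(), one bit
at a time through a temporary buffer (illustrative only; the kernel routines above produce
the same result by moving 32-bit chunks in a safe order instead):

	#include <stdint.h>
	#include <stdlib.h>

	static int get_bit(const uint8_t *mem, uint64_t offset)
	{
		return (mem[offset / 8] >> (offset % 8)) & 1;
	}

	static void put_bit(uint8_t *mem, uint64_t offset, int bit)
	{
		if (bit)
			mem[offset / 8] |= 1 << (offset % 8);
		else
			mem[offset / 8] &= ~(1 << (offset % 8));
	}

	/* Copy out, then copy in, so overlapping ranges behave like memmove(). */
	static void move_bits_reference(uint8_t *mem, uint64_t from_offset,
					uint64_t to_offset, uint32_t size)
	{
		uint8_t *tmp = calloc((size + 7) / 8, 1);
		uint32_t i;

		if (tmp == NULL)
			abort();

		for (i = 0; i < size; i++)
			put_bit(tmp, i, get_bit(mem, from_offset + i));
		for (i = 0; i < size; i++)
			put_bit(mem, to_offset + i, get_bit(tmp, i));
		free(tmp);
	}
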
+
+/*
+ * Pack delta lists from a mutable delta index into an immutable delta index page. A range of delta
+ * lists (starting with a specified list index) is copied from the mutable delta index into a
+ * memory page used in the immutable index. The number of lists copied onto the page is returned in
+ * list_count.
+ */
+int uds_pack_delta_index_page(const struct delta_index *delta_index, u64 header_nonce,
+ u8 *memory, size_t memory_size, u64 virtual_chapter_number,
+ u32 first_list, u32 *list_count)
+{
+ const struct delta_zone *delta_zone;
+ struct delta_list *delta_lists;
+ u32 max_lists;
+ u32 n_lists = 0;
+ u32 offset;
+ u32 i;
+ int free_bits;
+ int bits;
+ struct delta_page_header *header;
+
+ delta_zone = &delta_index->delta_zones[0];
+ delta_lists = &delta_zone->delta_lists[first_list + 1];
+ max_lists = delta_index->list_count - first_list;
+
+ /*
+ * Compute how many lists will fit on the page. Subtract the size of the fixed header, one
+ * delta list offset, and the guard bytes from the page size to determine how much space is
+ * available for delta lists.
+ */
+ free_bits = memory_size * BITS_PER_BYTE;
+ free_bits -= get_immutable_header_offset(1);
+ free_bits -= GUARD_BITS;
+ if (free_bits < IMMUTABLE_HEADER_SIZE) {
+ /* This page is too small to store any delta lists. */
+ return vdo_log_error_strerror(UDS_OVERFLOW,
+ "Chapter Index Page of %zu bytes is too small",
+ memory_size);
+ }
+
+ while (n_lists < max_lists) {
+ /* Each list requires a delta list offset and the list data. */
+ bits = IMMUTABLE_HEADER_SIZE + delta_lists[n_lists].size;
+ if (bits > free_bits)
+ break;
+
+ n_lists++;
+ free_bits -= bits;
+ }
+
+ *list_count = n_lists;
+
+ header = (struct delta_page_header *) memory;
+ put_unaligned_le64(header_nonce, (u8 *) &header->nonce);
+ put_unaligned_le64(virtual_chapter_number,
+ (u8 *) &header->virtual_chapter_number);
+ put_unaligned_le16(first_list, (u8 *) &header->first_list);
+ put_unaligned_le16(n_lists, (u8 *) &header->list_count);
+
+ /* Construct the delta list offset table. */
+ offset = get_immutable_header_offset(n_lists + 1);
+ set_immutable_start(memory, 0, offset);
+ for (i = 0; i < n_lists; i++) {
+ offset += delta_lists[i].size;
+ set_immutable_start(memory, i + 1, offset);
+ }
+
+ /* Copy the delta list data onto the memory page. */
+ for (i = 0; i < n_lists; i++) {
+ move_bits(delta_zone->memory, delta_lists[i].start, memory,
+ get_immutable_start(memory, i), delta_lists[i].size);
+ }
+
+ /* Set all the bits in the guard bytes. */
+ memset(memory + memory_size - POST_FIELD_GUARD_BYTES, ~0,
+ POST_FIELD_GUARD_BYTES);
+ return UDS_SUCCESS;
+}
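
A worked capacity example for a 4096-byte page, using the sizes defined above:
sizeof(struct delta_page_header) is 20 bytes, so get_immutable_header_offset(1) =
160 + 19 = 179 bits, and GUARD_BITS = 56, leaving 4096 * 8 - 179 - 56 = 32533 bits. Each
packed list then costs IMMUTABLE_HEADER_SIZE (19) bits of offset table plus its own size,
so a page holds roughly 32533 / (19 + mean list size in bits) delta lists.
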
+
+/* Compute the new offsets of the delta lists. */
+static void compute_new_list_offsets(struct delta_zone *delta_zone, u32 growing_index,
+ size_t growing_size, size_t used_space)
+{
+ size_t spacing;
+ u32 i;
+ struct delta_list *delta_lists = delta_zone->delta_lists;
+ u32 tail_guard_index = delta_zone->list_count + 1;
+
+ spacing = (delta_zone->size - used_space) / delta_zone->list_count;
+ delta_zone->new_offsets[0] = 0;
+ for (i = 0; i <= delta_zone->list_count; i++) {
+ delta_zone->new_offsets[i + 1] =
+ (delta_zone->new_offsets[i] +
+ get_delta_list_byte_size(&delta_lists[i]) + spacing);
+ delta_zone->new_offsets[i] *= BITS_PER_BYTE;
+ delta_zone->new_offsets[i] += delta_lists[i].start % BITS_PER_BYTE;
+ if (i == 0)
+ delta_zone->new_offsets[i + 1] -= spacing / 2;
+ if (i + 1 == growing_index)
+ delta_zone->new_offsets[i + 1] += growing_size;
+ }
+
+ delta_zone->new_offsets[tail_guard_index] =
+ (delta_zone->size * BITS_PER_BYTE - delta_lists[tail_guard_index].size);
+}
+
+static void rebalance_lists(struct delta_zone *delta_zone)
+{
+ struct delta_list *delta_lists;
+ u32 i;
+ size_t used_space = 0;
+
+ /* Extend and balance memory to receive the delta lists */
+ delta_lists = delta_zone->delta_lists;
+ for (i = 0; i <= delta_zone->list_count + 1; i++)
+ used_space += get_delta_list_byte_size(&delta_lists[i]);
+
+ compute_new_list_offsets(delta_zone, 0, 0, used_space);
+ for (i = 1; i <= delta_zone->list_count + 1; i++)
+ delta_lists[i].start = delta_zone->new_offsets[i];
+}
+
+/* Start restoring a delta index from multiple input streams. */
+int uds_start_restoring_delta_index(struct delta_index *delta_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count)
+{
+ int result;
+ unsigned int zone_count = reader_count;
+ u64 record_count = 0;
+ u64 collision_count = 0;
+ u32 first_list[MAX_ZONES];
+ u32 list_count[MAX_ZONES];
+ unsigned int z;
+ u32 list_next = 0;
+ const struct delta_zone *delta_zone;
+
+ /* Read and validate each header. */
+ for (z = 0; z < zone_count; z++) {
+ struct delta_index_header header;
+ u8 buffer[sizeof(struct delta_index_header)];
+ size_t offset = 0;
+
+ result = uds_read_from_buffered_reader(buffered_readers[z], buffer,
+ sizeof(buffer));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read delta index header");
+ }
+
+ memcpy(&header.magic, buffer, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ decode_u32_le(buffer, &offset, &header.zone_number);
+ decode_u32_le(buffer, &offset, &header.zone_count);
+ decode_u32_le(buffer, &offset, &header.first_list);
+ decode_u32_le(buffer, &offset, &header.list_count);
+ decode_u64_le(buffer, &offset, &header.record_count);
+ decode_u64_le(buffer, &offset, &header.collision_count);
+
+ result = VDO_ASSERT(offset == sizeof(struct delta_index_header),
+ "%zu bytes decoded of %zu expected", offset,
+ sizeof(struct delta_index_header));
+ if (result != VDO_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read delta index header");
+ }
+
+ if (memcmp(header.magic, DELTA_INDEX_MAGIC, MAGIC_SIZE) != 0) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "delta index file has bad magic number");
+ }
+
+ if (zone_count != header.zone_count) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "delta index files contain mismatched zone counts (%u,%u)",
+ zone_count, header.zone_count);
+ }
+
+ if (header.zone_number != z) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "delta index zone %u found in slot %u",
+ header.zone_number, z);
+ }
+
+ first_list[z] = header.first_list;
+ list_count[z] = header.list_count;
+ record_count += header.record_count;
+ collision_count += header.collision_count;
+
+ if (first_list[z] != list_next) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "delta index file for zone %u starts with list %u instead of list %u",
+ z, first_list[z], list_next);
+ }
+
+ list_next += list_count[z];
+ }
+
+ if (list_next != delta_index->list_count) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "delta index files contain %u delta lists instead of %u delta lists",
+ list_next, delta_index->list_count);
+ }
+
+ if (collision_count > record_count) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "delta index files contain %llu collisions and %llu records",
+ (unsigned long long) collision_count,
+ (unsigned long long) record_count);
+ }
+
+ uds_reset_delta_index(delta_index);
+ delta_index->delta_zones[0].record_count = record_count;
+ delta_index->delta_zones[0].collision_count = collision_count;
+
+ /* Read the delta lists and distribute them to the proper zones. */
+ for (z = 0; z < zone_count; z++) {
+ u32 i;
+
+ delta_index->load_lists[z] = 0;
+ for (i = 0; i < list_count[z]; i++) {
+ u16 delta_list_size;
+ u32 list_number;
+ unsigned int zone_number;
+ u8 size_data[sizeof(u16)];
+
+ result = uds_read_from_buffered_reader(buffered_readers[z],
+ size_data,
+ sizeof(size_data));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read delta index size");
+ }
+
+ delta_list_size = get_unaligned_le16(size_data);
+ if (delta_list_size > 0)
+ delta_index->load_lists[z] += 1;
+
+ list_number = first_list[z] + i;
+ zone_number = list_number / delta_index->lists_per_zone;
+ delta_zone = &delta_index->delta_zones[zone_number];
+ list_number -= delta_zone->first_list;
+ delta_zone->delta_lists[list_number + 1].size = delta_list_size;
+ }
+ }
+
+ /* Prepare each zone to start receiving the delta list data. */
+ for (z = 0; z < delta_index->zone_count; z++)
+ rebalance_lists(&delta_index->delta_zones[z]);
+
+ return UDS_SUCCESS;
+}
+
+static int restore_delta_list_to_zone(struct delta_zone *delta_zone,
+ const struct delta_list_save_info *save_info,
+ const u8 *data)
+{
+ struct delta_list *delta_list;
+ u16 bit_count;
+ u16 byte_count;
+ u32 list_number = save_info->index - delta_zone->first_list;
+
+ if (list_number >= delta_zone->list_count) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "invalid delta list number %u not in range [%u,%u)",
+ save_info->index, delta_zone->first_list,
+ delta_zone->first_list + delta_zone->list_count);
+ }
+
+ delta_list = &delta_zone->delta_lists[list_number + 1];
+ if (delta_list->size == 0) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "unexpected delta list number %u",
+ save_info->index);
+ }
+
+ bit_count = delta_list->size + save_info->bit_offset;
+ byte_count = BITS_TO_BYTES(bit_count);
+ if (save_info->byte_count != byte_count) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "unexpected delta list size %u != %u",
+ save_info->byte_count, byte_count);
+ }
+
+ move_bits(data, save_info->bit_offset, delta_zone->memory, delta_list->start,
+ delta_list->size);
+ return UDS_SUCCESS;
+}
+
+static int restore_delta_list_data(struct delta_index *delta_index, unsigned int load_zone,
+ struct buffered_reader *buffered_reader, u8 *data)
+{
+ int result;
+ struct delta_list_save_info save_info;
+ u8 buffer[sizeof(struct delta_list_save_info)];
+ unsigned int new_zone;
+
+ result = uds_read_from_buffered_reader(buffered_reader, buffer, sizeof(buffer));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read delta list data");
+ }
+
+ save_info = (struct delta_list_save_info) {
+ .tag = buffer[0],
+ .bit_offset = buffer[1],
+ .byte_count = get_unaligned_le16(&buffer[2]),
+ .index = get_unaligned_le32(&buffer[4]),
+ };
+
+ if ((save_info.bit_offset >= BITS_PER_BYTE) ||
+ (save_info.byte_count > DELTA_LIST_MAX_BYTE_COUNT)) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "corrupt delta list data");
+ }
+
+ /* Make sure the data is intended for this delta index. */
+ if (save_info.tag != delta_index->tag)
+ return UDS_CORRUPT_DATA;
+
+ if (save_info.index >= delta_index->list_count) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "invalid delta list number %u of %u",
+ save_info.index,
+ delta_index->list_count);
+ }
+
+ result = uds_read_from_buffered_reader(buffered_reader, data,
+ save_info.byte_count);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read delta list data");
+ }
+
+ delta_index->load_lists[load_zone] -= 1;
+ new_zone = save_info.index / delta_index->lists_per_zone;
+ return restore_delta_list_to_zone(&delta_index->delta_zones[new_zone],
+ &save_info, data);
+}
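
As decoded above, each saved delta list is preceded by an 8-byte record
(struct delta_list_save_info) with this little-endian layout:

	byte 0     tag (must match the owning delta index; 'z' marks the guard record)
	byte 1     bit_offset, the start of the list data within its first byte (< 8)
	bytes 2-3  byte_count, the number of data bytes that follow
	bytes 4-7  index, the global delta list number
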
+
+/* Restore delta lists from saved data. */
+int uds_finish_restoring_delta_index(struct delta_index *delta_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count)
+{
+ int result;
+ int saved_result = UDS_SUCCESS;
+ unsigned int z;
+ u8 *data;
+
+ result = vdo_allocate(DELTA_LIST_MAX_BYTE_COUNT, u8, __func__, &data);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (z = 0; z < reader_count; z++) {
+ while (delta_index->load_lists[z] > 0) {
+ result = restore_delta_list_data(delta_index, z,
+ buffered_readers[z], data);
+ if (result != UDS_SUCCESS) {
+ saved_result = result;
+ break;
+ }
+ }
+ }
+
+ vdo_free(data);
+ return saved_result;
+}
+
+int uds_check_guard_delta_lists(struct buffered_reader **buffered_readers,
+ unsigned int reader_count)
+{
+ int result;
+ unsigned int z;
+ u8 buffer[sizeof(struct delta_list_save_info)];
+
+ for (z = 0; z < reader_count; z++) {
+ result = uds_read_from_buffered_reader(buffered_readers[z], buffer,
+ sizeof(buffer));
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (buffer[0] != 'z')
+ return UDS_CORRUPT_DATA;
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int flush_delta_list(struct delta_zone *zone, u32 flush_index)
+{
+ struct delta_list *delta_list;
+ u8 buffer[sizeof(struct delta_list_save_info)];
+ int result;
+
+ delta_list = &zone->delta_lists[flush_index + 1];
+
+ buffer[0] = zone->tag;
+ buffer[1] = delta_list->start % BITS_PER_BYTE;
+ put_unaligned_le16(get_delta_list_byte_size(delta_list), &buffer[2]);
+ put_unaligned_le32(zone->first_list + flush_index, &buffer[4]);
+
+ result = uds_write_to_buffered_writer(zone->buffered_writer, buffer,
+ sizeof(buffer));
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning_strerror(result, "failed to write delta list memory");
+ return result;
+ }
+
+ result = uds_write_to_buffered_writer(zone->buffered_writer,
+ zone->memory + get_delta_list_byte_start(delta_list),
+ get_delta_list_byte_size(delta_list));
+ if (result != UDS_SUCCESS)
+ vdo_log_warning_strerror(result, "failed to write delta list memory");
+
+ return result;
+}
+
+/* Start saving a delta index zone to a buffered output stream. */
+int uds_start_saving_delta_index(const struct delta_index *delta_index,
+ unsigned int zone_number,
+ struct buffered_writer *buffered_writer)
+{
+ int result;
+ u32 i;
+ struct delta_zone *delta_zone;
+ u8 buffer[sizeof(struct delta_index_header)];
+ size_t offset = 0;
+
+ delta_zone = &delta_index->delta_zones[zone_number];
+ memcpy(buffer, DELTA_INDEX_MAGIC, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ encode_u32_le(buffer, &offset, zone_number);
+ encode_u32_le(buffer, &offset, delta_index->zone_count);
+ encode_u32_le(buffer, &offset, delta_zone->first_list);
+ encode_u32_le(buffer, &offset, delta_zone->list_count);
+ encode_u64_le(buffer, &offset, delta_zone->record_count);
+ encode_u64_le(buffer, &offset, delta_zone->collision_count);
+
+ result = VDO_ASSERT(offset == sizeof(struct delta_index_header),
+ "%zu bytes encoded of %zu expected", offset,
+ sizeof(struct delta_index_header));
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_write_to_buffered_writer(buffered_writer, buffer, offset);
+ if (result != UDS_SUCCESS)
+ return vdo_log_warning_strerror(result,
+ "failed to write delta index header");
+
+ for (i = 0; i < delta_zone->list_count; i++) {
+ u8 data[sizeof(u16)];
+ struct delta_list *delta_list;
+
+ delta_list = &delta_zone->delta_lists[i + 1];
+ put_unaligned_le16(delta_list->size, data);
+ result = uds_write_to_buffered_writer(buffered_writer, data,
+ sizeof(data));
+ if (result != UDS_SUCCESS)
+ return vdo_log_warning_strerror(result,
+ "failed to write delta list size");
+ }
+
+ delta_zone->buffered_writer = buffered_writer;
+ return UDS_SUCCESS;
+}
+
+int uds_finish_saving_delta_index(const struct delta_index *delta_index,
+ unsigned int zone_number)
+{
+ int result;
+ int first_error = UDS_SUCCESS;
+ u32 i;
+ struct delta_zone *delta_zone;
+ struct delta_list *delta_list;
+
+ delta_zone = &delta_index->delta_zones[zone_number];
+ for (i = 0; i < delta_zone->list_count; i++) {
+ delta_list = &delta_zone->delta_lists[i + 1];
+ if (delta_list->size > 0) {
+ result = flush_delta_list(delta_zone, i);
+ if ((result != UDS_SUCCESS) && (first_error == UDS_SUCCESS))
+ first_error = result;
+ }
+ }
+
+ delta_zone->buffered_writer = NULL;
+ return first_error;
+}
+
+int uds_write_guard_delta_list(struct buffered_writer *buffered_writer)
+{
+ int result;
+ u8 buffer[sizeof(struct delta_list_save_info)];
+
+ memset(buffer, 0, sizeof(struct delta_list_save_info));
+ buffer[0] = 'z';
+
+ result = uds_write_to_buffered_writer(buffered_writer, buffer, sizeof(buffer));
+ if (result != UDS_SUCCESS)
+ vdo_log_warning_strerror(result, "failed to write guard delta list");
+
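+	/*
+	 * Note that a write error is logged but not propagated; a truncated or missing guard
+	 * list is caught at load time, when uds_check_guard_delta_lists() returns
+	 * UDS_CORRUPT_DATA.
+	 */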
+ return UDS_SUCCESS;
+}
+
+size_t uds_compute_delta_index_save_bytes(u32 list_count, size_t memory_size)
+{
+	/* One zone will use at least as much memory as any other number of zones. */
+ return (sizeof(struct delta_index_header) +
+ list_count * (sizeof(struct delta_list_save_info) + 1) +
+ get_zone_memory_size(1, memory_size));
+}
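+
+/*
+ * For reference, each zone's save stream can be read back as follows (a sketch assembled from
+ * uds_start_saving_delta_index(), flush_delta_list(), and uds_write_guard_delta_list() above,
+ * not a separate format specification):
+ *
+ *   delta_index_header                 magic, zone numbers, list counts, and statistics
+ *   u16 sizes                          one per delta list in the zone
+ *   delta_list_save_info + list data   one pair per non-empty delta list
+ *   guard delta_list_save_info         tag 'z', remaining bytes zero
+ */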
+
+static int assert_not_at_end(const struct delta_index_entry *delta_entry)
+{
+ int result = VDO_ASSERT(!delta_entry->at_end,
+ "operation is invalid because the list entry is at the end of the delta list");
+ if (result != VDO_SUCCESS)
+ result = UDS_BAD_STATE;
+
+ return result;
+}
+
+/*
+ * Prepare to search for an entry in the specified delta list.
+ *
+ * This is always the first function to be called when dealing with delta index entries. It is
+ * always followed by calls to uds_next_delta_index_entry() to iterate through a delta list. The
+ * fields of the delta_index_entry argument will be set up for iteration, but will not contain an
+ * entry from the list.
+ */
+int uds_start_delta_index_search(const struct delta_index *delta_index, u32 list_number,
+ u32 key, struct delta_index_entry *delta_entry)
+{
+ int result;
+ unsigned int zone_number;
+ struct delta_zone *delta_zone;
+ struct delta_list *delta_list;
+
+ result = VDO_ASSERT((list_number < delta_index->list_count),
+ "Delta list number (%u) is out of range (%u)", list_number,
+ delta_index->list_count);
+ if (result != VDO_SUCCESS)
+ return UDS_CORRUPT_DATA;
+
+ zone_number = list_number / delta_index->lists_per_zone;
+ delta_zone = &delta_index->delta_zones[zone_number];
+ list_number -= delta_zone->first_list;
+ result = VDO_ASSERT((list_number < delta_zone->list_count),
+ "Delta list number (%u) is out of range (%u) for zone (%u)",
+ list_number, delta_zone->list_count, zone_number);
+ if (result != VDO_SUCCESS)
+ return UDS_CORRUPT_DATA;
+
+ if (delta_index->mutable) {
+ delta_list = &delta_zone->delta_lists[list_number + 1];
+ } else {
+ u32 end_offset;
+
+ /*
+ * Translate the immutable delta list header into a temporary
+ * full delta list header.
+ */
+ delta_list = &delta_entry->temp_delta_list;
+ delta_list->start = get_immutable_start(delta_zone->memory, list_number);
+ end_offset = get_immutable_start(delta_zone->memory, list_number + 1);
+ delta_list->size = end_offset - delta_list->start;
+ delta_list->save_key = 0;
+ delta_list->save_offset = 0;
+ }
+
+ if (key > delta_list->save_key) {
+ delta_entry->key = delta_list->save_key;
+ delta_entry->offset = delta_list->save_offset;
+ } else {
+ delta_entry->key = 0;
+ delta_entry->offset = 0;
+ if (key == 0) {
+ /*
+ * This usually means we're about to walk the entire delta list, so get all
+ * of it into the CPU cache.
+ */
+ uds_prefetch_range(&delta_zone->memory[delta_list->start / BITS_PER_BYTE],
+ delta_list->size / BITS_PER_BYTE, false);
+ }
+ }
+
+ delta_entry->at_end = false;
+ delta_entry->delta_zone = delta_zone;
+ delta_entry->delta_list = delta_list;
+ delta_entry->entry_bits = 0;
+ delta_entry->is_collision = false;
+ delta_entry->list_number = list_number;
+ delta_entry->list_overflow = false;
+ delta_entry->value_bits = delta_zone->value_bits;
+ return UDS_SUCCESS;
+}
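+
+/*
+ * A minimal caller sketch (mirroring uds_get_delta_index_entry() below; error handling elided):
+ *
+ *	struct delta_index_entry entry;
+ *
+ *	result = uds_start_delta_index_search(delta_index, list_number, key, &entry);
+ *	do {
+ *		result = uds_next_delta_index_entry(&entry);
+ *	} while ((result == UDS_SUCCESS) && !entry.at_end && (key > entry.key));
+ */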
+
+static inline u64 get_delta_entry_offset(const struct delta_index_entry *delta_entry)
+{
+ return delta_entry->delta_list->start + delta_entry->offset;
+}
+
+/*
+ * Decode a delta index entry delta value. On entry, the delta_index_entry describes the previous
+ * list entry, with its offset field already advanced to point at the subsequent entry. We decode
+ * the bit stream and update the delta_index_entry to describe that entry.
+ */
+static inline void decode_delta(struct delta_index_entry *delta_entry)
+{
+ int key_bits;
+ u32 delta;
+ const struct delta_zone *delta_zone = delta_entry->delta_zone;
+ const u8 *memory = delta_zone->memory;
+ u64 delta_offset = get_delta_entry_offset(delta_entry) + delta_entry->value_bits;
+ const u8 *addr = memory + delta_offset / BITS_PER_BYTE;
+ int offset = delta_offset % BITS_PER_BYTE;
+ u32 data = get_unaligned_le32(addr) >> offset;
+
+ addr += sizeof(u32);
+ key_bits = delta_zone->min_bits;
+ delta = data & ((1 << key_bits) - 1);
+ if (delta >= delta_zone->min_keys) {
+ data >>= key_bits;
+ if (data == 0) {
+ key_bits = sizeof(u32) * BITS_PER_BYTE - offset;
+ while ((data = get_unaligned_le32(addr)) == 0) {
+ addr += sizeof(u32);
+ key_bits += sizeof(u32) * BITS_PER_BYTE;
+ }
+ }
+ key_bits += ffs(data);
+ delta += ((key_bits - delta_zone->min_bits - 1) * delta_zone->incr_keys);
+ }
+ delta_entry->delta = delta;
+ delta_entry->key += delta;
+
+ /* Check for a collision, a delta of zero after the start. */
+ if (unlikely((delta == 0) && (delta_entry->offset > 0))) {
+ delta_entry->is_collision = true;
+ delta_entry->entry_bits = delta_entry->value_bits + key_bits + COLLISION_BITS;
+ } else {
+ delta_entry->is_collision = false;
+ delta_entry->entry_bits = delta_entry->value_bits + key_bits;
+ }
+}
+
+noinline int uds_next_delta_index_entry(struct delta_index_entry *delta_entry)
+{
+ int result;
+ const struct delta_list *delta_list;
+ u32 next_offset;
+ u16 size;
+
+ result = assert_not_at_end(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ delta_list = delta_entry->delta_list;
+ delta_entry->offset += delta_entry->entry_bits;
+ size = delta_list->size;
+ if (unlikely(delta_entry->offset >= size)) {
+ delta_entry->at_end = true;
+ delta_entry->delta = 0;
+ delta_entry->is_collision = false;
+ result = VDO_ASSERT((delta_entry->offset == size),
+ "next offset past end of delta list");
+ if (result != VDO_SUCCESS)
+ result = UDS_CORRUPT_DATA;
+
+ return result;
+ }
+
+ decode_delta(delta_entry);
+
+ next_offset = delta_entry->offset + delta_entry->entry_bits;
+ if (next_offset > size) {
+ /*
+ * This is not an assertion because uds_validate_chapter_index_page() wants to
+ * handle this error.
+ */
+ vdo_log_warning("Decoded past the end of the delta list");
+ return UDS_CORRUPT_DATA;
+ }
+
+ return UDS_SUCCESS;
+}
+
+int uds_remember_delta_index_offset(const struct delta_index_entry *delta_entry)
+{
+ int result;
+ struct delta_list *delta_list = delta_entry->delta_list;
+
+ result = VDO_ASSERT(!delta_entry->is_collision, "entry is not a collision");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ delta_list->save_key = delta_entry->key - delta_entry->delta;
+ delta_list->save_offset = delta_entry->offset;
+ return UDS_SUCCESS;
+}
+
+static void set_delta(struct delta_index_entry *delta_entry, u32 delta)
+{
+ const struct delta_zone *delta_zone = delta_entry->delta_zone;
+ u32 key_bits = (delta_zone->min_bits +
+ ((delta_zone->incr_keys - delta_zone->min_keys + delta) /
+ delta_zone->incr_keys));
+
+ delta_entry->delta = delta;
+ delta_entry->entry_bits = delta_entry->value_bits + key_bits;
+}
+
+static void get_collision_name(const struct delta_index_entry *entry, u8 *name)
+{
+ u64 offset = get_delta_entry_offset(entry) + entry->entry_bits - COLLISION_BITS;
+ const u8 *addr = entry->delta_zone->memory + offset / BITS_PER_BYTE;
+ int size = COLLISION_BYTES;
+ int shift = offset % BITS_PER_BYTE;
+
+ while (--size >= 0)
+ *name++ = get_unaligned_le16(addr++) >> shift;
+}
+
+static void set_collision_name(const struct delta_index_entry *entry, const u8 *name)
+{
+ u64 offset = get_delta_entry_offset(entry) + entry->entry_bits - COLLISION_BITS;
+ u8 *addr = entry->delta_zone->memory + offset / BITS_PER_BYTE;
+ int size = COLLISION_BYTES;
+ int shift = offset % BITS_PER_BYTE;
+ u16 mask = ~((u16) 0xFF << shift);
+ u16 data;
+
+ while (--size >= 0) {
+ data = (get_unaligned_le16(addr) & mask) | (*name++ << shift);
+ put_unaligned_le16(data, addr++);
+ }
+}
+
+int uds_get_delta_index_entry(const struct delta_index *delta_index, u32 list_number,
+ u32 key, const u8 *name,
+ struct delta_index_entry *delta_entry)
+{
+ int result;
+
+ result = uds_start_delta_index_search(delta_index, list_number, key,
+ delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ do {
+ result = uds_next_delta_index_entry(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+ } while (!delta_entry->at_end && (key > delta_entry->key));
+
+ result = uds_remember_delta_index_offset(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (!delta_entry->at_end && (key == delta_entry->key)) {
+ struct delta_index_entry collision_entry = *delta_entry;
+
+ for (;;) {
+ u8 full_name[COLLISION_BYTES];
+
+ result = uds_next_delta_index_entry(&collision_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (collision_entry.at_end || !collision_entry.is_collision)
+ break;
+
+ get_collision_name(&collision_entry, full_name);
+ if (memcmp(full_name, name, COLLISION_BYTES) == 0) {
+ *delta_entry = collision_entry;
+ break;
+ }
+ }
+ }
+
+ return UDS_SUCCESS;
+}
+
+int uds_get_delta_entry_collision(const struct delta_index_entry *delta_entry, u8 *name)
+{
+ int result;
+
+ result = assert_not_at_end(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(delta_entry->is_collision,
+ "Cannot get full block name from a non-collision delta index entry");
+ if (result != VDO_SUCCESS)
+ return UDS_BAD_STATE;
+
+ get_collision_name(delta_entry, name);
+ return UDS_SUCCESS;
+}
+
+u32 uds_get_delta_entry_value(const struct delta_index_entry *delta_entry)
+{
+ return get_field(delta_entry->delta_zone->memory,
+ get_delta_entry_offset(delta_entry), delta_entry->value_bits);
+}
+
+static int assert_mutable_entry(const struct delta_index_entry *delta_entry)
+{
+ int result = VDO_ASSERT((delta_entry->delta_list != &delta_entry->temp_delta_list),
+ "delta index is mutable");
+ if (result != VDO_SUCCESS)
+ result = UDS_BAD_STATE;
+
+ return result;
+}
+
+int uds_set_delta_entry_value(const struct delta_index_entry *delta_entry, u32 value)
+{
+ int result;
+ u32 value_mask = (1 << delta_entry->value_bits) - 1;
+
+ result = assert_mutable_entry(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = assert_not_at_end(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT((value & value_mask) == value,
+ "Value (%u) being set in a delta index is too large (must fit in %u bits)",
+ value, delta_entry->value_bits);
+ if (result != VDO_SUCCESS)
+ return UDS_INVALID_ARGUMENT;
+
+ set_field(value, delta_entry->delta_zone->memory,
+ get_delta_entry_offset(delta_entry), delta_entry->value_bits);
+ return UDS_SUCCESS;
+}
+
+/*
+ * Extend the memory used by the delta lists by adding growing_size bytes before the list indicated
+ * by growing_index, then rebalancing the lists in the new chunk.
+ */
+static int extend_delta_zone(struct delta_zone *delta_zone, u32 growing_index,
+ size_t growing_size)
+{
+ ktime_t start_time;
+ ktime_t end_time;
+ struct delta_list *delta_lists;
+ u32 i;
+ size_t used_space;
+
+ /* Calculate the amount of space that is or will be in use. */
+ start_time = current_time_ns(CLOCK_MONOTONIC);
+ delta_lists = delta_zone->delta_lists;
+ used_space = growing_size;
+ for (i = 0; i <= delta_zone->list_count + 1; i++)
+ used_space += get_delta_list_byte_size(&delta_lists[i]);
+
+ if (delta_zone->size < used_space)
+ return UDS_OVERFLOW;
+
+ /* Compute the new offsets of the delta lists. */
+ compute_new_list_offsets(delta_zone, growing_index, growing_size, used_space);
+
+ /*
+ * When we rebalance the delta list, we will include the end guard list in the rebalancing.
+ * It contains the end guard data, which must be copied.
+ */
+ rebalance_delta_zone(delta_zone, 1, delta_zone->list_count + 1);
+ end_time = current_time_ns(CLOCK_MONOTONIC);
+ delta_zone->rebalance_count++;
+ delta_zone->rebalance_time += ktime_sub(end_time, start_time);
+ return UDS_SUCCESS;
+}
+
+static int insert_bits(struct delta_index_entry *delta_entry, u16 size)
+{
+ u64 free_before;
+ u64 free_after;
+ u64 source;
+ u64 destination;
+ u32 count;
+ bool before_flag;
+ u8 *memory;
+ struct delta_zone *delta_zone = delta_entry->delta_zone;
+ struct delta_list *delta_list = delta_entry->delta_list;
+ /* Compute bits in use before and after the inserted bits. */
+ u32 total_size = delta_list->size;
+ u32 before_size = delta_entry->offset;
+ u32 after_size = total_size - delta_entry->offset;
+
+ if (total_size + size > U16_MAX) {
+ delta_entry->list_overflow = true;
+ delta_zone->overflow_count++;
+ return UDS_OVERFLOW;
+ }
+
+ /* Compute bits available before and after the delta list. */
+ free_before = (delta_list[0].start - (delta_list[-1].start + delta_list[-1].size));
+ free_after = (delta_list[1].start - (delta_list[0].start + delta_list[0].size));
+
+ if ((size <= free_before) && (size <= free_after)) {
+ /*
+ * We have enough space to use either before or after the list. Select the smaller
+ * amount of data. If it is exactly the same, try to take from the larger amount of
+ * free space.
+ */
+ if (before_size < after_size)
+ before_flag = true;
+ else if (after_size < before_size)
+ before_flag = false;
+ else
+ before_flag = free_before > free_after;
+ } else if (size <= free_before) {
+ /* There is space before but not after. */
+ before_flag = true;
+ } else if (size <= free_after) {
+ /* There is space after but not before. */
+ before_flag = false;
+ } else {
+ /*
+ * Neither of the surrounding spaces is large enough for this request. Extend
+ * and/or rebalance the delta list memory choosing to move the least amount of
+ * data.
+ */
+ int result;
+ u32 growing_index = delta_entry->list_number + 1;
+
+ before_flag = before_size < after_size;
+ if (!before_flag)
+ growing_index++;
+ result = extend_delta_zone(delta_zone, growing_index,
+ BITS_TO_BYTES(size));
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ delta_list->size += size;
+ if (before_flag) {
+ source = delta_list->start;
+ destination = source - size;
+ delta_list->start -= size;
+ count = before_size;
+ } else {
+ source = delta_list->start + delta_entry->offset;
+ destination = source + size;
+ count = after_size;
+ }
+
+ memory = delta_zone->memory;
+ move_bits(memory, source, memory, destination, count);
+ return UDS_SUCCESS;
+}
+
+static void encode_delta(const struct delta_index_entry *delta_entry)
+{
+ u32 temp;
+ u32 t1;
+ u32 t2;
+ u64 offset;
+ const struct delta_zone *delta_zone = delta_entry->delta_zone;
+ u8 *memory = delta_zone->memory;
+
+ offset = get_delta_entry_offset(delta_entry) + delta_entry->value_bits;
+ if (delta_entry->delta < delta_zone->min_keys) {
+ set_field(delta_entry->delta, memory, offset, delta_zone->min_bits);
+ return;
+ }
+
+ temp = delta_entry->delta - delta_zone->min_keys;
+ t1 = (temp % delta_zone->incr_keys) + delta_zone->min_keys;
+ t2 = temp / delta_zone->incr_keys;
+ set_field(t1, memory, offset, delta_zone->min_bits);
+ set_zero(memory, offset + delta_zone->min_bits, t2);
+ set_field(1, memory, offset + delta_zone->min_bits + t2, 1);
+}
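+
+/*
+ * To illustrate the code (with hypothetical coding constants min_bits = 8, min_keys = 100, and
+ * incr_keys = 256): a delta of 50 is stored as the 8-bit field 50; a delta of 150 is stored as
+ * the 8-bit field 150 followed by a terminating 1 bit; and a delta of 406 (150 + 256) is stored
+ * as the 8-bit field 150, one 0 bit, and the terminating 1 bit. decode_delta() recovers the
+ * delta by adding incr_keys for each 0 bit preceding the terminator.
+ */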
+
+static void encode_entry(const struct delta_index_entry *delta_entry, u32 value,
+ const u8 *name)
+{
+ u8 *memory = delta_entry->delta_zone->memory;
+ u64 offset = get_delta_entry_offset(delta_entry);
+
+ set_field(value, memory, offset, delta_entry->value_bits);
+ encode_delta(delta_entry);
+ if (name != NULL)
+ set_collision_name(delta_entry, name);
+}
+
+/*
+ * Create a new entry in the delta index. If the entry is a collision, the full record name must
+ * be provided.
+ */
+int uds_put_delta_index_entry(struct delta_index_entry *delta_entry, u32 key, u32 value,
+ const u8 *name)
+{
+ int result;
+ struct delta_zone *delta_zone;
+
+ result = assert_mutable_entry(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (delta_entry->is_collision) {
+ /*
+ * The caller wants us to insert a collision entry onto a collision entry. This
+ * happens when we find a collision and attempt to add the name again to the index.
+ * This is normally a fatal error unless we are replaying a closed chapter while we
+ * are rebuilding a volume index.
+ */
+ return UDS_DUPLICATE_NAME;
+ }
+
+ if (delta_entry->offset < delta_entry->delta_list->save_offset) {
+ /*
+ * The saved entry offset is after the new entry and will no longer be valid, so
+ * replace it with the insertion point.
+ */
+ result = uds_remember_delta_index_offset(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ if (name != NULL) {
+ /* Insert a collision entry which is placed after this entry. */
+ result = assert_not_at_end(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT((key == delta_entry->key),
+ "incorrect key for collision entry");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ delta_entry->offset += delta_entry->entry_bits;
+ set_delta(delta_entry, 0);
+ delta_entry->is_collision = true;
+ delta_entry->entry_bits += COLLISION_BITS;
+ result = insert_bits(delta_entry, delta_entry->entry_bits);
+ } else if (delta_entry->at_end) {
+ /* Insert a new entry at the end of the delta list. */
+ result = VDO_ASSERT((key >= delta_entry->key), "key past end of list");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ set_delta(delta_entry, key - delta_entry->key);
+ delta_entry->key = key;
+ delta_entry->at_end = false;
+ result = insert_bits(delta_entry, delta_entry->entry_bits);
+ } else {
+ u16 old_entry_size;
+ u16 additional_size;
+ struct delta_index_entry next_entry;
+ u32 next_value;
+
+ /*
+ * Insert a new entry which requires the delta in the following entry to be
+ * updated.
+ */
+ result = VDO_ASSERT((key < delta_entry->key),
+ "key precedes following entry");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT((key >= delta_entry->key - delta_entry->delta),
+				    "key affects following entry's delta");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ old_entry_size = delta_entry->entry_bits;
+ next_entry = *delta_entry;
+ next_value = uds_get_delta_entry_value(&next_entry);
+ set_delta(delta_entry, key - (delta_entry->key - delta_entry->delta));
+ delta_entry->key = key;
+ set_delta(&next_entry, next_entry.key - key);
+ next_entry.offset += delta_entry->entry_bits;
+ /* The two new entries are always bigger than the single entry being replaced. */
+ additional_size = (delta_entry->entry_bits +
+ next_entry.entry_bits - old_entry_size);
+ result = insert_bits(delta_entry, additional_size);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ encode_entry(&next_entry, next_value, NULL);
+ }
+
+ if (result != UDS_SUCCESS)
+ return result;
+
+ encode_entry(delta_entry, value, name);
+ delta_zone = delta_entry->delta_zone;
+ delta_zone->record_count++;
+ delta_zone->collision_count += delta_entry->is_collision ? 1 : 0;
+ return UDS_SUCCESS;
+}
+
+static void delete_bits(const struct delta_index_entry *delta_entry, int size)
+{
+ u64 source;
+ u64 destination;
+ u32 count;
+ bool before_flag;
+ struct delta_list *delta_list = delta_entry->delta_list;
+ u8 *memory = delta_entry->delta_zone->memory;
+ /* Compute bits retained before and after the deleted bits. */
+ u32 total_size = delta_list->size;
+ u32 before_size = delta_entry->offset;
+ u32 after_size = total_size - delta_entry->offset - size;
+
+ /*
+ * Determine whether to add to the available space either before or after the delta list.
+ * We prefer to move the least amount of data. If it is exactly the same, try to add to the
+ * smaller amount of free space.
+ */
+ if (before_size < after_size) {
+ before_flag = true;
+ } else if (after_size < before_size) {
+ before_flag = false;
+ } else {
+ u64 free_before =
+ (delta_list[0].start - (delta_list[-1].start + delta_list[-1].size));
+ u64 free_after =
+ (delta_list[1].start - (delta_list[0].start + delta_list[0].size));
+
+ before_flag = (free_before < free_after);
+ }
+
+ delta_list->size -= size;
+ if (before_flag) {
+ source = delta_list->start;
+ destination = source + size;
+ delta_list->start += size;
+ count = before_size;
+ } else {
+ destination = delta_list->start + delta_entry->offset;
+ source = destination + size;
+ count = after_size;
+ }
+
+ move_bits(memory, source, memory, destination, count);
+}
+
+int uds_remove_delta_index_entry(struct delta_index_entry *delta_entry)
+{
+ int result;
+ struct delta_index_entry next_entry;
+ struct delta_zone *delta_zone;
+ struct delta_list *delta_list;
+
+ result = assert_mutable_entry(delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_entry = *delta_entry;
+ result = uds_next_delta_index_entry(&next_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ delta_zone = delta_entry->delta_zone;
+
+ if (delta_entry->is_collision) {
+ /* This is a collision entry, so just remove it. */
+ delete_bits(delta_entry, delta_entry->entry_bits);
+ next_entry.offset = delta_entry->offset;
+ delta_zone->collision_count -= 1;
+ } else if (next_entry.at_end) {
+ /* This entry is at the end of the list, so just remove it. */
+ delete_bits(delta_entry, delta_entry->entry_bits);
+ next_entry.key -= delta_entry->delta;
+ next_entry.offset = delta_entry->offset;
+ } else {
+ /* The delta in the next entry needs to be updated. */
+ u32 next_value = uds_get_delta_entry_value(&next_entry);
+ u16 old_size = delta_entry->entry_bits + next_entry.entry_bits;
+
+ if (next_entry.is_collision) {
+ next_entry.is_collision = false;
+ delta_zone->collision_count -= 1;
+ }
+
+ set_delta(&next_entry, delta_entry->delta + next_entry.delta);
+ next_entry.offset = delta_entry->offset;
+ /* The one new entry is always smaller than the two entries being replaced. */
+ delete_bits(delta_entry, old_size - next_entry.entry_bits);
+ encode_entry(&next_entry, next_value, NULL);
+ }
+
+ delta_zone->record_count--;
+ delta_zone->discard_count++;
+ *delta_entry = next_entry;
+
+ delta_list = delta_entry->delta_list;
+ if (delta_entry->offset < delta_list->save_offset) {
+ /* The saved entry offset is no longer valid. */
+ delta_list->save_key = 0;
+ delta_list->save_offset = 0;
+ }
+
+ return UDS_SUCCESS;
+}
+
+void uds_get_delta_index_stats(const struct delta_index *delta_index,
+ struct delta_index_stats *stats)
+{
+ unsigned int z;
+ const struct delta_zone *delta_zone;
+
+ memset(stats, 0, sizeof(struct delta_index_stats));
+ for (z = 0; z < delta_index->zone_count; z++) {
+ delta_zone = &delta_index->delta_zones[z];
+ stats->rebalance_time += delta_zone->rebalance_time;
+ stats->rebalance_count += delta_zone->rebalance_count;
+ stats->record_count += delta_zone->record_count;
+ stats->collision_count += delta_zone->collision_count;
+ stats->discard_count += delta_zone->discard_count;
+ stats->overflow_count += delta_zone->overflow_count;
+ stats->list_count += delta_zone->list_count;
+ }
+}
+
+size_t uds_compute_delta_index_size(u32 entry_count, u32 mean_delta, u32 payload_bits)
+{
+ u16 min_bits;
+ u32 incr_keys;
+ u32 min_keys;
+
+ compute_coding_constants(mean_delta, &min_bits, &min_keys, &incr_keys);
+ /* On average, each delta is encoded into about min_bits + 1.5 bits. */
+ return entry_count * (payload_bits + min_bits + 1) + entry_count / 2;
+}
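+
+/*
+ * For example, assuming compute_coding_constants() yields min_bits = 16 for the default mean
+ * delta of 65536: a chapter index with 262144 entries and 8 payload bits needs about
+ * 262144 * (8 + 16 + 1) + 262144 / 2 = 6684672 bits, a little over 800 KB.
+ */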
+
+u32 uds_get_delta_index_page_count(u32 entry_count, u32 list_count, u32 mean_delta,
+ u32 payload_bits, size_t bytes_per_page)
+{
+ unsigned int bits_per_delta_list;
+ unsigned int bits_per_page;
+ size_t bits_per_index;
+
+ /* Compute the expected number of bits needed for all the entries. */
+ bits_per_index = uds_compute_delta_index_size(entry_count, mean_delta,
+ payload_bits);
+ bits_per_delta_list = bits_per_index / list_count;
+
+ /* Add in the immutable delta list headers. */
+ bits_per_index += list_count * IMMUTABLE_HEADER_SIZE;
+ /* Compute the number of usable bits on an immutable index page. */
+ bits_per_page = ((bytes_per_page - sizeof(struct delta_page_header)) * BITS_PER_BYTE);
+ /*
+ * Reduce the bits per page by one immutable delta list header and one delta list to
+ * account for internal fragmentation.
+ */
+ bits_per_page -= IMMUTABLE_HEADER_SIZE + bits_per_delta_list;
+ /* Now compute the number of pages needed. */
+ return DIV_ROUND_UP(bits_per_index, bits_per_page);
+}
+
+void uds_log_delta_index_entry(struct delta_index_entry *delta_entry)
+{
+ vdo_log_ratelimit(vdo_log_info,
+ "List 0x%X Key 0x%X Offset 0x%X%s%s List_size 0x%X%s",
+ delta_entry->list_number, delta_entry->key,
+ delta_entry->offset, delta_entry->at_end ? " end" : "",
+ delta_entry->is_collision ? " collision" : "",
+ delta_entry->delta_list->size,
+ delta_entry->list_overflow ? " overflow" : "");
+ delta_entry->list_overflow = false;
+}
diff --git a/drivers/md/dm-vdo/indexer/delta-index.h b/drivers/md/dm-vdo/indexer/delta-index.h
new file mode 100644
index 000000000000..53f6c6ac0bc7
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/delta-index.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_DELTA_INDEX_H
+#define UDS_DELTA_INDEX_H
+
+#include <linux/cache.h>
+
+#include "numeric.h"
+#include "time-utils.h"
+
+#include "config.h"
+#include "io-factory.h"
+
+/*
+ * A delta index is a key-value store, where each entry maps an address (the key) to a payload (the
+ * value). The entries are sorted by address, and only the delta between successive addresses is
+ * stored in the entry. The addresses are assumed to be uniformly distributed, and the deltas are
+ * therefore exponentially distributed.
+ *
+ * A delta_index can either be mutable or immutable depending on its expected use. The immutable
+ * form of a delta index is used for the indexes of closed chapters committed to the volume. The
+ * mutable form of a delta index is used by the volume index, and also by the chapter index in an
+ * open chapter. Like the index as a whole, each mutable delta index is divided into a number of
+ * independent zones.
+ */
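+
+/*
+ * As a small illustration of the encoding (hypothetical keys, not taken from a real index):
+ * storing the keys 3, 9, 9, and 12 in one delta list records the deltas 3, 6, 0, and 3. The
+ * zero delta after the start of the list is what marks the second 9 as a collision entry.
+ */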
+
+struct delta_list {
+ /* The offset of the delta list start, in bits */
+ u64 start;
+ /* The number of bits in the delta list */
+ u16 size;
+ /* Where the last search "found" the key, in bits */
+ u16 save_offset;
+ /* The key for the record just before save_offset */
+ u32 save_key;
+};
+
+struct delta_zone {
+ /* The delta list memory */
+ u8 *memory;
+ /* The delta list headers */
+ struct delta_list *delta_lists;
+ /* Temporary starts of delta lists */
+ u64 *new_offsets;
+ /* Buffered writer for saving an index */
+ struct buffered_writer *buffered_writer;
+ /* The size of delta list memory */
+ size_t size;
+ /* Nanoseconds spent rebalancing */
+ ktime_t rebalance_time;
+ /* Number of memory rebalances */
+ u32 rebalance_count;
+ /* The number of bits in a stored value */
+ u8 value_bits;
+ /* The number of bits in the minimal key code */
+ u16 min_bits;
+ /* The number of keys used in a minimal code */
+ u32 min_keys;
+ /* The number of keys used for another code bit */
+ u32 incr_keys;
+ /* The number of records in the index */
+ u64 record_count;
+ /* The number of collision records */
+ u64 collision_count;
+ /* The number of records removed */
+ u64 discard_count;
+ /* The number of UDS_OVERFLOW errors detected */
+ u64 overflow_count;
+ /* The index of the first delta list */
+ u32 first_list;
+ /* The number of delta lists */
+ u32 list_count;
+ /* Tag belonging to this delta index */
+ u8 tag;
+} __aligned(L1_CACHE_BYTES);
+
+struct delta_list_save_info {
+ /* Tag identifying which delta index this list is in */
+ u8 tag;
+ /* Bit offset of the start of the list data */
+ u8 bit_offset;
+ /* Number of bytes of list data */
+ u16 byte_count;
+ /* The delta list number within the delta index */
+ u32 index;
+} __packed;
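+
+/*
+ * On disk this structure occupies exactly 8 bytes: the tag at byte 0, the bit offset at byte 1,
+ * a little-endian byte count at bytes 2-3, and a little-endian list number at bytes 4-7,
+ * matching the buffer assembled by flush_delta_list().
+ */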
+
+struct delta_index {
+ /* The zones */
+ struct delta_zone *delta_zones;
+ /* The number of zones */
+ unsigned int zone_count;
+ /* The number of delta lists */
+ u32 list_count;
+ /* Maximum lists per zone */
+ u32 lists_per_zone;
+ /* Total memory allocated to this index */
+ size_t memory_size;
+ /* The number of non-empty lists at load time per zone */
+ u32 load_lists[MAX_ZONES];
+ /* True if this index is mutable */
+ bool mutable;
+ /* Tag belonging to this delta index */
+ u8 tag;
+};
+
+/*
+ * A delta_index_page describes a single page of a chapter index. The delta_index field allows the
+ * page to be treated as an immutable delta_index. We use the delta_zone field to treat the chapter
+ * index page as a single-zone index without the need for an additional memory allocation.
+ */
+struct delta_index_page {
+ struct delta_index delta_index;
+ /* These values are loaded from the delta_page_header */
+ u32 lowest_list_number;
+ u32 highest_list_number;
+ u64 virtual_chapter_number;
+ /* This structure describes the single zone of a delta index page. */
+ struct delta_zone delta_zone;
+};
+
+/*
+ * Notes on the delta_index_entries:
+ *
+ * The fields documented as "public" can be read by any code that uses a delta_index. The fields
+ * documented as "private" carry information between delta_index method calls and should not be
+ * used outside the delta_index module.
+ *
+ * (1) The delta_index_entry is used like an iterator when searching a delta list.
+ *
+ * (2) It is also the result of a successful search and can be used to refer to the element found
+ * by the search.
+ *
+ * (3) It is also the result of an unsuccessful search and can be used to refer to the insertion
+ * point for a new record.
+ *
+ * (4) If at_end is true, the delta_index_entry can only be used as the insertion point for a new
+ * record at the end of the list.
+ *
+ * (5) If at_end is false and is_collision is true, the delta_index_entry fields refer to a
+ * collision entry in the list, and the delta_index_entry can be used as a reference to this
+ * entry.
+ *
+ * (6) If at_end is false and is_collision is false, the delta_index_entry fields refer to a
+ * non-collision entry in the list. Such entries can be used as a reference to a
+ * found entry, or an insertion point for a non-collision entry before this entry, or an
+ * insertion point for a collision entry that collides with this entry.
+ */
+struct delta_index_entry {
+ /* Public fields */
+ /* The key for this entry */
+ u32 key;
+ /* We are after the last list entry */
+ bool at_end;
+ /* This record is a collision */
+ bool is_collision;
+
+ /* Private fields */
+ /* This delta list overflowed */
+ bool list_overflow;
+ /* The number of bits used for the value */
+ u8 value_bits;
+ /* The number of bits used for the entire entry */
+ u16 entry_bits;
+ /* The delta index zone */
+ struct delta_zone *delta_zone;
+ /* The delta list containing the entry */
+ struct delta_list *delta_list;
+ /* The delta list number */
+ u32 list_number;
+ /* Bit offset of this entry within the list */
+ u16 offset;
+ /* The delta between this and previous entry */
+ u32 delta;
+ /* Temporary delta list for immutable indices */
+ struct delta_list temp_delta_list;
+};
+
+struct delta_index_stats {
+ /* Number of bytes allocated */
+ size_t memory_allocated;
+ /* Nanoseconds spent rebalancing */
+ ktime_t rebalance_time;
+ /* Number of memory rebalances */
+ u32 rebalance_count;
+ /* The number of records in the index */
+ u64 record_count;
+ /* The number of collision records */
+ u64 collision_count;
+ /* The number of records removed */
+ u64 discard_count;
+ /* The number of UDS_OVERFLOW errors detected */
+ u64 overflow_count;
+ /* The number of delta lists */
+ u32 list_count;
+};
+
+int __must_check uds_initialize_delta_index(struct delta_index *delta_index,
+ unsigned int zone_count, u32 list_count,
+ u32 mean_delta, u32 payload_bits,
+ size_t memory_size, u8 tag);
+
+int __must_check uds_initialize_delta_index_page(struct delta_index_page *delta_index_page,
+ u64 expected_nonce, u32 mean_delta,
+ u32 payload_bits, u8 *memory,
+ size_t memory_size);
+
+void uds_uninitialize_delta_index(struct delta_index *delta_index);
+
+void uds_reset_delta_index(const struct delta_index *delta_index);
+
+int __must_check uds_pack_delta_index_page(const struct delta_index *delta_index,
+ u64 header_nonce, u8 *memory,
+ size_t memory_size,
+ u64 virtual_chapter_number, u32 first_list,
+ u32 *list_count);
+
+int __must_check uds_start_restoring_delta_index(struct delta_index *delta_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count);
+
+int __must_check uds_finish_restoring_delta_index(struct delta_index *delta_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count);
+
+int __must_check uds_check_guard_delta_lists(struct buffered_reader **buffered_readers,
+ unsigned int reader_count);
+
+int __must_check uds_start_saving_delta_index(const struct delta_index *delta_index,
+ unsigned int zone_number,
+ struct buffered_writer *buffered_writer);
+
+int __must_check uds_finish_saving_delta_index(const struct delta_index *delta_index,
+ unsigned int zone_number);
+
+int __must_check uds_write_guard_delta_list(struct buffered_writer *buffered_writer);
+
+size_t __must_check uds_compute_delta_index_save_bytes(u32 list_count,
+ size_t memory_size);
+
+int __must_check uds_start_delta_index_search(const struct delta_index *delta_index,
+ u32 list_number, u32 key,
+ struct delta_index_entry *iterator);
+
+int __must_check uds_next_delta_index_entry(struct delta_index_entry *delta_entry);
+
+int __must_check uds_remember_delta_index_offset(const struct delta_index_entry *delta_entry);
+
+int __must_check uds_get_delta_index_entry(const struct delta_index *delta_index,
+ u32 list_number, u32 key, const u8 *name,
+ struct delta_index_entry *delta_entry);
+
+int __must_check uds_get_delta_entry_collision(const struct delta_index_entry *delta_entry,
+ u8 *name);
+
+u32 __must_check uds_get_delta_entry_value(const struct delta_index_entry *delta_entry);
+
+int __must_check uds_set_delta_entry_value(const struct delta_index_entry *delta_entry, u32 value);
+
+int __must_check uds_put_delta_index_entry(struct delta_index_entry *delta_entry, u32 key,
+ u32 value, const u8 *name);
+
+int __must_check uds_remove_delta_index_entry(struct delta_index_entry *delta_entry);
+
+void uds_get_delta_index_stats(const struct delta_index *delta_index,
+ struct delta_index_stats *stats);
+
+size_t __must_check uds_compute_delta_index_size(u32 entry_count, u32 mean_delta,
+ u32 payload_bits);
+
+u32 uds_get_delta_index_page_count(u32 entry_count, u32 list_count, u32 mean_delta,
+ u32 payload_bits, size_t bytes_per_page);
+
+void uds_log_delta_index_entry(struct delta_index_entry *delta_entry);
+
+#endif /* UDS_DELTA_INDEX_H */
diff --git a/drivers/md/dm-vdo/indexer/funnel-requestqueue.c b/drivers/md/dm-vdo/indexer/funnel-requestqueue.c
new file mode 100644
index 000000000000..1a5735375ddc
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/funnel-requestqueue.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "funnel-requestqueue.h"
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/wait.h>
+
+#include "funnel-queue.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "thread-utils.h"
+
+/*
+ * This queue will attempt to handle requests in reasonably sized batches instead of reacting
+ * immediately to each new request. The wait time between batches is dynamically adjusted up or
+ * down to try to balance responsiveness against wasted thread run time.
+ *
+ * If the wait time becomes long enough, the queue will become dormant and must be explicitly
+ * awoken when a new request is enqueued. The enqueue operation updates "newest" in the funnel
+ * queue via xchg (which is a memory barrier), and later checks "dormant" to decide whether to do a
+ * wakeup of the worker thread.
+ *
+ * When deciding to go to sleep, the worker thread sets "dormant" and then examines "newest" to
+ * decide if the funnel queue is idle. In dormant mode, the last examination of "newest" before
+ * going to sleep is done inside the wait_event_interruptible() macro, after a point where one or
+ * more memory barriers have been issued. (Preparing to sleep uses spin locks.) Even if the funnel
+ * queue's "next" field update isn't visible yet to make the entry accessible, its existence will
+ * kick the worker thread out of dormant mode and back into timer-based mode.
+ *
+ * Unbatched requests are used to communicate between different zone threads and will also cause
+ * the queue to awaken immediately.
+ */
+
+enum {
+ NANOSECOND = 1,
+ MICROSECOND = 1000 * NANOSECOND,
+ MILLISECOND = 1000 * MICROSECOND,
+ DEFAULT_WAIT_TIME = 20 * MICROSECOND,
+ MINIMUM_WAIT_TIME = DEFAULT_WAIT_TIME / 2,
+ MAXIMUM_WAIT_TIME = MILLISECOND,
+ MINIMUM_BATCH = 32,
+ MAXIMUM_BATCH = 64,
+};
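+
+/*
+ * To make the adaptation concrete: the wait starts at 20 microseconds. Each batch smaller than
+ * MINIMUM_BATCH lengthens the wait by a quarter, so roughly eighteen consecutive undersized
+ * batches reach the 1 millisecond ceiling and switch the queue to dormant mode. Each batch
+ * larger than MAXIMUM_BATCH shortens the wait by a quarter, bounded below by 10 microseconds.
+ */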
+
+struct uds_request_queue {
+ /* Wait queue for synchronizing producers and consumer */
+ struct wait_queue_head wait_head;
+ /* Function to process a request */
+ uds_request_queue_processor_fn processor;
+ /* Queue of new incoming requests */
+ struct funnel_queue *main_queue;
+ /* Queue of old requests to retry */
+ struct funnel_queue *retry_queue;
+ /* The thread id of the worker thread */
+ struct thread *thread;
+ /* True if the worker was started */
+ bool started;
+ /* When true, requests can be enqueued */
+ bool running;
+ /* A flag set when the worker is waiting without a timeout */
+ atomic_t dormant;
+};
+
+static inline struct uds_request *poll_queues(struct uds_request_queue *queue)
+{
+ struct funnel_queue_entry *entry;
+
+ entry = vdo_funnel_queue_poll(queue->retry_queue);
+ if (entry != NULL)
+ return container_of(entry, struct uds_request, queue_link);
+
+ entry = vdo_funnel_queue_poll(queue->main_queue);
+ if (entry != NULL)
+ return container_of(entry, struct uds_request, queue_link);
+
+ return NULL;
+}
+
+static inline bool are_queues_idle(struct uds_request_queue *queue)
+{
+ return vdo_is_funnel_queue_idle(queue->retry_queue) &&
+ vdo_is_funnel_queue_idle(queue->main_queue);
+}
+
+/*
+ * Determine if there is a next request to process, and return it if there is. The return value
+ * tells the wait_event() macros whether the worker thread may stop waiting, and *waited_ptr
+ * records that the thread had to wait before a request became available.
+ */
+static inline bool dequeue_request(struct uds_request_queue *queue,
+ struct uds_request **request_ptr, bool *waited_ptr)
+{
+ struct uds_request *request = poll_queues(queue);
+
+ if (request != NULL) {
+ *request_ptr = request;
+ return true;
+ }
+
+ if (!READ_ONCE(queue->running)) {
+ /* Wake the worker thread so it can exit. */
+ *request_ptr = NULL;
+ return true;
+ }
+
+ *request_ptr = NULL;
+ *waited_ptr = true;
+ return false;
+}
+
+static void wait_for_request(struct uds_request_queue *queue, bool dormant,
+ unsigned long timeout, struct uds_request **request,
+ bool *waited)
+{
+ if (dormant) {
+ wait_event_interruptible(queue->wait_head,
+ (dequeue_request(queue, request, waited) ||
+ !are_queues_idle(queue)));
+ return;
+ }
+
+ wait_event_interruptible_hrtimeout(queue->wait_head,
+ dequeue_request(queue, request, waited),
+ ns_to_ktime(timeout));
+}
+
+static void request_queue_worker(void *arg)
+{
+ struct uds_request_queue *queue = arg;
+ struct uds_request *request = NULL;
+ unsigned long time_batch = DEFAULT_WAIT_TIME;
+ bool dormant = atomic_read(&queue->dormant);
+ bool waited = false;
+ long current_batch = 0;
+
+ for (;;) {
+ wait_for_request(queue, dormant, time_batch, &request, &waited);
+ if (likely(request != NULL)) {
+ current_batch++;
+ queue->processor(request);
+ } else if (!READ_ONCE(queue->running)) {
+ break;
+ }
+
+ if (dormant) {
+ /*
+ * The queue has been roused from dormancy. Clear the flag so enqueuers can
+ * stop broadcasting. No fence is needed for this transition.
+ */
+ atomic_set(&queue->dormant, false);
+ dormant = false;
+ time_batch = DEFAULT_WAIT_TIME;
+ } else if (waited) {
+ /*
+ * We waited for this request to show up. Adjust the wait time to smooth
+ * out the batch size.
+ */
+ if (current_batch < MINIMUM_BATCH) {
+ /*
+ * If the last batch of requests was too small, increase the wait
+ * time.
+ */
+ time_batch += time_batch / 4;
+ if (time_batch >= MAXIMUM_WAIT_TIME) {
+ atomic_set(&queue->dormant, true);
+ dormant = true;
+ }
+ } else if (current_batch > MAXIMUM_BATCH) {
+ /*
+ * If the last batch of requests was too large, decrease the wait
+ * time.
+ */
+ time_batch -= time_batch / 4;
+ if (time_batch < MINIMUM_WAIT_TIME)
+ time_batch = MINIMUM_WAIT_TIME;
+ }
+ current_batch = 0;
+ }
+ }
+
+ /*
+ * Ensure that we process any remaining requests that were enqueued before trying to shut
+ * down. The corresponding write barrier is in uds_request_queue_finish().
+ */
+ smp_rmb();
+ while ((request = poll_queues(queue)) != NULL)
+ queue->processor(request);
+}
+
+int uds_make_request_queue(const char *queue_name,
+ uds_request_queue_processor_fn processor,
+ struct uds_request_queue **queue_ptr)
+{
+ int result;
+ struct uds_request_queue *queue;
+
+ result = vdo_allocate(1, struct uds_request_queue, __func__, &queue);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ queue->processor = processor;
+ queue->running = true;
+ atomic_set(&queue->dormant, false);
+ init_waitqueue_head(&queue->wait_head);
+
+ result = vdo_make_funnel_queue(&queue->main_queue);
+ if (result != VDO_SUCCESS) {
+ uds_request_queue_finish(queue);
+ return result;
+ }
+
+ result = vdo_make_funnel_queue(&queue->retry_queue);
+ if (result != VDO_SUCCESS) {
+ uds_request_queue_finish(queue);
+ return result;
+ }
+
+ result = vdo_create_thread(request_queue_worker, queue, queue_name,
+ &queue->thread);
+ if (result != VDO_SUCCESS) {
+ uds_request_queue_finish(queue);
+ return result;
+ }
+
+ queue->started = true;
+ *queue_ptr = queue;
+ return UDS_SUCCESS;
+}
+
+static inline void wake_up_worker(struct uds_request_queue *queue)
+{
+ if (wq_has_sleeper(&queue->wait_head))
+ wake_up(&queue->wait_head);
+}
+
+void uds_request_queue_enqueue(struct uds_request_queue *queue,
+ struct uds_request *request)
+{
+ struct funnel_queue *sub_queue;
+ bool unbatched = request->unbatched;
+
+ sub_queue = request->requeued ? queue->retry_queue : queue->main_queue;
+ vdo_funnel_queue_put(sub_queue, &request->queue_link);
+
+ /*
+ * We must wake the worker thread when it is dormant. A read fence isn't needed here since
+ * we know the queue operation acts as one.
+ */
+ if (atomic_read(&queue->dormant) || unbatched)
+ wake_up_worker(queue);
+}
+
+void uds_request_queue_finish(struct uds_request_queue *queue)
+{
+ if (queue == NULL)
+ return;
+
+ /*
+ * This memory barrier ensures that any requests we queued will be seen. The point is that
+ * when dequeue_request() sees the following update to the running flag, it will also be
+ * able to see any change we made to a next field in the funnel queue entry. The
+ * corresponding read barrier is in request_queue_worker().
+ */
+ smp_wmb();
+ WRITE_ONCE(queue->running, false);
+
+ if (queue->started) {
+ wake_up_worker(queue);
+ vdo_join_threads(queue->thread);
+ }
+
+ vdo_free_funnel_queue(queue->main_queue);
+ vdo_free_funnel_queue(queue->retry_queue);
+ vdo_free(queue);
+}
diff --git a/drivers/md/dm-vdo/indexer/funnel-requestqueue.h b/drivers/md/dm-vdo/indexer/funnel-requestqueue.h
new file mode 100644
index 000000000000..9b0f53939b4d
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/funnel-requestqueue.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_REQUEST_QUEUE_H
+#define UDS_REQUEST_QUEUE_H
+
+#include "indexer.h"
+
+/*
+ * A simple request queue which will handle new requests in the order in which they are received,
+ * and will attempt to handle requeued requests before new ones. However, the nature of the
+ * implementation means that it cannot guarantee this ordering; the prioritization is merely a
+ * hint.
+ */
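+
+/*
+ * A minimal usage sketch (the callback and queue names here are hypothetical):
+ *
+ *	static void process_request(struct uds_request *request);
+ *
+ *	result = uds_make_request_queue("example", process_request, &queue);
+ *	uds_request_queue_enqueue(queue, request);
+ *	uds_request_queue_finish(queue);
+ */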
+
+struct uds_request_queue;
+
+typedef void (*uds_request_queue_processor_fn)(struct uds_request *);
+
+int __must_check uds_make_request_queue(const char *queue_name,
+ uds_request_queue_processor_fn processor,
+ struct uds_request_queue **queue_ptr);
+
+void uds_request_queue_enqueue(struct uds_request_queue *queue,
+ struct uds_request *request);
+
+void uds_request_queue_finish(struct uds_request_queue *queue);
+
+#endif /* UDS_REQUEST_QUEUE_H */
diff --git a/drivers/md/dm-vdo/indexer/geometry.c b/drivers/md/dm-vdo/indexer/geometry.c
new file mode 100644
index 000000000000..c0575612e820
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/geometry.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "geometry.h"
+
+#include <linux/compiler.h>
+#include <linux/log2.h>
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "delta-index.h"
+#include "indexer.h"
+
+/*
+ * An index volume is divided into a fixed number of fixed-size chapters, each consisting of a
+ * fixed number of fixed-size pages. The volume layout is defined by two constants and four
+ * parameters. The constants are that index records are 32 bytes long (16-byte block name plus
+ * 16-byte metadata) and that open chapter index hash slots are one byte long. The four parameters
+ * are the number of bytes in a page, the number of record pages in a chapter, the number of
+ * chapters in a volume, and the number of chapters that are sparse. From these parameters, we can
+ * derive the rest of the layout and other index properties.
+ *
+ * The index volume is sized by its maximum memory footprint. For a dense index, the persistent
+ * storage is about 10 times the size of the memory footprint. For a sparse index, the persistent
+ * storage is about 100 times the size of the memory footprint.
+ *
+ * For a small index with a memory footprint less than 1GB, there are three possible memory
+ * configurations: 0.25GB, 0.5GB and 0.75GB. The default geometry for each is 1024 index records
+ * per 32 KB page, 1024 chapters per volume, and either 64, 128, or 192 record pages per chapter
+ * (resulting in 6, 13, or 20 index pages per chapter) depending on the memory configuration. For
+ * the VDO default of a 0.25 GB index, this yields a deduplication window of 256 GB using about 2.5
+ * GB for the persistent storage and 256 MB of RAM.
+ *
+ * For a larger index with a memory footprint that is a multiple of 1 GB, the geometry is 1024
+ * index records per 32 KB page, 256 record pages per chapter, 26 index pages per chapter, and 1024
+ * chapters for every GB of memory footprint. For a 1 GB index, this yields a deduplication window
+ * of 1 TB using about 9 GB of persistent storage and 1 GB of RAM.
+ *
+ * The above numbers hold for volumes which have no sparse chapters. A sparse volume has 10 times
+ * as many chapters as the corresponding non-sparse volume, which provides 10 times the
+ * deduplication window while using 10 times as much persistent storage as the equivalent
+ * non-sparse volume with the same memory footprint.
+ *
+ * If the volume has been converted from a non-lvm format to an lvm volume, the number of chapters
+ * per volume will have been reduced by one by eliminating physical chapter 0, and the virtual
+ * chapter that formerly mapped to physical chapter 0 may be remapped to another physical chapter.
+ * This remapping is expressed by storing which virtual chapter was remapped, and which physical
+ * chapter it was moved to.
+ */
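+
+/*
+ * To make the smallest default concrete: a 0.25 GB index has 64 record pages of 1024 records
+ * per chapter, so each chapter holds 65536 records and the 1024-chapter volume holds 67108864
+ * records. Assuming one record per 4 KB block of deduplicated data, that is the 256 GB
+ * deduplication window cited above.
+ */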
+
+int uds_make_index_geometry(size_t bytes_per_page, u32 record_pages_per_chapter,
+ u32 chapters_per_volume, u32 sparse_chapters_per_volume,
+ u64 remapped_virtual, u64 remapped_physical,
+ struct index_geometry **geometry_ptr)
+{
+ int result;
+ struct index_geometry *geometry;
+
+ result = vdo_allocate(1, struct index_geometry, "geometry", &geometry);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ geometry->bytes_per_page = bytes_per_page;
+ geometry->record_pages_per_chapter = record_pages_per_chapter;
+ geometry->chapters_per_volume = chapters_per_volume;
+ geometry->sparse_chapters_per_volume = sparse_chapters_per_volume;
+ geometry->dense_chapters_per_volume = chapters_per_volume - sparse_chapters_per_volume;
+ geometry->remapped_virtual = remapped_virtual;
+ geometry->remapped_physical = remapped_physical;
+
+ geometry->records_per_page = bytes_per_page / BYTES_PER_RECORD;
+ geometry->records_per_chapter = geometry->records_per_page * record_pages_per_chapter;
+ geometry->records_per_volume = (u64) geometry->records_per_chapter * chapters_per_volume;
+
+ geometry->chapter_mean_delta = 1 << DEFAULT_CHAPTER_MEAN_DELTA_BITS;
+ geometry->chapter_payload_bits = bits_per(record_pages_per_chapter - 1);
+ /*
+ * We want 1 delta list for every 64 records in the chapter.
+ * The "| 077" ensures that the chapter_delta_list_bits computation
+ * does not underflow.
+ */
+ geometry->chapter_delta_list_bits =
+ bits_per((geometry->records_per_chapter - 1) | 077) - 6;
+ geometry->delta_lists_per_chapter = 1 << geometry->chapter_delta_list_bits;
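+	/*
+	 * For the default geometry of 262144 records per chapter, this is
+	 * bits_per(262143) - 6 = 18 - 6 = 12, so there are 4096 delta lists, matching
+	 * DEFAULT_CHAPTER_DELTA_LIST_BITS.
+	 */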
+ /* We need enough address bits to achieve the desired mean delta. */
+ geometry->chapter_address_bits =
+ (DEFAULT_CHAPTER_MEAN_DELTA_BITS -
+ geometry->chapter_delta_list_bits +
+ bits_per(geometry->records_per_chapter - 1));
+ geometry->index_pages_per_chapter =
+ uds_get_delta_index_page_count(geometry->records_per_chapter,
+ geometry->delta_lists_per_chapter,
+ geometry->chapter_mean_delta,
+ geometry->chapter_payload_bits,
+ bytes_per_page);
+
+ geometry->pages_per_chapter = geometry->index_pages_per_chapter + record_pages_per_chapter;
+ geometry->pages_per_volume = geometry->pages_per_chapter * chapters_per_volume;
+ geometry->bytes_per_volume =
+ bytes_per_page * (geometry->pages_per_volume + HEADER_PAGES_PER_VOLUME);
+
+ *geometry_ptr = geometry;
+ return UDS_SUCCESS;
+}
+
+int uds_copy_index_geometry(struct index_geometry *source,
+ struct index_geometry **geometry_ptr)
+{
+ return uds_make_index_geometry(source->bytes_per_page,
+ source->record_pages_per_chapter,
+ source->chapters_per_volume,
+ source->sparse_chapters_per_volume,
+ source->remapped_virtual, source->remapped_physical,
+ geometry_ptr);
+}
+
+void uds_free_index_geometry(struct index_geometry *geometry)
+{
+ vdo_free(geometry);
+}
+
+u32 __must_check uds_map_to_physical_chapter(const struct index_geometry *geometry,
+ u64 virtual_chapter)
+{
+ u64 delta;
+
+ if (!uds_is_reduced_index_geometry(geometry))
+ return virtual_chapter % geometry->chapters_per_volume;
+
+ if (likely(virtual_chapter > geometry->remapped_virtual)) {
+ delta = virtual_chapter - geometry->remapped_virtual;
+ if (likely(delta > geometry->remapped_physical))
+ return delta % geometry->chapters_per_volume;
+ else
+ return delta - 1;
+ }
+
+ if (virtual_chapter == geometry->remapped_virtual)
+ return geometry->remapped_physical;
+
+ delta = geometry->remapped_virtual - virtual_chapter;
+ if (delta < geometry->chapters_per_volume)
+ return geometry->chapters_per_volume - delta;
+
+ /* This chapter is so old the answer doesn't matter. */
+ return 0;
+}
+
+/* Check whether any sparse chapters are in use. */
+bool uds_has_sparse_chapters(const struct index_geometry *geometry,
+ u64 oldest_virtual_chapter, u64 newest_virtual_chapter)
+{
+ return uds_is_sparse_index_geometry(geometry) &&
+ ((newest_virtual_chapter - oldest_virtual_chapter + 1) >
+ geometry->dense_chapters_per_volume);
+}
+
+bool uds_is_chapter_sparse(const struct index_geometry *geometry,
+ u64 oldest_virtual_chapter, u64 newest_virtual_chapter,
+ u64 virtual_chapter_number)
+{
+ return uds_has_sparse_chapters(geometry, oldest_virtual_chapter,
+ newest_virtual_chapter) &&
+ ((virtual_chapter_number + geometry->dense_chapters_per_volume) <=
+ newest_virtual_chapter);
+}
+
+/* Calculate how many chapters to expire after opening the newest chapter. */
+u32 uds_chapters_to_expire(const struct index_geometry *geometry, u64 newest_chapter)
+{
+ /* If the index isn't full yet, don't expire anything. */
+ if (newest_chapter < geometry->chapters_per_volume)
+ return 0;
+
+ /* If a chapter is out of order... */
+ if (geometry->remapped_physical > 0) {
+ u64 oldest_chapter = newest_chapter - geometry->chapters_per_volume;
+
+ /*
+ * ... expire an extra chapter when expiring the moved chapter to free physical
+ * space for the new chapter ...
+ */
+ if (oldest_chapter == geometry->remapped_virtual)
+ return 2;
+
+ /*
+ * ... but don't expire anything when the new chapter will use the physical chapter
+ * freed by expiring the moved chapter.
+ */
+ if (oldest_chapter == (geometry->remapped_virtual + geometry->remapped_physical))
+ return 0;
+ }
+
+ /* Normally, just expire one. */
+ return 1;
+}
diff --git a/drivers/md/dm-vdo/indexer/geometry.h b/drivers/md/dm-vdo/indexer/geometry.h
new file mode 100644
index 000000000000..a2ecdb238cf2
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/geometry.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_INDEX_GEOMETRY_H
+#define UDS_INDEX_GEOMETRY_H
+
+#include "indexer.h"
+
+/*
+ * The index_geometry records parameters that define the layout of a UDS index volume, and the
+ * size and shape of various index structures. It is created when the index is created, and is
+ * referenced by many index sub-components.
+ */
+
+struct index_geometry {
+ /* Size of a chapter page, in bytes */
+ size_t bytes_per_page;
+ /* Number of record pages in a chapter */
+ u32 record_pages_per_chapter;
+ /* Total number of chapters in a volume */
+ u32 chapters_per_volume;
+ /* Number of sparsely-indexed chapters in a volume */
+ u32 sparse_chapters_per_volume;
+ /* Number of bits used to determine delta list numbers */
+ u8 chapter_delta_list_bits;
+ /* Virtual chapter remapped from physical chapter 0 */
+ u64 remapped_virtual;
+ /* New physical chapter where the remapped chapter can be found */
+ u64 remapped_physical;
+
+ /*
+ * The following properties are derived from the ones above, but they are computed and
+ * recorded as fields for convenience.
+ */
+ /* Total number of pages in a volume, excluding the header */
+ u32 pages_per_volume;
+ /* Total number of bytes in a volume, including the header */
+ size_t bytes_per_volume;
+ /* Number of pages in a chapter */
+ u32 pages_per_chapter;
+ /* Number of index pages in a chapter index */
+ u32 index_pages_per_chapter;
+ /* Number of records that fit on a page */
+ u32 records_per_page;
+ /* Number of records that fit in a chapter */
+ u32 records_per_chapter;
+ /* Number of records that fit in a volume */
+ u64 records_per_volume;
+ /* Number of delta lists per chapter index */
+ u32 delta_lists_per_chapter;
+ /* Mean delta for chapter indexes */
+ u32 chapter_mean_delta;
+ /* Number of bits needed for record page numbers */
+ u8 chapter_payload_bits;
+ /* Number of bits used to compute addresses for chapter delta lists */
+ u8 chapter_address_bits;
+ /* Number of densely-indexed chapters in a volume */
+ u32 dense_chapters_per_volume;
+};
+
+enum {
+ /* The number of bytes in a record (name + metadata) */
+ BYTES_PER_RECORD = (UDS_RECORD_NAME_SIZE + UDS_RECORD_DATA_SIZE),
+
+ /* The default length of a page in a chapter, in bytes */
+ DEFAULT_BYTES_PER_PAGE = 1024 * BYTES_PER_RECORD,
+
+ /* The default maximum number of records per page */
+ DEFAULT_RECORDS_PER_PAGE = DEFAULT_BYTES_PER_PAGE / BYTES_PER_RECORD,
+
+ /* The default number of record pages in a chapter */
+ DEFAULT_RECORD_PAGES_PER_CHAPTER = 256,
+
+ /* The default number of record pages in a chapter for a small index */
+ SMALL_RECORD_PAGES_PER_CHAPTER = 64,
+
+ /* The default number of chapters in a volume */
+ DEFAULT_CHAPTERS_PER_VOLUME = 1024,
+
+ /* The default number of sparsely-indexed chapters in a volume */
+ DEFAULT_SPARSE_CHAPTERS_PER_VOLUME = 0,
+
+ /* The log2 of the default mean delta */
+ DEFAULT_CHAPTER_MEAN_DELTA_BITS = 16,
+
+ /* The log2 of the number of delta lists in a large chapter */
+ DEFAULT_CHAPTER_DELTA_LIST_BITS = 12,
+
+ /* The log2 of the number of delta lists in a small chapter */
+ SMALL_CHAPTER_DELTA_LIST_BITS = 10,
+
+ /* The number of header pages per volume */
+ HEADER_PAGES_PER_VOLUME = 1,
+};
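+
+/*
+ * Worked example, for illustration only: assuming UDS_RECORD_NAME_SIZE and
+ * UDS_RECORD_DATA_SIZE are both 16 bytes, the defaults above give
+ * BYTES_PER_RECORD = 32, DEFAULT_BYTES_PER_PAGE = 32768, and
+ * DEFAULT_RECORDS_PER_PAGE = 1024. A default chapter of 256 record pages then
+ * holds 1024 * 256 = 262,144 records, and a full default volume of 1024
+ * chapters indexes about 268 million records.
+ */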
+
+int __must_check uds_make_index_geometry(size_t bytes_per_page, u32 record_pages_per_chapter,
+ u32 chapters_per_volume,
+ u32 sparse_chapters_per_volume, u64 remapped_virtual,
+ u64 remapped_physical,
+ struct index_geometry **geometry_ptr);
+
+int __must_check uds_copy_index_geometry(struct index_geometry *source,
+ struct index_geometry **geometry_ptr);
+
+void uds_free_index_geometry(struct index_geometry *geometry);
+
+u32 __must_check uds_map_to_physical_chapter(const struct index_geometry *geometry,
+ u64 virtual_chapter);
+
+/*
+ * Check whether this geometry is reduced by a chapter. This will only be true if the volume was
+ * converted from a non-LVM volume to an LVM volume. An unconverted volume has an even number of
+ * chapters, so an odd count implies that one chapter was removed by the conversion.
+ */
+static inline bool __must_check
+uds_is_reduced_index_geometry(const struct index_geometry *geometry)
+{
+ return !!(geometry->chapters_per_volume & 1);
+}
+
+static inline bool __must_check
+uds_is_sparse_index_geometry(const struct index_geometry *geometry)
+{
+ return geometry->sparse_chapters_per_volume > 0;
+}
+
+bool __must_check uds_has_sparse_chapters(const struct index_geometry *geometry,
+ u64 oldest_virtual_chapter,
+ u64 newest_virtual_chapter);
+
+bool __must_check uds_is_chapter_sparse(const struct index_geometry *geometry,
+ u64 oldest_virtual_chapter,
+ u64 newest_virtual_chapter,
+ u64 virtual_chapter_number);
+
+u32 __must_check uds_chapters_to_expire(const struct index_geometry *geometry,
+ u64 newest_chapter);
+
+#endif /* UDS_INDEX_GEOMETRY_H */
diff --git a/drivers/md/dm-vdo/indexer/hash-utils.h b/drivers/md/dm-vdo/indexer/hash-utils.h
new file mode 100644
index 000000000000..6a8dd8ffea6c
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/hash-utils.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_HASH_UTILS_H
+#define UDS_HASH_UTILS_H
+
+#include "numeric.h"
+
+#include "geometry.h"
+#include "indexer.h"
+
+/* Utilities for extracting portions of a request name for various uses. */
+
+/* How various portions of a record name are apportioned. */
+enum {
+ VOLUME_INDEX_BYTES_OFFSET = 0,
+ VOLUME_INDEX_BYTES_COUNT = 8,
+ CHAPTER_INDEX_BYTES_OFFSET = 8,
+ CHAPTER_INDEX_BYTES_COUNT = 6,
+ SAMPLE_BYTES_OFFSET = 14,
+ SAMPLE_BYTES_COUNT = 2,
+};
+
+static inline u64 uds_extract_chapter_index_bytes(const struct uds_record_name *name)
+{
+ const u8 *chapter_bits = &name->name[CHAPTER_INDEX_BYTES_OFFSET];
+ u64 bytes = (u64) get_unaligned_be16(chapter_bits) << 32;
+
+ bytes |= get_unaligned_be32(chapter_bits + 2);
+ return bytes;
+}
+
+static inline u64 uds_extract_volume_index_bytes(const struct uds_record_name *name)
+{
+ return get_unaligned_be64(&name->name[VOLUME_INDEX_BYTES_OFFSET]);
+}
+
+static inline u32 uds_extract_sampling_bytes(const struct uds_record_name *name)
+{
+ return get_unaligned_be16(&name->name[SAMPLE_BYTES_OFFSET]);
+}
+
+/* Compute the chapter delta list for a given name. */
+static inline u32 uds_hash_to_chapter_delta_list(const struct uds_record_name *name,
+ const struct index_geometry *geometry)
+{
+ return ((uds_extract_chapter_index_bytes(name) >> geometry->chapter_address_bits) &
+ ((1 << geometry->chapter_delta_list_bits) - 1));
+}
+
+/* Compute the chapter delta address for a given name. */
+static inline u32 uds_hash_to_chapter_delta_address(const struct uds_record_name *name,
+ const struct index_geometry *geometry)
+{
+ return uds_extract_chapter_index_bytes(name) & ((1 << geometry->chapter_address_bits) - 1);
+}
+
+static inline unsigned int uds_name_to_hash_slot(const struct uds_record_name *name,
+ unsigned int slot_count)
+{
+ return (unsigned int) (uds_extract_chapter_index_bytes(name) % slot_count);
+}
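+
+/*
+ * Illustrative sketch only, not part of this patch: how the 16-byte record
+ * name is partitioned by the offsets above. The byte values are arbitrary.
+ */
+static inline void example_name_partitioning(void)
+{
+ const struct uds_record_name name = {
+ .name = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
+ };
+
+ /* Bytes 0-7 as a big-endian u64: 0x0001020304050607 */
+ u64 volume_bits = uds_extract_volume_index_bytes(&name);
+ /* Bytes 8-13 as a big-endian 48-bit value: 0x08090a0b0c0d */
+ u64 chapter_bits = uds_extract_chapter_index_bytes(&name);
+ /* Bytes 14-15 as a big-endian u16: 0x0e0f */
+ u32 sample_bits = uds_extract_sampling_bytes(&name);
+
+ (void) volume_bits;
+ (void) chapter_bits;
+ (void) sample_bits;
+}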
+
+#endif /* UDS_HASH_UTILS_H */
diff --git a/drivers/md/dm-vdo/indexer/index-layout.c b/drivers/md/dm-vdo/indexer/index-layout.c
new file mode 100644
index 000000000000..627adc24af3b
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index-layout.c
@@ -0,0 +1,1765 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "index-layout.h"
+
+#include <linux/random.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "murmurhash3.h"
+#include "numeric.h"
+#include "time-utils.h"
+
+#include "config.h"
+#include "open-chapter.h"
+#include "volume-index.h"
+
+/*
+ * The UDS layout on storage media is divided into a number of fixed-size regions, the sizes of
+ * which are computed when the index is created. Every header and region begins on a 4K block
+ * boundary. Save regions are further sub-divided into regions of their own.
+ *
+ * Each region has a kind and an instance number. Some kinds only have one instance and therefore
+ * use RL_SOLE_INSTANCE (-1) as the instance number. The RL_KIND_INDEX kind used to use instances
+ * to represent sub-indices; now, however, there is only ever one sub-index and therefore only one
+ * instance. The RL_KIND_VOLUME_INDEX kind uses instances to record which zone is being saved.
+ *
+ * Every region header has a type and version.
+ *
+ * +-+-+---------+--------+--------+-+
+ * | | | I N D E X 0 101, 0 | |
+ * |H|C+---------+--------+--------+S|
+ * |D|f| Volume | Save | Save |e|
+ * |R|g| Region | Region | Region |a|
+ * | | | 201, -1 | 202, 0 | 202, 1 |l|
+ * +-+-+---------+--------+--------+-+
+ *
+ * The header contains the encoded region layout table as well as some index configuration data.
+ * The sub-index region and its subdivisions are maintained in the same table.
+ *
+ * There are two save regions to preserve the old state in case saving the new state is incomplete.
+ * They are used in alternation. Each save region is further divided into sub-regions.
+ *
+ * +-+-----+------+------+-----+-----+
+ * |H| IPM | MI | MI | | OC |
+ * |D| | zone | zone | ... | |
+ * |R| 301 | 302 | 302 | | 303 |
+ * | | -1 | 0 | 1 | | -1 |
+ * +-+-----+------+------+-----+-----+
+ *
+ * The header contains the encoded region layout table as well as index state data for that save.
+ * Each save also has a unique nonce.
+ */
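+
+/*
+ * For illustration: with the two save regions used here, the top-level region
+ * table of a new layout lists, in order: the header (kind 1), the config
+ * (kind 100), the sub-index (kind 101), the volume (kind 201), save slots 0
+ * and 1 (kind 202), and the seal (kind 102), matching the diagram above.
+ */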
+
+#define MAGIC_SIZE 32
+#define NONCE_INFO_SIZE 32
+#define MAX_SAVES 2
+
+enum region_kind {
+ RL_KIND_EMPTY = 0,
+ RL_KIND_HEADER = 1,
+ RL_KIND_CONFIG = 100,
+ RL_KIND_INDEX = 101,
+ RL_KIND_SEAL = 102,
+ RL_KIND_VOLUME = 201,
+ RL_KIND_SAVE = 202,
+ RL_KIND_INDEX_PAGE_MAP = 301,
+ RL_KIND_VOLUME_INDEX = 302,
+ RL_KIND_OPEN_CHAPTER = 303,
+};
+
+/* Some region types are historical and are no longer used. */
+enum region_type {
+ RH_TYPE_FREE = 0, /* unused */
+ RH_TYPE_SUPER = 1,
+ RH_TYPE_SAVE = 2,
+ RH_TYPE_CHECKPOINT = 3, /* unused */
+ RH_TYPE_UNSAVED = 4,
+};
+
+#define RL_SOLE_INSTANCE 65535
+
+/*
+ * Super block version 2 is the first released version.
+ *
+ * Super block version 3 is the normal version used from RHEL 8.2 onwards.
+ *
+ * Super block versions 4 through 6 were incremental development versions and
+ * are not supported.
+ *
+ * Super block version 7 is used for volumes which have been reduced in size by one chapter in
+ * order to make room to prepend LVM metadata to a volume originally created without LVM. This
+ * allows the index to retain most of its deduplication records.
+ */
+#define SUPER_VERSION_MINIMUM 3
+#define SUPER_VERSION_CURRENT 3
+#define SUPER_VERSION_MAXIMUM 7
+
+static const u8 LAYOUT_MAGIC[MAGIC_SIZE] = "*ALBIREO*SINGLE*FILE*LAYOUT*001*";
+static const u64 REGION_MAGIC = 0x416c6252676e3031; /* 'AlbRgn01' */
+
+struct region_header {
+ u64 magic;
+ u64 region_blocks;
+ u16 type;
+ /* Currently always version 1 */
+ u16 version;
+ u16 region_count;
+ u16 payload;
+};
+
+struct layout_region {
+ u64 start_block;
+ u64 block_count;
+ u32 __unused;
+ u16 kind;
+ u16 instance;
+};
+
+struct region_table {
+ size_t encoded_size;
+ struct region_header header;
+ struct layout_region regions[];
+};
+
+struct index_save_data {
+ u64 timestamp;
+ u64 nonce;
+ /* Currently always version 1 */
+ u32 version;
+ u32 unused__;
+};
+
+struct index_state_version {
+ s32 signature;
+ s32 version_id;
+};
+
+static const struct index_state_version INDEX_STATE_VERSION_301 = {
+ .signature = -1,
+ .version_id = 301,
+};
+
+struct index_state_data301 {
+ struct index_state_version version;
+ u64 newest_chapter;
+ u64 oldest_chapter;
+ u64 last_save;
+ u32 unused;
+ u32 padding;
+};
+
+struct index_save_layout {
+ unsigned int zone_count;
+ struct layout_region index_save;
+ struct layout_region header;
+ struct layout_region index_page_map;
+ struct layout_region free_space;
+ struct layout_region volume_index_zones[MAX_ZONES];
+ struct layout_region open_chapter;
+ struct index_save_data save_data;
+ struct index_state_data301 state_data;
+};
+
+struct sub_index_layout {
+ u64 nonce;
+ struct layout_region sub_index;
+ struct layout_region volume;
+ struct index_save_layout *saves;
+};
+
+struct super_block_data {
+ u8 magic_label[MAGIC_SIZE];
+ u8 nonce_info[NONCE_INFO_SIZE];
+ u64 nonce;
+ u32 version;
+ u32 block_size;
+ u16 index_count;
+ u16 max_saves;
+ /* Padding reflects a blank field on permanent storage */
+ u8 padding[4];
+ u64 open_chapter_blocks;
+ u64 page_map_blocks;
+ u64 volume_offset;
+ u64 start_offset;
+};
+
+struct index_layout {
+ struct io_factory *factory;
+ size_t factory_size;
+ off_t offset;
+ struct super_block_data super;
+ struct layout_region header;
+ struct layout_region config;
+ struct sub_index_layout index;
+ struct layout_region seal;
+ u64 total_blocks;
+};
+
+struct save_layout_sizes {
+ unsigned int save_count;
+ size_t block_size;
+ u64 volume_blocks;
+ u64 volume_index_blocks;
+ u64 page_map_blocks;
+ u64 open_chapter_blocks;
+ u64 save_blocks;
+ u64 sub_index_blocks;
+ u64 total_blocks;
+ size_t total_size;
+};
+
+static inline bool is_converted_super_block(struct super_block_data *super)
+{
+ return super->version == 7;
+}
+
+static int __must_check compute_sizes(const struct uds_configuration *config,
+ struct save_layout_sizes *sls)
+{
+ int result;
+ struct index_geometry *geometry = config->geometry;
+
+ memset(sls, 0, sizeof(*sls));
+ sls->save_count = MAX_SAVES;
+ sls->block_size = UDS_BLOCK_SIZE;
+ sls->volume_blocks = geometry->bytes_per_volume / sls->block_size;
+
+ result = uds_compute_volume_index_save_blocks(config, sls->block_size,
+ &sls->volume_index_blocks);
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "cannot compute index save size");
+
+ sls->page_map_blocks =
+ DIV_ROUND_UP(uds_compute_index_page_map_save_size(geometry),
+ sls->block_size);
+ sls->open_chapter_blocks =
+ DIV_ROUND_UP(uds_compute_saved_open_chapter_size(geometry),
+ sls->block_size);
+ sls->save_blocks =
+ 1 + (sls->volume_index_blocks + sls->page_map_blocks + sls->open_chapter_blocks);
+ sls->sub_index_blocks = sls->volume_blocks + (sls->save_count * sls->save_blocks);
+ sls->total_blocks = 3 + sls->sub_index_blocks;
+ sls->total_size = sls->total_blocks * sls->block_size;
+
+ return UDS_SUCCESS;
+}
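+
+/*
+ * Worked example, for illustration only, with a made-up volume index size of
+ * 1000 blocks: each save region needs 1 header block plus those 1000 volume
+ * index blocks, the page map blocks, and the open chapter blocks. The
+ * sub-index then spans the volume blocks plus two such save regions, and the
+ * total adds 3 more blocks for the layout header, the config, and the seal.
+ */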
+
+int uds_compute_index_size(const struct uds_parameters *parameters, u64 *index_size)
+{
+ int result;
+ struct uds_configuration *index_config;
+ struct save_layout_sizes sizes;
+
+ if (index_size == NULL) {
+ vdo_log_error("Missing output size pointer");
+ return -EINVAL;
+ }
+
+ result = uds_make_configuration(parameters, &index_config);
+ if (result != UDS_SUCCESS) {
+ vdo_log_error_strerror(result, "cannot compute index size");
+ return uds_status_to_errno(result);
+ }
+
+ result = compute_sizes(index_config, &sizes);
+ uds_free_configuration(index_config);
+ if (result != UDS_SUCCESS)
+ return uds_status_to_errno(result);
+
+ *index_size = sizes.total_size;
+ return UDS_SUCCESS;
+}
+
+/* Create unique data using the current time and a pseudorandom number. */
+static void create_unique_nonce_data(u8 *buffer)
+{
+ ktime_t now = current_time_ns(CLOCK_REALTIME);
+ u32 rand;
+ size_t offset = 0;
+
+ get_random_bytes(&rand, sizeof(u32));
+ memcpy(buffer + offset, &now, sizeof(now));
+ offset += sizeof(now);
+ memcpy(buffer + offset, &rand, sizeof(rand));
+ offset += sizeof(rand);
+ while (offset < NONCE_INFO_SIZE) {
+ size_t len = min(NONCE_INFO_SIZE - offset, offset);
+
+ memcpy(buffer + offset, buffer, len);
+ offset += len;
+ }
+}
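+
+/*
+ * For illustration: the loop above fills the buffer by repeatedly doubling
+ * the initialized prefix. With an 8-byte timestamp and a 4-byte random value,
+ * offset goes from 12 to 24 (copying 12 bytes) and then to 32 (copying only
+ * the 8 bytes that remain before the buffer is full).
+ */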
+
+static u64 hash_stuff(u64 start, const void *data, size_t len)
+{
+ u32 seed = start ^ (start >> 27);
+ u8 hash_buffer[16];
+
+ murmurhash3_128(data, len, seed, hash_buffer);
+ return get_unaligned_le64(hash_buffer + 4);
+}
+
+/* Generate a primary nonce from the provided data. */
+static u64 generate_primary_nonce(const void *data, size_t len)
+{
+ return hash_stuff(0xa1b1e0fc, data, len);
+}
+
+/*
+ * Deterministically generate a secondary nonce from an existing nonce and some arbitrary data by
+ * hashing the original nonce and the data to produce a new nonce.
+ */
+static u64 generate_secondary_nonce(u64 nonce, const void *data, size_t len)
+{
+ return hash_stuff(nonce + 1, data, len);
+}
+
+static int __must_check open_layout_reader(struct index_layout *layout,
+ struct layout_region *lr, off_t offset,
+ struct buffered_reader **reader_ptr)
+{
+ return uds_make_buffered_reader(layout->factory, lr->start_block + offset,
+ lr->block_count, reader_ptr);
+}
+
+static int open_region_reader(struct index_layout *layout, struct layout_region *region,
+ struct buffered_reader **reader_ptr)
+{
+ return open_layout_reader(layout, region, -layout->super.start_offset,
+ reader_ptr);
+}
+
+static int __must_check open_layout_writer(struct index_layout *layout,
+ struct layout_region *lr, off_t offset,
+ struct buffered_writer **writer_ptr)
+{
+ return uds_make_buffered_writer(layout->factory, lr->start_block + offset,
+ lr->block_count, writer_ptr);
+}
+
+static int open_region_writer(struct index_layout *layout, struct layout_region *region,
+ struct buffered_writer **writer_ptr)
+{
+ return open_layout_writer(layout, region, -layout->super.start_offset,
+ writer_ptr);
+}
+
+static void generate_super_block_data(struct save_layout_sizes *sls,
+ struct super_block_data *super)
+{
+ memset(super, 0, sizeof(*super));
+ memcpy(super->magic_label, LAYOUT_MAGIC, MAGIC_SIZE);
+ create_unique_nonce_data(super->nonce_info);
+
+ super->nonce = generate_primary_nonce(super->nonce_info,
+ sizeof(super->nonce_info));
+ super->version = SUPER_VERSION_CURRENT;
+ super->block_size = sls->block_size;
+ super->index_count = 1;
+ super->max_saves = sls->save_count;
+ super->open_chapter_blocks = sls->open_chapter_blocks;
+ super->page_map_blocks = sls->page_map_blocks;
+ super->volume_offset = 0;
+ super->start_offset = 0;
+}
+
+static void define_sub_index_nonce(struct index_layout *layout)
+{
+ struct sub_index_nonce_data {
+ u64 offset;
+ u16 index_id;
+ };
+ struct sub_index_layout *sil = &layout->index;
+ u64 primary_nonce = layout->super.nonce;
+ u8 buffer[sizeof(struct sub_index_nonce_data)] = { 0 };
+ size_t offset = 0;
+
+ encode_u64_le(buffer, &offset, sil->sub_index.start_block);
+ encode_u16_le(buffer, &offset, 0);
+ sil->nonce = generate_secondary_nonce(primary_nonce, buffer, sizeof(buffer));
+ if (sil->nonce == 0) {
+ sil->nonce = generate_secondary_nonce(~primary_nonce + 1, buffer,
+ sizeof(buffer));
+ }
+}
+
+static void setup_sub_index(struct index_layout *layout, u64 start_block,
+ struct save_layout_sizes *sls)
+{
+ struct sub_index_layout *sil = &layout->index;
+ u64 next_block = start_block;
+ unsigned int i;
+
+ sil->sub_index = (struct layout_region) {
+ .start_block = start_block,
+ .block_count = sls->sub_index_blocks,
+ .kind = RL_KIND_INDEX,
+ .instance = 0,
+ };
+
+ sil->volume = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = sls->volume_blocks,
+ .kind = RL_KIND_VOLUME,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ next_block += sls->volume_blocks;
+
+ for (i = 0; i < sls->save_count; i++) {
+ sil->saves[i].index_save = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = sls->save_blocks,
+ .kind = RL_KIND_SAVE,
+ .instance = i,
+ };
+
+ next_block += sls->save_blocks;
+ }
+
+ define_sub_index_nonce(layout);
+}
+
+static void initialize_layout(struct index_layout *layout, struct save_layout_sizes *sls)
+{
+ u64 next_block = layout->offset / sls->block_size;
+
+ layout->total_blocks = sls->total_blocks;
+ generate_super_block_data(sls, &layout->super);
+ layout->header = (struct layout_region) {
+ .start_block = next_block++,
+ .block_count = 1,
+ .kind = RL_KIND_HEADER,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ layout->config = (struct layout_region) {
+ .start_block = next_block++,
+ .block_count = 1,
+ .kind = RL_KIND_CONFIG,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ setup_sub_index(layout, next_block, sls);
+ next_block += sls->sub_index_blocks;
+
+ layout->seal = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = 1,
+ .kind = RL_KIND_SEAL,
+ .instance = RL_SOLE_INSTANCE,
+ };
+}
+
+static int __must_check make_index_save_region_table(struct index_save_layout *isl,
+ struct region_table **table_ptr)
+{
+ int result;
+ unsigned int z;
+ struct region_table *table;
+ struct layout_region *lr;
+ u16 region_count;
+ size_t payload;
+ size_t type;
+
+ if (isl->zone_count > 0) {
+ /*
+ * Normal save regions: header, page map, volume index zones,
+ * open chapter, and possibly free space.
+ */
+ region_count = 3 + isl->zone_count;
+ if (isl->free_space.block_count > 0)
+ region_count++;
+
+ payload = sizeof(isl->save_data) + sizeof(isl->state_data);
+ type = RH_TYPE_SAVE;
+ } else {
+ /* Empty save regions: header, page map, free space. */
+ region_count = 3;
+ payload = sizeof(isl->save_data);
+ type = RH_TYPE_UNSAVED;
+ }
+
+ result = vdo_allocate_extended(struct region_table, region_count,
+ struct layout_region,
+ "layout region table for ISL", &table);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ lr = &table->regions[0];
+ *lr++ = isl->header;
+ *lr++ = isl->index_page_map;
+ for (z = 0; z < isl->zone_count; z++)
+ *lr++ = isl->volume_index_zones[z];
+
+ if (isl->zone_count > 0)
+ *lr++ = isl->open_chapter;
+
+ if (isl->free_space.block_count > 0)
+ *lr++ = isl->free_space;
+
+ table->header = (struct region_header) {
+ .magic = REGION_MAGIC,
+ .region_blocks = isl->index_save.block_count,
+ .type = type,
+ .version = 1,
+ .region_count = region_count,
+ .payload = payload,
+ };
+
+ table->encoded_size = (sizeof(struct region_header) + payload +
+ region_count * sizeof(struct layout_region));
+ *table_ptr = table;
+ return UDS_SUCCESS;
+}
+
+static void encode_region_table(u8 *buffer, size_t *offset, struct region_table *table)
+{
+ unsigned int i;
+
+ encode_u64_le(buffer, offset, REGION_MAGIC);
+ encode_u64_le(buffer, offset, table->header.region_blocks);
+ encode_u16_le(buffer, offset, table->header.type);
+ encode_u16_le(buffer, offset, table->header.version);
+ encode_u16_le(buffer, offset, table->header.region_count);
+ encode_u16_le(buffer, offset, table->header.payload);
+
+ for (i = 0; i < table->header.region_count; i++) {
+ encode_u64_le(buffer, offset, table->regions[i].start_block);
+ encode_u64_le(buffer, offset, table->regions[i].block_count);
+ encode_u32_le(buffer, offset, 0);
+ encode_u16_le(buffer, offset, table->regions[i].kind);
+ encode_u16_le(buffer, offset, table->regions[i].instance);
+ }
+}
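+
+/*
+ * For illustration: the encoding above writes a 24-byte header (8 + 8 + 2 +
+ * 2 + 2 + 2) followed by 24 bytes per region (8 + 8 + 4 + 2 + 2). Both
+ * structures are naturally packed to exactly these sizes, which is why
+ * encoded_size can be computed from sizeof(struct region_header) and
+ * sizeof(struct layout_region).
+ */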
+
+static int __must_check write_index_save_header(struct index_save_layout *isl,
+ struct region_table *table,
+ struct buffered_writer *writer)
+{
+ int result;
+ u8 *buffer;
+ size_t offset = 0;
+
+ result = vdo_allocate(table->encoded_size, u8, "index save data", &buffer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ encode_region_table(buffer, &offset, table);
+ encode_u64_le(buffer, &offset, isl->save_data.timestamp);
+ encode_u64_le(buffer, &offset, isl->save_data.nonce);
+ encode_u32_le(buffer, &offset, isl->save_data.version);
+ encode_u32_le(buffer, &offset, 0);
+ if (isl->zone_count > 0) {
+ encode_u32_le(buffer, &offset, INDEX_STATE_VERSION_301.signature);
+ encode_u32_le(buffer, &offset, INDEX_STATE_VERSION_301.version_id);
+ encode_u64_le(buffer, &offset, isl->state_data.newest_chapter);
+ encode_u64_le(buffer, &offset, isl->state_data.oldest_chapter);
+ encode_u64_le(buffer, &offset, isl->state_data.last_save);
+ encode_u64_le(buffer, &offset, 0);
+ }
+
+ result = uds_write_to_buffered_writer(writer, buffer, offset);
+ vdo_free(buffer);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return uds_flush_buffered_writer(writer);
+}
+
+static int write_index_save_layout(struct index_layout *layout,
+ struct index_save_layout *isl)
+{
+ int result;
+ struct region_table *table;
+ struct buffered_writer *writer;
+
+ result = make_index_save_region_table(isl, &table);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = open_region_writer(layout, &isl->header, &writer);
+ if (result != UDS_SUCCESS) {
+ vdo_free(table);
+ return result;
+ }
+
+ result = write_index_save_header(isl, table, writer);
+ vdo_free(table);
+ uds_free_buffered_writer(writer);
+
+ return result;
+}
+
+static void reset_index_save_layout(struct index_save_layout *isl, u64 page_map_blocks)
+{
+ u64 free_blocks;
+ u64 next_block = isl->index_save.start_block;
+
+ isl->zone_count = 0;
+ memset(&isl->save_data, 0, sizeof(isl->save_data));
+
+ isl->header = (struct layout_region) {
+ .start_block = next_block++,
+ .block_count = 1,
+ .kind = RL_KIND_HEADER,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ isl->index_page_map = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = page_map_blocks,
+ .kind = RL_KIND_INDEX_PAGE_MAP,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ next_block += page_map_blocks;
+
+ free_blocks = isl->index_save.block_count - page_map_blocks - 1;
+ isl->free_space = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = free_blocks,
+ .kind = RL_KIND_EMPTY,
+ .instance = RL_SOLE_INSTANCE,
+ };
+}
+
+static int __must_check invalidate_old_save(struct index_layout *layout,
+ struct index_save_layout *isl)
+{
+ reset_index_save_layout(isl, layout->super.page_map_blocks);
+ return write_index_save_layout(layout, isl);
+}
+
+static int discard_index_state_data(struct index_layout *layout)
+{
+ int result;
+ int saved_result = UDS_SUCCESS;
+ unsigned int i;
+
+ for (i = 0; i < layout->super.max_saves; i++) {
+ result = invalidate_old_save(layout, &layout->index.saves[i]);
+ if (result != UDS_SUCCESS)
+ saved_result = result;
+ }
+
+ if (saved_result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(saved_result,
+ "%s: cannot destroy all index saves",
+ __func__);
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check make_layout_region_table(struct index_layout *layout,
+ struct region_table **table_ptr)
+{
+ int result;
+ unsigned int i;
+ /* Regions: header, config, index, volume, saves, seal */
+ u16 region_count = 5 + layout->super.max_saves;
+ u16 payload;
+ struct region_table *table;
+ struct layout_region *lr;
+
+ result = vdo_allocate_extended(struct region_table, region_count,
+ struct layout_region, "layout region table",
+ &table);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ lr = &table->regions[0];
+ *lr++ = layout->header;
+ *lr++ = layout->config;
+ *lr++ = layout->index.sub_index;
+ *lr++ = layout->index.volume;
+
+ for (i = 0; i < layout->super.max_saves; i++)
+ *lr++ = layout->index.saves[i].index_save;
+
+ *lr++ = layout->seal;
+
+ if (is_converted_super_block(&layout->super)) {
+ payload = sizeof(struct super_block_data);
+ } else {
+ payload = (sizeof(struct super_block_data) -
+ sizeof(layout->super.volume_offset) -
+ sizeof(layout->super.start_offset));
+ }
+
+ table->header = (struct region_header) {
+ .magic = REGION_MAGIC,
+ .region_blocks = layout->total_blocks,
+ .type = RH_TYPE_SUPER,
+ .version = 1,
+ .region_count = region_count,
+ .payload = payload,
+ };
+
+ table->encoded_size = (sizeof(struct region_header) + payload +
+ region_count * sizeof(struct layout_region));
+ *table_ptr = table;
+ return UDS_SUCCESS;
+}
+
+static int __must_check write_layout_header(struct index_layout *layout,
+ struct region_table *table,
+ struct buffered_writer *writer)
+{
+ int result;
+ u8 *buffer;
+ size_t offset = 0;
+
+ result = vdo_allocate(table->encoded_size, u8, "layout data", &buffer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ encode_region_table(buffer, &offset, table);
+ memcpy(buffer + offset, &layout->super.magic_label, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ memcpy(buffer + offset, &layout->super.nonce_info, NONCE_INFO_SIZE);
+ offset += NONCE_INFO_SIZE;
+ encode_u64_le(buffer, &offset, layout->super.nonce);
+ encode_u32_le(buffer, &offset, layout->super.version);
+ encode_u32_le(buffer, &offset, layout->super.block_size);
+ encode_u16_le(buffer, &offset, layout->super.index_count);
+ encode_u16_le(buffer, &offset, layout->super.max_saves);
+ encode_u32_le(buffer, &offset, 0);
+ encode_u64_le(buffer, &offset, layout->super.open_chapter_blocks);
+ encode_u64_le(buffer, &offset, layout->super.page_map_blocks);
+
+ if (is_converted_super_block(&layout->super)) {
+ encode_u64_le(buffer, &offset, layout->super.volume_offset);
+ encode_u64_le(buffer, &offset, layout->super.start_offset);
+ }
+
+ result = uds_write_to_buffered_writer(writer, buffer, offset);
+ vdo_free(buffer);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return uds_flush_buffered_writer(writer);
+}
+
+static int __must_check write_uds_index_config(struct index_layout *layout,
+ struct uds_configuration *config,
+ off_t offset)
+{
+ int result;
+ struct buffered_writer *writer = NULL;
+
+ result = open_layout_writer(layout, &layout->config, offset, &writer);
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "failed to open config region");
+
+ result = uds_write_config_contents(writer, config, layout->super.version);
+ if (result != UDS_SUCCESS) {
+ uds_free_buffered_writer(writer);
+ return vdo_log_error_strerror(result, "failed to write config region");
+ }
+
+ result = uds_flush_buffered_writer(writer);
+ if (result != UDS_SUCCESS) {
+ uds_free_buffered_writer(writer);
+ return vdo_log_error_strerror(result, "cannot flush config writer");
+ }
+
+ uds_free_buffered_writer(writer);
+ return UDS_SUCCESS;
+}
+
+static int __must_check save_layout(struct index_layout *layout, off_t offset)
+{
+ int result;
+ struct buffered_writer *writer = NULL;
+ struct region_table *table;
+
+ result = make_layout_region_table(layout, &table);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = open_layout_writer(layout, &layout->header, offset, &writer);
+ if (result != UDS_SUCCESS) {
+ vdo_free(table);
+ return result;
+ }
+
+ result = write_layout_header(layout, table, writer);
+ vdo_free(table);
+ uds_free_buffered_writer(writer);
+
+ return result;
+}
+
+static int create_index_layout(struct index_layout *layout, struct uds_configuration *config)
+{
+ int result;
+ struct save_layout_sizes sizes;
+
+ result = compute_sizes(config, &sizes);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = vdo_allocate(sizes.save_count, struct index_save_layout, __func__,
+ &layout->index.saves);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ initialize_layout(layout, &sizes);
+
+ result = discard_index_state_data(layout);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = write_uds_index_config(layout, config, 0);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return save_layout(layout, 0);
+}
+
+static u64 generate_index_save_nonce(u64 volume_nonce, struct index_save_layout *isl)
+{
+ struct save_nonce_data {
+ struct index_save_data data;
+ u64 offset;
+ } nonce_data;
+ u8 buffer[sizeof(nonce_data)];
+ size_t offset = 0;
+
+ encode_u64_le(buffer, &offset, isl->save_data.timestamp);
+ encode_u64_le(buffer, &offset, 0);
+ encode_u32_le(buffer, &offset, isl->save_data.version);
+ encode_u32_le(buffer, &offset, 0U);
+ encode_u64_le(buffer, &offset, isl->index_save.start_block);
+ VDO_ASSERT_LOG_ONLY(offset == sizeof(nonce_data),
+ "%zu bytes encoded of %zu expected",
+ offset, sizeof(nonce_data));
+ return generate_secondary_nonce(volume_nonce, buffer, sizeof(buffer));
+}
+
+static u64 validate_index_save_layout(struct index_save_layout *isl, u64 volume_nonce)
+{
+ if ((isl->zone_count == 0) || (isl->save_data.timestamp == 0))
+ return 0;
+
+ if (isl->save_data.nonce != generate_index_save_nonce(volume_nonce, isl))
+ return 0;
+
+ return isl->save_data.timestamp;
+}
+
+static int find_latest_uds_index_save_slot(struct index_layout *layout,
+ struct index_save_layout **isl_ptr)
+{
+ struct index_save_layout *latest = NULL;
+ struct index_save_layout *isl;
+ unsigned int i;
+ u64 save_time = 0;
+ u64 latest_time = 0;
+
+ for (i = 0; i < layout->super.max_saves; i++) {
+ isl = &layout->index.saves[i];
+ save_time = validate_index_save_layout(isl, layout->index.nonce);
+ if (save_time > latest_time) {
+ latest = isl;
+ latest_time = save_time;
+ }
+ }
+
+ if (latest == NULL) {
+ vdo_log_error("No valid index save found");
+ return UDS_INDEX_NOT_SAVED_CLEANLY;
+ }
+
+ *isl_ptr = latest;
+ return UDS_SUCCESS;
+}
+
+int uds_discard_open_chapter(struct index_layout *layout)
+{
+ int result;
+ struct index_save_layout *isl;
+ struct buffered_writer *writer;
+
+ result = find_latest_uds_index_save_slot(layout, &isl);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = open_region_writer(layout, &isl->open_chapter, &writer);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_write_to_buffered_writer(writer, NULL, UDS_BLOCK_SIZE);
+ if (result != UDS_SUCCESS) {
+ uds_free_buffered_writer(writer);
+ return result;
+ }
+
+ result = uds_flush_buffered_writer(writer);
+ uds_free_buffered_writer(writer);
+ return result;
+}
+
+int uds_load_index_state(struct index_layout *layout, struct uds_index *index)
+{
+ int result;
+ unsigned int zone;
+ struct index_save_layout *isl;
+ struct buffered_reader *readers[MAX_ZONES];
+
+ result = find_latest_uds_index_save_slot(layout, &isl);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ index->newest_virtual_chapter = isl->state_data.newest_chapter;
+ index->oldest_virtual_chapter = isl->state_data.oldest_chapter;
+ index->last_save = isl->state_data.last_save;
+
+ result = open_region_reader(layout, &isl->open_chapter, &readers[0]);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_load_open_chapter(index, readers[0]);
+ uds_free_buffered_reader(readers[0]);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ for (zone = 0; zone < isl->zone_count; zone++) {
+ result = open_region_reader(layout, &isl->volume_index_zones[zone],
+ &readers[zone]);
+ if (result != UDS_SUCCESS) {
+ for (; zone > 0; zone--)
+ uds_free_buffered_reader(readers[zone - 1]);
+
+ return result;
+ }
+ }
+
+ result = uds_load_volume_index(index->volume_index, readers, isl->zone_count);
+ for (zone = 0; zone < isl->zone_count; zone++)
+ uds_free_buffered_reader(readers[zone]);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = open_region_reader(layout, &isl->index_page_map, &readers[0]);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_read_index_page_map(index->volume->index_page_map, readers[0]);
+ uds_free_buffered_reader(readers[0]);
+
+ return result;
+}
+
+static struct index_save_layout *select_oldest_index_save_layout(struct index_layout *layout)
+{
+ struct index_save_layout *oldest = NULL;
+ struct index_save_layout *isl;
+ unsigned int i;
+ u64 save_time = 0;
+ u64 oldest_time = 0;
+
+ for (i = 0; i < layout->super.max_saves; i++) {
+ isl = &layout->index.saves[i];
+ save_time = validate_index_save_layout(isl, layout->index.nonce);
+ if (oldest == NULL || save_time < oldest_time) {
+ oldest = isl;
+ oldest_time = save_time;
+ }
+ }
+
+ return oldest;
+}
+
+static void instantiate_index_save_layout(struct index_save_layout *isl,
+ struct super_block_data *super,
+ u64 volume_nonce, unsigned int zone_count)
+{
+ unsigned int z;
+ u64 next_block;
+ u64 free_blocks;
+ u64 volume_index_blocks;
+
+ isl->zone_count = zone_count;
+ memset(&isl->save_data, 0, sizeof(isl->save_data));
+ isl->save_data.timestamp = ktime_to_ms(current_time_ns(CLOCK_REALTIME));
+ isl->save_data.version = 1;
+ isl->save_data.nonce = generate_index_save_nonce(volume_nonce, isl);
+
+ next_block = isl->index_save.start_block;
+ isl->header = (struct layout_region) {
+ .start_block = next_block++,
+ .block_count = 1,
+ .kind = RL_KIND_HEADER,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ isl->index_page_map = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = super->page_map_blocks,
+ .kind = RL_KIND_INDEX_PAGE_MAP,
+ .instance = RL_SOLE_INSTANCE,
+ };
+ next_block += super->page_map_blocks;
+
+ free_blocks = (isl->index_save.block_count - 1 -
+ super->page_map_blocks -
+ super->open_chapter_blocks);
+ volume_index_blocks = free_blocks / isl->zone_count;
+ for (z = 0; z < isl->zone_count; z++) {
+ isl->volume_index_zones[z] = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = volume_index_blocks,
+ .kind = RL_KIND_VOLUME_INDEX,
+ .instance = z,
+ };
+
+ next_block += volume_index_blocks;
+ free_blocks -= volume_index_blocks;
+ }
+
+ isl->open_chapter = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = super->open_chapter_blocks,
+ .kind = RL_KIND_OPEN_CHAPTER,
+ .instance = RL_SOLE_INSTANCE,
+ };
+
+ next_block += super->open_chapter_blocks;
+
+ isl->free_space = (struct layout_region) {
+ .start_block = next_block,
+ .block_count = free_blocks,
+ .kind = RL_KIND_EMPTY,
+ .instance = RL_SOLE_INSTANCE,
+ };
+}
+
+static int setup_uds_index_save_slot(struct index_layout *layout,
+ unsigned int zone_count,
+ struct index_save_layout **isl_ptr)
+{
+ int result;
+ struct index_save_layout *isl;
+
+ isl = select_oldest_index_save_layout(layout);
+ result = invalidate_old_save(layout, isl);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ instantiate_index_save_layout(isl, &layout->super, layout->index.nonce,
+ zone_count);
+
+ *isl_ptr = isl;
+ return UDS_SUCCESS;
+}
+
+static void cancel_uds_index_save(struct index_save_layout *isl)
+{
+ memset(&isl->save_data, 0, sizeof(isl->save_data));
+ memset(&isl->state_data, 0, sizeof(isl->state_data));
+ isl->zone_count = 0;
+}
+
+int uds_save_index_state(struct index_layout *layout, struct uds_index *index)
+{
+ int result;
+ unsigned int zone;
+ struct index_save_layout *isl;
+ struct buffered_writer *writers[MAX_ZONES];
+
+ result = setup_uds_index_save_slot(layout, index->zone_count, &isl);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ isl->state_data = (struct index_state_data301) {
+ .newest_chapter = index->newest_virtual_chapter,
+ .oldest_chapter = index->oldest_virtual_chapter,
+ .last_save = index->last_save,
+ };
+
+ result = open_region_writer(layout, &isl->open_chapter, &writers[0]);
+ if (result != UDS_SUCCESS) {
+ cancel_uds_index_save(isl);
+ return result;
+ }
+
+ result = uds_save_open_chapter(index, writers[0]);
+ uds_free_buffered_writer(writers[0]);
+ if (result != UDS_SUCCESS) {
+ cancel_uds_index_save(isl);
+ return result;
+ }
+
+ for (zone = 0; zone < index->zone_count; zone++) {
+ result = open_region_writer(layout, &isl->volume_index_zones[zone],
+ &writers[zone]);
+ if (result != UDS_SUCCESS) {
+ for (; zone > 0; zone--)
+ uds_free_buffered_writer(writers[zone - 1]);
+
+ cancel_uds_index_save(isl);
+ return result;
+ }
+ }
+
+ result = uds_save_volume_index(index->volume_index, writers, index->zone_count);
+ for (zone = 0; zone < index->zone_count; zone++)
+ uds_free_buffered_writer(writers[zone]);
+ if (result != UDS_SUCCESS) {
+ cancel_uds_index_save(isl);
+ return result;
+ }
+
+ result = open_region_writer(layout, &isl->index_page_map, &writers[0]);
+ if (result != UDS_SUCCESS) {
+ cancel_uds_index_save(isl);
+ return result;
+ }
+
+ result = uds_write_index_page_map(index->volume->index_page_map, writers[0]);
+ uds_free_buffered_writer(writers[0]);
+ if (result != UDS_SUCCESS) {
+ cancel_uds_index_save(isl);
+ return result;
+ }
+
+ return write_index_save_layout(layout, isl);
+}
+
+static int __must_check load_region_table(struct buffered_reader *reader,
+ struct region_table **table_ptr)
+{
+ int result;
+ unsigned int i;
+ struct region_header header;
+ struct region_table *table;
+ u8 buffer[sizeof(struct region_header)];
+ size_t offset = 0;
+
+ result = uds_read_from_buffered_reader(reader, buffer, sizeof(buffer));
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "cannot read region table header");
+
+ decode_u64_le(buffer, &offset, &header.magic);
+ decode_u64_le(buffer, &offset, &header.region_blocks);
+ decode_u16_le(buffer, &offset, &header.type);
+ decode_u16_le(buffer, &offset, &header.version);
+ decode_u16_le(buffer, &offset, &header.region_count);
+ decode_u16_le(buffer, &offset, &header.payload);
+
+ if (header.magic != REGION_MAGIC)
+ return UDS_NO_INDEX;
+
+ if (header.version != 1) {
+ return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION,
+ "unknown region table version %hu",
+ header.version);
+ }
+
+ result = vdo_allocate_extended(struct region_table, header.region_count,
+ struct layout_region,
+ "single file layout region table", &table);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ table->header = header;
+ for (i = 0; i < header.region_count; i++) {
+ u8 region_buffer[sizeof(struct layout_region)];
+
+ offset = 0;
+ result = uds_read_from_buffered_reader(reader, region_buffer,
+ sizeof(region_buffer));
+ if (result != UDS_SUCCESS) {
+ vdo_free(table);
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "cannot read region table layouts");
+ }
+
+ decode_u64_le(region_buffer, &offset, &table->regions[i].start_block);
+ decode_u64_le(region_buffer, &offset, &table->regions[i].block_count);
+ offset += sizeof(u32);
+ decode_u16_le(region_buffer, &offset, &table->regions[i].kind);
+ decode_u16_le(region_buffer, &offset, &table->regions[i].instance);
+ }
+
+ *table_ptr = table;
+ return UDS_SUCCESS;
+}
+
+static int __must_check read_super_block_data(struct buffered_reader *reader,
+ struct index_layout *layout,
+ size_t saved_size)
+{
+ int result;
+ struct super_block_data *super = &layout->super;
+ u8 *buffer;
+ size_t offset = 0;
+
+ result = vdo_allocate(saved_size, u8, "super block data", &buffer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_read_from_buffered_reader(reader, buffer, saved_size);
+ if (result != UDS_SUCCESS) {
+ vdo_free(buffer);
+ return vdo_log_error_strerror(result, "cannot read region table header");
+ }
+
+ memcpy(&super->magic_label, buffer, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ memcpy(&super->nonce_info, buffer + offset, NONCE_INFO_SIZE);
+ offset += NONCE_INFO_SIZE;
+ decode_u64_le(buffer, &offset, &super->nonce);
+ decode_u32_le(buffer, &offset, &super->version);
+ decode_u32_le(buffer, &offset, &super->block_size);
+ decode_u16_le(buffer, &offset, &super->index_count);
+ decode_u16_le(buffer, &offset, &super->max_saves);
+ offset += sizeof(u32);
+ decode_u64_le(buffer, &offset, &super->open_chapter_blocks);
+ decode_u64_le(buffer, &offset, &super->page_map_blocks);
+
+ if (is_converted_super_block(super)) {
+ decode_u64_le(buffer, &offset, &super->volume_offset);
+ decode_u64_le(buffer, &offset, &super->start_offset);
+ } else {
+ super->volume_offset = 0;
+ super->start_offset = 0;
+ }
+
+ vdo_free(buffer);
+
+ if (memcmp(super->magic_label, LAYOUT_MAGIC, MAGIC_SIZE) != 0)
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "unknown superblock magic label");
+
+ if ((super->version < SUPER_VERSION_MINIMUM) ||
+ (super->version == 4) || (super->version == 5) || (super->version == 6) ||
+ (super->version > SUPER_VERSION_MAXIMUM)) {
+ return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION,
+ "unknown superblock version number %u",
+ super->version);
+ }
+
+ if (super->volume_offset < super->start_offset) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "inconsistent offsets (start %llu, volume %llu)",
+ (unsigned long long) super->start_offset,
+ (unsigned long long) super->volume_offset);
+ }
+
+ /* Sub-indexes are no longer used but the layout retains this field. */
+ if (super->index_count != 1) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "invalid subindex count %u",
+ super->index_count);
+ }
+
+ if (generate_primary_nonce(super->nonce_info, sizeof(super->nonce_info)) != super->nonce) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "inconsistent superblock nonce");
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check verify_region(struct layout_region *lr, u64 start_block,
+ enum region_kind kind, unsigned int instance)
+{
+ if (lr->start_block != start_block)
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "incorrect layout region offset");
+
+ if (lr->kind != kind)
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "incorrect layout region kind");
+
+ if (lr->instance != instance) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "incorrect layout region instance");
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check verify_sub_index(struct index_layout *layout, u64 start_block,
+ struct region_table *table)
+{
+ int result;
+ unsigned int i;
+ struct sub_index_layout *sil = &layout->index;
+ u64 next_block = start_block;
+
+ sil->sub_index = table->regions[2];
+ result = verify_region(&sil->sub_index, next_block, RL_KIND_INDEX, 0);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ define_sub_index_nonce(layout);
+
+ sil->volume = table->regions[3];
+ result = verify_region(&sil->volume, next_block, RL_KIND_VOLUME,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += sil->volume.block_count + layout->super.volume_offset;
+
+ for (i = 0; i < layout->super.max_saves; i++) {
+ sil->saves[i].index_save = table->regions[i + 4];
+ result = verify_region(&sil->saves[i].index_save, next_block,
+ RL_KIND_SAVE, i);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += sil->saves[i].index_save.block_count;
+ }
+
+ next_block -= layout->super.volume_offset;
+ if (next_block != start_block + sil->sub_index.block_count) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "sub index region does not span all saves");
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check reconstitute_layout(struct index_layout *layout,
+ struct region_table *table, u64 first_block)
+{
+ int result;
+ u64 next_block = first_block;
+
+ result = vdo_allocate(layout->super.max_saves, struct index_save_layout,
+ __func__, &layout->index.saves);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ layout->total_blocks = table->header.region_blocks;
+
+ layout->header = table->regions[0];
+ result = verify_region(&layout->header, next_block++, RL_KIND_HEADER,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ layout->config = table->regions[1];
+ result = verify_region(&layout->config, next_block++, RL_KIND_CONFIG,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = verify_sub_index(layout, next_block, table);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += layout->index.sub_index.block_count;
+
+ layout->seal = table->regions[table->header.region_count - 1];
+ result = verify_region(&layout->seal, next_block + layout->super.volume_offset,
+ RL_KIND_SEAL, RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (++next_block != (first_block + layout->total_blocks)) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "layout table does not span total blocks");
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check load_super_block(struct index_layout *layout, size_t block_size,
+ u64 first_block, struct buffered_reader *reader)
+{
+ int result;
+ struct region_table *table = NULL;
+ struct super_block_data *super = &layout->super;
+
+ result = load_region_table(reader, &table);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (table->header.type != RH_TYPE_SUPER) {
+ vdo_free(table);
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "not a superblock region table");
+ }
+
+ result = read_super_block_data(reader, layout, table->header.payload);
+ if (result != UDS_SUCCESS) {
+ vdo_free(table);
+ return vdo_log_error_strerror(result, "unknown superblock format");
+ }
+
+ if (super->block_size != block_size) {
+ vdo_free(table);
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "superblock saved block_size %u differs from supplied block_size %zu",
+ super->block_size, block_size);
+ }
+
+ first_block -= (super->volume_offset - super->start_offset);
+ result = reconstitute_layout(layout, table, first_block);
+ vdo_free(table);
+ return result;
+}
+
+static int __must_check read_index_save_data(struct buffered_reader *reader,
+ struct index_save_layout *isl,
+ size_t saved_size)
+{
+ int result;
+ struct index_state_version file_version;
+ u8 buffer[sizeof(struct index_save_data) + sizeof(struct index_state_data301)];
+ size_t offset = 0;
+
+ if (saved_size != sizeof(buffer)) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "unexpected index save data size %zu",
+ saved_size);
+ }
+
+ result = uds_read_from_buffered_reader(reader, buffer, sizeof(buffer));
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "cannot read index save data");
+
+ decode_u64_le(buffer, &offset, &isl->save_data.timestamp);
+ decode_u64_le(buffer, &offset, &isl->save_data.nonce);
+ decode_u32_le(buffer, &offset, &isl->save_data.version);
+ offset += sizeof(u32);
+
+ if (isl->save_data.version > 1) {
+ return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION,
+ "unknown index save version number %u",
+ isl->save_data.version);
+ }
+
+ decode_s32_le(buffer, &offset, &file_version.signature);
+ decode_s32_le(buffer, &offset, &file_version.version_id);
+
+ if ((file_version.signature != INDEX_STATE_VERSION_301.signature) ||
+ (file_version.version_id != INDEX_STATE_VERSION_301.version_id)) {
+ return vdo_log_error_strerror(UDS_UNSUPPORTED_VERSION,
+ "index state version %d,%d is unsupported",
+ file_version.signature,
+ file_version.version_id);
+ }
+
+ decode_u64_le(buffer, &offset, &isl->state_data.newest_chapter);
+ decode_u64_le(buffer, &offset, &isl->state_data.oldest_chapter);
+ decode_u64_le(buffer, &offset, &isl->state_data.last_save);
+ /* Skip past some historical fields that are now unused */
+ offset += sizeof(u32) + sizeof(u32);
+ return UDS_SUCCESS;
+}
+
+static int __must_check reconstruct_index_save(struct index_save_layout *isl,
+ struct region_table *table)
+{
+ int result;
+ unsigned int z;
+ struct layout_region *last_region;
+ u64 next_block = isl->index_save.start_block;
+ u64 last_block = next_block + isl->index_save.block_count;
+
+ isl->zone_count = table->header.region_count - 3;
+
+ last_region = &table->regions[table->header.region_count - 1];
+ if (last_region->kind == RL_KIND_EMPTY) {
+ isl->free_space = *last_region;
+ isl->zone_count--;
+ } else {
+ isl->free_space = (struct layout_region) {
+ .start_block = last_block,
+ .block_count = 0,
+ .kind = RL_KIND_EMPTY,
+ .instance = RL_SOLE_INSTANCE,
+ };
+ }
+
+ isl->header = table->regions[0];
+ result = verify_region(&isl->header, next_block++, RL_KIND_HEADER,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ isl->index_page_map = table->regions[1];
+ result = verify_region(&isl->index_page_map, next_block, RL_KIND_INDEX_PAGE_MAP,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += isl->index_page_map.block_count;
+
+ for (z = 0; z < isl->zone_count; z++) {
+ isl->volume_index_zones[z] = table->regions[z + 2];
+ result = verify_region(&isl->volume_index_zones[z], next_block,
+ RL_KIND_VOLUME_INDEX, z);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += isl->volume_index_zones[z].block_count;
+ }
+
+ isl->open_chapter = table->regions[isl->zone_count + 2];
+ result = verify_region(&isl->open_chapter, next_block, RL_KIND_OPEN_CHAPTER,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += isl->open_chapter.block_count;
+
+ result = verify_region(&isl->free_space, next_block, RL_KIND_EMPTY,
+ RL_SOLE_INSTANCE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ next_block += isl->free_space.block_count;
+ if (next_block != last_block) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "index save layout table incomplete");
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check load_index_save(struct index_save_layout *isl,
+ struct buffered_reader *reader,
+ unsigned int instance)
+{
+ int result;
+ struct region_table *table = NULL;
+
+ result = load_region_table(reader, &table);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(result, "cannot read index save %u header",
+ instance);
+ }
+
+ if (table->header.region_blocks != isl->index_save.block_count) {
+ u64 region_blocks = table->header.region_blocks;
+
+ vdo_free(table);
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "unexpected index save %u region block count %llu",
+ instance,
+ (unsigned long long) region_blocks);
+ }
+
+ if (table->header.type == RH_TYPE_UNSAVED) {
+ vdo_free(table);
+ reset_index_save_layout(isl, 0);
+ return UDS_SUCCESS;
+ }
+
+ if (table->header.type != RH_TYPE_SAVE) {
+ vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "unexpected index save %u header type %u",
+ instance, table->header.type);
+ vdo_free(table);
+ return UDS_CORRUPT_DATA;
+ }
+
+ result = read_index_save_data(reader, isl, table->header.payload);
+ if (result != UDS_SUCCESS) {
+ vdo_free(table);
+ return vdo_log_error_strerror(result,
+ "unknown index save %u data format",
+ instance);
+ }
+
+ result = reconstruct_index_save(isl, table);
+ vdo_free(table);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(result, "cannot reconstruct index save %u",
+ instance);
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check load_sub_index_regions(struct index_layout *layout)
+{
+ int result;
+ unsigned int j;
+ struct index_save_layout *isl;
+ struct buffered_reader *reader;
+
+ for (j = 0; j < layout->super.max_saves; j++) {
+ isl = &layout->index.saves[j];
+ result = open_region_reader(layout, &isl->index_save, &reader);
+ if (result != UDS_SUCCESS) {
+ vdo_log_error_strerror(result,
+ "cannot get reader for index 0 save %u",
+ j);
+ return result;
+ }
+
+ result = load_index_save(isl, reader, j);
+ uds_free_buffered_reader(reader);
+ if (result != UDS_SUCCESS) {
+ /* Another save slot might be valid. */
+ reset_index_save_layout(isl, 0);
+ continue;
+ }
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int __must_check verify_uds_index_config(struct index_layout *layout,
+ struct uds_configuration *config)
+{
+ int result;
+ struct buffered_reader *reader = NULL;
+ u64 offset;
+
+ offset = layout->super.volume_offset - layout->super.start_offset;
+ result = open_layout_reader(layout, &layout->config, offset, &reader);
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "failed to open config reader");
+
+ result = uds_validate_config_contents(reader, config);
+ if (result != UDS_SUCCESS) {
+ uds_free_buffered_reader(reader);
+ return vdo_log_error_strerror(result, "failed to read config region");
+ }
+
+ uds_free_buffered_reader(reader);
+ return UDS_SUCCESS;
+}
+
+static int load_index_layout(struct index_layout *layout, struct uds_configuration *config)
+{
+ int result;
+ struct buffered_reader *reader;
+
+ result = uds_make_buffered_reader(layout->factory,
+ layout->offset / UDS_BLOCK_SIZE, 1, &reader);
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result, "unable to read superblock");
+
+ result = load_super_block(layout, UDS_BLOCK_SIZE,
+ layout->offset / UDS_BLOCK_SIZE, reader);
+ uds_free_buffered_reader(reader);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = verify_uds_index_config(layout, config);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return load_sub_index_regions(layout);
+}
+
+static int create_layout_factory(struct index_layout *layout,
+ const struct uds_configuration *config)
+{
+ int result;
+ size_t writable_size;
+ struct io_factory *factory = NULL;
+
+ result = uds_make_io_factory(config->bdev, &factory);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ writable_size = uds_get_writable_size(factory) & -UDS_BLOCK_SIZE;
+ if (writable_size < config->size + config->offset) {
+ uds_put_io_factory(factory);
+ vdo_log_error("index storage (%zu) is smaller than the requested size %zu",
+ writable_size, config->size + config->offset);
+ return -ENOSPC;
+ }
+
+ layout->factory = factory;
+ layout->factory_size = (config->size > 0) ? config->size : writable_size;
+ layout->offset = config->offset;
+ return UDS_SUCCESS;
+}
+
+int uds_make_index_layout(struct uds_configuration *config, bool new_layout,
+ struct index_layout **layout_ptr)
+{
+ int result;
+ struct index_layout *layout = NULL;
+ struct save_layout_sizes sizes;
+
+ result = compute_sizes(config, &sizes);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = vdo_allocate(1, struct index_layout, __func__, &layout);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = create_layout_factory(layout, config);
+ if (result != UDS_SUCCESS) {
+ uds_free_index_layout(layout);
+ return result;
+ }
+
+ if (layout->factory_size < sizes.total_size) {
+ vdo_log_error("index storage (%zu) is smaller than the required size %llu",
+ layout->factory_size,
+ (unsigned long long) sizes.total_size);
+ uds_free_index_layout(layout);
+ return -ENOSPC;
+ }
+
+ if (new_layout)
+ result = create_index_layout(layout, config);
+ else
+ result = load_index_layout(layout, config);
+ if (result != UDS_SUCCESS) {
+ uds_free_index_layout(layout);
+ return result;
+ }
+
+ *layout_ptr = layout;
+ return UDS_SUCCESS;
+}
+
+void uds_free_index_layout(struct index_layout *layout)
+{
+ if (layout == NULL)
+ return;
+
+ vdo_free(layout->index.saves);
+ if (layout->factory != NULL)
+ uds_put_io_factory(layout->factory);
+
+ vdo_free(layout);
+}
+
+int uds_replace_index_layout_storage(struct index_layout *layout,
+ struct block_device *bdev)
+{
+ return uds_replace_storage(layout->factory, bdev);
+}
+
+/* Obtain a dm_bufio_client for the volume region. */
+int uds_open_volume_bufio(struct index_layout *layout, size_t block_size,
+ unsigned int reserved_buffers,
+ struct dm_bufio_client **client_ptr)
+{
+ off_t offset = (layout->index.volume.start_block +
+ layout->super.volume_offset -
+ layout->super.start_offset);
+
+ return uds_make_bufio(layout->factory, offset, block_size, reserved_buffers,
+ client_ptr);
+}
+
+u64 uds_get_volume_nonce(struct index_layout *layout)
+{
+ return layout->index.nonce;
+}
diff --git a/drivers/md/dm-vdo/indexer/index-layout.h b/drivers/md/dm-vdo/indexer/index-layout.h
new file mode 100644
index 000000000000..e9ac6f4302d6
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index-layout.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_INDEX_LAYOUT_H
+#define UDS_INDEX_LAYOUT_H
+
+#include "config.h"
+#include "indexer.h"
+#include "io-factory.h"
+
+/*
+ * The index layout describes the format of the index on the underlying storage, and is responsible
+ * for creating those structures when the index is first created. It also validates the index data
+ * when loading a saved index, and updates it when saving the index.
+ */
+
+struct index_layout;
+
+int __must_check uds_make_index_layout(struct uds_configuration *config, bool new_layout,
+ struct index_layout **layout_ptr);
+
+void uds_free_index_layout(struct index_layout *layout);
+
+int __must_check uds_replace_index_layout_storage(struct index_layout *layout,
+ struct block_device *bdev);
+
+int __must_check uds_load_index_state(struct index_layout *layout,
+ struct uds_index *index);
+
+int __must_check uds_save_index_state(struct index_layout *layout,
+ struct uds_index *index);
+
+int __must_check uds_discard_open_chapter(struct index_layout *layout);
+
+u64 __must_check uds_get_volume_nonce(struct index_layout *layout);
+
+int __must_check uds_open_volume_bufio(struct index_layout *layout, size_t block_size,
+ unsigned int reserved_buffers,
+ struct dm_bufio_client **client_ptr);
+
+#endif /* UDS_INDEX_LAYOUT_H */
diff --git a/drivers/md/dm-vdo/indexer/index-page-map.c b/drivers/md/dm-vdo/indexer/index-page-map.c
new file mode 100644
index 000000000000..00b44e07d0c1
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index-page-map.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "index-page-map.h"
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+#include "string-utils.h"
+#include "thread-utils.h"
+
+#include "hash-utils.h"
+#include "indexer.h"
+
+/*
+ * The index page map is conceptually a two-dimensional array indexed by chapter number and index
+ * page number within the chapter. Each entry contains the number of the last delta list on that
+ * index page. In order to save memory, the information for the last page in each chapter is not
+ * recorded, as it is known from the geometry.
+ */
+
+static const u8 PAGE_MAP_MAGIC[] = "ALBIPM02";
+
+#define PAGE_MAP_MAGIC_LENGTH (sizeof(PAGE_MAP_MAGIC) - 1)
+
+static inline u32 get_entry_count(const struct index_geometry *geometry)
+{
+ return geometry->chapters_per_volume * (geometry->index_pages_per_chapter - 1);
+}
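+
+/*
+ * A worked example of the sizing above: a geometry with 1024
+ * chapters_per_volume and 6 index_pages_per_chapter records 5 entries per
+ * chapter, so the map holds 1024 * 5 = 5120 u16 entries.
+ */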
+
+int uds_make_index_page_map(const struct index_geometry *geometry,
+ struct index_page_map **map_ptr)
+{
+ int result;
+ struct index_page_map *map;
+
+ result = vdo_allocate(1, struct index_page_map, "page map", &map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ map->geometry = geometry;
+ map->entries_per_chapter = geometry->index_pages_per_chapter - 1;
+ result = vdo_allocate(get_entry_count(geometry), u16, "Index Page Map Entries",
+ &map->entries);
+ if (result != VDO_SUCCESS) {
+ uds_free_index_page_map(map);
+ return result;
+ }
+
+ *map_ptr = map;
+ return UDS_SUCCESS;
+}
+
+void uds_free_index_page_map(struct index_page_map *map)
+{
+ if (map != NULL) {
+ vdo_free(map->entries);
+ vdo_free(map);
+ }
+}
+
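+/*
+ * Record the last delta list number for one index page. The final page of
+ * each chapter is never recorded; its bound is implied by the geometry.
+ */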
+void uds_update_index_page_map(struct index_page_map *map, u64 virtual_chapter_number,
+ u32 chapter_number, u32 index_page_number,
+ u32 delta_list_number)
+{
+ size_t slot;
+
+ map->last_update = virtual_chapter_number;
+ if (index_page_number == map->entries_per_chapter)
+ return;
+
+ slot = (chapter_number * map->entries_per_chapter) + index_page_number;
+ map->entries[slot] = delta_list_number;
+}
+
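+/*
+ * Find the index page holding a record name's delta list by scanning for
+ * the first page whose recorded last list is at or beyond that list. A name
+ * mapping past every recorded entry falls on the chapter's final page.
+ */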
+u32 uds_find_index_page_number(const struct index_page_map *map,
+ const struct uds_record_name *name, u32 chapter_number)
+{
+ u32 delta_list_number = uds_hash_to_chapter_delta_list(name, map->geometry);
+ u32 slot = chapter_number * map->entries_per_chapter;
+ u32 page;
+
+ for (page = 0; page < map->entries_per_chapter; page++) {
+ if (delta_list_number <= map->entries[slot + page])
+ break;
+ }
+
+ return page;
+}
+
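+/*
+ * Report the inclusive range of delta lists on one index page: the range
+ * starts just past the previous page's last list (or at list 0), and ends
+ * at this page's recorded last list, or at the chapter's final delta list
+ * for the unrecorded last page.
+ */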
+void uds_get_list_number_bounds(const struct index_page_map *map, u32 chapter_number,
+ u32 index_page_number, u32 *lowest_list,
+ u32 *highest_list)
+{
+ u32 slot = chapter_number * map->entries_per_chapter;
+
+ *lowest_list = ((index_page_number == 0) ?
+ 0 : map->entries[slot + index_page_number - 1] + 1);
+ *highest_list = ((index_page_number < map->entries_per_chapter) ?
+ map->entries[slot + index_page_number] :
+ map->geometry->delta_lists_per_chapter - 1);
+}
+
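+/* The save format is the magic string, the last update (u64), and each entry as a u16. */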
+u64 uds_compute_index_page_map_save_size(const struct index_geometry *geometry)
+{
+ return PAGE_MAP_MAGIC_LENGTH + sizeof(u64) + sizeof(u16) * get_entry_count(geometry);
+}
+
+int uds_write_index_page_map(struct index_page_map *map, struct buffered_writer *writer)
+{
+ int result;
+ u8 *buffer;
+ size_t offset = 0;
+ u64 saved_size = uds_compute_index_page_map_save_size(map->geometry);
+ u32 i;
+
+ result = vdo_allocate(saved_size, u8, "page map data", &buffer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ memcpy(buffer, PAGE_MAP_MAGIC, PAGE_MAP_MAGIC_LENGTH);
+ offset += PAGE_MAP_MAGIC_LENGTH;
+ encode_u64_le(buffer, &offset, map->last_update);
+ for (i = 0; i < get_entry_count(map->geometry); i++)
+ encode_u16_le(buffer, &offset, map->entries[i]);
+
+ result = uds_write_to_buffered_writer(writer, buffer, offset);
+ vdo_free(buffer);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return uds_flush_buffered_writer(writer);
+}
+
+int uds_read_index_page_map(struct index_page_map *map, struct buffered_reader *reader)
+{
+ int result;
+ u8 magic[PAGE_MAP_MAGIC_LENGTH];
+ u8 *buffer;
+ size_t offset = 0;
+ u64 saved_size = uds_compute_index_page_map_save_size(map->geometry);
+ u32 i;
+
+ result = vdo_allocate(saved_size, u8, "page map data", &buffer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_read_from_buffered_reader(reader, buffer, saved_size);
+ if (result != UDS_SUCCESS) {
+ vdo_free(buffer);
+ return result;
+ }
+
+ memcpy(&magic, buffer, PAGE_MAP_MAGIC_LENGTH);
+ offset += PAGE_MAP_MAGIC_LENGTH;
+ if (memcmp(magic, PAGE_MAP_MAGIC, PAGE_MAP_MAGIC_LENGTH) != 0) {
+ vdo_free(buffer);
+ return UDS_CORRUPT_DATA;
+ }
+
+ decode_u64_le(buffer, &offset, &map->last_update);
+ for (i = 0; i < get_entry_count(map->geometry); i++)
+ decode_u16_le(buffer, &offset, &map->entries[i]);
+
+ vdo_free(buffer);
+ vdo_log_debug("read index page map, last update %llu",
+ (unsigned long long) map->last_update);
+ return UDS_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/indexer/index-page-map.h b/drivers/md/dm-vdo/indexer/index-page-map.h
new file mode 100644
index 000000000000..b327c0bb9656
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index-page-map.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_INDEX_PAGE_MAP_H
+#define UDS_INDEX_PAGE_MAP_H
+
+#include "geometry.h"
+#include "io-factory.h"
+
+/*
+ * The index maintains a page map which records how the chapter delta lists are distributed among
+ * the index pages for each chapter, allowing the volume to be efficient about reading only pages
+ * that it knows it will need.
+ */
+
+struct index_page_map {
+ const struct index_geometry *geometry;
+ u64 last_update;
+ u32 entries_per_chapter;
+ u16 *entries;
+};
+
+int __must_check uds_make_index_page_map(const struct index_geometry *geometry,
+ struct index_page_map **map_ptr);
+
+void uds_free_index_page_map(struct index_page_map *map);
+
+int __must_check uds_read_index_page_map(struct index_page_map *map,
+ struct buffered_reader *reader);
+
+int __must_check uds_write_index_page_map(struct index_page_map *map,
+ struct buffered_writer *writer);
+
+void uds_update_index_page_map(struct index_page_map *map, u64 virtual_chapter_number,
+ u32 chapter_number, u32 index_page_number,
+ u32 delta_list_number);
+
+u32 __must_check uds_find_index_page_number(const struct index_page_map *map,
+ const struct uds_record_name *name,
+ u32 chapter_number);
+
+void uds_get_list_number_bounds(const struct index_page_map *map, u32 chapter_number,
+ u32 index_page_number, u32 *lowest_list,
+ u32 *highest_list);
+
+u64 uds_compute_index_page_map_save_size(const struct index_geometry *geometry);
+
+#endif /* UDS_INDEX_PAGE_MAP_H */
diff --git a/drivers/md/dm-vdo/indexer/index-session.c b/drivers/md/dm-vdo/indexer/index-session.c
new file mode 100644
index 000000000000..aee0914d604a
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index-session.c
@@ -0,0 +1,739 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "index-session.h"
+
+#include <linux/atomic.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "time-utils.h"
+
+#include "funnel-requestqueue.h"
+#include "index.h"
+#include "index-layout.h"
+
+/*
+ * The index session contains a lock (the request_mutex) which ensures that only one thread can
+ * change the state of its index at a time. The state field indicates the current state of the
+ * index through a set of descriptive flags. The request_mutex must be notified whenever a
+ * non-transient state flag is cleared. The request_mutex also protects the count of requests
+ * currently in progress so that they can be drained when suspending or closing the index.
+ *
+ * If the index session is suspended shortly after opening an index, it may have to suspend during
+ * a rebuild. Depending on the size of the index, a rebuild may take a significant amount of time,
+ * so UDS allows the rebuild to be paused in order to suspend the session in a timely manner. When
+ * the index session is resumed, the rebuild can continue from where it left off. If the index
+ * session is shut down with a suspended rebuild, the rebuild progress is abandoned and the rebuild
+ * will start from the beginning the next time the index is loaded. The mutex and status fields in
+ * the index_load_context are used to record the state of any interrupted rebuild.
+ */
+
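+/*
+ * As an example of the flags below: a typical open/close cycle takes the
+ * state from 0 to IS_FLAG_LOADING, then to IS_FLAG_LOADED, then adds
+ * IS_FLAG_CLOSING while requests drain, and finally returns to 0.
+ */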
+enum index_session_flag_bit {
+ IS_FLAG_BIT_START = 8,
+ /* The session has started loading an index but not completed it. */
+ IS_FLAG_BIT_LOADING = IS_FLAG_BIT_START,
+ /* The session has loaded an index, which can handle requests. */
+ IS_FLAG_BIT_LOADED,
+ /* The session's index has been permanently disabled. */
+ IS_FLAG_BIT_DISABLED,
+ /* The session's index is suspended. */
+ IS_FLAG_BIT_SUSPENDED,
+ /* The session is handling some index state change. */
+ IS_FLAG_BIT_WAITING,
+ /* The session's index is closing and draining requests. */
+ IS_FLAG_BIT_CLOSING,
+ /* The session is being destroyed and is draining requests. */
+ IS_FLAG_BIT_DESTROYING,
+};
+
+enum index_session_flag {
+ IS_FLAG_LOADED = (1 << IS_FLAG_BIT_LOADED),
+ IS_FLAG_LOADING = (1 << IS_FLAG_BIT_LOADING),
+ IS_FLAG_DISABLED = (1 << IS_FLAG_BIT_DISABLED),
+ IS_FLAG_SUSPENDED = (1 << IS_FLAG_BIT_SUSPENDED),
+ IS_FLAG_WAITING = (1 << IS_FLAG_BIT_WAITING),
+ IS_FLAG_CLOSING = (1 << IS_FLAG_BIT_CLOSING),
+ IS_FLAG_DESTROYING = (1 << IS_FLAG_BIT_DESTROYING),
+};
+
+/* Release a reference to an index session. */
+static void release_index_session(struct uds_index_session *index_session)
+{
+ mutex_lock(&index_session->request_mutex);
+ if (--index_session->request_count == 0)
+ uds_broadcast_cond(&index_session->request_cond);
+ mutex_unlock(&index_session->request_mutex);
+}
+
+/*
+ * Acquire a reference to the index session for an asynchronous index request. The reference must
+ * eventually be released with a corresponding call to release_index_session().
+ */
+static int get_index_session(struct uds_index_session *index_session)
+{
+ unsigned int state;
+ int result = UDS_SUCCESS;
+
+ mutex_lock(&index_session->request_mutex);
+ index_session->request_count++;
+ state = index_session->state;
+ mutex_unlock(&index_session->request_mutex);
+
+ if (state == IS_FLAG_LOADED) {
+ return UDS_SUCCESS;
+ } else if (state & IS_FLAG_DISABLED) {
+ result = UDS_DISABLED;
+ } else if ((state & IS_FLAG_LOADING) ||
+ (state & IS_FLAG_SUSPENDED) ||
+ (state & IS_FLAG_WAITING)) {
+ result = -EBUSY;
+ } else {
+ result = UDS_NO_INDEX;
+ }
+
+ release_index_session(index_session);
+ return result;
+}
+
+int uds_launch_request(struct uds_request *request)
+{
+ size_t internal_size;
+ int result;
+
+ if (request->callback == NULL) {
+ vdo_log_error("missing required callback");
+ return -EINVAL;
+ }
+
+ switch (request->type) {
+ case UDS_DELETE:
+ case UDS_POST:
+ case UDS_QUERY:
+ case UDS_QUERY_NO_UPDATE:
+ case UDS_UPDATE:
+ break;
+ default:
+ vdo_log_error("received invalid callback type");
+ return -EINVAL;
+ }
+
+ /* Reset all internal fields before processing. */
+ internal_size =
+ sizeof(struct uds_request) - offsetof(struct uds_request, zone_number);
+ /* FIXME: should be using struct_group for this instead. */
+ memset((char *) request + sizeof(*request) - internal_size, 0, internal_size);
+
+ result = get_index_session(request->session);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ request->found = false;
+ request->unbatched = false;
+ request->index = request->session->index;
+
+ uds_enqueue_request(request, STAGE_TRIAGE);
+ return UDS_SUCCESS;
+}
+
+static void enter_callback_stage(struct uds_request *request)
+{
+ if (request->status != UDS_SUCCESS) {
+ /* All request errors are considered unrecoverable */
+ mutex_lock(&request->session->request_mutex);
+ request->session->state |= IS_FLAG_DISABLED;
+ mutex_unlock(&request->session->request_mutex);
+ }
+
+ uds_request_queue_enqueue(request->session->callback_queue, request);
+}
+
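+/* Update a statistics counter; the ONCE accessors let stats be read without locking. */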
+static inline void count_once(u64 *count_ptr)
+{
+ WRITE_ONCE(*count_ptr, READ_ONCE(*count_ptr) + 1);
+}
+
+static void update_session_stats(struct uds_request *request)
+{
+ struct session_stats *session_stats = &request->session->stats;
+
+ count_once(&session_stats->requests);
+
+ switch (request->type) {
+ case UDS_POST:
+ if (request->found)
+ count_once(&session_stats->posts_found);
+ else
+ count_once(&session_stats->posts_not_found);
+
+ if (request->location == UDS_LOCATION_IN_OPEN_CHAPTER)
+ count_once(&session_stats->posts_found_open_chapter);
+ else if (request->location == UDS_LOCATION_IN_DENSE)
+ count_once(&session_stats->posts_found_dense);
+ else if (request->location == UDS_LOCATION_IN_SPARSE)
+ count_once(&session_stats->posts_found_sparse);
+ break;
+
+ case UDS_UPDATE:
+ if (request->found)
+ count_once(&session_stats->updates_found);
+ else
+ count_once(&session_stats->updates_not_found);
+ break;
+
+ case UDS_DELETE:
+ if (request->found)
+ count_once(&session_stats->deletions_found);
+ else
+ count_once(&session_stats->deletions_not_found);
+ break;
+
+ case UDS_QUERY:
+ case UDS_QUERY_NO_UPDATE:
+ if (request->found)
+ count_once(&session_stats->queries_found);
+ else
+ count_once(&session_stats->queries_not_found);
+ break;
+
+ default:
+ request->status = VDO_ASSERT(false, "unknown request type: %d",
+ request->type);
+ }
+}
+
+static void handle_callbacks(struct uds_request *request)
+{
+ struct uds_index_session *index_session = request->session;
+
+ if (request->status == UDS_SUCCESS)
+ update_session_stats(request);
+
+ request->status = uds_status_to_errno(request->status);
+ request->callback(request);
+ release_index_session(index_session);
+}
+
+static int __must_check make_empty_index_session(struct uds_index_session **index_session_ptr)
+{
+ int result;
+ struct uds_index_session *session;
+
+ result = vdo_allocate(1, struct uds_index_session, __func__, &session);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ mutex_init(&session->request_mutex);
+ uds_init_cond(&session->request_cond);
+ mutex_init(&session->load_context.mutex);
+ uds_init_cond(&session->load_context.cond);
+
+ result = uds_make_request_queue("callbackW", &handle_callbacks,
+ &session->callback_queue);
+ if (result != UDS_SUCCESS) {
+ vdo_free(session);
+ return result;
+ }
+
+ *index_session_ptr = session;
+ return UDS_SUCCESS;
+}
+
+int uds_create_index_session(struct uds_index_session **session)
+{
+ if (session == NULL) {
+ vdo_log_error("missing session pointer");
+ return -EINVAL;
+ }
+
+ return uds_status_to_errno(make_empty_index_session(session));
+}
+
+static int __must_check start_loading_index_session(struct uds_index_session *index_session)
+{
+ int result;
+
+ mutex_lock(&index_session->request_mutex);
+ if (index_session->state & IS_FLAG_SUSPENDED) {
+ vdo_log_info("Index session is suspended");
+ result = -EBUSY;
+ } else if (index_session->state != 0) {
+ vdo_log_info("Index is already loaded");
+ result = -EBUSY;
+ } else {
+ index_session->state |= IS_FLAG_LOADING;
+ result = UDS_SUCCESS;
+ }
+ mutex_unlock(&index_session->request_mutex);
+ return result;
+}
+
+static void finish_loading_index_session(struct uds_index_session *index_session,
+ int result)
+{
+ mutex_lock(&index_session->request_mutex);
+ index_session->state &= ~IS_FLAG_LOADING;
+ if (result == UDS_SUCCESS)
+ index_session->state |= IS_FLAG_LOADED;
+
+ uds_broadcast_cond(&index_session->request_cond);
+ mutex_unlock(&index_session->request_mutex);
+}
+
+static int initialize_index_session(struct uds_index_session *index_session,
+ enum uds_open_index_type open_type)
+{
+ int result;
+ struct uds_configuration *config;
+
+ result = uds_make_configuration(&index_session->parameters, &config);
+ if (result != UDS_SUCCESS) {
+ vdo_log_error_strerror(result, "Failed to allocate config");
+ return result;
+ }
+
+ memset(&index_session->stats, 0, sizeof(index_session->stats));
+ result = uds_make_index(config, open_type, &index_session->load_context,
+ enter_callback_stage, &index_session->index);
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "Failed to make index");
+ else
+ uds_log_configuration(config);
+
+ uds_free_configuration(config);
+ return result;
+}
+
+static const char *get_open_type_string(enum uds_open_index_type open_type)
+{
+ switch (open_type) {
+ case UDS_CREATE:
+ return "creating index";
+ case UDS_LOAD:
+ return "loading or rebuilding index";
+ case UDS_NO_REBUILD:
+ return "loading index";
+ default:
+ return "unknown open method";
+ }
+}
+
+/*
+ * Open an index under the given session. This operation will fail if the
+ * index session is suspended, or if there is already an open index.
+ */
+int uds_open_index(enum uds_open_index_type open_type,
+ const struct uds_parameters *parameters,
+ struct uds_index_session *session)
+{
+ int result;
+ char name[BDEVNAME_SIZE];
+
+ if (parameters == NULL) {
+ vdo_log_error("missing required parameters");
+ return -EINVAL;
+ }
+ if (parameters->bdev == NULL) {
+ vdo_log_error("missing required block device");
+ return -EINVAL;
+ }
+ if (session == NULL) {
+ vdo_log_error("missing required session pointer");
+ return -EINVAL;
+ }
+
+ result = start_loading_index_session(session);
+ if (result != UDS_SUCCESS)
+ return uds_status_to_errno(result);
+
+ session->parameters = *parameters;
+ format_dev_t(name, parameters->bdev->bd_dev);
+ vdo_log_info("%s: %s", get_open_type_string(open_type), name);
+
+ result = initialize_index_session(session, open_type);
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "Failed %s",
+ get_open_type_string(open_type));
+
+ finish_loading_index_session(session, result);
+ return uds_status_to_errno(result);
+}
+
+static void wait_for_no_requests_in_progress(struct uds_index_session *index_session)
+{
+ mutex_lock(&index_session->request_mutex);
+ while (index_session->request_count > 0) {
+ uds_wait_cond(&index_session->request_cond,
+ &index_session->request_mutex);
+ }
+ mutex_unlock(&index_session->request_mutex);
+}
+
+static int __must_check save_index(struct uds_index_session *index_session)
+{
+ wait_for_no_requests_in_progress(index_session);
+ return uds_save_index(index_session->index);
+}
+
+static void suspend_rebuild(struct uds_index_session *session)
+{
+ mutex_lock(&session->load_context.mutex);
+ switch (session->load_context.status) {
+ case INDEX_OPENING:
+ session->load_context.status = INDEX_SUSPENDING;
+
+ /* Wait until the index indicates that it is not replaying. */
+ while ((session->load_context.status != INDEX_SUSPENDED) &&
+ (session->load_context.status != INDEX_READY)) {
+ uds_wait_cond(&session->load_context.cond,
+ &session->load_context.mutex);
+ }
+
+ break;
+
+ case INDEX_READY:
+ /* Index load does not need to be suspended. */
+ break;
+
+ case INDEX_SUSPENDED:
+ case INDEX_SUSPENDING:
+ case INDEX_FREEING:
+ default:
+ /* These cases should not happen. */
+ VDO_ASSERT_LOG_ONLY(false, "Bad load context state %u",
+ session->load_context.status);
+ break;
+ }
+ mutex_unlock(&session->load_context.mutex);
+}
+
+/*
+ * Suspend index operation, draining all current index requests and preventing new index requests
+ * from starting. Optionally save all index data before returning.
+ */
+int uds_suspend_index_session(struct uds_index_session *session, bool save)
+{
+ int result = UDS_SUCCESS;
+ bool no_work = false;
+ bool rebuilding = false;
+
+ /* Wait for any current index state change to complete. */
+ mutex_lock(&session->request_mutex);
+ while (session->state & IS_FLAG_CLOSING)
+ uds_wait_cond(&session->request_cond, &session->request_mutex);
+
+ if ((session->state & IS_FLAG_WAITING) || (session->state & IS_FLAG_DESTROYING)) {
+ no_work = true;
+ vdo_log_info("Index session is already changing state");
+ result = -EBUSY;
+ } else if (session->state & IS_FLAG_SUSPENDED) {
+ no_work = true;
+ } else if (session->state & IS_FLAG_LOADING) {
+ session->state |= IS_FLAG_WAITING;
+ rebuilding = true;
+ } else if (session->state & IS_FLAG_LOADED) {
+ session->state |= IS_FLAG_WAITING;
+ } else {
+ no_work = true;
+ session->state |= IS_FLAG_SUSPENDED;
+ uds_broadcast_cond(&session->request_cond);
+ }
+ mutex_unlock(&session->request_mutex);
+
+ if (no_work)
+ return uds_status_to_errno(result);
+
+ if (rebuilding)
+ suspend_rebuild(session);
+ else if (save)
+ result = save_index(session);
+ else
+ result = uds_flush_index_session(session);
+
+ mutex_lock(&session->request_mutex);
+ session->state &= ~IS_FLAG_WAITING;
+ session->state |= IS_FLAG_SUSPENDED;
+ uds_broadcast_cond(&session->request_cond);
+ mutex_unlock(&session->request_mutex);
+ return uds_status_to_errno(result);
+}
+
+static int replace_device(struct uds_index_session *session, struct block_device *bdev)
+{
+ int result;
+
+ result = uds_replace_index_storage(session->index, bdev);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ session->parameters.bdev = bdev;
+ return UDS_SUCCESS;
+}
+
+/*
+ * Resume index operation after being suspended. If the index is suspended and the supplied block
+ * device differs from the current backing store, the index will start using the new backing store.
+ */
+int uds_resume_index_session(struct uds_index_session *session,
+ struct block_device *bdev)
+{
+ int result = UDS_SUCCESS;
+ bool no_work = false;
+ bool resume_replay = false;
+
+ mutex_lock(&session->request_mutex);
+ if (session->state & IS_FLAG_WAITING) {
+ vdo_log_info("Index session is already changing state");
+ no_work = true;
+ result = -EBUSY;
+ } else if (!(session->state & IS_FLAG_SUSPENDED)) {
+ /* If not suspended, just succeed. */
+ no_work = true;
+ result = UDS_SUCCESS;
+ } else {
+ session->state |= IS_FLAG_WAITING;
+ if (session->state & IS_FLAG_LOADING)
+ resume_replay = true;
+ }
+ mutex_unlock(&session->request_mutex);
+
+ if (no_work)
+ return result;
+
+ if ((session->index != NULL) && (bdev != session->parameters.bdev)) {
+ result = replace_device(session, bdev);
+ if (result != UDS_SUCCESS) {
+ mutex_lock(&session->request_mutex);
+ session->state &= ~IS_FLAG_WAITING;
+ uds_broadcast_cond(&session->request_cond);
+ mutex_unlock(&session->request_mutex);
+ return uds_status_to_errno(result);
+ }
+ }
+
+ if (resume_replay) {
+ mutex_lock(&session->load_context.mutex);
+ switch (session->load_context.status) {
+ case INDEX_SUSPENDED:
+ session->load_context.status = INDEX_OPENING;
+ /* Notify the index to start replaying again. */
+ uds_broadcast_cond(&session->load_context.cond);
+ break;
+
+ case INDEX_READY:
+ /* There is no index rebuild to resume. */
+ break;
+
+ case INDEX_OPENING:
+ case INDEX_SUSPENDING:
+ case INDEX_FREEING:
+ default:
+ /* These cases should not happen; do nothing. */
+ VDO_ASSERT_LOG_ONLY(false, "Bad load context state %u",
+ session->load_context.status);
+ break;
+ }
+ mutex_unlock(&session->load_context.mutex);
+ }
+
+ mutex_lock(&session->request_mutex);
+ session->state &= ~IS_FLAG_WAITING;
+ session->state &= ~IS_FLAG_SUSPENDED;
+ uds_broadcast_cond(&session->request_cond);
+ mutex_unlock(&session->request_mutex);
+ return UDS_SUCCESS;
+}
+
+static int save_and_free_index(struct uds_index_session *index_session)
+{
+ int result = UDS_SUCCESS;
+ bool suspended;
+ struct uds_index *index = index_session->index;
+
+ if (index == NULL)
+ return UDS_SUCCESS;
+
+ mutex_lock(&index_session->request_mutex);
+ suspended = (index_session->state & IS_FLAG_SUSPENDED);
+ mutex_unlock(&index_session->request_mutex);
+
+ if (!suspended) {
+ result = uds_save_index(index);
+ if (result != UDS_SUCCESS)
+ vdo_log_warning_strerror(result,
+ "ignoring error from save_index");
+ }
+ uds_free_index(index);
+ index_session->index = NULL;
+
+ /*
+ * Reset all index state that happens to be in the index
+ * session, so it doesn't affect any future index.
+ */
+ mutex_lock(&index_session->load_context.mutex);
+ index_session->load_context.status = INDEX_OPENING;
+ mutex_unlock(&index_session->load_context.mutex);
+
+ mutex_lock(&index_session->request_mutex);
+ /* Only the suspend bit will remain relevant. */
+ index_session->state &= IS_FLAG_SUSPENDED;
+ mutex_unlock(&index_session->request_mutex);
+
+ return result;
+}
+
+/* Save and close the current index. */
+int uds_close_index(struct uds_index_session *index_session)
+{
+ int result = UDS_SUCCESS;
+
+ /* Wait for any current index state change to complete. */
+ mutex_lock(&index_session->request_mutex);
+ while ((index_session->state & IS_FLAG_WAITING) ||
+ (index_session->state & IS_FLAG_CLOSING)) {
+ uds_wait_cond(&index_session->request_cond,
+ &index_session->request_mutex);
+ }
+
+ if (index_session->state & IS_FLAG_SUSPENDED) {
+ vdo_log_info("Index session is suspended");
+ result = -EBUSY;
+ } else if ((index_session->state & IS_FLAG_DESTROYING) ||
+ !(index_session->state & IS_FLAG_LOADED)) {
+ /* The index doesn't exist, hasn't finished loading, or is being destroyed. */
+ result = UDS_NO_INDEX;
+ } else {
+ index_session->state |= IS_FLAG_CLOSING;
+ }
+ mutex_unlock(&index_session->request_mutex);
+ if (result != UDS_SUCCESS)
+ return uds_status_to_errno(result);
+
+ vdo_log_debug("Closing index");
+ wait_for_no_requests_in_progress(index_session);
+ result = save_and_free_index(index_session);
+ vdo_log_debug("Closed index");
+
+ mutex_lock(&index_session->request_mutex);
+ index_session->state &= ~IS_FLAG_CLOSING;
+ uds_broadcast_cond(&index_session->request_cond);
+ mutex_unlock(&index_session->request_mutex);
+ return uds_status_to_errno(result);
+}
+
+/* This will save and close an open index before destroying the session. */
+int uds_destroy_index_session(struct uds_index_session *index_session)
+{
+ int result;
+ bool load_pending = false;
+
+ vdo_log_debug("Destroying index session");
+
+ /* Wait for any current index state change to complete. */
+ mutex_lock(&index_session->request_mutex);
+ while ((index_session->state & IS_FLAG_WAITING) ||
+ (index_session->state & IS_FLAG_CLOSING)) {
+ uds_wait_cond(&index_session->request_cond,
+ &index_session->request_mutex);
+ }
+
+ if (index_session->state & IS_FLAG_DESTROYING) {
+ mutex_unlock(&index_session->request_mutex);
+ vdo_log_info("Index session is already closing");
+ return -EBUSY;
+ }
+
+ index_session->state |= IS_FLAG_DESTROYING;
+ load_pending = ((index_session->state & IS_FLAG_LOADING) &&
+ (index_session->state & IS_FLAG_SUSPENDED));
+ mutex_unlock(&index_session->request_mutex);
+
+ if (load_pending) {
+ /* Tell the index to terminate the rebuild. */
+ mutex_lock(&index_session->load_context.mutex);
+ if (index_session->load_context.status == INDEX_SUSPENDED) {
+ index_session->load_context.status = INDEX_FREEING;
+ uds_broadcast_cond(&index_session->load_context.cond);
+ }
+ mutex_unlock(&index_session->load_context.mutex);
+
+ /* Wait until the load exits before proceeding. */
+ mutex_lock(&index_session->request_mutex);
+ while (index_session->state & IS_FLAG_LOADING) {
+ uds_wait_cond(&index_session->request_cond,
+ &index_session->request_mutex);
+ }
+ mutex_unlock(&index_session->request_mutex);
+ }
+
+ wait_for_no_requests_in_progress(index_session);
+ result = save_and_free_index(index_session);
+ uds_request_queue_finish(index_session->callback_queue);
+ index_session->callback_queue = NULL;
+ vdo_log_debug("Destroyed index session");
+ vdo_free(index_session);
+ return uds_status_to_errno(result);
+}
+
+/* Wait until all callbacks for index operations are complete. */
+int uds_flush_index_session(struct uds_index_session *index_session)
+{
+ wait_for_no_requests_in_progress(index_session);
+ uds_wait_for_idle_index(index_session->index);
+ return UDS_SUCCESS;
+}
+
+/* Statistics collection is intended to be thread-safe. */
+static void collect_stats(const struct uds_index_session *index_session,
+ struct uds_index_stats *stats)
+{
+ const struct session_stats *session_stats = &index_session->stats;
+
+ stats->current_time = ktime_to_seconds(current_time_ns(CLOCK_REALTIME));
+ stats->posts_found = READ_ONCE(session_stats->posts_found);
+ stats->in_memory_posts_found = READ_ONCE(session_stats->posts_found_open_chapter);
+ stats->dense_posts_found = READ_ONCE(session_stats->posts_found_dense);
+ stats->sparse_posts_found = READ_ONCE(session_stats->posts_found_sparse);
+ stats->posts_not_found = READ_ONCE(session_stats->posts_not_found);
+ stats->updates_found = READ_ONCE(session_stats->updates_found);
+ stats->updates_not_found = READ_ONCE(session_stats->updates_not_found);
+ stats->deletions_found = READ_ONCE(session_stats->deletions_found);
+ stats->deletions_not_found = READ_ONCE(session_stats->deletions_not_found);
+ stats->queries_found = READ_ONCE(session_stats->queries_found);
+ stats->queries_not_found = READ_ONCE(session_stats->queries_not_found);
+ stats->requests = READ_ONCE(session_stats->requests);
+}
+
+int uds_get_index_session_stats(struct uds_index_session *index_session,
+ struct uds_index_stats *stats)
+{
+ if (stats == NULL) {
+ vdo_log_error("received a NULL index stats pointer");
+ return -EINVAL;
+ }
+
+ collect_stats(index_session, stats);
+ if (index_session->index != NULL) {
+ uds_get_index_stats(index_session->index, stats);
+ } else {
+ stats->entries_indexed = 0;
+ stats->memory_used = 0;
+ stats->collisions = 0;
+ stats->entries_discarded = 0;
+ }
+
+ return UDS_SUCCESS;
+}
+
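+/*
+ * Wait on a condition variable: queue on the wait queue before dropping the
+ * mutex so that no wakeup is lost, sleep, then reacquire the mutex. Callers
+ * must recheck their predicate in a loop, as wakeups may be spurious.
+ */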
+void uds_wait_cond(struct cond_var *cv, struct mutex *mutex)
+{
+ DEFINE_WAIT(__wait);
+
+ prepare_to_wait(&cv->wait_queue, &__wait, TASK_IDLE);
+ mutex_unlock(mutex);
+ schedule();
+ finish_wait(&cv->wait_queue, &__wait);
+ mutex_lock(mutex);
+}
diff --git a/drivers/md/dm-vdo/indexer/index-session.h b/drivers/md/dm-vdo/indexer/index-session.h
new file mode 100644
index 000000000000..066648f6e062
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index-session.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_INDEX_SESSION_H
+#define UDS_INDEX_SESSION_H
+
+#include <linux/atomic.h>
+#include <linux/cache.h>
+
+#include "thread-utils.h"
+
+#include "config.h"
+#include "indexer.h"
+
+/*
+ * The index session mediates all interactions with a UDS index. Once the index session is created,
+ * it can be used to open, close, suspend, or recreate an index. It implements the majority of the
+ * functions in the top-level UDS API.
+ *
+ * If any deduplication request fails due to an internal error, the index is marked disabled. It
+ * will not accept any further requests and can only be closed. Closing the index will clear the
+ * disabled flag, and the index can then be reopened and recovered using the same index session.
+ */
+
+struct __aligned(L1_CACHE_BYTES) session_stats {
+ /* Post requests that found an entry */
+ u64 posts_found;
+ /* Post requests found in the open chapter */
+ u64 posts_found_open_chapter;
+ /* Post requests found in the dense index */
+ u64 posts_found_dense;
+ /* Post requests found in the sparse index */
+ u64 posts_found_sparse;
+ /* Post requests that did not find an entry */
+ u64 posts_not_found;
+ /* Update requests that found an entry */
+ u64 updates_found;
+ /* Update requests that did not find an entry */
+ u64 updates_not_found;
+ /* Delete requests that found an entry */
+ u64 deletions_found;
+ /* Delete requests that did not find an entry */
+ u64 deletions_not_found;
+ /* Query requests that found an entry */
+ u64 queries_found;
+ /* Query requests that did not find an entry */
+ u64 queries_not_found;
+ /* Total number of requests */
+ u64 requests;
+};
+
+enum index_suspend_status {
+ /* An index load has started but the index is not ready for use. */
+ INDEX_OPENING = 0,
+ /* The index is able to handle requests. */
+ INDEX_READY,
+ /* The index is attempting to suspend a rebuild. */
+ INDEX_SUSPENDING,
+ /* An index rebuild has been suspended. */
+ INDEX_SUSPENDED,
+ /* An index rebuild is being stopped in order to shut down. */
+ INDEX_FREEING,
+};
+
+struct index_load_context {
+ struct mutex mutex;
+ struct cond_var cond;
+ enum index_suspend_status status;
+};
+
+struct uds_index_session {
+ unsigned int state;
+ struct uds_index *index;
+ struct uds_request_queue *callback_queue;
+ struct uds_parameters parameters;
+ struct index_load_context load_context;
+ struct mutex request_mutex;
+ struct cond_var request_cond;
+ int request_count;
+ struct session_stats stats;
+};
+
+#endif /* UDS_INDEX_SESSION_H */
diff --git a/drivers/md/dm-vdo/indexer/index.c b/drivers/md/dm-vdo/indexer/index.c
new file mode 100644
index 000000000000..1ba767144426
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index.c
@@ -0,0 +1,1388 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+
+#include "index.h"
+
+#include "logger.h"
+#include "memory-alloc.h"
+
+#include "funnel-requestqueue.h"
+#include "hash-utils.h"
+#include "sparse-cache.h"
+
+static const u64 NO_LAST_SAVE = U64_MAX;
+
+/*
+ * When searching for deduplication records, the index first searches the volume index, and then
+ * searches the chapter index for the relevant chapter. If the chapter has been fully committed to
+ * storage, the chapter pages are loaded into the page cache. If the chapter has not yet been
+ * committed (either the open chapter or a recently closed one), the index searches the in-memory
+ * representation of the chapter. Finally, if the volume index does not find a record and the index
+ * is sparse, the index will search the sparse cache.
+ *
+ * The index sends two kinds of messages to coordinate between zones: chapter close messages for the
+ * chapter writer, and sparse cache barrier messages for the sparse cache.
+ *
+ * The chapter writer is responsible for committing chapters of records to storage. Since zones can
+ * get different numbers of records, some zones may fall behind others. Each time a zone fills up
+ * its available space in a chapter, it informs the chapter writer that the chapter is complete,
+ * and also informs all other zones that it has closed the chapter. Each other zone will then close
+ * the chapter immediately, regardless of how full it is, in order to minimize skew between zones.
+ * Once every zone has closed the chapter, the chapter writer will commit that chapter to storage.
+ *
+ * The last zone to close the chapter also removes the oldest chapter from the volume index.
+ * Although that chapter is invalid for zones that have moved on, the existence of the open chapter
+ * means that those zones will never ask the volume index about it. No zone is allowed to get more
+ * than one chapter ahead of any other. If a zone is so far ahead that it tries to close another
+ * chapter before the previous one has been closed by all zones, it is forced to wait.
+ *
+ * The sparse cache relies on having the same set of chapter indexes available to all zones. When a
+ * request wants to add a chapter to the sparse cache, it sends a barrier message to each zone
+ * during the triage stage that acts as a rendezvous. Once every zone has reached the barrier and
+ * paused its operations, the cache membership is changed and each zone is then informed that it
+ * can proceed. More details can be found in the sparse cache documentation.
+ *
+ * If a sparse cache has only one zone, it will not create a triage queue, but it still needs the
+ * barrier message to change the sparse cache membership, so the index simulates the message by
+ * invoking the handler directly.
+ */
+
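+/*
+ * For example, with three zones the first zone to fill chapter N hands its
+ * portion to the chapter writer and sends close messages to the other two
+ * zones, which close chapter N no matter how full they are. When the third
+ * zone reports in, the writer commits chapter N to storage and every zone
+ * continues in chapter N + 1.
+ */
+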
+struct chapter_writer {
+ /* The index to which we belong */
+ struct uds_index *index;
+ /* The thread to do the writing */
+ struct thread *thread;
+ /* The lock protecting the following fields */
+ struct mutex mutex;
+ /* The condition signalled on state changes */
+ struct cond_var cond;
+ /* Set to true to stop the thread */
+ bool stop;
+ /* The result from the most recent write */
+ int result;
+ /* The number of bytes allocated by the chapter writer */
+ size_t memory_size;
+ /* The number of zones which have submitted a chapter for writing */
+ unsigned int zones_to_write;
+ /* Open chapter index used by uds_close_open_chapter() */
+ struct open_chapter_index *open_chapter_index;
+ /* Collated records used by uds_close_open_chapter() */
+ struct uds_volume_record *collated_records;
+ /* The chapters to write (one per zone) */
+ struct open_chapter_zone *chapters[];
+};
+
+static bool is_zone_chapter_sparse(const struct index_zone *zone, u64 virtual_chapter)
+{
+ return uds_is_chapter_sparse(zone->index->volume->geometry,
+ zone->oldest_virtual_chapter,
+ zone->newest_virtual_chapter, virtual_chapter);
+}
+
+static int launch_zone_message(struct uds_zone_message message, unsigned int zone,
+ struct uds_index *index)
+{
+ int result;
+ struct uds_request *request;
+
+ result = vdo_allocate(1, struct uds_request, __func__, &request);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ request->index = index;
+ request->unbatched = true;
+ request->zone_number = zone;
+ request->zone_message = message;
+
+ uds_enqueue_request(request, STAGE_MESSAGE);
+ return UDS_SUCCESS;
+}
+
+static void enqueue_barrier_messages(struct uds_index *index, u64 virtual_chapter)
+{
+ struct uds_zone_message message = {
+ .type = UDS_MESSAGE_SPARSE_CACHE_BARRIER,
+ .virtual_chapter = virtual_chapter,
+ };
+ unsigned int zone;
+
+ for (zone = 0; zone < index->zone_count; zone++) {
+ int result = launch_zone_message(message, zone, index);
+
+ VDO_ASSERT_LOG_ONLY((result == UDS_SUCCESS), "barrier message allocation");
+ }
+}
+
+/*
+ * Determine whether this request should trigger a sparse cache barrier message to change the
+ * membership of the sparse cache. If a change in membership is desired, the function returns the
+ * chapter number to add.
+ */
+static u64 triage_index_request(struct uds_index *index, struct uds_request *request)
+{
+ u64 virtual_chapter;
+ struct index_zone *zone;
+
+ virtual_chapter = uds_lookup_volume_index_name(index->volume_index,
+ &request->record_name);
+ if (virtual_chapter == NO_CHAPTER)
+ return NO_CHAPTER;
+
+ zone = index->zones[request->zone_number];
+ if (!is_zone_chapter_sparse(zone, virtual_chapter))
+ return NO_CHAPTER;
+
+ /*
+ * FIXME: Optimize for a common case by remembering the chapter from the most recent
+ * barrier message and skipping this chapter if it is the same.
+ */
+
+ return virtual_chapter;
+}
+
+/*
+ * Simulate a message to change the sparse cache membership for a single-zone sparse index. This
+ * allows us to forgo the complicated locking required by a multi-zone sparse index. Any other kind
+ * of index does nothing here.
+ */
+static int simulate_index_zone_barrier_message(struct index_zone *zone,
+ struct uds_request *request)
+{
+ u64 sparse_virtual_chapter;
+
+ if ((zone->index->zone_count > 1) ||
+ !uds_is_sparse_index_geometry(zone->index->volume->geometry))
+ return UDS_SUCCESS;
+
+ sparse_virtual_chapter = triage_index_request(zone->index, request);
+ if (sparse_virtual_chapter == NO_CHAPTER)
+ return UDS_SUCCESS;
+
+ return uds_update_sparse_cache(zone, sparse_virtual_chapter);
+}
+
+/* This is the request processing function for the triage queue. */
+static void triage_request(struct uds_request *request)
+{
+ struct uds_index *index = request->index;
+ u64 sparse_virtual_chapter = triage_index_request(index, request);
+
+ if (sparse_virtual_chapter != NO_CHAPTER)
+ enqueue_barrier_messages(index, sparse_virtual_chapter);
+
+ uds_enqueue_request(request, STAGE_INDEX);
+}
+
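+/* Wait until the chapter writer has committed all chapters before this one. */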
+static int finish_previous_chapter(struct uds_index *index, u64 current_chapter_number)
+{
+ int result;
+ struct chapter_writer *writer = index->chapter_writer;
+
+ mutex_lock(&writer->mutex);
+ while (index->newest_virtual_chapter < current_chapter_number)
+ uds_wait_cond(&writer->cond, &writer->mutex);
+ result = writer->result;
+ mutex_unlock(&writer->mutex);
+
+ if (result != UDS_SUCCESS)
+ return vdo_log_error_strerror(result,
+ "Writing of previous open chapter failed");
+
+ return UDS_SUCCESS;
+}
+
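+/* Swap the zone's open and writing chapters once the previous write has finished. */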
+static int swap_open_chapter(struct index_zone *zone)
+{
+ int result;
+ struct open_chapter_zone *temporary_chapter;
+
+ result = finish_previous_chapter(zone->index, zone->newest_virtual_chapter);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ temporary_chapter = zone->open_chapter;
+ zone->open_chapter = zone->writing_chapter;
+ zone->writing_chapter = temporary_chapter;
+ return UDS_SUCCESS;
+}
+
+/*
+ * Inform the chapter writer that this zone is done with this chapter. The chapter will not be
+ * written until all zones have closed it.
+ */
+static unsigned int start_closing_chapter(struct uds_index *index,
+ unsigned int zone_number,
+ struct open_chapter_zone *chapter)
+{
+ unsigned int finished_zones;
+ struct chapter_writer *writer = index->chapter_writer;
+
+ mutex_lock(&writer->mutex);
+ finished_zones = ++writer->zones_to_write;
+ writer->chapters[zone_number] = chapter;
+ uds_broadcast_cond(&writer->cond);
+ mutex_unlock(&writer->mutex);
+
+ return finished_zones;
+}
+
+static int announce_chapter_closed(struct index_zone *zone, u64 closed_chapter)
+{
+ int result;
+ unsigned int i;
+ struct uds_zone_message zone_message = {
+ .type = UDS_MESSAGE_ANNOUNCE_CHAPTER_CLOSED,
+ .virtual_chapter = closed_chapter,
+ };
+
+ for (i = 0; i < zone->index->zone_count; i++) {
+ if (zone->id == i)
+ continue;
+
+ result = launch_zone_message(zone_message, i, zone->index);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ return UDS_SUCCESS;
+}
+
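+/*
+ * Close this zone's open chapter and move to the next one: swap in the
+ * empty chapter, hand the full one to the chapter writer, announce the
+ * close to other zones if this zone is first, and forget expired chapters
+ * once every zone has closed.
+ */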
+static int open_next_chapter(struct index_zone *zone)
+{
+ int result;
+ u64 closed_chapter;
+ u64 expiring;
+ unsigned int finished_zones;
+ u32 expire_chapters;
+
+ vdo_log_debug("closing chapter %llu of zone %u after %u entries (%u short)",
+ (unsigned long long) zone->newest_virtual_chapter, zone->id,
+ zone->open_chapter->size,
+ zone->open_chapter->capacity - zone->open_chapter->size);
+
+ result = swap_open_chapter(zone);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ closed_chapter = zone->newest_virtual_chapter++;
+ uds_set_volume_index_zone_open_chapter(zone->index->volume_index, zone->id,
+ zone->newest_virtual_chapter);
+ uds_reset_open_chapter(zone->open_chapter);
+
+ finished_zones = start_closing_chapter(zone->index, zone->id,
+ zone->writing_chapter);
+ if ((finished_zones == 1) && (zone->index->zone_count > 1)) {
+ result = announce_chapter_closed(zone, closed_chapter);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ expiring = zone->oldest_virtual_chapter;
+ expire_chapters = uds_chapters_to_expire(zone->index->volume->geometry,
+ zone->newest_virtual_chapter);
+ zone->oldest_virtual_chapter += expire_chapters;
+
+ if (finished_zones < zone->index->zone_count)
+ return UDS_SUCCESS;
+
+ while (expire_chapters-- > 0)
+ uds_forget_chapter(zone->index->volume, expiring++);
+
+ return UDS_SUCCESS;
+}
+
+static int handle_chapter_closed(struct index_zone *zone, u64 virtual_chapter)
+{
+ if (zone->newest_virtual_chapter == virtual_chapter)
+ return open_next_chapter(zone);
+
+ return UDS_SUCCESS;
+}
+
+static int dispatch_index_zone_control_request(struct uds_request *request)
+{
+ struct uds_zone_message *message = &request->zone_message;
+ struct index_zone *zone = request->index->zones[request->zone_number];
+
+ switch (message->type) {
+ case UDS_MESSAGE_SPARSE_CACHE_BARRIER:
+ return uds_update_sparse_cache(zone, message->virtual_chapter);
+
+ case UDS_MESSAGE_ANNOUNCE_CHAPTER_CLOSED:
+ return handle_chapter_closed(zone, message->virtual_chapter);
+
+ default:
+ vdo_log_error("invalid message type: %d", message->type);
+ return UDS_INVALID_ARGUMENT;
+ }
+}
+
+static void set_request_location(struct uds_request *request,
+ enum uds_index_region new_location)
+{
+ request->location = new_location;
+ request->found = ((new_location == UDS_LOCATION_IN_OPEN_CHAPTER) ||
+ (new_location == UDS_LOCATION_IN_DENSE) ||
+ (new_location == UDS_LOCATION_IN_SPARSE));
+}
+
+static void set_chapter_location(struct uds_request *request,
+ const struct index_zone *zone, u64 virtual_chapter)
+{
+ request->found = true;
+ if (virtual_chapter == zone->newest_virtual_chapter)
+ request->location = UDS_LOCATION_IN_OPEN_CHAPTER;
+ else if (is_zone_chapter_sparse(zone, virtual_chapter))
+ request->location = UDS_LOCATION_IN_SPARSE;
+ else
+ request->location = UDS_LOCATION_IN_DENSE;
+}
+
+static int search_sparse_cache_in_zone(struct index_zone *zone, struct uds_request *request,
+ u64 virtual_chapter, bool *found)
+{
+ int result;
+ struct volume *volume;
+ u16 record_page_number;
+ u32 chapter;
+
+ result = uds_search_sparse_cache(zone, &request->record_name, &virtual_chapter,
+ &record_page_number);
+ if ((result != UDS_SUCCESS) || (virtual_chapter == NO_CHAPTER))
+ return result;
+
+ request->virtual_chapter = virtual_chapter;
+ volume = zone->index->volume;
+ chapter = uds_map_to_physical_chapter(volume->geometry, virtual_chapter);
+ return uds_search_cached_record_page(volume, request, chapter,
+ record_page_number, found);
+}
+
+static int get_record_from_zone(struct index_zone *zone, struct uds_request *request,
+ bool *found)
+{
+ struct volume *volume;
+
+ if (request->location == UDS_LOCATION_RECORD_PAGE_LOOKUP) {
+ *found = true;
+ return UDS_SUCCESS;
+ } else if (request->location == UDS_LOCATION_UNAVAILABLE) {
+ *found = false;
+ return UDS_SUCCESS;
+ }
+
+ if (request->virtual_chapter == zone->newest_virtual_chapter) {
+ uds_search_open_chapter(zone->open_chapter, &request->record_name,
+ &request->old_metadata, found);
+ return UDS_SUCCESS;
+ }
+
+ if ((zone->newest_virtual_chapter > 0) &&
+ (request->virtual_chapter == (zone->newest_virtual_chapter - 1)) &&
+ (zone->writing_chapter->size > 0)) {
+ uds_search_open_chapter(zone->writing_chapter, &request->record_name,
+ &request->old_metadata, found);
+ return UDS_SUCCESS;
+ }
+
+ volume = zone->index->volume;
+ if (is_zone_chapter_sparse(zone, request->virtual_chapter) &&
+ uds_sparse_cache_contains(volume->sparse_cache, request->virtual_chapter,
+ request->zone_number))
+ return search_sparse_cache_in_zone(zone, request,
+ request->virtual_chapter, found);
+
+ return uds_search_volume_page_cache(volume, request, found);
+}
+
+static int put_record_in_zone(struct index_zone *zone, struct uds_request *request,
+ const struct uds_record_data *metadata)
+{
+ unsigned int remaining;
+
+ remaining = uds_put_open_chapter(zone->open_chapter, &request->record_name,
+ metadata);
+ if (remaining == 0)
+ return open_next_chapter(zone);
+
+ return UDS_SUCCESS;
+}
+
+static int search_index_zone(struct index_zone *zone, struct uds_request *request)
+{
+ int result;
+ struct volume_index_record record;
+ bool overflow_record, found = false;
+ struct uds_record_data *metadata;
+ u64 chapter;
+
+ result = uds_get_volume_index_record(zone->index->volume_index,
+ &request->record_name, &record);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (record.is_found) {
+ if (request->requeued && request->virtual_chapter != record.virtual_chapter)
+ set_request_location(request, UDS_LOCATION_UNKNOWN);
+
+ request->virtual_chapter = record.virtual_chapter;
+ result = get_record_from_zone(zone, request, &found);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ if (found)
+ set_chapter_location(request, zone, record.virtual_chapter);
+
+ /*
+ * If a record has overflowed a chapter index in more than one chapter (or overflowed in
+ * one chapter and collided with an existing record), it will exist as a collision record
+ * in the volume index, but we won't find it in the volume. This case needs special
+ * handling.
+ */
+ overflow_record = (record.is_found && record.is_collision && !found);
+ chapter = zone->newest_virtual_chapter;
+ if (found || overflow_record) {
+ if ((request->type == UDS_QUERY_NO_UPDATE) ||
+ ((request->type == UDS_QUERY) && overflow_record)) {
+ /* There is nothing left to do. */
+ return UDS_SUCCESS;
+ }
+
+ if (record.virtual_chapter != chapter) {
+ /*
+ * Update the volume index to reference the new chapter for the block. If
+ * the record had been deleted or dropped from the chapter index, it will
+ * be back.
+ */
+ result = uds_set_volume_index_record_chapter(&record, chapter);
+ } else if (request->type != UDS_UPDATE) {
+ /* The record is already in the open chapter. */
+ return UDS_SUCCESS;
+ }
+ } else {
+ /*
+ * The record wasn't in the volume index, so check whether the
+ * name is in a cached sparse chapter. If we found the name on
+ * a previous search, use that result instead.
+ */
+ if (request->location == UDS_LOCATION_RECORD_PAGE_LOOKUP) {
+ found = true;
+ } else if (request->location == UDS_LOCATION_UNAVAILABLE) {
+ found = false;
+ } else if (uds_is_sparse_index_geometry(zone->index->volume->geometry) &&
+ !uds_is_volume_index_sample(zone->index->volume_index,
+ &request->record_name)) {
+ result = search_sparse_cache_in_zone(zone, request, NO_CHAPTER,
+ &found);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ if (found)
+ set_request_location(request, UDS_LOCATION_IN_SPARSE);
+
+ if ((request->type == UDS_QUERY_NO_UPDATE) ||
+ ((request->type == UDS_QUERY) && !found)) {
+ /* There is nothing left to do. */
+ return UDS_SUCCESS;
+ }
+
+ /*
+ * Add a new entry to the volume index referencing the open chapter. This needs to
+ * be done both for new records, and for records from cached sparse chapters.
+ */
+ result = uds_put_volume_index_record(&record, chapter);
+ }
+
+ if (result == UDS_OVERFLOW) {
+ /*
+ * The volume index encountered a delta list overflow. The condition was already
+ * logged. We will go on without adding the record to the open chapter.
+ */
+ return UDS_SUCCESS;
+ }
+
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (!found || (request->type == UDS_UPDATE)) {
+ /* This is a new record or we're updating an existing record. */
+ metadata = &request->new_metadata;
+ } else {
+ /* Move the existing record to the open chapter. */
+ metadata = &request->old_metadata;
+ }
+
+ return put_record_in_zone(zone, request, metadata);
+}
+
+static int remove_from_index_zone(struct index_zone *zone, struct uds_request *request)
+{
+ int result;
+ struct volume_index_record record;
+
+ result = uds_get_volume_index_record(zone->index->volume_index,
+ &request->record_name, &record);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (!record.is_found)
+ return UDS_SUCCESS;
+
+ /* If the request was requeued, check whether the saved state is still valid. */
+
+ if (record.is_collision) {
+ set_chapter_location(request, zone, record.virtual_chapter);
+ } else {
+ /* Non-collision records are hints, so resolve the name in the chapter. */
+ bool found;
+
+ if (request->requeued && request->virtual_chapter != record.virtual_chapter)
+ set_request_location(request, UDS_LOCATION_UNKNOWN);
+
+ request->virtual_chapter = record.virtual_chapter;
+ result = get_record_from_zone(zone, request, &found);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (!found) {
+ /* There is no record to remove. */
+ return UDS_SUCCESS;
+ }
+ }
+
+ set_chapter_location(request, zone, record.virtual_chapter);
+
+ /*
+ * Delete the volume index entry for the named record only. Note that a later search might
+ * return stale advice if there is a colliding name in the same chapter, but that is a
+ * very rare case (1 in 2^21).
+ */
+ result = uds_remove_volume_index_record(&record);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ /*
+ * If the record is in the open chapter, we must remove it or mark it deleted to avoid
+ * trouble if the record is added again later.
+ */
+ if (request->location == UDS_LOCATION_IN_OPEN_CHAPTER)
+ uds_remove_from_open_chapter(zone->open_chapter, &request->record_name);
+
+ return UDS_SUCCESS;
+}
+
+static int dispatch_index_request(struct uds_index *index, struct uds_request *request)
+{
+ int result;
+ struct index_zone *zone = index->zones[request->zone_number];
+
+ if (!request->requeued) {
+ result = simulate_index_zone_barrier_message(zone, request);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ switch (request->type) {
+ case UDS_POST:
+ case UDS_UPDATE:
+ case UDS_QUERY:
+ case UDS_QUERY_NO_UPDATE:
+ result = search_index_zone(zone, request);
+ break;
+
+ case UDS_DELETE:
+ result = remove_from_index_zone(zone, request);
+ break;
+
+ default:
+ result = vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
+ "invalid request type: %d",
+ request->type);
+ break;
+ }
+
+ return result;
+}
+
+/* This is the request processing function invoked by each zone's thread. */
+static void execute_zone_request(struct uds_request *request)
+{
+ int result;
+ struct uds_index *index = request->index;
+
+ if (request->zone_message.type != UDS_MESSAGE_NONE) {
+ result = dispatch_index_zone_control_request(request);
+ if (result != UDS_SUCCESS) {
+ vdo_log_error_strerror(result, "error executing message: %d",
+ request->zone_message.type);
+ }
+
+ /* Once the message is processed it can be freed. */
+ vdo_free(vdo_forget(request));
+ return;
+ }
+
+ index->need_to_save = true;
+ if (request->requeued && (request->status != UDS_SUCCESS)) {
+ set_request_location(request, UDS_LOCATION_UNAVAILABLE);
+ index->callback(request);
+ return;
+ }
+
+ result = dispatch_index_request(index, request);
+ if (result == UDS_QUEUED) {
+ /* The request has been requeued so don't let it complete. */
+ return;
+ }
+
+ if (!request->found)
+ set_request_location(request, UDS_LOCATION_UNAVAILABLE);
+
+ request->status = result;
+ index->callback(request);
+}
+
+static int initialize_index_queues(struct uds_index *index,
+ const struct index_geometry *geometry)
+{
+ int result;
+ unsigned int i;
+
+ for (i = 0; i < index->zone_count; i++) {
+ result = uds_make_request_queue("indexW", &execute_zone_request,
+ &index->zone_queues[i]);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ /* The triage queue is only needed for sparse multi-zone indexes. */
+ if ((index->zone_count > 1) && uds_is_sparse_index_geometry(geometry)) {
+ result = uds_make_request_queue("triageW", &triage_request,
+ &index->triage_queue);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ return UDS_SUCCESS;
+}
+
+/* This is the driver function for the chapter writer thread. */
+static void close_chapters(void *arg)
+{
+ int result;
+ struct chapter_writer *writer = arg;
+ struct uds_index *index = writer->index;
+
+ vdo_log_debug("chapter writer starting");
+ mutex_lock(&writer->mutex);
+ for (;;) {
+ while (writer->zones_to_write < index->zone_count) {
+ if (writer->stop && (writer->zones_to_write == 0)) {
+ /*
+ * We've been told to stop, and all of the zones are in the same
+ * open chapter, so we can exit now.
+ */
+ mutex_unlock(&writer->mutex);
+ vdo_log_debug("chapter writer stopping");
+ return;
+ }
+ uds_wait_cond(&writer->cond, &writer->mutex);
+ }
+
+ /*
+ * Release the lock while closing a chapter. We probably don't need to do this, but
+ * it seems safer in principle. It's OK to access the chapter and chapter_number
+ * fields without the lock since those aren't allowed to change until we're done.
+ */
+ mutex_unlock(&writer->mutex);
+
+ if (index->has_saved_open_chapter) {
+ /*
+ * Remove the saved open chapter the first time we close an open chapter
+ * after loading from a clean shutdown, or after doing a clean save. The
+ * lack of the saved open chapter will indicate that a recovery is
+ * necessary.
+ */
+ index->has_saved_open_chapter = false;
+ result = uds_discard_open_chapter(index->layout);
+ if (result == UDS_SUCCESS)
+ vdo_log_debug("Discarding saved open chapter");
+ }
+
+ result = uds_close_open_chapter(writer->chapters, index->zone_count,
+ index->volume,
+ writer->open_chapter_index,
+ writer->collated_records,
+ index->newest_virtual_chapter);
+
+ mutex_lock(&writer->mutex);
+ index->newest_virtual_chapter++;
+ index->oldest_virtual_chapter +=
+ uds_chapters_to_expire(index->volume->geometry,
+ index->newest_virtual_chapter);
+ writer->result = result;
+ writer->zones_to_write = 0;
+ uds_broadcast_cond(&writer->cond);
+ }
+}
+
+static void stop_chapter_writer(struct chapter_writer *writer)
+{
+ struct thread *writer_thread = NULL;
+
+ mutex_lock(&writer->mutex);
+ if (writer->thread != NULL) {
+ writer_thread = writer->thread;
+ writer->thread = NULL;
+ writer->stop = true;
+ uds_broadcast_cond(&writer->cond);
+ }
+ mutex_unlock(&writer->mutex);
+
+ if (writer_thread != NULL)
+ vdo_join_threads(writer_thread);
+}
+
+static void free_chapter_writer(struct chapter_writer *writer)
+{
+ if (writer == NULL)
+ return;
+
+ stop_chapter_writer(writer);
+ uds_free_open_chapter_index(writer->open_chapter_index);
+ vdo_free(writer->collated_records);
+ vdo_free(writer);
+}
+
+static int make_chapter_writer(struct uds_index *index,
+ struct chapter_writer **writer_ptr)
+{
+ int result;
+ struct chapter_writer *writer;
+ size_t collated_records_size =
+ (sizeof(struct uds_volume_record) * index->volume->geometry->records_per_chapter);
+
+ result = vdo_allocate_extended(struct chapter_writer, index->zone_count,
+ struct open_chapter_zone *, "Chapter Writer",
+ &writer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ writer->index = index;
+ mutex_init(&writer->mutex);
+ uds_init_cond(&writer->cond);
+
+ result = vdo_allocate_cache_aligned(collated_records_size, "collated records",
+ &writer->collated_records);
+ if (result != VDO_SUCCESS) {
+ free_chapter_writer(writer);
+ return result;
+ }
+
+ result = uds_make_open_chapter_index(&writer->open_chapter_index,
+ index->volume->geometry,
+ index->volume->nonce);
+ if (result != UDS_SUCCESS) {
+ free_chapter_writer(writer);
+ return result;
+ }
+
+ writer->memory_size = (sizeof(struct chapter_writer) +
+ index->zone_count * sizeof(struct open_chapter_zone *) +
+ collated_records_size +
+ writer->open_chapter_index->memory_size);
+
+ result = vdo_create_thread(close_chapters, writer, "writer", &writer->thread);
+ if (result != VDO_SUCCESS) {
+ free_chapter_writer(writer);
+ return result;
+ }
+
+ *writer_ptr = writer;
+ return UDS_SUCCESS;
+}
+
+static int load_index(struct uds_index *index)
+{
+ int result;
+ u64 last_save_chapter;
+
+ result = uds_load_index_state(index->layout, index);
+ if (result != UDS_SUCCESS)
+ return UDS_INDEX_NOT_SAVED_CLEANLY;
+
+ last_save_chapter = ((index->last_save != NO_LAST_SAVE) ? index->last_save : 0);
+
+ vdo_log_info("loaded index from chapter %llu through chapter %llu",
+ (unsigned long long) index->oldest_virtual_chapter,
+ (unsigned long long) last_save_chapter);
+
+ return UDS_SUCCESS;
+}
+
+static int rebuild_index_page_map(struct uds_index *index, u64 vcn)
+{
+ int result;
+ struct delta_index_page *chapter_index_page;
+ struct index_geometry *geometry = index->volume->geometry;
+ u32 chapter = uds_map_to_physical_chapter(geometry, vcn);
+ u32 expected_list_number = 0;
+ u32 index_page_number;
+ u32 lowest_delta_list;
+ u32 highest_delta_list;
+
+ for (index_page_number = 0;
+ index_page_number < geometry->index_pages_per_chapter;
+ index_page_number++) {
+ result = uds_get_volume_index_page(index->volume, chapter,
+ index_page_number,
+ &chapter_index_page);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(result,
+ "failed to read index page %u in chapter %u",
+ index_page_number, chapter);
+ }
+
+ lowest_delta_list = chapter_index_page->lowest_list_number;
+ highest_delta_list = chapter_index_page->highest_list_number;
+ if (lowest_delta_list != expected_list_number) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "chapter %u index page %u is corrupt",
+ chapter, index_page_number);
+ }
+
+ uds_update_index_page_map(index->volume->index_page_map, vcn, chapter,
+ index_page_number, highest_delta_list);
+ expected_list_number = highest_delta_list + 1;
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int replay_record(struct uds_index *index, const struct uds_record_name *name,
+ u64 virtual_chapter, bool will_be_sparse_chapter)
+{
+ int result;
+ struct volume_index_record record;
+ bool update_record;
+
+ if (will_be_sparse_chapter &&
+ !uds_is_volume_index_sample(index->volume_index, name)) {
+ /*
+ * This entry will be in a sparse chapter after the rebuild completes, and it is
+ * not a sample, so just skip over it.
+ */
+ return UDS_SUCCESS;
+ }
+
+ result = uds_get_volume_index_record(index->volume_index, name, &record);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (record.is_found) {
+ if (record.is_collision) {
+ if (record.virtual_chapter == virtual_chapter) {
+ /* The record is already correct. */
+ return UDS_SUCCESS;
+ }
+
+ update_record = true;
+ } else if (record.virtual_chapter == virtual_chapter) {
+ /*
+ * There is a volume index entry pointing to the current chapter, but we
+ * don't know if it is for the same name as the one we are currently
+ * working on or not. For now, we're just going to assume that it isn't.
+ * This will create one extra collision record if there was a deleted
+ * record in the current chapter.
+ */
+ update_record = false;
+ } else {
+ /*
+ * If we're rebuilding, we don't normally want to go to disk to see if the
+ * record exists, since we will likely have just read the record from disk
+ * (i.e. we know it's there). The exception to this is when we find an
+ * entry in the volume index that has a different chapter. In this case, we
+ * need to search that chapter to determine if the volume index entry was
+ * for the same record or a different one.
+ */
+ result = uds_search_volume_page_cache_for_rebuild(index->volume,
+ name,
+ record.virtual_chapter,
+ &update_record);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+ } else {
+ update_record = false;
+ }
+
+ if (update_record) {
+ /*
+ * Update the volume index to reference the new chapter for the block. If the
+ * record had been deleted or dropped from the chapter index, it will be back.
+ */
+ result = uds_set_volume_index_record_chapter(&record, virtual_chapter);
+ } else {
+ /*
+ * Add a new entry to the volume index referencing the open chapter. This should be
+ * done regardless of whether this is a brand new record or a sparse record (i.e.,
+ * one that doesn't exist in the index but does on disk), since for a sparse record
+ * we would want to un-sparsify it if it did exist.
+ */
+ result = uds_put_volume_index_record(&record, virtual_chapter);
+ }
+
+ if ((result == UDS_DUPLICATE_NAME) || (result == UDS_OVERFLOW)) {
+ /* The rebuilt index will lose these records. */
+ return UDS_SUCCESS;
+ }
+
+ return result;
+}
+
+static bool check_for_suspend(struct uds_index *index)
+{
+ bool closing;
+
+ if (index->load_context == NULL)
+ return false;
+
+ mutex_lock(&index->load_context->mutex);
+ if (index->load_context->status != INDEX_SUSPENDING) {
+ mutex_unlock(&index->load_context->mutex);
+ return false;
+ }
+
+ /* Notify that we are suspended and wait for the resume. */
+ index->load_context->status = INDEX_SUSPENDED;
+ uds_broadcast_cond(&index->load_context->cond);
+
+ while ((index->load_context->status != INDEX_OPENING) &&
+ (index->load_context->status != INDEX_FREEING))
+ uds_wait_cond(&index->load_context->cond, &index->load_context->mutex);
+
+ closing = (index->load_context->status == INDEX_FREEING);
+ mutex_unlock(&index->load_context->mutex);
+ return closing;
+}
+
+static int replay_chapter(struct uds_index *index, u64 virtual, bool sparse)
+{
+ int result;
+ u32 i;
+ u32 j;
+ const struct index_geometry *geometry;
+ u32 physical_chapter;
+
+ if (check_for_suspend(index)) {
+ vdo_log_info("Replay interrupted by index shutdown at chapter %llu",
+ (unsigned long long) virtual);
+ return -EBUSY;
+ }
+
+ geometry = index->volume->geometry;
+ physical_chapter = uds_map_to_physical_chapter(geometry, virtual);
+ uds_prefetch_volume_chapter(index->volume, physical_chapter);
+ uds_set_volume_index_open_chapter(index->volume_index, virtual);
+
+ result = rebuild_index_page_map(index, virtual);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(result,
+ "could not rebuild index page map for chapter %u",
+ physical_chapter);
+ }
+
+ for (i = 0; i < geometry->record_pages_per_chapter; i++) {
+ u8 *record_page;
+ u32 record_page_number;
+
+ record_page_number = geometry->index_pages_per_chapter + i;
+ result = uds_get_volume_record_page(index->volume, physical_chapter,
+ record_page_number, &record_page);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(result, "could not get page %d",
+ record_page_number);
+ }
+
+ for (j = 0; j < geometry->records_per_page; j++) {
+ const u8 *name_bytes;
+ struct uds_record_name name;
+
+ name_bytes = record_page + (j * BYTES_PER_RECORD);
+ memcpy(&name.name, name_bytes, UDS_RECORD_NAME_SIZE);
+ result = replay_record(index, &name, virtual, sparse);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int replay_volume(struct uds_index *index)
+{
+ int result;
+ u64 old_map_update;
+ u64 new_map_update;
+ u64 virtual;
+ u64 from_virtual = index->oldest_virtual_chapter;
+ u64 upto_virtual = index->newest_virtual_chapter;
+ bool will_be_sparse;
+
+ vdo_log_info("Replaying volume from chapter %llu through chapter %llu",
+ (unsigned long long) from_virtual,
+ (unsigned long long) upto_virtual);
+
+ /*
+ * The index failed to load, so the volume index is empty. Add records to the volume index
+ * in order, skipping non-hooks in chapters which will be sparse to save time.
+ *
+ * Go through each record page of each chapter and add the records back to the volume
+ * index. This should not cause anything to be written to either the open chapter or the
+ * on-disk volume. Also skip the on-disk chapter corresponding to upto_virtual, as this
+ * would have already been purged from the volume index when the chapter was opened.
+ *
+ * Also, go through each index page for each chapter and rebuild the index page map.
+ */
+ old_map_update = index->volume->index_page_map->last_update;
+ for (virtual = from_virtual; virtual < upto_virtual; virtual++) {
+ will_be_sparse = uds_is_chapter_sparse(index->volume->geometry,
+ from_virtual, upto_virtual,
+ virtual);
+ result = replay_chapter(index, virtual, will_be_sparse);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ /* Also reap the chapter being replaced by the open chapter. */
+ uds_set_volume_index_open_chapter(index->volume_index, upto_virtual);
+
+ new_map_update = index->volume->index_page_map->last_update;
+ if (new_map_update != old_map_update) {
+ vdo_log_info("replay changed index page map update from %llu to %llu",
+ (unsigned long long) old_map_update,
+ (unsigned long long) new_map_update);
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int rebuild_index(struct uds_index *index)
+{
+ int result;
+ u64 lowest;
+ u64 highest;
+ bool is_empty = false;
+ u32 chapters_per_volume = index->volume->geometry->chapters_per_volume;
+
+ index->volume->lookup_mode = LOOKUP_FOR_REBUILD;
+ result = uds_find_volume_chapter_boundaries(index->volume, &lowest, &highest,
+ &is_empty);
+ if (result != UDS_SUCCESS) {
+ return vdo_log_fatal_strerror(result,
+ "cannot rebuild index: unknown volume chapter boundaries");
+ }
+
+ if (is_empty) {
+ index->newest_virtual_chapter = 0;
+ index->oldest_virtual_chapter = 0;
+ index->volume->lookup_mode = LOOKUP_NORMAL;
+ return UDS_SUCCESS;
+ }
+
+ index->newest_virtual_chapter = highest + 1;
+ index->oldest_virtual_chapter = lowest;
+ if (index->newest_virtual_chapter ==
+ (index->oldest_virtual_chapter + chapters_per_volume)) {
+ /* Skip the chapter shadowed by the open chapter. */
+ index->oldest_virtual_chapter++;
+ }
+
+ result = replay_volume(index);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ index->volume->lookup_mode = LOOKUP_NORMAL;
+ return UDS_SUCCESS;
+}
+
+static void free_index_zone(struct index_zone *zone)
+{
+ if (zone == NULL)
+ return;
+
+ uds_free_open_chapter(zone->open_chapter);
+ uds_free_open_chapter(zone->writing_chapter);
+ vdo_free(zone);
+}
+
+static int make_index_zone(struct uds_index *index, unsigned int zone_number)
+{
+ int result;
+ struct index_zone *zone;
+
+ result = vdo_allocate(1, struct index_zone, "index zone", &zone);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_make_open_chapter(index->volume->geometry, index->zone_count,
+ &zone->open_chapter);
+ if (result != UDS_SUCCESS) {
+ free_index_zone(zone);
+ return result;
+ }
+
+ result = uds_make_open_chapter(index->volume->geometry, index->zone_count,
+ &zone->writing_chapter);
+ if (result != UDS_SUCCESS) {
+ free_index_zone(zone);
+ return result;
+ }
+
+ zone->index = index;
+ zone->id = zone_number;
+ index->zones[zone_number] = zone;
+
+ return UDS_SUCCESS;
+}
+
+int uds_make_index(struct uds_configuration *config, enum uds_open_index_type open_type,
+ struct index_load_context *load_context, index_callback_fn callback,
+ struct uds_index **new_index)
+{
+ int result;
+ bool loaded = false;
+ bool new = (open_type == UDS_CREATE);
+ struct uds_index *index = NULL;
+ struct index_zone *zone;
+ u64 nonce;
+ unsigned int z;
+
+ result = vdo_allocate_extended(struct uds_index, config->zone_count,
+ struct uds_request_queue *, "index", &index);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ index->zone_count = config->zone_count;
+
+ result = uds_make_index_layout(config, new, &index->layout);
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return result;
+ }
+
+ result = vdo_allocate(index->zone_count, struct index_zone *, "zones",
+ &index->zones);
+ if (result != VDO_SUCCESS) {
+ uds_free_index(index);
+ return result;
+ }
+
+ result = uds_make_volume(config, index->layout, &index->volume);
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return result;
+ }
+
+ index->volume->lookup_mode = LOOKUP_NORMAL;
+ for (z = 0; z < index->zone_count; z++) {
+ result = make_index_zone(index, z);
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return vdo_log_error_strerror(result,
+ "Could not create index zone");
+ }
+ }
+
+ nonce = uds_get_volume_nonce(index->layout);
+ result = uds_make_volume_index(config, nonce, &index->volume_index);
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return vdo_log_error_strerror(result, "could not make volume index");
+ }
+
+ index->load_context = load_context;
+ index->callback = callback;
+
+ result = initialize_index_queues(index, config->geometry);
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return result;
+ }
+
+ result = make_chapter_writer(index, &index->chapter_writer);
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return result;
+ }
+
+ if (!new) {
+ result = load_index(index);
+ switch (result) {
+ case UDS_SUCCESS:
+ loaded = true;
+ break;
+ case -ENOMEM:
+ /* We should not try a rebuild for this error. */
+ vdo_log_error_strerror(result, "index could not be loaded");
+ break;
+ default:
+ vdo_log_error_strerror(result, "index could not be loaded");
+ if (open_type == UDS_LOAD) {
+ result = rebuild_index(index);
+ if (result != UDS_SUCCESS) {
+ vdo_log_error_strerror(result,
+ "index could not be rebuilt");
+ }
+ }
+ break;
+ }
+ }
+
+ if (result != UDS_SUCCESS) {
+ uds_free_index(index);
+ return vdo_log_error_strerror(result, "fatal error in %s()", __func__);
+ }
+
+ for (z = 0; z < index->zone_count; z++) {
+ zone = index->zones[z];
+ zone->oldest_virtual_chapter = index->oldest_virtual_chapter;
+ zone->newest_virtual_chapter = index->newest_virtual_chapter;
+ }
+
+ if (index->load_context != NULL) {
+ mutex_lock(&index->load_context->mutex);
+ index->load_context->status = INDEX_READY;
+ /*
+ * If we get here, suspend is meaningless, but notify any thread trying to suspend
+ * us so it doesn't hang.
+ */
+ uds_broadcast_cond(&index->load_context->cond);
+ mutex_unlock(&index->load_context->mutex);
+ }
+
+ index->has_saved_open_chapter = loaded;
+ index->need_to_save = !loaded;
+ *new_index = index;
+ return UDS_SUCCESS;
+}
+
+void uds_free_index(struct uds_index *index)
+{
+ unsigned int i;
+
+ if (index == NULL)
+ return;
+
+ uds_request_queue_finish(index->triage_queue);
+ for (i = 0; i < index->zone_count; i++)
+ uds_request_queue_finish(index->zone_queues[i]);
+
+ free_chapter_writer(index->chapter_writer);
+
+ uds_free_volume_index(index->volume_index);
+ if (index->zones != NULL) {
+ for (i = 0; i < index->zone_count; i++)
+ free_index_zone(index->zones[i]);
+ vdo_free(index->zones);
+ }
+
+ uds_free_volume(index->volume);
+ uds_free_index_layout(vdo_forget(index->layout));
+ vdo_free(index);
+}
+
+/* Wait for the chapter writer to complete any outstanding writes. */
+void uds_wait_for_idle_index(struct uds_index *index)
+{
+ struct chapter_writer *writer = index->chapter_writer;
+
+ mutex_lock(&writer->mutex);
+ while (writer->zones_to_write > 0)
+ uds_wait_cond(&writer->cond, &writer->mutex);
+ mutex_unlock(&writer->mutex);
+}
+
+/* This function assumes that all requests have been drained. */
+int uds_save_index(struct uds_index *index)
+{
+ int result;
+
+ if (!index->need_to_save)
+ return UDS_SUCCESS;
+
+ uds_wait_for_idle_index(index);
+ index->prev_save = index->last_save;
+ index->last_save = ((index->newest_virtual_chapter == 0) ?
+ NO_LAST_SAVE : index->newest_virtual_chapter - 1);
+ vdo_log_info("beginning save (vcn %llu)", (unsigned long long) index->last_save);
+
+ result = uds_save_index_state(index->layout, index);
+ if (result != UDS_SUCCESS) {
+ vdo_log_info("save index failed");
+ index->last_save = index->prev_save;
+ } else {
+ index->has_saved_open_chapter = true;
+ index->need_to_save = false;
+ vdo_log_info("finished save (vcn %llu)",
+ (unsigned long long) index->last_save);
+ }
+
+ return result;
+}
+
+int uds_replace_index_storage(struct uds_index *index, struct block_device *bdev)
+{
+ return uds_replace_volume_storage(index->volume, index->layout, bdev);
+}
+
+/* Accessing statistics should be safe from any thread. */
+void uds_get_index_stats(struct uds_index *index, struct uds_index_stats *counters)
+{
+ struct volume_index_stats stats;
+
+ uds_get_volume_index_stats(index->volume_index, &stats);
+ counters->entries_indexed = stats.record_count;
+ counters->collisions = stats.collision_count;
+ counters->entries_discarded = stats.discard_count;
+
+ counters->memory_used = (index->volume_index->memory_size +
+ index->volume->cache_size +
+ index->chapter_writer->memory_size);
+}
+
+void uds_enqueue_request(struct uds_request *request, enum request_stage stage)
+{
+ struct uds_index *index = request->index;
+ struct uds_request_queue *queue;
+
+ switch (stage) {
+ case STAGE_TRIAGE:
+ if (index->triage_queue != NULL) {
+ queue = index->triage_queue;
+ break;
+ }
+
+ fallthrough;
+
+ case STAGE_INDEX:
+ request->zone_number =
+ uds_get_volume_index_zone(index->volume_index, &request->record_name);
+ fallthrough;
+
+ case STAGE_MESSAGE:
+ queue = index->zone_queues[request->zone_number];
+ break;
+
+ default:
+ VDO_ASSERT_LOG_ONLY(false, "invalid index stage: %d", stage);
+ return;
+ }
+
+ uds_request_queue_enqueue(queue, request);
+}
diff --git a/drivers/md/dm-vdo/indexer/index.h b/drivers/md/dm-vdo/indexer/index.h
new file mode 100644
index 000000000000..edabb239548e
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/index.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_INDEX_H
+#define UDS_INDEX_H
+
+#include "index-layout.h"
+#include "index-session.h"
+#include "open-chapter.h"
+#include "volume.h"
+#include "volume-index.h"
+
+/*
+ * The index is a high-level structure which represents the totality of the UDS index. It manages
+ * the queues for incoming requests and dispatches them to the appropriate sub-components like the
+ * volume or the volume index. It also manages administrative tasks such as saving and loading the
+ * index.
+ *
+ * The index is divided into a number of independent zones and assigns each request to a zone based
+ * on its name. Most sub-components are similarly divided into zones so that requests in each
+ * zone usually operate without interference or coordination between zones.
+ */
+
+typedef void (*index_callback_fn)(struct uds_request *request);
+
+struct index_zone {
+ struct uds_index *index;
+ struct open_chapter_zone *open_chapter;
+ struct open_chapter_zone *writing_chapter;
+ u64 oldest_virtual_chapter;
+ u64 newest_virtual_chapter;
+ unsigned int id;
+};
+
+struct uds_index {
+ bool has_saved_open_chapter;
+ bool need_to_save;
+ struct index_load_context *load_context;
+ struct index_layout *layout;
+ struct volume_index *volume_index;
+ struct volume *volume;
+ unsigned int zone_count;
+ struct index_zone **zones;
+
+ u64 oldest_virtual_chapter;
+ u64 newest_virtual_chapter;
+
+ u64 last_save;
+ u64 prev_save;
+ struct chapter_writer *chapter_writer;
+
+ index_callback_fn callback;
+ struct uds_request_queue *triage_queue;
+ struct uds_request_queue *zone_queues[];
+};
+
+enum request_stage {
+ STAGE_TRIAGE,
+ STAGE_INDEX,
+ STAGE_MESSAGE,
+};
+
+int __must_check uds_make_index(struct uds_configuration *config,
+ enum uds_open_index_type open_type,
+ struct index_load_context *load_context,
+ index_callback_fn callback, struct uds_index **new_index);
+
+int __must_check uds_save_index(struct uds_index *index);
+
+void uds_free_index(struct uds_index *index);
+
+int __must_check uds_replace_index_storage(struct uds_index *index,
+ struct block_device *bdev);
+
+void uds_get_index_stats(struct uds_index *index, struct uds_index_stats *counters);
+
+void uds_enqueue_request(struct uds_request *request, enum request_stage stage);
+
+void uds_wait_for_idle_index(struct uds_index *index);
+
+#endif /* UDS_INDEX_H */
diff --git a/drivers/md/dm-vdo/indexer/indexer.h b/drivers/md/dm-vdo/indexer/indexer.h
new file mode 100644
index 000000000000..3744aaf625b0
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/indexer.h
@@ -0,0 +1,353 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef INDEXER_H
+#define INDEXER_H
+
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "funnel-queue.h"
+
+/*
+ * UDS public API
+ *
+ * The Universal Deduplication System (UDS) is an efficient name-value store. When used for
+ * deduplicating storage, the names are generally hashes of data blocks and the associated data is
+ * where that block is located on the underlying storage medium. The stored names are expected to
+ * be randomly distributed among the space of possible names. If this assumption is violated, the
+ * UDS index will store fewer names than normal but will otherwise continue to work. The data
+ * associated with each name can be any 16-byte value.
+ *
+ * A client must first create an index session to interact with an index. Once created, the session
+ * can be shared among multiple threads or users. When a session is destroyed, it will also close
+ * and save any associated index.
+ *
+ * To make a request, a client must allocate a uds_request structure and set the required fields
+ * before launching it. UDS will invoke the provided callback to complete the request. After the
+ * callback has been called, the uds_request structure can be freed or reused for a new request.
+ * There are five types of requests:
+ *
+ * A UDS_UPDATE request will associate the provided name with the provided data. Any previous data
+ * associated with that name will be discarded.
+ *
+ * A UDS_QUERY request will return the data associated with the provided name, if any. The entry
+ * for the name will also be marked as most recent, as if the data had been updated.
+ *
+ * A UDS_POST request is a combination of UDS_QUERY and UDS_UPDATE. If there is already data
+ * associated with the provided name, that data is returned. If there is no existing association,
+ * the name is associated with the newly provided data. This request is equivalent to a UDS_QUERY
+ * request followed by a UDS_UPDATE request if no data is found, but it is much more efficient.
+ *
+ * A UDS_QUERY_NO_UPDATE request will return the data associated with the provided name, but will
+ * not change the recency of the entry for the name. This request is primarily useful for testing,
+ * to determine whether an entry exists without changing the internal state of the index.
+ *
+ * A UDS_DELETE request removes any data associated with the provided name. This operation is
+ * generally not necessary, because the index will automatically discard its oldest entries once it
+ * becomes full.
+ */
+
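+/*
+ * A minimal usage sketch (an editorial illustration, not part of the API
+ * contract; error checking is omitted, and bdev, name, and data are assumed
+ * to be supplied by the caller):
+ *
+ *	static void my_callback(struct uds_request *request)
+ *	{
+ *		// request->status, request->found, and request->old_metadata
+ *		// are valid here; the request may now be freed or reused.
+ *	}
+ *
+ *	struct uds_index_session *session;
+ *	struct uds_parameters params = {
+ *		.bdev = bdev,
+ *		.memory_size = UDS_MEMORY_CONFIG_256MB,
+ *	};
+ *	struct uds_request request = {
+ *		.record_name = name,
+ *		.new_metadata = data,
+ *		.type = UDS_POST,
+ *		.callback = my_callback,
+ *	};
+ *
+ *	uds_create_index_session(&session);
+ *	uds_open_index(UDS_CREATE, &params, session);
+ *	request.session = session;
+ *	uds_launch_request(&request);
+ *	// ... wait for my_callback to run, then:
+ *	uds_close_index(session);
+ *	uds_destroy_index_session(session);
+ */
+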
+/* General UDS constants and structures */
+
+enum uds_request_type {
+ /* Create or update the mapping for a name, and make the name most recent. */
+ UDS_UPDATE,
+
+ /* Return any mapped data for a name, and make the name most recent. */
+ UDS_QUERY,
+
+ /*
+ * Return any mapped data for a name, or map the provided data to the name if there is no
+ * current data, and make the name most recent.
+ */
+ UDS_POST,
+
+ /* Return any mapped data for a name without updating its recency. */
+ UDS_QUERY_NO_UPDATE,
+
+ /* Remove any mapping for a name. */
+ UDS_DELETE,
+
+};
+
+enum uds_open_index_type {
+ /* Create a new index. */
+ UDS_CREATE,
+
+ /* Load an existing index and try to recover if necessary. */
+ UDS_LOAD,
+
+ /* Load an existing index, but only if it was saved cleanly. */
+ UDS_NO_REBUILD,
+};
+
+enum {
+ /* The record name size in bytes */
+ UDS_RECORD_NAME_SIZE = 16,
+ /* The maximum record data size in bytes */
+ UDS_RECORD_DATA_SIZE = 16,
+};
+
+/*
+ * A type representing a UDS memory configuration which is either a positive integer number of
+ * gigabytes or one of the six special constants for configurations smaller than one gigabyte.
+ */
+typedef int uds_memory_config_size_t;
+
+enum {
+ /* The maximum configurable amount of memory */
+ UDS_MEMORY_CONFIG_MAX = 1024,
+ /* Flag indicating that the index has one less chapter than usual */
+ UDS_MEMORY_CONFIG_REDUCED = 0x1000,
+ UDS_MEMORY_CONFIG_REDUCED_MAX = 1024 + UDS_MEMORY_CONFIG_REDUCED,
+ /* Special values indicating sizes less than 1 GB */
+ UDS_MEMORY_CONFIG_256MB = -256,
+ UDS_MEMORY_CONFIG_512MB = -512,
+ UDS_MEMORY_CONFIG_768MB = -768,
+ UDS_MEMORY_CONFIG_REDUCED_256MB = -1280,
+ UDS_MEMORY_CONFIG_REDUCED_512MB = -1536,
+ UDS_MEMORY_CONFIG_REDUCED_768MB = -1792,
+};
+
+struct uds_record_name {
+ unsigned char name[UDS_RECORD_NAME_SIZE];
+};
+
+struct uds_record_data {
+ unsigned char data[UDS_RECORD_DATA_SIZE];
+};
+
+struct uds_volume_record {
+ struct uds_record_name name;
+ struct uds_record_data data;
+};
+
+struct uds_parameters {
+ /* The block_device used for storage */
+ struct block_device *bdev;
+ /* The maximum allowable size of the index on storage */
+ size_t size;
+ /* The offset where the index should start */
+ off_t offset;
+ /* The maximum memory allocation, in GB */
+ uds_memory_config_size_t memory_size;
+ /* Whether the index should include sparse chapters */
+ bool sparse;
+ /* A 64-bit nonce to validate the index */
+ u64 nonce;
+ /* The number of threads used to process index requests */
+ unsigned int zone_count;
+ /* The number of threads used to read volume pages */
+ unsigned int read_threads;
+};
+
+/*
+ * These statistics capture characteristics of the current index, including resource usage and
+ * requests processed since the index was opened.
+ */
+struct uds_index_stats {
+ /* The total number of records stored in the index */
+ u64 entries_indexed;
+ /* An estimate of the index's memory usage, in bytes */
+ u64 memory_used;
+ /* The number of collisions recorded in the volume index */
+ u64 collisions;
+ /* The number of entries discarded from the index since startup */
+ u64 entries_discarded;
+ /* The time at which these statistics were fetched */
+ s64 current_time;
+ /* The number of post calls that found an existing entry */
+ u64 posts_found;
+ /* The number of post calls that added an entry */
+ u64 posts_not_found;
+ /*
+ * The number of post calls that found an existing entry recent enough that it still
+ * exists only in memory and has not yet been committed to disk
+ */
+ u64 in_memory_posts_found;
+ /*
+ * The number of post calls that found an existing entry in the dense portion of the index
+ */
+ u64 dense_posts_found;
+ /*
+ * The number of post calls that found an existing entry in the sparse portion of the index
+ */
+ u64 sparse_posts_found;
+ /* The number of update calls that updated an existing entry */
+ u64 updates_found;
+ /* The number of update calls that added a new entry */
+ u64 updates_not_found;
+ /* The number of delete requests that deleted an existing entry */
+ u64 deletions_found;
+ /* The number of delete requests that did nothing */
+ u64 deletions_not_found;
+ /* The number of query calls that found an existing entry */
+ u64 queries_found;
+ /* The number of query calls that did not find an entry */
+ u64 queries_not_found;
+ /* The total number of requests processed */
+ u64 requests;
+};
+
+enum uds_index_region {
+ /* No location information has been determined */
+ UDS_LOCATION_UNKNOWN = 0,
+ /* The index page entry has been found */
+ UDS_LOCATION_INDEX_PAGE_LOOKUP,
+ /* The record page entry has been found */
+ UDS_LOCATION_RECORD_PAGE_LOOKUP,
+ /* The record is not in the index */
+ UDS_LOCATION_UNAVAILABLE,
+ /* The record was found in the open chapter */
+ UDS_LOCATION_IN_OPEN_CHAPTER,
+ /* The record was found in the dense part of the index */
+ UDS_LOCATION_IN_DENSE,
+ /* The record was found in the sparse part of the index */
+ UDS_LOCATION_IN_SPARSE,
+} __packed;
+
+/* Zone message requests are used to communicate between index zones. */
+enum uds_zone_message_type {
+ /* A standard request with no message */
+ UDS_MESSAGE_NONE = 0,
+ /* Add a chapter to the sparse chapter index cache */
+ UDS_MESSAGE_SPARSE_CACHE_BARRIER,
+ /* Close a chapter to keep the zone from falling behind */
+ UDS_MESSAGE_ANNOUNCE_CHAPTER_CLOSED,
+} __packed;
+
+struct uds_zone_message {
+ /* The type of message, determining how it will be processed */
+ enum uds_zone_message_type type;
+ /* The virtual chapter number to which the message applies */
+ u64 virtual_chapter;
+};
+
+struct uds_index_session;
+struct uds_index;
+struct uds_request;
+
+/* Once this callback has been invoked, the uds_request structure can be reused or freed. */
+typedef void (*uds_request_callback_fn)(struct uds_request *request);
+
+struct uds_request {
+ /* These input fields must be set before launching a request. */
+
+ /* The name of the record to look up or create */
+ struct uds_record_name record_name;
+ /* New data to associate with the record name, if applicable */
+ struct uds_record_data new_metadata;
+ /* A callback to invoke when the request is complete */
+ uds_request_callback_fn callback;
+ /* The index session that will manage this request */
+ struct uds_index_session *session;
+ /* The type of operation to perform, as described above */
+ enum uds_request_type type;
+
+ /* These output fields are set when a request is complete. */
+
+ /* The existing data associated with the request name, if any */
+ struct uds_record_data old_metadata;
+ /* Either UDS_SUCCESS or an error code for the request */
+ int status;
+ /* True if the record name had an existing entry in the index */
+ bool found;
+
+ /*
+ * The remaining fields are used internally and should not be altered by clients. The index
+ * relies on zone_number being the first field in this section.
+ */
+
+ /* The number of the zone which will process this request */
+ unsigned int zone_number;
+ /* A link for adding a request to a lock-free queue */
+ struct funnel_queue_entry queue_link;
+ /* A link for adding a request to a standard linked list */
+ struct uds_request *next_request;
+ /* A pointer to the index processing this request */
+ struct uds_index *index;
+ /* Control message for coordinating between zones */
+ struct uds_zone_message zone_message;
+ /* If true, process this request immediately by waking the worker thread */
+ bool unbatched;
+ /* If true, continue this request before processing newer requests */
+ bool requeued;
+ /* The virtual chapter containing the record name, if known */
+ u64 virtual_chapter;
+ /* The region of the index containing the record name */
+ enum uds_index_region location;
+};
+
+/* Compute the number of bytes needed to store an index. */
+int __must_check uds_compute_index_size(const struct uds_parameters *parameters,
+ u64 *index_size);
+
+/* A session is required for most index operations. */
+int __must_check uds_create_index_session(struct uds_index_session **session);
+
+/* Destroying an index session also closes and saves the associated index. */
+int uds_destroy_index_session(struct uds_index_session *session);
+
+/*
+ * Create or open an index with an existing session. This operation fails if the index session is
+ * suspended, or if there is already an open index.
+ */
+int __must_check uds_open_index(enum uds_open_index_type open_type,
+ const struct uds_parameters *parameters,
+ struct uds_index_session *session);
+
+/*
+ * Wait until all callbacks for index operations are complete, and prevent new index operations
+ * from starting. New index operations will fail with EBUSY until the session is resumed. Also
+ * optionally saves the index.
+ */
+int __must_check uds_suspend_index_session(struct uds_index_session *session, bool save);
+
+/*
+ * Allow new index operations for an index, whether it was suspended or not. If the index is
+ * suspended and the supplied block device differs from the current backing store, the index will
+ * start using the new backing store instead.
+ */
+int __must_check uds_resume_index_session(struct uds_index_session *session,
+ struct block_device *bdev);
+
+/* Wait until all outstanding index operations are complete. */
+int __must_check uds_flush_index_session(struct uds_index_session *session);
+
+/* Close an index. This operation fails if the index session is suspended. */
+int __must_check uds_close_index(struct uds_index_session *session);
+
+/* Get index statistics since the last time the index was opened. */
+int __must_check uds_get_index_session_stats(struct uds_index_session *session,
+ struct uds_index_stats *stats);
+
+/* This function will fail if any required field of the request is not set. */
+int __must_check uds_launch_request(struct uds_request *request);
+
+struct cond_var {
+ wait_queue_head_t wait_queue;
+};
+
+static inline void uds_init_cond(struct cond_var *cv)
+{
+ init_waitqueue_head(&cv->wait_queue);
+}
+
+static inline void uds_signal_cond(struct cond_var *cv)
+{
+ wake_up(&cv->wait_queue);
+}
+
+static inline void uds_broadcast_cond(struct cond_var *cv)
+{
+ wake_up_all(&cv->wait_queue);
+}
+
+void uds_wait_cond(struct cond_var *cv, struct mutex *mutex);
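+
+/*
+ * Sketch of the wait pattern used by the indexer (see close_chapters() and
+ * uds_wait_for_idle_index() in index.c): the caller holds the mutex and
+ * rechecks its condition in a loop around uds_wait_cond().
+ *
+ *	mutex_lock(&mutex);
+ *	while (!condition)
+ *		uds_wait_cond(&cv, &mutex);
+ *	mutex_unlock(&mutex);
+ */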
+
+#endif /* INDEXER_H */
diff --git a/drivers/md/dm-vdo/indexer/io-factory.c b/drivers/md/dm-vdo/indexer/io-factory.c
new file mode 100644
index 000000000000..515765d35794
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/io-factory.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "io-factory.h"
+
+#include <linux/atomic.h>
+#include <linux/blkdev.h>
+#include <linux/err.h>
+#include <linux/mount.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+
+/*
+ * The I/O factory object manages access to index storage, which is a contiguous range of blocks on
+ * a block device.
+ *
+ * The factory holds the open device and is responsible for closing it. The factory has methods to
+ * make helper structures that can be used to access sections of the index.
+ */
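+/*
+ * A read-path sketch (an editorial illustration; block_count, data, and len
+ * are assumed to be supplied by the caller, and error checking is omitted):
+ *
+ *	struct io_factory *factory;
+ *	struct buffered_reader *reader;
+ *
+ *	uds_make_io_factory(bdev, &factory);
+ *	uds_make_buffered_reader(factory, 0, block_count, &reader);
+ *	uds_read_from_buffered_reader(reader, data, len);
+ *	uds_free_buffered_reader(reader);
+ *	uds_put_io_factory(factory);
+ */
+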
+struct io_factory {
+ struct block_device *bdev;
+ atomic_t ref_count;
+};
+
+/* The buffered reader allows efficient I/O by reading page-sized segments into a buffer. */
+struct buffered_reader {
+ struct io_factory *factory;
+ struct dm_bufio_client *client;
+ struct dm_buffer *buffer;
+ sector_t limit;
+ sector_t block_number;
+ u8 *start;
+ u8 *end;
+};
+
+#define MAX_READ_AHEAD_BLOCKS 4
+
+/*
+ * The buffered writer allows efficient I/O by buffering writes and committing page-sized segments
+ * to storage.
+ */
+struct buffered_writer {
+ struct io_factory *factory;
+ struct dm_bufio_client *client;
+ struct dm_buffer *buffer;
+ sector_t limit;
+ sector_t block_number;
+ u8 *start;
+ u8 *end;
+ int error;
+};
+
+static void uds_get_io_factory(struct io_factory *factory)
+{
+ atomic_inc(&factory->ref_count);
+}
+
+int uds_make_io_factory(struct block_device *bdev, struct io_factory **factory_ptr)
+{
+ int result;
+ struct io_factory *factory;
+
+ result = vdo_allocate(1, struct io_factory, __func__, &factory);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ factory->bdev = bdev;
+ atomic_set_release(&factory->ref_count, 1);
+
+ *factory_ptr = factory;
+ return UDS_SUCCESS;
+}
+
+int uds_replace_storage(struct io_factory *factory, struct block_device *bdev)
+{
+ factory->bdev = bdev;
+ return UDS_SUCCESS;
+}
+
+/* Free an I/O factory once all references have been released. */
+void uds_put_io_factory(struct io_factory *factory)
+{
+ if (atomic_add_return(-1, &factory->ref_count) <= 0)
+ vdo_free(factory);
+}
+
+size_t uds_get_writable_size(struct io_factory *factory)
+{
+ return i_size_read(factory->bdev->bd_inode);
+}
+
+/* Create a struct dm_bufio_client for an index region starting at offset. */
+int uds_make_bufio(struct io_factory *factory, off_t block_offset, size_t block_size,
+ unsigned int reserved_buffers, struct dm_bufio_client **client_ptr)
+{
+ struct dm_bufio_client *client;
+
+ client = dm_bufio_client_create(factory->bdev, block_size, reserved_buffers, 0,
+ NULL, NULL, 0);
+ if (IS_ERR(client))
+ return -PTR_ERR(client);
+
+ dm_bufio_set_sector_offset(client, block_offset * SECTORS_PER_BLOCK);
+ *client_ptr = client;
+ return UDS_SUCCESS;
+}
+
+static void read_ahead(struct buffered_reader *reader, sector_t block_number)
+{
+ if (block_number < reader->limit) {
+ sector_t read_ahead = min((sector_t) MAX_READ_AHEAD_BLOCKS,
+ reader->limit - block_number);
+
+ dm_bufio_prefetch(reader->client, block_number, read_ahead);
+ }
+}
+
+void uds_free_buffered_reader(struct buffered_reader *reader)
+{
+ if (reader == NULL)
+ return;
+
+ if (reader->buffer != NULL)
+ dm_bufio_release(reader->buffer);
+
+ dm_bufio_client_destroy(reader->client);
+ uds_put_io_factory(reader->factory);
+ vdo_free(reader);
+}
+
+/* Create a buffered reader for an index region starting at offset. */
+int uds_make_buffered_reader(struct io_factory *factory, off_t offset, u64 block_count,
+ struct buffered_reader **reader_ptr)
+{
+ int result;
+ struct dm_bufio_client *client = NULL;
+ struct buffered_reader *reader = NULL;
+
+ result = uds_make_bufio(factory, offset, UDS_BLOCK_SIZE, 1, &client);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = vdo_allocate(1, struct buffered_reader, "buffered reader", &reader);
+ if (result != VDO_SUCCESS) {
+ dm_bufio_client_destroy(client);
+ return result;
+ }
+
+ *reader = (struct buffered_reader) {
+ .factory = factory,
+ .client = client,
+ .buffer = NULL,
+ .limit = block_count,
+ .block_number = 0,
+ .start = NULL,
+ .end = NULL,
+ };
+
+ read_ahead(reader, 0);
+ uds_get_io_factory(factory);
+ *reader_ptr = reader;
+ return UDS_SUCCESS;
+}
+
+static int position_reader(struct buffered_reader *reader, sector_t block_number,
+ off_t offset)
+{
+ struct dm_buffer *buffer = NULL;
+ void *data;
+
+ if ((reader->end == NULL) || (block_number != reader->block_number)) {
+ if (block_number >= reader->limit)
+ return UDS_OUT_OF_RANGE;
+
+ if (reader->buffer != NULL)
+ dm_bufio_release(vdo_forget(reader->buffer));
+
+ data = dm_bufio_read(reader->client, block_number, &buffer);
+ if (IS_ERR(data))
+ return -PTR_ERR(data);
+
+ reader->buffer = buffer;
+ reader->start = data;
+ if (block_number == reader->block_number + 1)
+ read_ahead(reader, block_number + 1);
+ }
+
+ reader->block_number = block_number;
+ reader->end = reader->start + offset;
+ return UDS_SUCCESS;
+}
+
+static size_t bytes_remaining_in_read_buffer(struct buffered_reader *reader)
+{
+ return (reader->end == NULL) ? 0 : reader->start + UDS_BLOCK_SIZE - reader->end;
+}
+
+static int reset_reader(struct buffered_reader *reader)
+{
+ sector_t block_number;
+
+ if (bytes_remaining_in_read_buffer(reader) > 0)
+ return UDS_SUCCESS;
+
+ block_number = reader->block_number;
+ if (reader->end != NULL)
+ block_number++;
+
+ return position_reader(reader, block_number, 0);
+}
+
+int uds_read_from_buffered_reader(struct buffered_reader *reader, u8 *data,
+ size_t length)
+{
+ int result = UDS_SUCCESS;
+ size_t chunk_size;
+
+ while (length > 0) {
+ result = reset_reader(reader);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ chunk_size = min(length, bytes_remaining_in_read_buffer(reader));
+ memcpy(data, reader->end, chunk_size);
+ length -= chunk_size;
+ data += chunk_size;
+ reader->end += chunk_size;
+ }
+
+ return UDS_SUCCESS;
+}
+
+/*
+ * Verify that the next data on the reader matches the required value. If the value matches, the
+ * matching contents are consumed. If the value does not match, the reader state is unchanged.
+ */
+int uds_verify_buffered_data(struct buffered_reader *reader, const u8 *value,
+ size_t length)
+{
+ int result = UDS_SUCCESS;
+ size_t chunk_size;
+ sector_t start_block_number = reader->block_number;
+ int start_offset = reader->end - reader->start;
+
+ while (length > 0) {
+ result = reset_reader(reader);
+ if (result != UDS_SUCCESS) {
+ result = UDS_CORRUPT_DATA;
+ break;
+ }
+
+ chunk_size = min(length, bytes_remaining_in_read_buffer(reader));
+ if (memcmp(value, reader->end, chunk_size) != 0) {
+ result = UDS_CORRUPT_DATA;
+ break;
+ }
+
+ length -= chunk_size;
+ value += chunk_size;
+ reader->end += chunk_size;
+ }
+
+ if (result != UDS_SUCCESS)
+ position_reader(reader, start_block_number, start_offset);
+
+ return result;
+}
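+
+/*
+ * For a usage sketch, see uds_load_open_chapter() in open-chapter.c later in
+ * this patch: it calls uds_verify_buffered_data() with the expected magic
+ * string before reading the version, and a mismatch surfaces as
+ * UDS_CORRUPT_DATA without consuming the reader's data.
+ */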
+
+/* Create a buffered writer for an index region starting at offset. */
+int uds_make_buffered_writer(struct io_factory *factory, off_t offset, u64 block_count,
+ struct buffered_writer **writer_ptr)
+{
+ int result;
+ struct dm_bufio_client *client = NULL;
+ struct buffered_writer *writer;
+
+ result = uds_make_bufio(factory, offset, UDS_BLOCK_SIZE, 1, &client);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = vdo_allocate(1, struct buffered_writer, "buffered writer", &writer);
+ if (result != VDO_SUCCESS) {
+ dm_bufio_client_destroy(client);
+ return result;
+ }
+
+ *writer = (struct buffered_writer) {
+ .factory = factory,
+ .client = client,
+ .buffer = NULL,
+ .limit = block_count,
+ .start = NULL,
+ .end = NULL,
+ .block_number = 0,
+ .error = UDS_SUCCESS,
+ };
+
+ uds_get_io_factory(factory);
+ *writer_ptr = writer;
+ return UDS_SUCCESS;
+}
+
+static size_t get_remaining_write_space(struct buffered_writer *writer)
+{
+ return writer->start + UDS_BLOCK_SIZE - writer->end;
+}
+
+static int __must_check prepare_next_buffer(struct buffered_writer *writer)
+{
+ struct dm_buffer *buffer = NULL;
+ void *data;
+
+ if (writer->block_number >= writer->limit) {
+ writer->error = UDS_OUT_OF_RANGE;
+ return UDS_OUT_OF_RANGE;
+ }
+
+ data = dm_bufio_new(writer->client, writer->block_number, &buffer);
+ if (IS_ERR(data)) {
+ writer->error = -PTR_ERR(data);
+ return writer->error;
+ }
+
+ writer->buffer = buffer;
+ writer->start = data;
+ writer->end = data;
+ return UDS_SUCCESS;
+}
+
+static int flush_previous_buffer(struct buffered_writer *writer)
+{
+ size_t available;
+
+ if (writer->buffer == NULL)
+ return writer->error;
+
+ if (writer->error == UDS_SUCCESS) {
+ available = get_remaining_write_space(writer);
+
+ if (available > 0)
+ memset(writer->end, 0, available);
+
+ dm_bufio_mark_buffer_dirty(writer->buffer);
+ }
+
+ dm_bufio_release(writer->buffer);
+ writer->buffer = NULL;
+ writer->start = NULL;
+ writer->end = NULL;
+ writer->block_number++;
+ return writer->error;
+}
+
+void uds_free_buffered_writer(struct buffered_writer *writer)
+{
+ int result;
+
+ if (writer == NULL)
+ return;
+
+ flush_previous_buffer(writer);
+ result = -dm_bufio_write_dirty_buffers(writer->client);
+ if (result != UDS_SUCCESS)
+ vdo_log_warning_strerror(result, "%s: failed to sync storage", __func__);
+
+ dm_bufio_client_destroy(writer->client);
+ uds_put_io_factory(writer->factory);
+ vdo_free(writer);
+}
+
+/*
+ * Append data to the buffer, writing as needed. If no data is provided, zeros are written instead.
+ * If a write error occurs, it is recorded and returned on every subsequent write attempt.
+ */
+int uds_write_to_buffered_writer(struct buffered_writer *writer, const u8 *data,
+ size_t length)
+{
+ int result = writer->error;
+ size_t chunk_size;
+
+ while ((length > 0) && (result == UDS_SUCCESS)) {
+ if (writer->buffer == NULL) {
+ result = prepare_next_buffer(writer);
+ continue;
+ }
+
+ chunk_size = min(length, get_remaining_write_space(writer));
+ if (data == NULL) {
+ memset(writer->end, 0, chunk_size);
+ } else {
+ memcpy(writer->end, data, chunk_size);
+ data += chunk_size;
+ }
+
+ length -= chunk_size;
+ writer->end += chunk_size;
+
+ if (get_remaining_write_space(writer) == 0)
+ result = uds_flush_buffered_writer(writer);
+ }
+
+ return result;
+}
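+
+/*
+ * For example, a caller can zero-fill a region without allocating a scratch
+ * buffer (a sketch; pad_length is an assumed value):
+ *
+ *	uds_write_to_buffered_writer(writer, NULL, pad_length);
+ */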
+
+int uds_flush_buffered_writer(struct buffered_writer *writer)
+{
+ if (writer->error != UDS_SUCCESS)
+ return writer->error;
+
+ return flush_previous_buffer(writer);
+}
diff --git a/drivers/md/dm-vdo/indexer/io-factory.h b/drivers/md/dm-vdo/indexer/io-factory.h
new file mode 100644
index 000000000000..7fb5a0616a79
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/io-factory.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_IO_FACTORY_H
+#define UDS_IO_FACTORY_H
+
+#include <linux/dm-bufio.h>
+
+/*
+ * The I/O factory manages all low-level I/O operations to the underlying storage device. Its main
+ * clients are the index layout and the volume. The buffered reader and buffered writer interfaces
+ * are helpers for accessing data in a contiguous range of storage blocks.
+ */
+
+struct buffered_reader;
+struct buffered_writer;
+
+struct io_factory;
+
+enum {
+ UDS_BLOCK_SIZE = 4096,
+ SECTORS_PER_BLOCK = UDS_BLOCK_SIZE >> SECTOR_SHIFT,
+};
+
+int __must_check uds_make_io_factory(struct block_device *bdev,
+ struct io_factory **factory_ptr);
+
+int __must_check uds_replace_storage(struct io_factory *factory,
+ struct block_device *bdev);
+
+void uds_put_io_factory(struct io_factory *factory);
+
+size_t __must_check uds_get_writable_size(struct io_factory *factory);
+
+int __must_check uds_make_bufio(struct io_factory *factory, off_t block_offset,
+ size_t block_size, unsigned int reserved_buffers,
+ struct dm_bufio_client **client_ptr);
+
+int __must_check uds_make_buffered_reader(struct io_factory *factory, off_t offset,
+ u64 block_count,
+ struct buffered_reader **reader_ptr);
+
+void uds_free_buffered_reader(struct buffered_reader *reader);
+
+int __must_check uds_read_from_buffered_reader(struct buffered_reader *reader, u8 *data,
+ size_t length);
+
+int __must_check uds_verify_buffered_data(struct buffered_reader *reader, const u8 *value,
+ size_t length);
+
+int __must_check uds_make_buffered_writer(struct io_factory *factory, off_t offset,
+ u64 block_count,
+ struct buffered_writer **writer_ptr);
+
+void uds_free_buffered_writer(struct buffered_writer *writer);
+
+int __must_check uds_write_to_buffered_writer(struct buffered_writer *writer,
+ const u8 *data, size_t length);
+
+int __must_check uds_flush_buffered_writer(struct buffered_writer *writer);
+
+#endif /* UDS_IO_FACTORY_H */
diff --git a/drivers/md/dm-vdo/indexer/open-chapter.c b/drivers/md/dm-vdo/indexer/open-chapter.c
new file mode 100644
index 000000000000..4a67bcadaae0
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/open-chapter.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "open-chapter.h"
+
+#include <linux/log2.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+
+#include "config.h"
+#include "hash-utils.h"
+
+/*
+ * Each index zone has a dedicated open chapter zone structure which gets an equal share of the
+ * open chapter space. Records are assigned to zones based on their record name. Within each zone,
+ * records are stored in an array in the order they arrive. Additionally, a reference to each
+ * record is stored in a hash table to help determine if a new record duplicates an existing one.
+ * If new metadata for an existing name arrives, the record is altered in place. The array of
+ * records is 1-based so that record number 0 can be used to indicate an unused hash slot.
+ *
+ * Deleted records are marked with a flag rather than actually removed to simplify hash table
+ * management. The array of deleted flags overlays the array of hash slots, but the flags are
+ * indexed by record number instead of by record name. The number of hash slots will always be a
+ * power of two that is greater than the number of records to be indexed, guaranteeing that hash
+ * insertion cannot fail, and that there are sufficient flags for all records.
+ *
+ * Once any open chapter zone fills its available space, the chapter is closed. The records from
+ * each zone are interleaved to attempt to preserve temporal locality and assigned to record pages.
+ * Empty or deleted records are replaced by copies of a valid record so that the record pages only
+ * contain valid records. The chapter then constructs a delta index which maps each record name to
+ * the record page on which that record can be found, which is split into index pages. These
+ * structures are then passed to the volume to be recorded on storage.
+ *
+ * When the index is saved, the open chapter records are saved in a single array, once again
+ * interleaved to attempt to preserve temporal locality. When the index is reloaded, there may be a
+ * different number of zones than previously, so the records must be parcelled out to their new
+ * zones. In addition, depending on the distribution of record names, a new zone may have more
+ * records than it has space. In this case, the latest records for that zone will be discarded.
+ */
+
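+/*
+ * For example, with three zones, the collated record order produced when a
+ * chapter closes is: zone 0 record 1, zone 1 record 1, zone 2 record 1,
+ * zone 0 record 2, and so on. See fill_delta_chapter_index() below, which
+ * derives this interleaving from record_index = 1 + (records / zone_count)
+ * and chapter_zones[records % zone_count].
+ */
+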
+static const u8 OPEN_CHAPTER_MAGIC[] = "ALBOC";
+static const u8 OPEN_CHAPTER_VERSION[] = "02.00";
+
+#define OPEN_CHAPTER_MAGIC_LENGTH (sizeof(OPEN_CHAPTER_MAGIC) - 1)
+#define OPEN_CHAPTER_VERSION_LENGTH (sizeof(OPEN_CHAPTER_VERSION) - 1)
+#define LOAD_RATIO 2
+
+static inline size_t records_size(const struct open_chapter_zone *open_chapter)
+{
+ return sizeof(struct uds_volume_record) * (1 + open_chapter->capacity);
+}
+
+static inline size_t slots_size(size_t slot_count)
+{
+ return sizeof(struct open_chapter_zone_slot) * slot_count;
+}
+
+int uds_make_open_chapter(const struct index_geometry *geometry, unsigned int zone_count,
+ struct open_chapter_zone **open_chapter_ptr)
+{
+ int result;
+ struct open_chapter_zone *open_chapter;
+ size_t capacity = geometry->records_per_chapter / zone_count;
+ size_t slot_count = (1 << bits_per(capacity * LOAD_RATIO));
+
+ result = vdo_allocate_extended(struct open_chapter_zone, slot_count,
+ struct open_chapter_zone_slot, "open chapter",
+ &open_chapter);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ open_chapter->slot_count = slot_count;
+ open_chapter->capacity = capacity;
+ result = vdo_allocate_cache_aligned(records_size(open_chapter), "record pages",
+ &open_chapter->records);
+ if (result != VDO_SUCCESS) {
+ uds_free_open_chapter(open_chapter);
+ return result;
+ }
+
+ *open_chapter_ptr = open_chapter;
+ return UDS_SUCCESS;
+}
+
+void uds_reset_open_chapter(struct open_chapter_zone *open_chapter)
+{
+ open_chapter->size = 0;
+ open_chapter->deletions = 0;
+
+ memset(open_chapter->records, 0, records_size(open_chapter));
+ memset(open_chapter->slots, 0, slots_size(open_chapter->slot_count));
+}
+
+static unsigned int probe_chapter_slots(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name)
+{
+ struct uds_volume_record *record;
+ unsigned int slot_count = open_chapter->slot_count;
+ unsigned int slot = uds_name_to_hash_slot(name, slot_count);
+ unsigned int record_number;
+ unsigned int attempts = 1;
+
+ while (true) {
+ record_number = open_chapter->slots[slot].record_number;
+
+ /*
+ * If the hash slot is empty, we've reached the end of a chain without finding the
+ * record and should terminate the search.
+ */
+ if (record_number == 0)
+ return slot;
+
+ /*
+ * If the name of the record referenced by the slot matches and has not been
+ * deleted, then we've found the requested name.
+ */
+ record = &open_chapter->records[record_number];
+ if ((memcmp(&record->name, name, UDS_RECORD_NAME_SIZE) == 0) &&
+ !open_chapter->slots[record_number].deleted)
+ return slot;
+
+ /*
+ * Quadratic probing: advance the probe by 1, 2, 3, etc. and try again. This
+ * performs better than linear probing and works best for 2^N slots.
+ */
+ slot = (slot + attempts++) % slot_count;
+ }
+}
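+
+/*
+ * Worked example: with slot_count = 8 and an initial slot of 5, the probe
+ * sequence above visits slots 5, 6, 0, 3, 7, 4, 2, and 1, touching every
+ * slot exactly once. This full-cycle property holds whenever slot_count is
+ * a power of two, which is why hash insertion cannot fail while a free
+ * slot remains.
+ */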
+
+void uds_search_open_chapter(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name,
+ struct uds_record_data *metadata, bool *found)
+{
+ unsigned int slot;
+ unsigned int record_number;
+
+ slot = probe_chapter_slots(open_chapter, name);
+ record_number = open_chapter->slots[slot].record_number;
+ if (record_number == 0) {
+ *found = false;
+ } else {
+ *found = true;
+ *metadata = open_chapter->records[record_number].data;
+ }
+}
+
+/* Add a record to the open chapter zone and return the remaining space. */
+int uds_put_open_chapter(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name,
+ const struct uds_record_data *metadata)
+{
+ unsigned int slot;
+ unsigned int record_number;
+ struct uds_volume_record *record;
+
+ if (open_chapter->size >= open_chapter->capacity)
+ return 0;
+
+ slot = probe_chapter_slots(open_chapter, name);
+ record_number = open_chapter->slots[slot].record_number;
+
+ if (record_number == 0) {
+ record_number = ++open_chapter->size;
+ open_chapter->slots[slot].record_number = record_number;
+ }
+
+ record = &open_chapter->records[record_number];
+ record->name = *name;
+ record->data = *metadata;
+
+ return open_chapter->capacity - open_chapter->size;
+}
+
+void uds_remove_from_open_chapter(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name)
+{
+ unsigned int slot;
+ unsigned int record_number;
+
+ slot = probe_chapter_slots(open_chapter, name);
+ record_number = open_chapter->slots[slot].record_number;
+
+ if (record_number > 0) {
+ open_chapter->slots[record_number].deleted = true;
+ open_chapter->deletions += 1;
+ }
+}
+
+void uds_free_open_chapter(struct open_chapter_zone *open_chapter)
+{
+ if (open_chapter != NULL) {
+ vdo_free(open_chapter->records);
+ vdo_free(open_chapter);
+ }
+}
+
+/* Map each record name to its record page number in the delta chapter index. */
+static int fill_delta_chapter_index(struct open_chapter_zone **chapter_zones,
+ unsigned int zone_count,
+ struct open_chapter_index *index,
+ struct uds_volume_record *collated_records)
+{
+ int result;
+ unsigned int records_per_chapter;
+ unsigned int records_per_page;
+ unsigned int record_index;
+ unsigned int records = 0;
+ u32 page_number;
+ unsigned int z;
+ int overflow_count = 0;
+ struct uds_volume_record *fill_record = NULL;
+
+ /*
+ * The record pages should not have any empty space, so find a record with which to fill
+ * the chapter zone if it was closed early, and also to replace any deleted records. The
+ * last record in any filled zone is guaranteed not to have been deleted, so use one of
+ * those.
+ */
+ for (z = 0; z < zone_count; z++) {
+ struct open_chapter_zone *zone = chapter_zones[z];
+
+ if (zone->size == zone->capacity) {
+ fill_record = &zone->records[zone->size];
+ break;
+ }
+ }
+
+ records_per_chapter = index->geometry->records_per_chapter;
+ records_per_page = index->geometry->records_per_page;
+
+ for (records = 0; records < records_per_chapter; records++) {
+ struct uds_volume_record *record = &collated_records[records];
+ struct open_chapter_zone *open_chapter;
+
+ /* The record arrays in the zones are 1-based. */
+ record_index = 1 + (records / zone_count);
+ page_number = records / records_per_page;
+ open_chapter = chapter_zones[records % zone_count];
+
+ /* Use the fill record in place of an unused record. */
+ if (record_index > open_chapter->size ||
+ open_chapter->slots[record_index].deleted) {
+ *record = *fill_record;
+ continue;
+ }
+
+ *record = open_chapter->records[record_index];
+ result = uds_put_open_chapter_index_record(index, &record->name,
+ page_number);
+ switch (result) {
+ case UDS_SUCCESS:
+ break;
+ case UDS_OVERFLOW:
+ overflow_count++;
+ break;
+ default:
+ vdo_log_error_strerror(result,
+ "failed to build open chapter index");
+ return result;
+ }
+ }
+
+ if (overflow_count > 0)
+ vdo_log_warning("Failed to add %d entries to chapter index",
+ overflow_count);
+
+ return UDS_SUCCESS;
+}
+
+int uds_close_open_chapter(struct open_chapter_zone **chapter_zones,
+ unsigned int zone_count, struct volume *volume,
+ struct open_chapter_index *chapter_index,
+ struct uds_volume_record *collated_records,
+ u64 virtual_chapter_number)
+{
+ int result;
+
+ uds_empty_open_chapter_index(chapter_index, virtual_chapter_number);
+ result = fill_delta_chapter_index(chapter_zones, zone_count, chapter_index,
+ collated_records);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return uds_write_chapter(volume, chapter_index, collated_records);
+}
+
+int uds_save_open_chapter(struct uds_index *index, struct buffered_writer *writer)
+{
+ int result;
+ struct open_chapter_zone *open_chapter;
+ struct uds_volume_record *record;
+ u8 record_count_data[sizeof(u32)];
+ u32 record_count = 0;
+ unsigned int record_index;
+ unsigned int z;
+
+ result = uds_write_to_buffered_writer(writer, OPEN_CHAPTER_MAGIC,
+ OPEN_CHAPTER_MAGIC_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_write_to_buffered_writer(writer, OPEN_CHAPTER_VERSION,
+ OPEN_CHAPTER_VERSION_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ for (z = 0; z < index->zone_count; z++) {
+ open_chapter = index->zones[z]->open_chapter;
+ record_count += open_chapter->size - open_chapter->deletions;
+ }
+
+ put_unaligned_le32(record_count, record_count_data);
+ result = uds_write_to_buffered_writer(writer, record_count_data,
+ sizeof(record_count_data));
+ if (result != UDS_SUCCESS)
+ return result;
+
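+	/*
+	 * Write the undeleted records in rounds, taking one record from each zone in
+	 * turn, so that a later load can redistribute them even if the zone count has
+	 * changed by then (see load_version20()).
+	 */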
+ record_index = 1;
+ while (record_count > 0) {
+ for (z = 0; z < index->zone_count; z++) {
+ open_chapter = index->zones[z]->open_chapter;
+ if (record_index > open_chapter->size)
+ continue;
+
+ if (open_chapter->slots[record_index].deleted)
+ continue;
+
+ record = &open_chapter->records[record_index];
+ result = uds_write_to_buffered_writer(writer, (u8 *) record,
+ sizeof(*record));
+ if (result != UDS_SUCCESS)
+ return result;
+
+ record_count--;
+ }
+
+ record_index++;
+ }
+
+ return uds_flush_buffered_writer(writer);
+}
+
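+/*
+ * The saved open chapter is simply the magic bytes, the version bytes, a 32-bit
+ * little-endian record count, and then the records themselves, so the space
+ * needed is a fixed function of the geometry.
+ */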
+u64 uds_compute_saved_open_chapter_size(struct index_geometry *geometry)
+{
+ unsigned int records_per_chapter = geometry->records_per_chapter;
+
+ return OPEN_CHAPTER_MAGIC_LENGTH + OPEN_CHAPTER_VERSION_LENGTH + sizeof(u32) +
+ records_per_chapter * sizeof(struct uds_volume_record);
+}
+
+static int load_version20(struct uds_index *index, struct buffered_reader *reader)
+{
+ int result;
+ u32 record_count;
+ u8 record_count_data[sizeof(u32)];
+ struct uds_volume_record record;
+
+ /*
+ * Track which zones cannot accept any more records. If the open chapter had a different
+ * number of zones previously, some new zones may have more records than they have space
+ * for. These overflow records will be discarded.
+ */
+ bool full_flags[MAX_ZONES] = {
+ false,
+ };
+
+ result = uds_read_from_buffered_reader(reader, (u8 *) &record_count_data,
+ sizeof(record_count_data));
+ if (result != UDS_SUCCESS)
+ return result;
+
+ record_count = get_unaligned_le32(record_count_data);
+ while (record_count-- > 0) {
+ unsigned int zone = 0;
+
+ result = uds_read_from_buffered_reader(reader, (u8 *) &record,
+ sizeof(record));
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (index->zone_count > 1)
+ zone = uds_get_volume_index_zone(index->volume_index,
+ &record.name);
+
+ if (!full_flags[zone]) {
+ struct open_chapter_zone *open_chapter;
+ unsigned int remaining;
+
+ open_chapter = index->zones[zone]->open_chapter;
+ remaining = uds_put_open_chapter(open_chapter, &record.name,
+ &record.data);
+ /* Do not allow any zone to fill completely. */
+ full_flags[zone] = (remaining <= 1);
+ }
+ }
+
+ return UDS_SUCCESS;
+}
+
+int uds_load_open_chapter(struct uds_index *index, struct buffered_reader *reader)
+{
+ u8 version[OPEN_CHAPTER_VERSION_LENGTH];
+ int result;
+
+ result = uds_verify_buffered_data(reader, OPEN_CHAPTER_MAGIC,
+ OPEN_CHAPTER_MAGIC_LENGTH);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_read_from_buffered_reader(reader, version, sizeof(version));
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (memcmp(OPEN_CHAPTER_VERSION, version, sizeof(version)) != 0) {
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "Invalid open chapter version: %.*s",
+ (int) sizeof(version), version);
+ }
+
+ return load_version20(index, reader);
+}
diff --git a/drivers/md/dm-vdo/indexer/open-chapter.h b/drivers/md/dm-vdo/indexer/open-chapter.h
new file mode 100644
index 000000000000..a4250bb19525
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/open-chapter.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_OPEN_CHAPTER_H
+#define UDS_OPEN_CHAPTER_H
+
+#include "chapter-index.h"
+#include "geometry.h"
+#include "index.h"
+#include "volume.h"
+
+/*
+ * The open chapter tracks the newest records in memory. Like the index as a whole, each open
+ * chapter is divided into a number of independent zones which are interleaved when the chapter is
+ * committed to the volume.
+ */
+
+enum {
+ OPEN_CHAPTER_RECORD_NUMBER_BITS = 23,
+};
+
+struct open_chapter_zone_slot {
+ /* If non-zero, the record number addressed by this hash slot */
+ unsigned int record_number : OPEN_CHAPTER_RECORD_NUMBER_BITS;
+ /* If true, the record at the index of this hash slot was deleted */
+ bool deleted : 1;
+} __packed;
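+
+/*
+ * For example, if the record stored at records[7] hashes to slot 42, then
+ * slots[42].record_number is 7; deleting that record sets slots[7].deleted
+ * rather than disturbing the probe chain through slot 42.
+ */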
+
+struct open_chapter_zone {
+ /* The maximum number of records that can be stored */
+ unsigned int capacity;
+ /* The number of records stored */
+ unsigned int size;
+ /* The number of deleted records */
+ unsigned int deletions;
+ /* Array of chunk records, 1-based */
+ struct uds_volume_record *records;
+ /* The number of slots in the hash table */
+ unsigned int slot_count;
+ /* The hash table slots, referencing virtual record numbers */
+ struct open_chapter_zone_slot slots[];
+};
+
+int __must_check uds_make_open_chapter(const struct index_geometry *geometry,
+ unsigned int zone_count,
+ struct open_chapter_zone **open_chapter_ptr);
+
+void uds_reset_open_chapter(struct open_chapter_zone *open_chapter);
+
+void uds_search_open_chapter(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name,
+ struct uds_record_data *metadata, bool *found);
+
+int __must_check uds_put_open_chapter(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name,
+ const struct uds_record_data *metadata);
+
+void uds_remove_from_open_chapter(struct open_chapter_zone *open_chapter,
+ const struct uds_record_name *name);
+
+void uds_free_open_chapter(struct open_chapter_zone *open_chapter);
+
+int __must_check uds_close_open_chapter(struct open_chapter_zone **chapter_zones,
+ unsigned int zone_count, struct volume *volume,
+ struct open_chapter_index *chapter_index,
+ struct uds_volume_record *collated_records,
+ u64 virtual_chapter_number);
+
+int __must_check uds_save_open_chapter(struct uds_index *index,
+ struct buffered_writer *writer);
+
+int __must_check uds_load_open_chapter(struct uds_index *index,
+ struct buffered_reader *reader);
+
+u64 uds_compute_saved_open_chapter_size(struct index_geometry *geometry);
+
+#endif /* UDS_OPEN_CHAPTER_H */
diff --git a/drivers/md/dm-vdo/indexer/radix-sort.c b/drivers/md/dm-vdo/indexer/radix-sort.c
new file mode 100644
index 000000000000..66b8c706a1ef
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/radix-sort.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "radix-sort.h"
+
+#include <linux/limits.h>
+#include <linux/types.h>
+
+#include "memory-alloc.h"
+#include "string-utils.h"
+
+/*
+ * This implementation allocates one large object to do the sorting, which can be reused as many
+ * times as desired. The amount of memory required is proportional to the number of keys to be
+ * sorted, since the task stack holds at most one entry per INSERTION_SORT_THRESHOLD keys.
+ */
+
+/* Piles smaller than this are handled with a simple insertion sort. */
+#define INSERTION_SORT_THRESHOLD 12
+
+/* Sort keys are pointers to immutable fixed-length arrays of bytes. */
+typedef const u8 *sort_key_t;
+
+/*
+ * The keys are separated into piles based on the byte of each key at the current offset, so the
+ * number of keys with each byte must be counted.
+ */
+struct histogram {
+ /* The number of non-empty bins */
+ u16 used;
+ /* The index (key byte) of the first non-empty bin */
+ u16 first;
+ /* The index (key byte) of the last non-empty bin */
+ u16 last;
+ /* The number of occurrences of each specific byte */
+ u32 size[256];
+};
+
+/*
+ * Sub-tasks are manually managed on a stack, both for performance and to put a firm bound on the
+ * stack space needed: the stack never holds more than count / INSERTION_SORT_THRESHOLD tasks.
+ */
+struct task {
+ /* Pointer to the first key to sort. */
+ sort_key_t *first_key;
+ /* Pointer to the last key to sort. */
+ sort_key_t *last_key;
+ /* The offset into the key at which to continue sorting. */
+ u16 offset;
+ /* The number of bytes remaining in the sort keys. */
+ u16 length;
+};
+
+struct radix_sorter {
+ unsigned int count;
+ struct histogram bins;
+ sort_key_t *pile[256];
+ struct task *end_of_stack;
+ struct task insertion_list[256];
+ struct task stack[];
+};
+
+/* Compare a segment of two fixed-length keys starting at an offset. */
+static inline int compare(sort_key_t key1, sort_key_t key2, u16 offset, u16 length)
+{
+ return memcmp(&key1[offset], &key2[offset], length);
+}
+
+/* Insert the next unsorted key into an array of sorted keys. */
+static inline void insert_key(const struct task task, sort_key_t *next)
+{
+ /* Pull the unsorted key out, freeing up the array slot. */
+ sort_key_t unsorted = *next;
+
+ /* Compare the key to the preceding sorted entries, shifting down ones that are larger. */
+ while ((--next >= task.first_key) &&
+ (compare(unsorted, next[0], task.offset, task.length) < 0))
+ next[1] = next[0];
+
+ /* Insert the key into the last slot that was cleared, sorting it. */
+ next[1] = unsorted;
+}
+
+/*
+ * Sort a range of key segments using an insertion sort. This simple sort is faster than the
+ * 256-way radix sort when the number of keys to sort is small.
+ */
+static inline void insertion_sort(const struct task task)
+{
+ sort_key_t *next;
+
+ for (next = task.first_key + 1; next <= task.last_key; next++)
+ insert_key(task, next);
+}
+
+/* Push a sorting task onto a task stack. */
+static inline void push_task(struct task **stack_pointer, sort_key_t *first_key,
+ u32 count, u16 offset, u16 length)
+{
+ struct task *task = (*stack_pointer)++;
+
+ task->first_key = first_key;
+ task->last_key = &first_key[count - 1];
+ task->offset = offset;
+ task->length = length;
+}
+
+static inline void swap_keys(sort_key_t *a, sort_key_t *b)
+{
+ sort_key_t c = *a;
+ *a = *b;
+ *b = c;
+}
+
+/*
+ * Count the number of times each byte value appears in the arrays of keys to sort at the current
+ * offset, keeping track of the number of non-empty bins, and the index of the first and last
+ * non-empty bin.
+ */
+static inline void measure_bins(const struct task task, struct histogram *bins)
+{
+ sort_key_t *key_ptr;
+
+ /*
+	 * Subtle invariant: bins->used and bins->size[] are zero because the sorting code clears
+	 * them all out as it goes. Even though this structure is re-used, we don't need to pay to
+ * zero it before starting a new tally.
+ */
+ bins->first = U8_MAX;
+ bins->last = 0;
+
+ for (key_ptr = task.first_key; key_ptr <= task.last_key; key_ptr++) {
+ /* Increment the count for the byte in the key at the current offset. */
+ u8 bin = (*key_ptr)[task.offset];
+ u32 size = ++bins->size[bin];
+
+ /* Track non-empty bins. */
+ if (size == 1) {
+ bins->used += 1;
+ if (bin < bins->first)
+ bins->first = bin;
+
+ if (bin > bins->last)
+ bins->last = bin;
+ }
+ }
+}
+
+/*
+ * Convert the bin sizes to pointers to where each pile goes.
+ *
+ * pile[0] = first_key + bin->size[0],
+ * pile[1] = pile[0] + bin->size[1], etc.
+ *
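+ * For example (hypothetical sizes), if bin->size[0] = 3 and bin->size[1] = 5,
+ * then pile[0] = first_key + 3 and pile[1] = first_key + 8: each pile pointer
+ * starts just past the region its pile will occupy.
+ *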
+ * After the keys are moved to the appropriate pile, we'll need to sort each of the piles by the
+ * next radix position. A new task is put on the stack for each pile containing lots of keys, or a
+ * new task is put on the list for each pile containing few keys.
+ *
+ * @stack: pointer to the top of the stack
+ * @end_of_stack: the end of the stack
+ * @list: pointer to the head of the list
+ * @pile: array for pointers to the end of each pile
+ * @bins: the histogram of the sizes of each pile
+ * @first_key: the first key of the range being sorted
+ * @offset: the next radix position to sort by
+ * @length: the number of bytes remaining in the sort keys
+ *
+ * Return: UDS_SUCCESS or an error code
+ */
+static inline int push_bins(struct task **stack, struct task *end_of_stack,
+ struct task **list, sort_key_t *pile[],
+ struct histogram *bins, sort_key_t *first_key,
+ u16 offset, u16 length)
+{
+ sort_key_t *pile_start = first_key;
+ int bin;
+
+ for (bin = bins->first; ; bin++) {
+ u32 size = bins->size[bin];
+
+ /* Skip empty piles. */
+ if (size == 0)
+ continue;
+
+ /* There's no need to sort empty keys. */
+ if (length > 0) {
+ if (size > INSERTION_SORT_THRESHOLD) {
+ if (*stack >= end_of_stack)
+ return UDS_BAD_STATE;
+
+ push_task(stack, pile_start, size, offset, length);
+ } else if (size > 1) {
+ push_task(list, pile_start, size, offset, length);
+ }
+ }
+
+ pile_start += size;
+ pile[bin] = pile_start;
+ if (--bins->used == 0)
+ break;
+ }
+
+ return UDS_SUCCESS;
+}
+
+int uds_make_radix_sorter(unsigned int count, struct radix_sorter **sorter)
+{
+ int result;
+ unsigned int stack_size = count / INSERTION_SORT_THRESHOLD;
+ struct radix_sorter *radix_sorter;
+
+ result = vdo_allocate_extended(struct radix_sorter, stack_size, struct task,
+ __func__, &radix_sorter);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ radix_sorter->count = count;
+ radix_sorter->end_of_stack = radix_sorter->stack + stack_size;
+ *sorter = radix_sorter;
+ return UDS_SUCCESS;
+}
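+
+/*
+ * A caller-side usage sketch (names hypothetical): allocate one sorter large
+ * enough for the biggest batch and reuse it for every sort:
+ *
+ *	result = uds_make_radix_sorter(record_count, &sorter);
+ *	...
+ *	result = uds_radix_sort(sorter, keys, record_count, UDS_RECORD_NAME_SIZE);
+ *	...
+ *	uds_free_radix_sorter(sorter);
+ */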
+
+void uds_free_radix_sorter(struct radix_sorter *sorter)
+{
+ vdo_free(sorter);
+}
+
+/*
+ * Sort pointers to fixed-length keys (arrays of bytes) using a radix sort. The sort implementation
+ * is unstable, so the relative ordering of equal keys is not preserved.
+ */
+int uds_radix_sort(struct radix_sorter *sorter, const unsigned char *keys[],
+ unsigned int count, unsigned short length)
+{
+ struct task start;
+ struct histogram *bins = &sorter->bins;
+ sort_key_t **pile = sorter->pile;
+ struct task *task_stack = sorter->stack;
+
+ /* All zero-length keys are identical and therefore already sorted. */
+ if ((count == 0) || (length == 0))
+ return UDS_SUCCESS;
+
+ /* The initial task is to sort the entire length of all the keys. */
+ start = (struct task) {
+ .first_key = keys,
+ .last_key = &keys[count - 1],
+ .offset = 0,
+ .length = length,
+ };
+
+ if (count <= INSERTION_SORT_THRESHOLD) {
+ insertion_sort(start);
+ return UDS_SUCCESS;
+ }
+
+ if (count > sorter->count)
+ return UDS_INVALID_ARGUMENT;
+
+ /*
+ * Repeatedly consume a sorting task from the stack and process it, pushing new sub-tasks
+ * onto the stack for each radix-sorted pile. When all tasks and sub-tasks have been
+ * processed, the stack will be empty and all the keys in the starting task will be fully
+ * sorted.
+ */
+ for (*task_stack = start; task_stack >= sorter->stack; task_stack--) {
+ const struct task task = *task_stack;
+ struct task *insertion_task_list;
+ int result;
+ sort_key_t *fence;
+ sort_key_t *end;
+
+ measure_bins(task, bins);
+
+ /*
+ * Now that we know how large each bin is, generate pointers for each of the piles
+ * and push a new task to sort each pile by the next radix byte.
+ */
+ insertion_task_list = sorter->insertion_list;
+ result = push_bins(&task_stack, sorter->end_of_stack,
+ &insertion_task_list, pile, bins, task.first_key,
+ task.offset + 1, task.length - 1);
+ if (result != UDS_SUCCESS) {
+ memset(bins, 0, sizeof(*bins));
+ return result;
+ }
+
+ /* Now bins->used is zero again. */
+
+ /*
+ * Don't bother processing the last pile: when piles 0..N-1 are all in place, then
+ * pile N must also be in place.
+ */
+ end = task.last_key - bins->size[bins->last];
+ bins->size[bins->last] = 0;
+
+ for (fence = task.first_key; fence <= end; ) {
+ u8 bin;
+ sort_key_t key = *fence;
+
+ /*
+ * The radix byte of the key tells us which pile it belongs in. Swap it for
+ * an unprocessed item just below that pile, and repeat.
+ */
+ while (--pile[bin = key[task.offset]] > fence)
+ swap_keys(pile[bin], &key);
+
+ /*
+ * The pile reached the fence. Put the key at the bottom of that pile,
+ * completing it, and advance the fence to the next pile.
+ */
+ *fence = key;
+ fence += bins->size[bin];
+ bins->size[bin] = 0;
+ }
+
+ /* Now bins->size[] is all zero again. */
+
+ /*
+ * When the number of keys in a task gets small enough, it is faster to use an
+ * insertion sort than to keep subdividing into tiny piles.
+ */
+ while (--insertion_task_list >= sorter->insertion_list)
+ insertion_sort(*insertion_task_list);
+ }
+
+ return UDS_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/indexer/radix-sort.h b/drivers/md/dm-vdo/indexer/radix-sort.h
new file mode 100644
index 000000000000..812949bc2cee
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/radix-sort.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_RADIX_SORT_H
+#define UDS_RADIX_SORT_H
+
+/*
+ * Radix sort is implemented using an American Flag sort, an unstable, in-place 8-bit radix
+ * exchange sort. This is adapted from the algorithm in the paper by Peter M. McIlroy, Keith
+ * Bostic, and M. Douglas McIlroy, "Engineering Radix Sort".
+ *
+ * http://www.usenix.org/publications/compsystems/1993/win_mcilroy.pdf
+ */
+
+struct radix_sorter;
+
+int __must_check uds_make_radix_sorter(unsigned int count, struct radix_sorter **sorter);
+
+void uds_free_radix_sorter(struct radix_sorter *sorter);
+
+int __must_check uds_radix_sort(struct radix_sorter *sorter, const unsigned char *keys[],
+ unsigned int count, unsigned short length);
+
+#endif /* UDS_RADIX_SORT_H */
diff --git a/drivers/md/dm-vdo/indexer/sparse-cache.c b/drivers/md/dm-vdo/indexer/sparse-cache.c
new file mode 100644
index 000000000000..28920167827c
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/sparse-cache.c
@@ -0,0 +1,624 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "sparse-cache.h"
+
+#include <linux/cache.h>
+#include <linux/delay.h>
+#include <linux/dm-bufio.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "chapter-index.h"
+#include "config.h"
+#include "index.h"
+
+/*
+ * Since the cache is small, it is implemented as a simple array of cache entries. Searching for a
+ * specific virtual chapter is implemented as a linear search. The cache replacement policy is
+ * least-recently-used (LRU). Again, the small size of the cache allows the LRU order to be
+ * maintained by shifting entries in an array list.
+ *
+ * Changing the contents of the cache requires the coordinated participation of all zone threads
+ * via the careful use of barrier messages sent to all the index zones by the triage queue worker
+ * thread. The critical invariant for coordination is that the cache membership must not change
+ * between updates, so that calls to uds_sparse_cache_contains() from the zone threads all
+ * receive the same result for every virtual chapter number. To maintain that invariant,
+ * state changes such as "that virtual chapter is no longer in the volume" and "skip searching that
+ * chapter because it has had too many cache misses" are represented separately from the cache
+ * membership information (the virtual chapter number).
+ *
+ * As a result of this invariant, every zone thread is guaranteed to call
+ * uds_update_sparse_cache() exactly once to request a chapter that is not in the cache,
+ * and the serialization of the barrier requests from the triage queue ensures they will all
+ * request the same chapter number. This means the only synchronization we need can be provided by
+ * a pair of thread barriers used only in the uds_update_sparse_cache() call, providing a critical
+ * section where a single zone thread can drive the cache update while all the other zone threads
+ * are known to be blocked, waiting in the second barrier. Outside that critical section, all the
+ * zone threads implicitly hold a shared lock. Inside it, the thread for zone zero holds an
+ * exclusive lock. No other threads may access or modify the cache entries.
+ *
+ * Chapter statistics must only be modified by a single thread, which is also the zone zero thread.
+ * All fields that might be frequently updated by that thread are kept in separate cache-aligned
+ * structures so they will not cause cache contention via "false sharing" with the fields that are
+ * frequently accessed by all of the zone threads.
+ *
+ * The LRU order is managed independently by each zone thread, and each zone uses its own list for
+ * searching and cache membership queries. The zone zero list is used to decide which chapter to
+ * evict when the cache is updated, and its search list is copied to the other threads at that
+ * time.
+ *
+ * The virtual chapter number field of the cache entry is the single field indicating whether a
+ * chapter is a member of the cache or not. The value NO_CHAPTER is used to represent a null or
+ * undefined chapter number. When present in the virtual chapter number field of a
+ * cached_chapter_index, it indicates that the cache entry is dead, and all the other fields of
+ * that entry (other than immutable pointers to cache memory) are undefined and irrelevant. Any
+ * cache entry that is not marked as dead is fully defined and a member of the cache, and
+ * uds_sparse_cache_contains() will always return true for any virtual chapter number that appears
+ * in any of the cache entries.
+ *
+ * A chapter index that is a member of the cache may be excluded from searches between calls to
+ * uds_update_sparse_cache() in two different ways. First, when a chapter falls off the end of the
+ * volume, its virtual chapter number will be less than the oldest virtual chapter number. Since
+ * that chapter is no longer part of the volume, there's no point in continuing to search that
+ * chapter index. Once invalidated, that virtual chapter will still be considered a member of the
+ * cache, but it will no longer be searched for matching names.
+ *
+ * The second mechanism is a heuristic based on keeping track of the number of consecutive search
+ * misses in a given chapter index. Once that count exceeds a threshold, the skip_search flag will
+ * be set to true, causing the chapter to be skipped when searching the entire cache, but still
+ * allowing it to be found when searching for a hook in that specific chapter. Finding a hook will
+ * clear the skip_search flag, once again allowing the non-hook searches to use that cache entry.
+ * Again, regardless of the state of the skip_search flag, the virtual chapter must still be
+ * considered a member of the cache for uds_sparse_cache_contains().
+ */
+
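+/*
+ * A sketch of one update cycle with three zones: all three zone threads call
+ * uds_update_sparse_cache() with the same virtual chapter number and meet at
+ * the begin_update_barrier; zone zero alone purges its search list, reads the
+ * new chapter index, and copies its search list to the other two zones; then
+ * all three threads meet again at the end_update_barrier and resume searching.
+ */
+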
+#define SKIP_SEARCH_THRESHOLD 20000
+#define ZONE_ZERO 0
+
+/*
+ * These counters are essentially fields of the struct cached_chapter_index, but are segregated
+ * into this structure because they are frequently modified. They are grouped and aligned to keep
+ * them on different cache lines from the chapter fields that are accessed far more often than they
+ * are updated.
+ */
+struct __aligned(L1_CACHE_BYTES) cached_index_counters {
+ u64 consecutive_misses;
+};
+
+struct __aligned(L1_CACHE_BYTES) cached_chapter_index {
+ /*
+ * The virtual chapter number of the cached chapter index. NO_CHAPTER means this cache
+ * entry is unused. This field must only be modified in the critical section in
+ * uds_update_sparse_cache().
+ */
+ u64 virtual_chapter;
+
+ u32 index_pages_count;
+
+ /*
+ * These pointers are immutable during the life of the cache. The contents of the arrays
+ * change when the cache entry is replaced.
+ */
+ struct delta_index_page *index_pages;
+ struct dm_buffer **page_buffers;
+
+ /*
+	 * If set, skip the chapter when searching the entire cache. This flag is just a
+	 * performance optimization: it is mutable between cache updates, but it rarely
+	 * changes and is frequently accessed, so it is grouped with the immutable fields.
+ */
+ bool skip_search;
+
+ /*
+ * The cache-aligned counters change often and are placed at the end of the structure to
+ * prevent false sharing with the more stable fields above.
+ */
+ struct cached_index_counters counters;
+};
+
+/*
+ * A search_list represents an ordering of the sparse chapter index cache entry array, from most
+ * recently accessed to least recently accessed, which is the order in which the indexes should be
+ * searched and the reverse order in which they should be evicted from the cache.
+ *
+ * Cache entries that are dead or empty are kept at the end of the list, avoiding the need to even
+ * iterate over them to search, and ensuring that dead entries are replaced before any live entries
+ * are evicted.
+ *
+ * The search list is instantiated for each zone thread, avoiding any need for synchronization. The
+ * structure is allocated on a cache boundary to avoid false sharing of memory cache lines between
+ * zone threads.
+ */
+struct search_list {
+ u8 capacity;
+ u8 first_dead_entry;
+ struct cached_chapter_index *entries[];
+};
+
+struct threads_barrier {
+ /* Lock for this barrier object */
+ struct semaphore lock;
+ /* Semaphore for threads waiting at this barrier */
+ struct semaphore wait;
+ /* Number of threads which have arrived */
+ int arrived;
+ /* Total number of threads using this barrier */
+ int thread_count;
+};
+
+struct sparse_cache {
+ const struct index_geometry *geometry;
+ unsigned int capacity;
+ unsigned int zone_count;
+
+ unsigned int skip_threshold;
+ struct search_list *search_lists[MAX_ZONES];
+ struct cached_chapter_index **scratch_entries;
+
+ struct threads_barrier begin_update_barrier;
+ struct threads_barrier end_update_barrier;
+
+ struct cached_chapter_index chapters[];
+};
+
+static void initialize_threads_barrier(struct threads_barrier *barrier,
+ unsigned int thread_count)
+{
+ sema_init(&barrier->lock, 1);
+ barrier->arrived = 0;
+ barrier->thread_count = thread_count;
+ sema_init(&barrier->wait, 0);
+}
+
+static inline void __down(struct semaphore *semaphore)
+{
+ /*
+ * Do not use down(semaphore). Instead use down_interruptible so that
+ * we do not get 120 second stall messages in kern.log.
+ */
+ while (down_interruptible(semaphore) != 0) {
+ /*
+ * If we're called from a user-mode process (e.g., "dmsetup
+ * remove") while waiting for an operation that may take a
+ * while (e.g., UDS index save), and a signal is sent (SIGINT,
+ * SIGUSR2), then down_interruptible will not block. If that
+ * happens, sleep briefly to avoid keeping the CPU locked up in
+ * this loop. We could just call cond_resched, but then we'd
+ * still keep consuming CPU time slices and swamp other threads
+ * trying to do computational work.
+ */
+ fsleep(1000);
+ }
+}
+
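+/*
+ * A counting barrier: with thread_count = 4, for example, the first three
+ * callers block on the wait semaphore; the fourth posts it three times and
+ * resets the arrival count, so the barrier is immediately reusable.
+ */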
+static void enter_threads_barrier(struct threads_barrier *barrier)
+{
+ __down(&barrier->lock);
+ if (++barrier->arrived == barrier->thread_count) {
+ /* last thread */
+ int i;
+
+ for (i = 1; i < barrier->thread_count; i++)
+ up(&barrier->wait);
+
+ barrier->arrived = 0;
+ up(&barrier->lock);
+ } else {
+ up(&barrier->lock);
+ __down(&barrier->wait);
+ }
+}
+
+static int __must_check initialize_cached_chapter_index(struct cached_chapter_index *chapter,
+ const struct index_geometry *geometry)
+{
+ int result;
+
+ chapter->virtual_chapter = NO_CHAPTER;
+ chapter->index_pages_count = geometry->index_pages_per_chapter;
+
+ result = vdo_allocate(chapter->index_pages_count, struct delta_index_page,
+ __func__, &chapter->index_pages);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return vdo_allocate(chapter->index_pages_count, struct dm_buffer *,
+ "sparse index volume pages", &chapter->page_buffers);
+}
+
+static int __must_check make_search_list(struct sparse_cache *cache,
+ struct search_list **list_ptr)
+{
+ struct search_list *list;
+ unsigned int bytes;
+ u8 i;
+ int result;
+
+ bytes = (sizeof(struct search_list) +
+ (cache->capacity * sizeof(struct cached_chapter_index *)));
+ result = vdo_allocate_cache_aligned(bytes, "search list", &list);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ list->capacity = cache->capacity;
+ list->first_dead_entry = 0;
+
+ for (i = 0; i < list->capacity; i++)
+ list->entries[i] = &cache->chapters[i];
+
+ *list_ptr = list;
+ return UDS_SUCCESS;
+}
+
+int uds_make_sparse_cache(const struct index_geometry *geometry, unsigned int capacity,
+ unsigned int zone_count, struct sparse_cache **cache_ptr)
+{
+ int result;
+ unsigned int i;
+ struct sparse_cache *cache;
+ unsigned int bytes;
+
+ bytes = (sizeof(struct sparse_cache) + (capacity * sizeof(struct cached_chapter_index)));
+ result = vdo_allocate_cache_aligned(bytes, "sparse cache", &cache);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ cache->geometry = geometry;
+ cache->capacity = capacity;
+ cache->zone_count = zone_count;
+
+ /*
+ * Scale down the skip threshold since the cache only counts cache misses in zone zero, but
+ * requests are being handled in all zones.
+ */
+ cache->skip_threshold = (SKIP_SEARCH_THRESHOLD / zone_count);
+
+ initialize_threads_barrier(&cache->begin_update_barrier, zone_count);
+ initialize_threads_barrier(&cache->end_update_barrier, zone_count);
+
+ for (i = 0; i < capacity; i++) {
+ result = initialize_cached_chapter_index(&cache->chapters[i], geometry);
+ if (result != UDS_SUCCESS)
+ goto out;
+ }
+
+ for (i = 0; i < zone_count; i++) {
+ result = make_search_list(cache, &cache->search_lists[i]);
+ if (result != UDS_SUCCESS)
+ goto out;
+ }
+
+ /* purge_search_list() needs some temporary lists for sorting. */
+ result = vdo_allocate(capacity * 2, struct cached_chapter_index *,
+ "scratch entries", &cache->scratch_entries);
+ if (result != VDO_SUCCESS)
+ goto out;
+
+ *cache_ptr = cache;
+ return UDS_SUCCESS;
+out:
+ uds_free_sparse_cache(cache);
+ return result;
+}
+
+static inline void set_skip_search(struct cached_chapter_index *chapter,
+ bool skip_search)
+{
+ /* Check before setting to reduce cache line contention. */
+ if (READ_ONCE(chapter->skip_search) != skip_search)
+ WRITE_ONCE(chapter->skip_search, skip_search);
+}
+
+static void score_search_hit(struct cached_chapter_index *chapter)
+{
+ chapter->counters.consecutive_misses = 0;
+ set_skip_search(chapter, false);
+}
+
+static void score_search_miss(struct sparse_cache *cache,
+ struct cached_chapter_index *chapter)
+{
+ chapter->counters.consecutive_misses++;
+ if (chapter->counters.consecutive_misses > cache->skip_threshold)
+ set_skip_search(chapter, true);
+}
+
+static void release_cached_chapter_index(struct cached_chapter_index *chapter)
+{
+ unsigned int i;
+
+ chapter->virtual_chapter = NO_CHAPTER;
+ if (chapter->page_buffers == NULL)
+ return;
+
+ for (i = 0; i < chapter->index_pages_count; i++) {
+ if (chapter->page_buffers[i] != NULL)
+ dm_bufio_release(vdo_forget(chapter->page_buffers[i]));
+ }
+}
+
+void uds_free_sparse_cache(struct sparse_cache *cache)
+{
+ unsigned int i;
+
+ if (cache == NULL)
+ return;
+
+ vdo_free(cache->scratch_entries);
+
+ for (i = 0; i < cache->zone_count; i++)
+ vdo_free(cache->search_lists[i]);
+
+ for (i = 0; i < cache->capacity; i++) {
+ release_cached_chapter_index(&cache->chapters[i]);
+ vdo_free(cache->chapters[i].index_pages);
+ vdo_free(cache->chapters[i].page_buffers);
+ }
+
+ vdo_free(cache);
+}
+
+/*
+ * Take the indicated element of the search list and move it to the start, pushing the pointers
+ * previously before it back down the list.
+ */
+static inline void set_newest_entry(struct search_list *search_list, u8 index)
+{
+ struct cached_chapter_index *newest;
+
+ if (index > 0) {
+ newest = search_list->entries[index];
+ memmove(&search_list->entries[1], &search_list->entries[0],
+ index * sizeof(struct cached_chapter_index *));
+ search_list->entries[0] = newest;
+ }
+
+ /*
+ * This function may have moved a dead chapter to the front of the list for reuse, in which
+ * case the set of dead chapters becomes smaller.
+ */
+ if (search_list->first_dead_entry <= index)
+ search_list->first_dead_entry++;
+}
+
+bool uds_sparse_cache_contains(struct sparse_cache *cache, u64 virtual_chapter,
+ unsigned int zone_number)
+{
+ struct search_list *search_list;
+ struct cached_chapter_index *chapter;
+ u8 i;
+
+ /*
+ * The correctness of the barriers depends on the invariant that between calls to
+ * uds_update_sparse_cache(), the answers this function returns must never vary: the result
+ * for a given chapter must be identical across zones. That invariant must be maintained
+ * even if the chapter falls off the end of the volume, or if searching it is disabled
+ * because of too many search misses.
+ */
+ search_list = cache->search_lists[zone_number];
+ for (i = 0; i < search_list->first_dead_entry; i++) {
+ chapter = search_list->entries[i];
+
+ if (virtual_chapter == chapter->virtual_chapter) {
+ if (zone_number == ZONE_ZERO)
+ score_search_hit(chapter);
+
+ set_newest_entry(search_list, i);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*
+ * Re-sort cache entries into three sets (active, skippable, and dead) while maintaining the LRU
+ * ordering that already existed. This operation must only be called during the critical section in
+ * uds_update_sparse_cache().
+ */
+static void purge_search_list(struct search_list *search_list,
+ struct sparse_cache *cache, u64 oldest_virtual_chapter)
+{
+ struct cached_chapter_index **entries;
+ struct cached_chapter_index **skipped;
+ struct cached_chapter_index **dead;
+ struct cached_chapter_index *chapter;
+ unsigned int next_alive = 0;
+ unsigned int next_skipped = 0;
+ unsigned int next_dead = 0;
+ unsigned int i;
+
+ entries = &search_list->entries[0];
+ skipped = &cache->scratch_entries[0];
+ dead = &cache->scratch_entries[search_list->capacity];
+
+ for (i = 0; i < search_list->first_dead_entry; i++) {
+ chapter = search_list->entries[i];
+ if ((chapter->virtual_chapter < oldest_virtual_chapter) ||
+ (chapter->virtual_chapter == NO_CHAPTER))
+ dead[next_dead++] = chapter;
+ else if (chapter->skip_search)
+ skipped[next_skipped++] = chapter;
+ else
+ entries[next_alive++] = chapter;
+ }
+
+ memcpy(&entries[next_alive], skipped,
+ next_skipped * sizeof(struct cached_chapter_index *));
+ memcpy(&entries[next_alive + next_skipped], dead,
+ next_dead * sizeof(struct cached_chapter_index *));
+ search_list->first_dead_entry = next_alive + next_skipped;
+}
+
+static int __must_check cache_chapter_index(struct cached_chapter_index *chapter,
+ u64 virtual_chapter,
+ const struct volume *volume)
+{
+ int result;
+
+ release_cached_chapter_index(chapter);
+
+ result = uds_read_chapter_index_from_volume(volume, virtual_chapter,
+ chapter->page_buffers,
+ chapter->index_pages);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ chapter->counters.consecutive_misses = 0;
+ chapter->virtual_chapter = virtual_chapter;
+ chapter->skip_search = false;
+
+ return UDS_SUCCESS;
+}
+
+static inline void copy_search_list(const struct search_list *source,
+ struct search_list *target)
+{
+ *target = *source;
+ memcpy(target->entries, source->entries,
+ source->capacity * sizeof(struct cached_chapter_index *));
+}
+
+/*
+ * Update the sparse cache to contain a chapter index. This function must be called by all the zone
+ * threads with the same chapter number to correctly enter the thread barriers used to synchronize
+ * the cache updates.
+ */
+int uds_update_sparse_cache(struct index_zone *zone, u64 virtual_chapter)
+{
+ int result = UDS_SUCCESS;
+ const struct uds_index *index = zone->index;
+ struct sparse_cache *cache = index->volume->sparse_cache;
+
+ if (uds_sparse_cache_contains(cache, virtual_chapter, zone->id))
+ return UDS_SUCCESS;
+
+ /*
+ * Wait for every zone thread to reach its corresponding barrier request and invoke this
+ * function before starting to modify the cache.
+ */
+ enter_threads_barrier(&cache->begin_update_barrier);
+
+ /*
+ * This is the start of the critical section: the zone zero thread is captain, effectively
+ * holding an exclusive lock on the sparse cache. All the other zone threads must do
+ * nothing between the two barriers. They will wait at the end_update_barrier again for the
+ * captain to finish the update.
+ */
+
+ if (zone->id == ZONE_ZERO) {
+ unsigned int z;
+ struct search_list *list = cache->search_lists[ZONE_ZERO];
+
+ purge_search_list(list, cache, zone->oldest_virtual_chapter);
+
+ if (virtual_chapter >= index->oldest_virtual_chapter) {
+ set_newest_entry(list, list->capacity - 1);
+ result = cache_chapter_index(list->entries[0], virtual_chapter,
+ index->volume);
+ }
+
+ for (z = 1; z < cache->zone_count; z++)
+ copy_search_list(list, cache->search_lists[z]);
+ }
+
+ /*
+ * This is the end of the critical section. All cache invariants must have been restored.
+ */
+ enter_threads_barrier(&cache->end_update_barrier);
+ return result;
+}
+
+void uds_invalidate_sparse_cache(struct sparse_cache *cache)
+{
+ unsigned int i;
+
+ for (i = 0; i < cache->capacity; i++)
+ release_cached_chapter_index(&cache->chapters[i]);
+}
+
+static inline bool should_skip_chapter(struct cached_chapter_index *chapter,
+ u64 oldest_chapter, u64 requested_chapter)
+{
+ if ((chapter->virtual_chapter == NO_CHAPTER) ||
+ (chapter->virtual_chapter < oldest_chapter))
+ return true;
+
+ if (requested_chapter != NO_CHAPTER)
+ return requested_chapter != chapter->virtual_chapter;
+ else
+ return READ_ONCE(chapter->skip_search);
+}
+
+static int __must_check search_cached_chapter_index(struct cached_chapter_index *chapter,
+ const struct index_geometry *geometry,
+ const struct index_page_map *index_page_map,
+ const struct uds_record_name *name,
+ u16 *record_page_ptr)
+{
+ u32 physical_chapter =
+ uds_map_to_physical_chapter(geometry, chapter->virtual_chapter);
+ u32 index_page_number =
+ uds_find_index_page_number(index_page_map, name, physical_chapter);
+ struct delta_index_page *index_page =
+ &chapter->index_pages[index_page_number];
+
+ return uds_search_chapter_index_page(index_page, geometry, name,
+ record_page_ptr);
+}
+
+int uds_search_sparse_cache(struct index_zone *zone, const struct uds_record_name *name,
+ u64 *virtual_chapter_ptr, u16 *record_page_ptr)
+{
+ int result;
+ struct volume *volume = zone->index->volume;
+ struct sparse_cache *cache = volume->sparse_cache;
+ struct cached_chapter_index *chapter;
+ struct search_list *search_list;
+ u8 i;
+ /* Search the entire cache unless a specific chapter was requested. */
+ bool search_one = (*virtual_chapter_ptr != NO_CHAPTER);
+
+ *record_page_ptr = NO_CHAPTER_INDEX_ENTRY;
+ search_list = cache->search_lists[zone->id];
+ for (i = 0; i < search_list->first_dead_entry; i++) {
+ chapter = search_list->entries[i];
+
+ if (should_skip_chapter(chapter, zone->oldest_virtual_chapter,
+ *virtual_chapter_ptr))
+ continue;
+
+ result = search_cached_chapter_index(chapter, cache->geometry,
+ volume->index_page_map, name,
+ record_page_ptr);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (*record_page_ptr != NO_CHAPTER_INDEX_ENTRY) {
+ /*
+ * In theory, this might be a false match while a true match exists in
+ * another chapter, but that's a very rare case and not worth the extra
+ * search complexity.
+ */
+ set_newest_entry(search_list, i);
+ if (zone->id == ZONE_ZERO)
+ score_search_hit(chapter);
+
+ *virtual_chapter_ptr = chapter->virtual_chapter;
+ return UDS_SUCCESS;
+ }
+
+ if (zone->id == ZONE_ZERO)
+ score_search_miss(cache, chapter);
+
+ if (search_one)
+ break;
+ }
+
+ return UDS_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/indexer/sparse-cache.h b/drivers/md/dm-vdo/indexer/sparse-cache.h
new file mode 100644
index 000000000000..45e2dcf165b5
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/sparse-cache.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_SPARSE_CACHE_H
+#define UDS_SPARSE_CACHE_H
+
+#include "geometry.h"
+#include "indexer.h"
+
+/*
+ * The sparse cache is a cache of entire chapter indexes from sparse chapters used for searching
+ * for names after all other search paths have failed. It contains only complete chapter indexes;
+ * record pages from sparse chapters and single index pages used for resolving hooks are kept in
+ * the regular page cache in the volume.
+ *
+ * The most important property of this cache is the absence of synchronization for read operations.
+ * Safe concurrent access to the cache by the zone threads is controlled by the triage queue and
+ * the barrier requests it issues to the zone queues. The set of cached chapters does not and must
+ * not change between the carefully coordinated calls to uds_update_sparse_cache() from the zone
+ * threads. Outside of updates, every zone will get the same result when calling
+ * uds_sparse_cache_contains() as every other zone.
+ */
+
+struct index_zone;
+struct sparse_cache;
+
+int __must_check uds_make_sparse_cache(const struct index_geometry *geometry,
+ unsigned int capacity, unsigned int zone_count,
+ struct sparse_cache **cache_ptr);
+
+void uds_free_sparse_cache(struct sparse_cache *cache);
+
+bool uds_sparse_cache_contains(struct sparse_cache *cache, u64 virtual_chapter,
+ unsigned int zone_number);
+
+int __must_check uds_update_sparse_cache(struct index_zone *zone, u64 virtual_chapter);
+
+void uds_invalidate_sparse_cache(struct sparse_cache *cache);
+
+int __must_check uds_search_sparse_cache(struct index_zone *zone,
+ const struct uds_record_name *name,
+ u64 *virtual_chapter_ptr, u16 *record_page_ptr);
+
+#endif /* UDS_SPARSE_CACHE_H */
diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c
new file mode 100644
index 000000000000..12f954a0c532
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/volume-index.c
@@ -0,0 +1,1283 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+#include "volume-index.h"
+
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/cache.h>
+#include <linux/compiler.h>
+#include <linux/log2.h>
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+#include "thread-utils.h"
+
+#include "config.h"
+#include "geometry.h"
+#include "hash-utils.h"
+#include "indexer.h"
+
+/*
+ * The volume index is a combination of two separate subindexes, one containing sparse hook entries
+ * (retained for all chapters), and one containing the remaining entries (retained only for the
+ * dense chapters). If there are no sparse chapters, only the non-hook subindex is used, and it
+ * will contain all records for all chapters.
+ *
+ * The volume index is also divided into zones, with one thread operating on each zone. Each
+ * incoming request is dispatched to the appropriate thread, and then to the appropriate subindex.
+ * Each delta list is handled by a single zone. To ensure that the distribution of delta lists to
+ * zones doesn't underflow (leaving some zone with no delta lists), the minimum number of delta
+ * lists must be the square of the maximum zone count for both subindexes.
+ *
+ * Each subindex zone is a delta index where the payload is a chapter number. The volume index can
+ * compute the delta list number, address, and zone number from the record name in order to
+ * dispatch record handling to the correct structures.
+ *
+ * Most operations that use all the zones take place either before request processing is allowed,
+ * or after all requests have been flushed in order to shut down. The only multi-threaded operation
+ * supported during normal operation is the uds_lookup_volume_index_name() method, used to determine
+ * whether a new chapter should be loaded into the sparse index cache. This operation only uses the
+ * sparse hook subindex, and the zone mutexes are used to make this operation safe.
+ *
+ * There are three ways of expressing chapter numbers in the volume index: virtual, index, and
+ * rolling. The interface to the volume index uses virtual chapter numbers, which are 64 bits long.
+ * Internally the subindex stores only the minimal number of bits necessary by masking away the
+ * high-order bits. When the index needs to deal with ordering of index chapter numbers, as when
+ * flushing entries from older chapters, it rolls the index chapter number around so that the
+ * smallest one in use is mapped to 0. See convert_index_to_virtual() or flush_invalid_entries()
+ * for an example of this technique.
+ *
+ * For efficiency, when older chapter numbers become invalid, the index does not immediately remove
+ * the invalidated entries. Instead it lazily removes them from a given delta list the next time it
+ * walks that list during normal operation. Because of this, the index size must be increased
+ * somewhat to accommodate all the invalid entries that have not yet been removed. For the standard
+ * index sizes, this requires about 4 chapters of old entries per 1024 chapters of valid entries in
+ * the index.
+ */
+
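+/*
+ * A worked example of chapter rolling (hypothetical sizes): with chapter_bits = 10,
+ * the chapter mask is 0x3ff, so virtual chapter 5000 is stored as index chapter
+ * 5000 & 0x3ff = 904. If a zone's virtual_chapter_low is 4500, then
+ * convert_index_to_virtual() computes 4500 + ((904 - 4500) & 0x3ff) = 4500 + 500 =
+ * 5000, recovering the virtual chapter as long as the zone spans at most 1024
+ * chapters.
+ */
+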
+struct sub_index_parameters {
+ /* The number of bits in address mask */
+ u8 address_bits;
+ /* The number of bits in chapter number */
+ u8 chapter_bits;
+ /* The mean delta */
+ u32 mean_delta;
+ /* The number of delta lists */
+ u64 list_count;
+ /* The number of chapters used */
+ u32 chapter_count;
+ /* The number of bits per chapter */
+ size_t chapter_size_in_bits;
+ /* The number of bytes of delta list memory */
+ size_t memory_size;
+ /* The number of bytes the index should keep free at all times */
+ size_t target_free_bytes;
+};
+
+struct split_config {
+ /* The hook subindex configuration */
+ struct uds_configuration hook_config;
+ struct index_geometry hook_geometry;
+
+ /* The non-hook subindex configuration */
+ struct uds_configuration non_hook_config;
+ struct index_geometry non_hook_geometry;
+};
+
+struct chapter_range {
+ u32 chapter_start;
+ u32 chapter_count;
+};
+
+#define MAGIC_SIZE 8
+
+static const char MAGIC_START_5[] = "MI5-0005";
+
+struct sub_index_data {
+ char magic[MAGIC_SIZE]; /* MAGIC_START_5 */
+ u64 volume_nonce;
+ u64 virtual_chapter_low;
+ u64 virtual_chapter_high;
+ u32 first_list;
+ u32 list_count;
+};
+
+static const char MAGIC_START_6[] = "MI6-0001";
+
+struct volume_index_data {
+ char magic[MAGIC_SIZE]; /* MAGIC_START_6 */
+ u32 sparse_sample_rate;
+};
+
+static inline u32 extract_address(const struct volume_sub_index *sub_index,
+ const struct uds_record_name *name)
+{
+ return uds_extract_volume_index_bytes(name) & sub_index->address_mask;
+}
+
+static inline u32 extract_dlist_num(const struct volume_sub_index *sub_index,
+ const struct uds_record_name *name)
+{
+ u64 bits = uds_extract_volume_index_bytes(name);
+
+ return (bits >> sub_index->address_bits) % sub_index->list_count;
+}
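+
+/*
+ * Together, these two helpers split the volume index bytes of a record name into
+ * an address (the low address_bits) and a delta list number (the remaining high
+ * bits, taken modulo the list count).
+ */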
+
+static inline const struct volume_sub_index_zone *
+get_zone_for_record(const struct volume_index_record *record)
+{
+ return &record->sub_index->zones[record->zone_number];
+}
+
+static inline u64 convert_index_to_virtual(const struct volume_index_record *record,
+ u32 index_chapter)
+{
+ const struct volume_sub_index_zone *volume_index_zone = get_zone_for_record(record);
+ u32 rolling_chapter = ((index_chapter - volume_index_zone->virtual_chapter_low) &
+ record->sub_index->chapter_mask);
+
+ return volume_index_zone->virtual_chapter_low + rolling_chapter;
+}
+
+static inline u32 convert_virtual_to_index(const struct volume_sub_index *sub_index,
+ u64 virtual_chapter)
+{
+ return virtual_chapter & sub_index->chapter_mask;
+}
+
+static inline bool is_virtual_chapter_indexed(const struct volume_index_record *record,
+ u64 virtual_chapter)
+{
+ const struct volume_sub_index_zone *volume_index_zone = get_zone_for_record(record);
+
+ return ((virtual_chapter >= volume_index_zone->virtual_chapter_low) &&
+ (virtual_chapter <= volume_index_zone->virtual_chapter_high));
+}
+
+static inline bool has_sparse(const struct volume_index *volume_index)
+{
+ return volume_index->sparse_sample_rate > 0;
+}
+
+bool uds_is_volume_index_sample(const struct volume_index *volume_index,
+ const struct uds_record_name *name)
+{
+ if (!has_sparse(volume_index))
+ return false;
+
+ return (uds_extract_sampling_bytes(name) % volume_index->sparse_sample_rate) == 0;
+}
+
+static inline const struct volume_sub_index *
+get_volume_sub_index(const struct volume_index *volume_index,
+ const struct uds_record_name *name)
+{
+ return (uds_is_volume_index_sample(volume_index, name) ?
+ &volume_index->vi_hook :
+ &volume_index->vi_non_hook);
+}
+
+static unsigned int get_volume_sub_index_zone(const struct volume_sub_index *sub_index,
+ const struct uds_record_name *name)
+{
+ return extract_dlist_num(sub_index, name) / sub_index->delta_index.lists_per_zone;
+}
+
+unsigned int uds_get_volume_index_zone(const struct volume_index *volume_index,
+ const struct uds_record_name *name)
+{
+ return get_volume_sub_index_zone(get_volume_sub_index(volume_index, name), name);
+}
+
+#define DELTA_LIST_SIZE 256
+
+static int compute_volume_sub_index_parameters(const struct uds_configuration *config,
+ struct sub_index_parameters *params)
+{
+ u64 entries_in_volume_index, address_span;
+ u32 chapters_in_volume_index, invalid_chapters;
+ u32 rounded_chapters;
+ u64 delta_list_records;
+ u32 address_count;
+ u64 index_size_in_bits;
+ size_t expected_index_size;
+ u64 min_delta_lists = MAX_ZONES * MAX_ZONES;
+ struct index_geometry *geometry = config->geometry;
+ u64 records_per_chapter = geometry->records_per_chapter;
+
+ params->chapter_count = geometry->chapters_per_volume;
+ /*
+ * Make sure that the number of delta list records in the volume index does not change when
+ * the volume is reduced by one chapter. This preserves the mapping from name to volume
+ * index delta list.
+ */
+ rounded_chapters = params->chapter_count;
+ if (uds_is_reduced_index_geometry(geometry))
+ rounded_chapters += 1;
+ delta_list_records = records_per_chapter * rounded_chapters;
+ address_count = config->volume_index_mean_delta * DELTA_LIST_SIZE;
+ params->list_count = max(delta_list_records / DELTA_LIST_SIZE, min_delta_lists);
+ params->address_bits = bits_per(address_count - 1);
+ params->chapter_bits = bits_per(rounded_chapters - 1);
+ if ((u32) params->list_count != params->list_count) {
+ return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
+ "cannot initialize volume index with %llu delta lists",
+ (unsigned long long) params->list_count);
+ }
+
+ if (params->address_bits > 31) {
+ return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
+ "cannot initialize volume index with %u address bits",
+ params->address_bits);
+ }
+
+ /*
+ * The probability that a given delta list is not touched during the writing of an entire
+ * chapter is:
+ *
+ * double p_not_touched = pow((double) (params->list_count - 1) / params->list_count,
+ * records_per_chapter);
+ *
+ * For the standard index sizes, about 78% of the delta lists are not touched, and
+ * therefore contain old index entries that have not been eliminated by the lazy LRU
+ * processing. Then the number of old index entries that accumulate over the entire index,
+ * in terms of full chapters worth of entries, is:
+ *
+ * double invalid_chapters = p_not_touched / (1.0 - p_not_touched);
+ *
+ * For the standard index sizes, the index needs about 3.5 chapters of space for the old
+ * entries in a 1024 chapter index, so round this up to use 4 chapters per 1024 chapters in
+ * the index.
+ */
+ invalid_chapters = max(rounded_chapters / 256, 2U);
+ chapters_in_volume_index = rounded_chapters + invalid_chapters;
+ entries_in_volume_index = records_per_chapter * chapters_in_volume_index;
+
+ address_span = params->list_count << params->address_bits;
+ params->mean_delta = address_span / entries_in_volume_index;
+
+ /*
+ * Compute the expected size of a full index, then set the total memory to be 6% larger
+ * than that expected size. This number should be large enough that there are not many
+ * rebalances when the index is full.
+ */
+ params->chapter_size_in_bits = uds_compute_delta_index_size(records_per_chapter,
+ params->mean_delta,
+ params->chapter_bits);
+ index_size_in_bits = params->chapter_size_in_bits * chapters_in_volume_index;
+ expected_index_size = index_size_in_bits / BITS_PER_BYTE;
+ params->memory_size = expected_index_size * 106 / 100;
+
+ params->target_free_bytes = expected_index_size / 20;
+ return UDS_SUCCESS;
+}
+
+static void uninitialize_volume_sub_index(struct volume_sub_index *sub_index)
+{
+ vdo_free(vdo_forget(sub_index->flush_chapters));
+ vdo_free(vdo_forget(sub_index->zones));
+ uds_uninitialize_delta_index(&sub_index->delta_index);
+}
+
+void uds_free_volume_index(struct volume_index *volume_index)
+{
+ if (volume_index == NULL)
+ return;
+
+ if (volume_index->zones != NULL)
+ vdo_free(vdo_forget(volume_index->zones));
+
+ uninitialize_volume_sub_index(&volume_index->vi_non_hook);
+ uninitialize_volume_sub_index(&volume_index->vi_hook);
+ vdo_free(volume_index);
+}
+
+static int compute_volume_sub_index_save_bytes(const struct uds_configuration *config,
+ size_t *bytes)
+{
+ struct sub_index_parameters params = { .address_bits = 0 };
+ int result;
+
+ result = compute_volume_sub_index_parameters(config, &params);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ *bytes = (sizeof(struct sub_index_data) + params.list_count * sizeof(u64) +
+ uds_compute_delta_index_save_bytes(params.list_count,
+ params.memory_size));
+ return UDS_SUCCESS;
+}
+
+/* This function is only useful if the configuration includes sparse chapters. */
+static void split_configuration(const struct uds_configuration *config,
+ struct split_config *split)
+{
+ u64 sample_rate, sample_records;
+ u64 dense_chapters, sparse_chapters;
+
+ /* Start with copies of the base configuration. */
+ split->hook_config = *config;
+ split->hook_geometry = *config->geometry;
+ split->hook_config.geometry = &split->hook_geometry;
+ split->non_hook_config = *config;
+ split->non_hook_geometry = *config->geometry;
+ split->non_hook_config.geometry = &split->non_hook_geometry;
+
+ sample_rate = config->sparse_sample_rate;
+ sparse_chapters = config->geometry->sparse_chapters_per_volume;
+ dense_chapters = config->geometry->chapters_per_volume - sparse_chapters;
+ sample_records = config->geometry->records_per_chapter / sample_rate;
+
+ /* Adjust the number of records indexed for each chapter. */
+ split->hook_geometry.records_per_chapter = sample_records;
+ split->non_hook_geometry.records_per_chapter -= sample_records;
+
+ /* Adjust the number of chapters indexed. */
+ split->hook_geometry.sparse_chapters_per_volume = 0;
+ split->non_hook_geometry.sparse_chapters_per_volume = 0;
+ split->non_hook_geometry.chapters_per_volume = dense_chapters;
+}
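+
+/*
+ * For example (hypothetical geometry): with sparse_sample_rate = 32 and 65536
+ * records per chapter, the hook subindex indexes 65536 / 32 = 2048 records per
+ * chapter for every chapter, and the non-hook subindex indexes the remaining
+ * 63488 records per chapter for the dense chapters only.
+ */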
+
+static int compute_volume_index_save_bytes(const struct uds_configuration *config,
+ size_t *bytes)
+{
+ size_t hook_bytes, non_hook_bytes;
+ struct split_config split;
+ int result;
+
+ if (!uds_is_sparse_index_geometry(config->geometry))
+ return compute_volume_sub_index_save_bytes(config, bytes);
+
+ split_configuration(config, &split);
+ result = compute_volume_sub_index_save_bytes(&split.hook_config, &hook_bytes);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = compute_volume_sub_index_save_bytes(&split.non_hook_config,
+ &non_hook_bytes);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ *bytes = sizeof(struct volume_index_data) + hook_bytes + non_hook_bytes;
+ return UDS_SUCCESS;
+}
+
+int uds_compute_volume_index_save_blocks(const struct uds_configuration *config,
+ size_t block_size, u64 *block_count)
+{
+ size_t bytes;
+ int result;
+
+ result = compute_volume_index_save_bytes(config, &bytes);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ bytes += sizeof(struct delta_list_save_info);
+ *block_count = DIV_ROUND_UP(bytes, block_size) + MAX_ZONES;
+ return UDS_SUCCESS;
+}
+
+/* Flush invalid entries while walking the delta list. */
+static inline int flush_invalid_entries(struct volume_index_record *record,
+ struct chapter_range *flush_range,
+ u32 *next_chapter_to_invalidate)
+{
+ int result;
+
+ result = uds_next_delta_index_entry(&record->delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ while (!record->delta_entry.at_end) {
+ u32 index_chapter = uds_get_delta_entry_value(&record->delta_entry);
+ u32 relative_chapter = ((index_chapter - flush_range->chapter_start) &
+ record->sub_index->chapter_mask);
+
+ if (likely(relative_chapter >= flush_range->chapter_count)) {
+ if (relative_chapter < *next_chapter_to_invalidate)
+ *next_chapter_to_invalidate = relative_chapter;
+ break;
+ }
+
+ result = uds_remove_delta_index_entry(&record->delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ return UDS_SUCCESS;
+}
+
+/* Find the matching record, or the list offset where the record would go. */
+static int get_volume_index_entry(struct volume_index_record *record, u32 list_number,
+ u32 key, struct chapter_range *flush_range)
+{
+ struct volume_index_record other_record;
+ const struct volume_sub_index *sub_index = record->sub_index;
+ u32 next_chapter_to_invalidate = sub_index->chapter_mask;
+ int result;
+
+ result = uds_start_delta_index_search(&sub_index->delta_index, list_number, 0,
+ &record->delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ do {
+ result = flush_invalid_entries(record, flush_range,
+ &next_chapter_to_invalidate);
+ if (result != UDS_SUCCESS)
+ return result;
+ } while (!record->delta_entry.at_end && (key > record->delta_entry.key));
+
+ result = uds_remember_delta_index_offset(&record->delta_entry);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ /* Check any collision records for a more precise match. */
+ other_record = *record;
+ if (!other_record.delta_entry.at_end && (key == other_record.delta_entry.key)) {
+ for (;;) {
+ u8 collision_name[UDS_RECORD_NAME_SIZE];
+
+ result = flush_invalid_entries(&other_record, flush_range,
+ &next_chapter_to_invalidate);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (other_record.delta_entry.at_end ||
+ !other_record.delta_entry.is_collision)
+ break;
+
+ result = uds_get_delta_entry_collision(&other_record.delta_entry,
+ collision_name);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (memcmp(collision_name, record->name, UDS_RECORD_NAME_SIZE) == 0) {
+ *record = other_record;
+ break;
+ }
+ }
+ }
+
+ while (!other_record.delta_entry.at_end) {
+ result = flush_invalid_entries(&other_record, flush_range,
+ &next_chapter_to_invalidate);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ next_chapter_to_invalidate += flush_range->chapter_start;
+ next_chapter_to_invalidate &= sub_index->chapter_mask;
+ flush_range->chapter_start = next_chapter_to_invalidate;
+ flush_range->chapter_count = 0;
+ return UDS_SUCCESS;
+}
+
+static int get_volume_sub_index_record(struct volume_sub_index *sub_index,
+ const struct uds_record_name *name,
+ struct volume_index_record *record)
+{
+ int result;
+ const struct volume_sub_index_zone *volume_index_zone;
+ u32 address = extract_address(sub_index, name);
+ u32 delta_list_number = extract_dlist_num(sub_index, name);
+ u64 flush_chapter = sub_index->flush_chapters[delta_list_number];
+
+ record->sub_index = sub_index;
+ record->mutex = NULL;
+ record->name = name;
+ record->zone_number = delta_list_number / sub_index->delta_index.lists_per_zone;
+ volume_index_zone = get_zone_for_record(record);
+
+ if (flush_chapter < volume_index_zone->virtual_chapter_low) {
+ struct chapter_range range;
+ u64 flush_count = volume_index_zone->virtual_chapter_low - flush_chapter;
+
+ range.chapter_start = convert_virtual_to_index(sub_index, flush_chapter);
+ range.chapter_count = (flush_count > sub_index->chapter_mask ?
+ sub_index->chapter_mask + 1 :
+ flush_count);
+ result = get_volume_index_entry(record, delta_list_number, address,
+ &range);
+ flush_chapter = convert_index_to_virtual(record, range.chapter_start);
+ if (flush_chapter > volume_index_zone->virtual_chapter_high)
+ flush_chapter = volume_index_zone->virtual_chapter_high;
+ sub_index->flush_chapters[delta_list_number] = flush_chapter;
+ } else {
+ result = uds_get_delta_index_entry(&sub_index->delta_index,
+ delta_list_number, address,
+ name->name, &record->delta_entry);
+ }
+
+ if (result != UDS_SUCCESS)
+ return result;
+
+ record->is_found =
+ (!record->delta_entry.at_end && (record->delta_entry.key == address));
+ if (record->is_found) {
+ u32 index_chapter = uds_get_delta_entry_value(&record->delta_entry);
+
+ record->virtual_chapter = convert_index_to_virtual(record, index_chapter);
+ }
+
+ record->is_collision = record->delta_entry.is_collision;
+ return UDS_SUCCESS;
+}
+
+int uds_get_volume_index_record(struct volume_index *volume_index,
+ const struct uds_record_name *name,
+ struct volume_index_record *record)
+{
+ int result;
+
+ if (uds_is_volume_index_sample(volume_index, name)) {
+ /*
+ * Other threads cannot be allowed to call uds_lookup_volume_index_name() while
+ * this thread is finding the volume index record. Due to the lazy LRU flushing of
+ * the volume index, uds_get_volume_index_record() is not a read-only operation.
+ */
+ unsigned int zone =
+ get_volume_sub_index_zone(&volume_index->vi_hook, name);
+ struct mutex *mutex = &volume_index->zones[zone].hook_mutex;
+
+ mutex_lock(mutex);
+ result = get_volume_sub_index_record(&volume_index->vi_hook, name,
+ record);
+ mutex_unlock(mutex);
+ /* Remember the mutex so that other operations on the index record can use it. */
+ record->mutex = mutex;
+ } else {
+ result = get_volume_sub_index_record(&volume_index->vi_non_hook, name,
+ record);
+ }
+
+ return result;
+}
+
+int uds_put_volume_index_record(struct volume_index_record *record, u64 virtual_chapter)
+{
+ int result;
+ u32 address;
+ const struct volume_sub_index *sub_index = record->sub_index;
+
+ if (!is_virtual_chapter_indexed(record, virtual_chapter)) {
+ u64 low = get_zone_for_record(record)->virtual_chapter_low;
+ u64 high = get_zone_for_record(record)->virtual_chapter_high;
+
+ return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
+ "cannot put record into chapter number %llu that is out of the valid range %llu to %llu",
+ (unsigned long long) virtual_chapter,
+ (unsigned long long) low,
+ (unsigned long long) high);
+ }
+ address = extract_address(sub_index, record->name);
+ if (unlikely(record->mutex != NULL))
+ mutex_lock(record->mutex);
+ result = uds_put_delta_index_entry(&record->delta_entry, address,
+ convert_virtual_to_index(sub_index,
+ virtual_chapter),
+ record->is_found ? record->name->name : NULL);
+ if (unlikely(record->mutex != NULL))
+ mutex_unlock(record->mutex);
+ switch (result) {
+ case UDS_SUCCESS:
+ record->virtual_chapter = virtual_chapter;
+ record->is_collision = record->delta_entry.is_collision;
+ record->is_found = true;
+ break;
+ case UDS_OVERFLOW:
+ vdo_log_ratelimit(vdo_log_warning_strerror, UDS_OVERFLOW,
+ "Volume index entry dropped due to overflow condition");
+ uds_log_delta_index_entry(&record->delta_entry);
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+int uds_remove_volume_index_record(struct volume_index_record *record)
+{
+ int result;
+
+ if (!record->is_found)
+ return vdo_log_warning_strerror(UDS_BAD_STATE,
+ "illegal operation on new record");
+
+ /* Mark the record so that it cannot be used again */
+ record->is_found = false;
+ if (unlikely(record->mutex != NULL))
+ mutex_lock(record->mutex);
+ result = uds_remove_delta_index_entry(&record->delta_entry);
+ if (unlikely(record->mutex != NULL))
+ mutex_unlock(record->mutex);
+ return result;
+}
+
+static void set_volume_sub_index_zone_open_chapter(struct volume_sub_index *sub_index,
+ unsigned int zone_number,
+ u64 virtual_chapter)
+{
+ u64 used_bits = 0;
+ struct volume_sub_index_zone *zone = &sub_index->zones[zone_number];
+ struct delta_zone *delta_zone;
+ u32 i;
+
+ zone->virtual_chapter_low = (virtual_chapter >= sub_index->chapter_count ?
+ virtual_chapter - sub_index->chapter_count + 1 :
+ 0);
+ zone->virtual_chapter_high = virtual_chapter;
+
+ /* Check to see if the new zone data is too large. */
+ delta_zone = &sub_index->delta_index.delta_zones[zone_number];
+ for (i = 1; i <= delta_zone->list_count; i++)
+ used_bits += delta_zone->delta_lists[i].size;
+
+ if (used_bits > sub_index->max_zone_bits) {
+ /* Expire enough chapters to free the desired space. */
+ u64 expire_count =
+ 1 + (used_bits - sub_index->max_zone_bits) / sub_index->chapter_zone_bits;
+
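+ /*
+ * For example, if used_bits exceeds max_zone_bits by two and a half
+ * chapters' worth of bits, expire_count is 1 + 2 = 3.
+ */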
+ if (expire_count == 1) {
+ vdo_log_ratelimit(vdo_log_info,
+ "zone %u: At chapter %llu, expiring chapter %llu early",
+ zone_number,
+ (unsigned long long) virtual_chapter,
+ (unsigned long long) zone->virtual_chapter_low);
+ zone->early_flushes++;
+ zone->virtual_chapter_low++;
+ } else {
+ u64 first_expired = zone->virtual_chapter_low;
+
+ if (first_expired + expire_count < zone->virtual_chapter_high) {
+ zone->early_flushes += expire_count;
+ zone->virtual_chapter_low += expire_count;
+ } else {
+ zone->early_flushes +=
+ zone->virtual_chapter_high - zone->virtual_chapter_low;
+ zone->virtual_chapter_low = zone->virtual_chapter_high;
+ }
+ vdo_log_ratelimit(vdo_log_info,
+ "zone %u: At chapter %llu, expiring chapters %llu to %llu early",
+ zone_number,
+ (unsigned long long) virtual_chapter,
+ (unsigned long long) first_expired,
+ (unsigned long long) zone->virtual_chapter_low - 1);
+ }
+ }
+}
+
+void uds_set_volume_index_zone_open_chapter(struct volume_index *volume_index,
+ unsigned int zone_number,
+ u64 virtual_chapter)
+{
+ struct mutex *mutex = &volume_index->zones[zone_number].hook_mutex;
+
+ set_volume_sub_index_zone_open_chapter(&volume_index->vi_non_hook, zone_number,
+ virtual_chapter);
+
+ /*
+ * Other threads cannot be allowed to call uds_lookup_volume_index_name() while the open
+ * chapter number is changing.
+ */
+ if (has_sparse(volume_index)) {
+ mutex_lock(mutex);
+ set_volume_sub_index_zone_open_chapter(&volume_index->vi_hook,
+ zone_number, virtual_chapter);
+ mutex_unlock(mutex);
+ }
+}
+
+/*
+ * Set the newest open chapter number for the index, while also advancing the oldest valid chapter
+ * number.
+ */
+void uds_set_volume_index_open_chapter(struct volume_index *volume_index,
+ u64 virtual_chapter)
+{
+ unsigned int zone;
+
+ for (zone = 0; zone < volume_index->zone_count; zone++)
+ uds_set_volume_index_zone_open_chapter(volume_index, zone, virtual_chapter);
+}
+
+int uds_set_volume_index_record_chapter(struct volume_index_record *record,
+ u64 virtual_chapter)
+{
+ const struct volume_sub_index *sub_index = record->sub_index;
+ int result;
+
+ if (!record->is_found)
+ return vdo_log_warning_strerror(UDS_BAD_STATE,
+ "illegal operation on new record");
+
+ if (!is_virtual_chapter_indexed(record, virtual_chapter)) {
+ u64 low = get_zone_for_record(record)->virtual_chapter_low;
+ u64 high = get_zone_for_record(record)->virtual_chapter_high;
+
+ return vdo_log_warning_strerror(UDS_INVALID_ARGUMENT,
+ "cannot set chapter number %llu that is out of the valid range %llu to %llu",
+ (unsigned long long) virtual_chapter,
+ (unsigned long long) low,
+ (unsigned long long) high);
+ }
+
+ if (unlikely(record->mutex != NULL))
+ mutex_lock(record->mutex);
+ result = uds_set_delta_entry_value(&record->delta_entry,
+ convert_virtual_to_index(sub_index,
+ virtual_chapter));
+ if (unlikely(record->mutex != NULL))
+ mutex_unlock(record->mutex);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ record->virtual_chapter = virtual_chapter;
+ return UDS_SUCCESS;
+}
+
+static u64 lookup_volume_sub_index_name(const struct volume_sub_index *sub_index,
+ const struct uds_record_name *name)
+{
+ int result;
+ u32 address = extract_address(sub_index, name);
+ u32 delta_list_number = extract_dlist_num(sub_index, name);
+ unsigned int zone_number = get_volume_sub_index_zone(sub_index, name);
+ const struct volume_sub_index_zone *zone = &sub_index->zones[zone_number];
+ u64 virtual_chapter;
+ u32 index_chapter;
+ u32 rolling_chapter;
+ struct delta_index_entry delta_entry;
+
+ result = uds_get_delta_index_entry(&sub_index->delta_index, delta_list_number,
+ address, name->name, &delta_entry);
+ if (result != UDS_SUCCESS)
+ return NO_CHAPTER;
+
+ if (delta_entry.at_end || (delta_entry.key != address))
+ return NO_CHAPTER;
+
+ index_chapter = uds_get_delta_entry_value(&delta_entry);
+ rolling_chapter = (index_chapter - zone->virtual_chapter_low) & sub_index->chapter_mask;
+
+ virtual_chapter = zone->virtual_chapter_low + rolling_chapter;
+ if (virtual_chapter > zone->virtual_chapter_high)
+ return NO_CHAPTER;
+
+ return virtual_chapter;
+}
+
+/* Do a read-only lookup of the record name for sparse cache management. */
+u64 uds_lookup_volume_index_name(const struct volume_index *volume_index,
+ const struct uds_record_name *name)
+{
+ unsigned int zone_number = uds_get_volume_index_zone(volume_index, name);
+ struct mutex *mutex = &volume_index->zones[zone_number].hook_mutex;
+ u64 virtual_chapter;
+
+ if (!uds_is_volume_index_sample(volume_index, name))
+ return NO_CHAPTER;
+
+ mutex_lock(mutex);
+ virtual_chapter = lookup_volume_sub_index_name(&volume_index->vi_hook, name);
+ mutex_unlock(mutex);
+
+ return virtual_chapter;
+}
+
+static void abort_restoring_volume_sub_index(struct volume_sub_index *sub_index)
+{
+ uds_reset_delta_index(&sub_index->delta_index);
+}
+
+static void abort_restoring_volume_index(struct volume_index *volume_index)
+{
+ abort_restoring_volume_sub_index(&volume_index->vi_non_hook);
+ if (has_sparse(volume_index))
+ abort_restoring_volume_sub_index(&volume_index->vi_hook);
+}
+
+static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
+ struct buffered_reader **readers,
+ unsigned int reader_count)
+{
+ unsigned int z;
+ int result;
+ u64 virtual_chapter_low = 0, virtual_chapter_high = 0;
+ unsigned int i;
+
+ for (i = 0; i < reader_count; i++) {
+ struct sub_index_data header;
+ u8 buffer[sizeof(struct sub_index_data)];
+ size_t offset = 0;
+ u32 j;
+
+ result = uds_read_from_buffered_reader(readers[i], buffer,
+ sizeof(buffer));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read volume index header");
+ }
+
+ memcpy(&header.magic, buffer, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ decode_u64_le(buffer, &offset, &header.volume_nonce);
+ decode_u64_le(buffer, &offset, &header.virtual_chapter_low);
+ decode_u64_le(buffer, &offset, &header.virtual_chapter_high);
+ decode_u32_le(buffer, &offset, &header.first_list);
+ decode_u32_le(buffer, &offset, &header.list_count);
+
+ result = VDO_ASSERT(offset == sizeof(buffer),
+ "%zu bytes decoded of %zu expected", offset,
+ sizeof(buffer));
+ if (result != VDO_SUCCESS)
+ result = UDS_CORRUPT_DATA;
+
+ if (memcmp(header.magic, MAGIC_START_5, MAGIC_SIZE) != 0) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "volume index file had bad magic number");
+ }
+
+ if (sub_index->volume_nonce == 0) {
+ sub_index->volume_nonce = header.volume_nonce;
+ } else if (header.volume_nonce != sub_index->volume_nonce) {
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "volume index volume nonce incorrect");
+ }
+
+ if (i == 0) {
+ virtual_chapter_low = header.virtual_chapter_low;
+ virtual_chapter_high = header.virtual_chapter_high;
+ } else if (virtual_chapter_high != header.virtual_chapter_high) {
+ u64 low = header.virtual_chapter_low;
+ u64 high = header.virtual_chapter_high;
+
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "Inconsistent volume index zone files: Chapter range is [%llu,%llu], chapter range %d is [%llu,%llu]",
+ (unsigned long long) virtual_chapter_low,
+ (unsigned long long) virtual_chapter_high,
+ i, (unsigned long long) low,
+ (unsigned long long) high);
+ } else if (virtual_chapter_low < header.virtual_chapter_low) {
+ virtual_chapter_low = header.virtual_chapter_low;
+ }
+
+ for (j = 0; j < header.list_count; j++) {
+ u8 decoded[sizeof(u64)];
+
+ result = uds_read_from_buffered_reader(readers[i], decoded,
+ sizeof(u64));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read volume index flush ranges");
+ }
+
+ sub_index->flush_chapters[header.first_list + j] =
+ get_unaligned_le64(decoded);
+ }
+ }
+
+ for (z = 0; z < sub_index->zone_count; z++) {
+ memset(&sub_index->zones[z], 0, sizeof(struct volume_sub_index_zone));
+ sub_index->zones[z].virtual_chapter_low = virtual_chapter_low;
+ sub_index->zones[z].virtual_chapter_high = virtual_chapter_high;
+ }
+
+ result = uds_start_restoring_delta_index(&sub_index->delta_index, readers,
+ reader_count);
+ if (result != UDS_SUCCESS)
+ return vdo_log_warning_strerror(result, "restoring delta index failed");
+
+ return UDS_SUCCESS;
+}
+
+static int start_restoring_volume_index(struct volume_index *volume_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count)
+{
+ unsigned int i;
+ int result;
+
+ if (!has_sparse(volume_index)) {
+ return start_restoring_volume_sub_index(&volume_index->vi_non_hook,
+ buffered_readers, reader_count);
+ }
+
+ for (i = 0; i < reader_count; i++) {
+ struct volume_index_data header;
+ u8 buffer[sizeof(struct volume_index_data)];
+ size_t offset = 0;
+
+ result = uds_read_from_buffered_reader(buffered_readers[i], buffer,
+ sizeof(buffer));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to read volume index header");
+ }
+
+ memcpy(&header.magic, buffer, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ decode_u32_le(buffer, &offset, &header.sparse_sample_rate);
+
+ result = VDO_ASSERT(offset == sizeof(buffer),
+ "%zu bytes decoded of %zu expected", offset,
+ sizeof(buffer));
+ if (result != VDO_SUCCESS)
+ result = UDS_CORRUPT_DATA;
+
+ if (memcmp(header.magic, MAGIC_START_6, MAGIC_SIZE) != 0)
+ return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "volume index file had bad magic number");
+
+ if (i == 0) {
+ volume_index->sparse_sample_rate = header.sparse_sample_rate;
+ } else if (volume_index->sparse_sample_rate != header.sparse_sample_rate) {
+ vdo_log_warning_strerror(UDS_CORRUPT_DATA,
+ "Inconsistent sparse sample rate in delta index zone files: %u vs. %u",
+ volume_index->sparse_sample_rate,
+ header.sparse_sample_rate);
+ return UDS_CORRUPT_DATA;
+ }
+ }
+
+ result = start_restoring_volume_sub_index(&volume_index->vi_non_hook,
+ buffered_readers, reader_count);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return start_restoring_volume_sub_index(&volume_index->vi_hook, buffered_readers,
+ reader_count);
+}
+
+static int finish_restoring_volume_sub_index(struct volume_sub_index *sub_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count)
+{
+ return uds_finish_restoring_delta_index(&sub_index->delta_index,
+ buffered_readers, reader_count);
+}
+
+static int finish_restoring_volume_index(struct volume_index *volume_index,
+ struct buffered_reader **buffered_readers,
+ unsigned int reader_count)
+{
+ int result;
+
+ result = finish_restoring_volume_sub_index(&volume_index->vi_non_hook,
+ buffered_readers, reader_count);
+ if ((result == UDS_SUCCESS) && has_sparse(volume_index)) {
+ result = finish_restoring_volume_sub_index(&volume_index->vi_hook,
+ buffered_readers,
+ reader_count);
+ }
+
+ return result;
+}
+
+int uds_load_volume_index(struct volume_index *volume_index,
+ struct buffered_reader **readers, unsigned int reader_count)
+{
+ int result;
+
+ /* Start by reading the header section of the stream. */
+ result = start_restoring_volume_index(volume_index, readers, reader_count);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = finish_restoring_volume_index(volume_index, readers, reader_count);
+ if (result != UDS_SUCCESS) {
+ abort_restoring_volume_index(volume_index);
+ return result;
+ }
+
+ /* Check the final guard lists to make sure there is no extra data. */
+ result = uds_check_guard_delta_lists(readers, reader_count);
+ if (result != UDS_SUCCESS)
+ abort_restoring_volume_index(volume_index);
+
+ return result;
+}
+
+static int start_saving_volume_sub_index(const struct volume_sub_index *sub_index,
+ unsigned int zone_number,
+ struct buffered_writer *buffered_writer)
+{
+ int result;
+ struct volume_sub_index_zone *volume_index_zone = &sub_index->zones[zone_number];
+ u32 first_list = sub_index->delta_index.delta_zones[zone_number].first_list;
+ u32 list_count = sub_index->delta_index.delta_zones[zone_number].list_count;
+ u8 buffer[sizeof(struct sub_index_data)];
+ size_t offset = 0;
+ u32 i;
+
+ memcpy(buffer, MAGIC_START_5, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ encode_u64_le(buffer, &offset, sub_index->volume_nonce);
+ encode_u64_le(buffer, &offset, volume_index_zone->virtual_chapter_low);
+ encode_u64_le(buffer, &offset, volume_index_zone->virtual_chapter_high);
+ encode_u32_le(buffer, &offset, first_list);
+ encode_u32_le(buffer, &offset, list_count);
+
+ result = VDO_ASSERT(offset == sizeof(struct sub_index_data),
+ "%zu bytes of config written, of %zu expected", offset,
+ sizeof(struct sub_index_data));
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_write_to_buffered_writer(buffered_writer, buffer, offset);
+ if (result != UDS_SUCCESS)
+ return vdo_log_warning_strerror(result,
+ "failed to write volume index header");
+
+ for (i = 0; i < list_count; i++) {
+ u8 encoded[sizeof(u64)];
+
+ put_unaligned_le64(sub_index->flush_chapters[first_list + i], &encoded);
+ result = uds_write_to_buffered_writer(buffered_writer, encoded,
+ sizeof(u64));
+ if (result != UDS_SUCCESS) {
+ return vdo_log_warning_strerror(result,
+ "failed to write volume index flush ranges");
+ }
+ }
+
+ return uds_start_saving_delta_index(&sub_index->delta_index, zone_number,
+ buffered_writer);
+}
+
+static int start_saving_volume_index(const struct volume_index *volume_index,
+ unsigned int zone_number,
+ struct buffered_writer *writer)
+{
+ u8 buffer[sizeof(struct volume_index_data)];
+ size_t offset = 0;
+ int result;
+
+ if (!has_sparse(volume_index)) {
+ return start_saving_volume_sub_index(&volume_index->vi_non_hook,
+ zone_number, writer);
+ }
+
+ memcpy(buffer, MAGIC_START_6, MAGIC_SIZE);
+ offset += MAGIC_SIZE;
+ encode_u32_le(buffer, &offset, volume_index->sparse_sample_rate);
+ result = VDO_ASSERT(offset == sizeof(struct volume_index_data),
+ "%zu bytes of header written, of %zu expected", offset,
+ sizeof(struct volume_index_data));
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = uds_write_to_buffered_writer(writer, buffer, offset);
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning_strerror(result, "failed to write volume index header");
+ return result;
+ }
+
+ result = start_saving_volume_sub_index(&volume_index->vi_non_hook, zone_number,
+ writer);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ return start_saving_volume_sub_index(&volume_index->vi_hook, zone_number,
+ writer);
+}
+
+static int finish_saving_volume_sub_index(const struct volume_sub_index *sub_index,
+ unsigned int zone_number)
+{
+ return uds_finish_saving_delta_index(&sub_index->delta_index, zone_number);
+}
+
+static int finish_saving_volume_index(const struct volume_index *volume_index,
+ unsigned int zone_number)
+{
+ int result;
+
+ result = finish_saving_volume_sub_index(&volume_index->vi_non_hook, zone_number);
+ if ((result == UDS_SUCCESS) && has_sparse(volume_index))
+ result = finish_saving_volume_sub_index(&volume_index->vi_hook, zone_number);
+ return result;
+}
+
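+/*
+ * Save the volume index with one zone per writer: write each zone's header
+ * and flush chapters, save its delta index data, append a guard list to mark
+ * the end of the saved data, and then flush the writer.
+ */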
+int uds_save_volume_index(struct volume_index *volume_index,
+ struct buffered_writer **writers, unsigned int writer_count)
+{
+ int result = UDS_SUCCESS;
+ unsigned int zone;
+
+ for (zone = 0; zone < writer_count; zone++) {
+ result = start_saving_volume_index(volume_index, zone, writers[zone]);
+ if (result != UDS_SUCCESS)
+ break;
+
+ result = finish_saving_volume_index(volume_index, zone);
+ if (result != UDS_SUCCESS)
+ break;
+
+ result = uds_write_guard_delta_list(writers[zone]);
+ if (result != UDS_SUCCESS)
+ break;
+
+ result = uds_flush_buffered_writer(writers[zone]);
+ if (result != UDS_SUCCESS)
+ break;
+ }
+
+ return result;
+}
+
+static void get_volume_sub_index_stats(const struct volume_sub_index *sub_index,
+ struct volume_index_stats *stats)
+{
+ struct delta_index_stats dis;
+ unsigned int z;
+
+ uds_get_delta_index_stats(&sub_index->delta_index, &dis);
+ stats->rebalance_time = dis.rebalance_time;
+ stats->rebalance_count = dis.rebalance_count;
+ stats->record_count = dis.record_count;
+ stats->collision_count = dis.collision_count;
+ stats->discard_count = dis.discard_count;
+ stats->overflow_count = dis.overflow_count;
+ stats->delta_lists = dis.list_count;
+ stats->early_flushes = 0;
+ for (z = 0; z < sub_index->zone_count; z++)
+ stats->early_flushes += sub_index->zones[z].early_flushes;
+}
+
+void uds_get_volume_index_stats(const struct volume_index *volume_index,
+ struct volume_index_stats *stats)
+{
+ struct volume_index_stats sparse_stats;
+
+ get_volume_sub_index_stats(&volume_index->vi_non_hook, stats);
+ if (!has_sparse(volume_index))
+ return;
+
+ get_volume_sub_index_stats(&volume_index->vi_hook, &sparse_stats);
+ stats->rebalance_time += sparse_stats.rebalance_time;
+ stats->rebalance_count += sparse_stats.rebalance_count;
+ stats->record_count += sparse_stats.record_count;
+ stats->collision_count += sparse_stats.collision_count;
+ stats->discard_count += sparse_stats.discard_count;
+ stats->overflow_count += sparse_stats.overflow_count;
+ stats->delta_lists += sparse_stats.delta_lists;
+ stats->early_flushes += sparse_stats.early_flushes;
+}
+
+static int initialize_volume_sub_index(const struct uds_configuration *config,
+ u64 volume_nonce, u8 tag,
+ struct volume_sub_index *sub_index)
+{
+ struct sub_index_parameters params = { .address_bits = 0 };
+ unsigned int zone_count = config->zone_count;
+ u64 available_bytes = 0;
+ unsigned int z;
+ int result;
+
+ result = compute_volume_sub_index_parameters(config, &params);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ sub_index->address_bits = params.address_bits;
+ sub_index->address_mask = (1u << params.address_bits) - 1;
+ sub_index->chapter_bits = params.chapter_bits;
+ sub_index->chapter_mask = (1u << params.chapter_bits) - 1;
+ sub_index->chapter_count = params.chapter_count;
+ sub_index->list_count = params.list_count;
+ sub_index->zone_count = zone_count;
+ sub_index->chapter_zone_bits = params.chapter_size_in_bits / zone_count;
+ sub_index->volume_nonce = volume_nonce;
+
+ result = uds_initialize_delta_index(&sub_index->delta_index, zone_count,
+ params.list_count, params.mean_delta,
+ params.chapter_bits, params.memory_size,
+ tag);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ for (z = 0; z < sub_index->delta_index.zone_count; z++)
+ available_bytes += sub_index->delta_index.delta_zones[z].size;
+ available_bytes -= params.target_free_bytes;
+ sub_index->max_zone_bits = (available_bytes * BITS_PER_BYTE) / zone_count;
+ sub_index->memory_size = (sub_index->delta_index.memory_size +
+ sizeof(struct volume_sub_index) +
+ (params.list_count * sizeof(u64)) +
+ (zone_count * sizeof(struct volume_sub_index_zone)));
+
+ /* The following arrays are initialized to all zeros. */
+ result = vdo_allocate(params.list_count, u64, "first chapter to flush",
+ &sub_index->flush_chapters);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return vdo_allocate(zone_count, struct volume_sub_index_zone,
+ "volume index zones", &sub_index->zones);
+}
+
+int uds_make_volume_index(const struct uds_configuration *config, u64 volume_nonce,
+ struct volume_index **volume_index_ptr)
+{
+ struct split_config split;
+ unsigned int zone;
+ struct volume_index *volume_index;
+ int result;
+
+ result = vdo_allocate(1, struct volume_index, "volume index", &volume_index);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ volume_index->zone_count = config->zone_count;
+
+ if (!uds_is_sparse_index_geometry(config->geometry)) {
+ result = initialize_volume_sub_index(config, volume_nonce, 'm',
+ &volume_index->vi_non_hook);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume_index(volume_index);
+ return result;
+ }
+
+ volume_index->memory_size = volume_index->vi_non_hook.memory_size;
+ *volume_index_ptr = volume_index;
+ return UDS_SUCCESS;
+ }
+
+ volume_index->sparse_sample_rate = config->sparse_sample_rate;
+
+ result = vdo_allocate(config->zone_count, struct volume_index_zone,
+ "volume index zones", &volume_index->zones);
+ if (result != VDO_SUCCESS) {
+ uds_free_volume_index(volume_index);
+ return result;
+ }
+
+ for (zone = 0; zone < config->zone_count; zone++)
+ mutex_init(&volume_index->zones[zone].hook_mutex);
+
+ split_configuration(config, &split);
+ result = initialize_volume_sub_index(&split.non_hook_config, volume_nonce, 'd',
+ &volume_index->vi_non_hook);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume_index(volume_index);
+ return vdo_log_error_strerror(result,
+ "Error creating non hook volume index");
+ }
+
+ result = initialize_volume_sub_index(&split.hook_config, volume_nonce, 's',
+ &volume_index->vi_hook);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume_index(volume_index);
+ return vdo_log_error_strerror(result,
+ "Error creating hook volume index");
+ }
+
+ volume_index->memory_size =
+ volume_index->vi_non_hook.memory_size + volume_index->vi_hook.memory_size;
+ *volume_index_ptr = volume_index;
+ return UDS_SUCCESS;
+}
diff --git a/drivers/md/dm-vdo/indexer/volume-index.h b/drivers/md/dm-vdo/indexer/volume-index.h
new file mode 100644
index 000000000000..583998c547b7
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/volume-index.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_VOLUME_INDEX_H
+#define UDS_VOLUME_INDEX_H
+
+#include <linux/limits.h>
+
+#include "thread-utils.h"
+
+#include "config.h"
+#include "delta-index.h"
+#include "indexer.h"
+
+/*
+ * The volume index is the primary top-level index for UDS. It contains records which map a record
+ * name to the chapter where a record with that name is stored. This mapping can definitively say
+ * when no record exists. However, because we only use a subset of the name for this index, it
+ * cannot definitively say that a record for the entry does exist. It can only say that if a record
+ * exists, it will be in a particular chapter. The request can then be dispatched to that chapter
+ * for further processing.
+ *
+ * If the volume_index_record does not actually match the record name, the index can store a more
+ * specific collision record to disambiguate the new entry from the existing one. Index entries are
+ * managed with volume_index_record structures.
+ */
+
+#define NO_CHAPTER U64_MAX
+
+struct volume_index_stats {
+ /* Nanoseconds spent rebalancing */
+ ktime_t rebalance_time;
+ /* Number of memory rebalances */
+ u32 rebalance_count;
+ /* The number of records in the index */
+ u64 record_count;
+ /* The number of collision records */
+ u64 collision_count;
+ /* The number of records removed */
+ u64 discard_count;
+ /* The number of UDS_OVERFLOWs detected */
+ u64 overflow_count;
+ /* The number of delta lists */
+ u32 delta_lists;
+ /* Number of early flushes */
+ u64 early_flushes;
+};
+
+struct volume_sub_index_zone {
+ u64 virtual_chapter_low;
+ u64 virtual_chapter_high;
+ u64 early_flushes;
+} __aligned(L1_CACHE_BYTES);
+
+struct volume_sub_index {
+ /* The delta index */
+ struct delta_index delta_index;
+ /* The first chapter to be flushed in each zone */
+ u64 *flush_chapters;
+ /* The zones */
+ struct volume_sub_index_zone *zones;
+ /* The volume nonce */
+ u64 volume_nonce;
+ /* Expected size of a chapter (per zone) */
+ u64 chapter_zone_bits;
+ /* Maximum size of the index (per zone) */
+ u64 max_zone_bits;
+ /* The number of bits in address mask */
+ u8 address_bits;
+ /* Mask to get address within delta list */
+ u32 address_mask;
+ /* The number of bits in chapter number */
+ u8 chapter_bits;
+ /* The largest storable chapter number */
+ u32 chapter_mask;
+ /* The number of chapters used */
+ u32 chapter_count;
+ /* The number of delta lists */
+ u32 list_count;
+ /* The number of zones */
+ unsigned int zone_count;
+ /* The amount of memory allocated */
+ u64 memory_size;
+};
+
+struct volume_index_zone {
+ /* Protects the sampled index in this zone */
+ struct mutex hook_mutex;
+} __aligned(L1_CACHE_BYTES);
+
+struct volume_index {
+ u32 sparse_sample_rate;
+ unsigned int zone_count;
+ u64 memory_size;
+ struct volume_sub_index vi_non_hook;
+ struct volume_sub_index vi_hook;
+ struct volume_index_zone *zones;
+};
+
+/*
+ * The volume_index_record structure is used to facilitate processing of a record name. A client
+ * first calls uds_get_volume_index_record() to find the volume index record for a record name. The
+ * fields of the record can then be examined to determine the state of the record.
+ *
+ * If is_found is false, then the index did not find an entry for the record name. Calling
+ * uds_put_volume_index_record() will insert a new entry for that name at the proper place.
+ *
+ * If is_found is true, then we did find an entry for the record name, and the virtual_chapter and
+ * is_collision fields reflect the entry found. Subsequently, a call to
+ * uds_remove_volume_index_record() will remove the entry, a call to
+ * uds_set_volume_index_record_chapter() will update the existing entry, and a call to
+ * uds_put_volume_index_record() will insert a new collision record after the existing entry.
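+ *
+ * For example, an insert-or-update sequence might look like this sketch:
+ *
+ *   struct volume_index_record record;
+ *
+ *   result = uds_get_volume_index_record(volume_index, name, &record);
+ *   if (result != UDS_SUCCESS)
+ *           return result;
+ *   if (record.is_found)
+ *           result = uds_set_volume_index_record_chapter(&record, chapter);
+ *   else
+ *           result = uds_put_volume_index_record(&record, chapter);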
+ */
+struct volume_index_record {
+ /* Public fields */
+
+ /* Chapter where the record info is found */
+ u64 virtual_chapter;
+ /* This record is a collision */
+ bool is_collision;
+ /* This record is the requested record */
+ bool is_found;
+
+ /* Private fields */
+
+ /* Zone that contains this name */
+ unsigned int zone_number;
+ /* The volume index */
+ struct volume_sub_index *sub_index;
+ /* Mutex for accessing this delta index entry in the hook index */
+ struct mutex *mutex;
+ /* The record name to which this record refers */
+ const struct uds_record_name *name;
+ /* The delta index entry for this record */
+ struct delta_index_entry delta_entry;
+};
+
+int __must_check uds_make_volume_index(const struct uds_configuration *config,
+ u64 volume_nonce,
+ struct volume_index **volume_index);
+
+void uds_free_volume_index(struct volume_index *volume_index);
+
+int __must_check uds_compute_volume_index_save_blocks(const struct uds_configuration *config,
+ size_t block_size,
+ u64 *block_count);
+
+unsigned int __must_check uds_get_volume_index_zone(const struct volume_index *volume_index,
+ const struct uds_record_name *name);
+
+bool __must_check uds_is_volume_index_sample(const struct volume_index *volume_index,
+ const struct uds_record_name *name);
+
+/*
+ * This function is only used to manage sparse cache membership. Most requests should use
+ * uds_get_volume_index_record() to look up index records instead.
+ */
+u64 __must_check uds_lookup_volume_index_name(const struct volume_index *volume_index,
+ const struct uds_record_name *name);
+
+int __must_check uds_get_volume_index_record(struct volume_index *volume_index,
+ const struct uds_record_name *name,
+ struct volume_index_record *record);
+
+int __must_check uds_put_volume_index_record(struct volume_index_record *record,
+ u64 virtual_chapter);
+
+int __must_check uds_remove_volume_index_record(struct volume_index_record *record);
+
+int __must_check uds_set_volume_index_record_chapter(struct volume_index_record *record,
+ u64 virtual_chapter);
+
+void uds_set_volume_index_open_chapter(struct volume_index *volume_index,
+ u64 virtual_chapter);
+
+void uds_set_volume_index_zone_open_chapter(struct volume_index *volume_index,
+ unsigned int zone_number,
+ u64 virtual_chapter);
+
+int __must_check uds_load_volume_index(struct volume_index *volume_index,
+ struct buffered_reader **readers,
+ unsigned int reader_count);
+
+int __must_check uds_save_volume_index(struct volume_index *volume_index,
+ struct buffered_writer **writers,
+ unsigned int writer_count);
+
+void uds_get_volume_index_stats(const struct volume_index *volume_index,
+ struct volume_index_stats *stats);
+
+#endif /* UDS_VOLUME_INDEX_H */
diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c
new file mode 100644
index 000000000000..655453bb276b
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/volume.c
@@ -0,0 +1,1693 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "volume.h"
+
+#include <linux/atomic.h>
+#include <linux/dm-bufio.h>
+#include <linux/err.h>
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+#include "string-utils.h"
+#include "thread-utils.h"
+
+#include "chapter-index.h"
+#include "config.h"
+#include "geometry.h"
+#include "hash-utils.h"
+#include "index.h"
+#include "sparse-cache.h"
+
+/*
+ * The first block of the volume layout is reserved for the volume header, which is no longer used.
+ * The remainder of the volume is divided into chapters consisting of several pages of records, and
+ * several pages of static index used to find those records. The index pages are recorded first,
+ * followed by the record pages. The chapters are written in order as they are filled, so the
+ * volume storage acts as a circular log of the most recent chapters, with each new chapter
+ * overwriting the oldest saved one.
+ *
+ * When a new chapter is filled and closed, the records from that chapter are sorted and
+ * interleaved in approximate temporal order, and assigned to record pages. Then a static delta
+ * index is generated to store which record page contains each record. The in-memory index page map
+ * is also updated to indicate which delta lists fall on each chapter index page. This means that
+ * when a record is read, the volume only has to load a single index page and a single record page,
+ * rather than search the entire chapter. These index and record pages are written to storage, and
+ * the index pages are transferred to the page cache under the theory that the most recently
+ * written chapter is likely to be accessed again soon.
+ *
+ * When reading a record, the volume index will indicate which chapter should contain it. The
+ * volume uses the index page map to determine which chapter index page needs to be loaded, and
+ * then reads the relevant record page number from the chapter index. Both index and record pages
+ * are stored in a page cache when read for the common case that subsequent records need the same
+ * pages. The page cache evicts the least recently accessed entries when caching new pages. In
+ * addition, the volume uses dm-bufio to manage access to the storage, which may allow for
+ * additional caching depending on available system resources.
+ *
+ * Record requests are handled from cached pages when possible. If a page needs to be read, it is
+ * placed on a queue along with the request that wants to read it. Any requests for the same page
+ * that arrive while the read is pending are added to the queue entry. A separate reader thread
+ * handles the queued reads, adding the page to the cache and updating any requests queued with it
+ * so they can continue processing. This allows the index zone threads to continue processing new
+ * requests rather than wait for the storage reads.
+ *
+ * When an index rebuild is necessary, the volume reads each stored chapter to determine which
+ * range of chapters contain valid records, so that those records can be used to reconstruct the
+ * in-memory volume index.
+ */
+
+/* The maximum allowable number of contiguous bad chapters */
+#define MAX_BAD_CHAPTERS 100
+#define VOLUME_CACHE_MAX_ENTRIES (U16_MAX >> 1)
+#define VOLUME_CACHE_QUEUED_FLAG (1 << 15)
+#define VOLUME_CACHE_MAX_QUEUED_READS 4096
+
+static const u64 BAD_CHAPTER = U64_MAX;
+
+/*
+ * The invalidate counter is two 32-bit fields stored together atomically. The low-order 32 bits
+ * are the physical page number of the cached page being read. The high-order 32 bits are a
+ * sequence number. This value is written when the zone that owns it begins or completes a cache
+ * search. Any other thread will only read the counter in wait_for_pending_searches() while waiting
+ * to update the cache contents.
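+ *
+ * For example, an odd counter value means the owning zone has a search in
+ * progress on the page recorded in the low-order 32 bits;
+ * begin_pending_search() makes the counter odd and end_pending_search()
+ * makes it even again.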
+ */
+union invalidate_counter {
+ u64 value;
+ struct {
+ u32 page;
+ u32 counter;
+ };
+};
+
+static inline u32 map_to_page_number(struct index_geometry *geometry, u32 physical_page)
+{
+ return (physical_page - HEADER_PAGES_PER_VOLUME) % geometry->pages_per_chapter;
+}
+
+static inline u32 map_to_chapter_number(struct index_geometry *geometry, u32 physical_page)
+{
+ return (physical_page - HEADER_PAGES_PER_VOLUME) / geometry->pages_per_chapter;
+}
+
+static inline bool is_record_page(struct index_geometry *geometry, u32 physical_page)
+{
+ return map_to_page_number(geometry, physical_page) >= geometry->index_pages_per_chapter;
+}
+
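+/*
+ * Map a chapter and page pair to its physical page in the volume. For
+ * example, with a hypothetical 256 pages per chapter, chapter 2 page 3 maps
+ * to physical page 1 + (256 * 2) + 3 = 516.
+ */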
+static u32 map_to_physical_page(const struct index_geometry *geometry, u32 chapter, u32 page)
+{
+ /* Page zero is the header page, so the first chapter index page is page one. */
+ return HEADER_PAGES_PER_VOLUME + (geometry->pages_per_chapter * chapter) + page;
+}
+
+static inline union invalidate_counter get_invalidate_counter(struct page_cache *cache,
+ unsigned int zone_number)
+{
+ return (union invalidate_counter) {
+ .value = READ_ONCE(cache->search_pending_counters[zone_number].atomic_value),
+ };
+}
+
+static inline void set_invalidate_counter(struct page_cache *cache,
+ unsigned int zone_number,
+ union invalidate_counter invalidate_counter)
+{
+ WRITE_ONCE(cache->search_pending_counters[zone_number].atomic_value,
+ invalidate_counter.value);
+}
+
+static inline bool search_pending(union invalidate_counter invalidate_counter)
+{
+ return (invalidate_counter.counter & 1) != 0;
+}
+
+/* Lock the cache for a zone in order to search for a page. */
+static void begin_pending_search(struct page_cache *cache, u32 physical_page,
+ unsigned int zone_number)
+{
+ union invalidate_counter invalidate_counter =
+ get_invalidate_counter(cache, zone_number);
+
+ invalidate_counter.page = physical_page;
+ invalidate_counter.counter++;
+ set_invalidate_counter(cache, zone_number, invalidate_counter);
+ VDO_ASSERT_LOG_ONLY(search_pending(invalidate_counter),
+ "Search is pending for zone %u", zone_number);
+ /*
+ * This memory barrier ensures that the write to the invalidate counter is seen by other
+ * threads before this thread accesses the cached page. The corresponding read memory
+ * barrier is in wait_for_pending_searches().
+ */
+ smp_mb();
+}
+
+/* Unlock the cache for a zone by clearing its invalidate counter. */
+static void end_pending_search(struct page_cache *cache, unsigned int zone_number)
+{
+ union invalidate_counter invalidate_counter;
+
+ /*
+ * This memory barrier ensures that this thread completes reads of the
+ * cached page before other threads see the write to the invalidate
+ * counter.
+ */
+ smp_mb();
+
+ invalidate_counter = get_invalidate_counter(cache, zone_number);
+ VDO_ASSERT_LOG_ONLY(search_pending(invalidate_counter),
+ "Search is pending for zone %u", zone_number);
+ invalidate_counter.counter++;
+ set_invalidate_counter(cache, zone_number, invalidate_counter);
+}
+
+static void wait_for_pending_searches(struct page_cache *cache, u32 physical_page)
+{
+ union invalidate_counter initial_counters[MAX_ZONES];
+ unsigned int i;
+
+ /*
+ * We hold the read_threads_mutex. We are waiting for threads that do not hold the
+ * read_threads_mutex. Those threads have "locked" their targeted page by setting the
+ * search_pending_counter. The corresponding write memory barrier is in
+ * begin_pending_search().
+ */
+ smp_mb();
+
+ for (i = 0; i < cache->zone_count; i++)
+ initial_counters[i] = get_invalidate_counter(cache, i);
+ for (i = 0; i < cache->zone_count; i++) {
+ if (search_pending(initial_counters[i]) &&
+ (initial_counters[i].page == physical_page)) {
+ /*
+ * There is an active search using the physical page. We need to wait for
+ * the search to finish.
+ *
+ * FIXME: Investigate using wait_event() to wait for the search to finish.
+ */
+ while (initial_counters[i].value ==
+ get_invalidate_counter(cache, i).value)
+ cond_resched();
+ }
+ }
+}
+
+static void release_page_buffer(struct cached_page *page)
+{
+ if (page->buffer != NULL)
+ dm_bufio_release(vdo_forget(page->buffer));
+}
+
+static void clear_cache_page(struct page_cache *cache, struct cached_page *page)
+{
+ /* Do not clear read_pending because the read queue relies on it. */
+ release_page_buffer(page);
+ page->physical_page = cache->indexable_pages;
+ WRITE_ONCE(page->last_used, 0);
+}
+
+static void make_page_most_recent(struct page_cache *cache, struct cached_page *page)
+{
+ /*
+ * ASSERTION: We are either a zone thread holding a search_pending_counter, or we are any
+ * thread holding the read_threads_mutex.
+ */
+ if (atomic64_read(&cache->clock) != READ_ONCE(page->last_used))
+ WRITE_ONCE(page->last_used, atomic64_inc_return(&cache->clock));
+}
+
+/* Select a page to remove from the cache to make space for a new entry. */
+static struct cached_page *select_victim_in_cache(struct page_cache *cache)
+{
+ struct cached_page *page;
+ int oldest_index = 0;
+ s64 oldest_time = S64_MAX;
+ s64 last_used;
+ u16 i;
+
+ /* Find the oldest unclaimed page. We hold the read_threads_mutex. */
+ for (i = 0; i < cache->cache_slots; i++) {
+ /* A page with a pending read must not be replaced. */
+ if (cache->cache[i].read_pending)
+ continue;
+
+ last_used = READ_ONCE(cache->cache[i].last_used);
+ if (last_used <= oldest_time) {
+ oldest_time = last_used;
+ oldest_index = i;
+ }
+ }
+
+ page = &cache->cache[oldest_index];
+ if (page->physical_page != cache->indexable_pages) {
+ WRITE_ONCE(cache->index[page->physical_page], cache->cache_slots);
+ wait_for_pending_searches(cache, page->physical_page);
+ }
+
+ page->read_pending = true;
+ clear_cache_page(cache, page);
+ return page;
+}
+
+/* Make a newly filled cache entry available to other threads. */
+static int put_page_in_cache(struct page_cache *cache, u32 physical_page,
+ struct cached_page *page)
+{
+ int result;
+
+ /* We hold the read_threads_mutex. */
+ result = VDO_ASSERT((page->read_pending), "page to install has a pending read");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ page->physical_page = physical_page;
+ make_page_most_recent(cache, page);
+ page->read_pending = false;
+
+ /*
+ * We hold the read_threads_mutex, but we must have a write memory barrier before making
+ * the cached_page available to the readers that do not hold the mutex. The corresponding
+ * read memory barrier is in get_page_and_index().
+ */
+ smp_wmb();
+
+ /* This assignment also clears the queued flag. */
+ WRITE_ONCE(cache->index[physical_page], page - cache->cache);
+ return UDS_SUCCESS;
+}
+
+static void cancel_page_in_cache(struct page_cache *cache, u32 physical_page,
+ struct cached_page *page)
+{
+ int result;
+
+ /* We hold the read_threads_mutex. */
+ result = VDO_ASSERT((page->read_pending), "page to install has a pending read");
+ if (result != VDO_SUCCESS)
+ return;
+
+ clear_cache_page(cache, page);
+ page->read_pending = false;
+
+ /* Clear the mapping and the queued flag for the new page. */
+ WRITE_ONCE(cache->index[physical_page], cache->cache_slots);
+}
+
+static inline u16 next_queue_position(u16 position)
+{
+ return (position + 1) % VOLUME_CACHE_MAX_QUEUED_READS;
+}
+
+static inline void advance_queue_position(u16 *position)
+{
+ *position = next_queue_position(*position);
+}
+
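+/*
+ * The queue is full when advancing read_queue_last would catch up to
+ * read_queue_first, so one slot always remains unused and at most
+ * VOLUME_CACHE_MAX_QUEUED_READS - 1 reads can be queued at once.
+ */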
+static inline bool read_queue_is_full(struct page_cache *cache)
+{
+ return cache->read_queue_first == next_queue_position(cache->read_queue_last);
+}
+
+static bool enqueue_read(struct page_cache *cache, struct uds_request *request,
+ u32 physical_page)
+{
+ struct queued_read *queue_entry;
+ u16 last = cache->read_queue_last;
+ u16 read_queue_index;
+
+ /* We hold the read_threads_mutex. */
+ if ((cache->index[physical_page] & VOLUME_CACHE_QUEUED_FLAG) == 0) {
+ /* This page has no existing entry in the queue. */
+ if (read_queue_is_full(cache))
+ return false;
+
+ /* Fill in the read queue entry. */
+ cache->read_queue[last].physical_page = physical_page;
+ cache->read_queue[last].invalid = false;
+ cache->read_queue[last].first_request = NULL;
+ cache->read_queue[last].last_request = NULL;
+
+ /* Point the cache index to the read queue entry. */
+ read_queue_index = last;
+ WRITE_ONCE(cache->index[physical_page],
+ read_queue_index | VOLUME_CACHE_QUEUED_FLAG);
+
+ advance_queue_position(&cache->read_queue_last);
+ } else {
+ /* It's already queued, so add this request to the existing entry. */
+ read_queue_index = cache->index[physical_page] & ~VOLUME_CACHE_QUEUED_FLAG;
+ }
+
+ request->next_request = NULL;
+ queue_entry = &cache->read_queue[read_queue_index];
+ if (queue_entry->first_request == NULL)
+ queue_entry->first_request = request;
+ else
+ queue_entry->last_request->next_request = request;
+ queue_entry->last_request = request;
+
+ return true;
+}
+
+static void enqueue_page_read(struct volume *volume, struct uds_request *request,
+ u32 physical_page)
+{
+ /* Mark the page as queued, so that chapter invalidation knows to cancel a read. */
+ while (!enqueue_read(&volume->page_cache, request, physical_page)) {
+ vdo_log_debug("Read queue full, waiting for reads to finish");
+ uds_wait_cond(&volume->read_threads_read_done_cond,
+ &volume->read_threads_mutex);
+ }
+
+ uds_signal_cond(&volume->read_threads_cond);
+}
+
+/*
+ * Reserve the next read queue entry for processing, but do not actually remove it from the queue.
+ * Must be followed by release_queued_requests().
+ */
+static struct queued_read *reserve_read_queue_entry(struct page_cache *cache)
+{
+ /* We hold the read_threads_mutex. */
+ struct queued_read *entry;
+ u16 index_value;
+ bool queued;
+
+ /* No items to dequeue */
+ if (cache->read_queue_next_read == cache->read_queue_last)
+ return NULL;
+
+ entry = &cache->read_queue[cache->read_queue_next_read];
+ index_value = cache->index[entry->physical_page];
+ queued = (index_value & VOLUME_CACHE_QUEUED_FLAG) != 0;
+ /* Check to see if it's still queued before resetting. */
+ if (entry->invalid && queued)
+ WRITE_ONCE(cache->index[entry->physical_page], cache->cache_slots);
+
+ /*
+ * If a synchronous read has taken this page, set invalid to true so it doesn't get
+ * overwritten. Requests will just be requeued.
+ */
+ if (!queued)
+ entry->invalid = true;
+
+ entry->reserved = true;
+ advance_queue_position(&cache->read_queue_next_read);
+ return entry;
+}
+
+static inline struct queued_read *wait_to_reserve_read_queue_entry(struct volume *volume)
+{
+ struct queued_read *queue_entry = NULL;
+
+ while (!volume->read_threads_exiting) {
+ queue_entry = reserve_read_queue_entry(&volume->page_cache);
+ if (queue_entry != NULL)
+ break;
+
+ uds_wait_cond(&volume->read_threads_cond, &volume->read_threads_mutex);
+ }
+
+ return queue_entry;
+}
+
+static int init_chapter_index_page(const struct volume *volume, u8 *index_page,
+ u32 chapter, u32 index_page_number,
+ struct delta_index_page *chapter_index_page)
+{
+ u64 ci_virtual;
+ u32 ci_chapter;
+ u32 lowest_list;
+ u32 highest_list;
+ struct index_geometry *geometry = volume->geometry;
+ int result;
+
+ result = uds_initialize_chapter_index_page(chapter_index_page, geometry,
+ index_page, volume->nonce);
+ if (volume->lookup_mode == LOOKUP_FOR_REBUILD)
+ return result;
+
+ if (result != UDS_SUCCESS) {
+ return vdo_log_error_strerror(result,
+ "Reading chapter index page for chapter %u page %u",
+ chapter, index_page_number);
+ }
+
+ uds_get_list_number_bounds(volume->index_page_map, chapter, index_page_number,
+ &lowest_list, &highest_list);
+ ci_virtual = chapter_index_page->virtual_chapter_number;
+ ci_chapter = uds_map_to_physical_chapter(geometry, ci_virtual);
+ if ((chapter == ci_chapter) &&
+ (lowest_list == chapter_index_page->lowest_list_number) &&
+ (highest_list == chapter_index_page->highest_list_number))
+ return UDS_SUCCESS;
+
+ vdo_log_warning("Index page map updated to %llu",
+ (unsigned long long) volume->index_page_map->last_update);
+ vdo_log_warning("Page map expects that chapter %u page %u has range %u to %u, but chapter index page has chapter %llu with range %u to %u",
+ chapter, index_page_number, lowest_list, highest_list,
+ (unsigned long long) ci_virtual,
+ chapter_index_page->lowest_list_number,
+ chapter_index_page->highest_list_number);
+ return vdo_log_error_strerror(UDS_CORRUPT_DATA,
+ "index page map mismatch with chapter index");
+}
+
+static int initialize_index_page(const struct volume *volume, u32 physical_page,
+ struct cached_page *page)
+{
+ u32 chapter = map_to_chapter_number(volume->geometry, physical_page);
+ u32 index_page_number = map_to_page_number(volume->geometry, physical_page);
+
+ return init_chapter_index_page(volume, dm_bufio_get_block_data(page->buffer),
+ chapter, index_page_number, &page->index_page);
+}
+
+static bool search_record_page(const u8 record_page[],
+ const struct uds_record_name *name,
+ const struct index_geometry *geometry,
+ struct uds_record_data *metadata)
+{
+ /*
+ * The array of records is sorted by name and stored as a binary tree in heap order, so the
+ * root of the tree is the first array element.
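+ *
+ * For example, with seven records the search starts at node 0; a name that
+ * compares less than records[0].name descends to node 1, then to node 3 or
+ * node 4, and the loop ends once node reaches records_per_page.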
+ */
+ u32 node = 0;
+ const struct uds_volume_record *records = (const struct uds_volume_record *) record_page;
+
+ while (node < geometry->records_per_page) {
+ int result;
+ const struct uds_volume_record *record = &records[node];
+
+ result = memcmp(name, &record->name, UDS_RECORD_NAME_SIZE);
+ if (result == 0) {
+ if (metadata != NULL)
+ *metadata = record->data;
+ return true;
+ }
+
+ /* The children of node N are at indexes 2N+1 and 2N+2. */
+ node = ((2 * node) + ((result < 0) ? 1 : 2));
+ }
+
+ return false;
+}
+
+/*
+ * If we've read in a record page, we're going to do an immediate search, to speed up processing by
+ * avoiding get_record_from_zone(), and to ensure that requests make progress even when queued. If
+ * we've read in an index page, we save the record page number so we don't have to resolve the
+ * index page again. We use the location, virtual_chapter, and old_metadata fields in the request
+ * to allow the index code to know where to begin processing the request again.
+ */
+static int search_page(struct cached_page *page, const struct volume *volume,
+ struct uds_request *request, u32 physical_page)
+{
+ int result;
+ enum uds_index_region location;
+ u16 record_page_number;
+
+ if (is_record_page(volume->geometry, physical_page)) {
+ if (search_record_page(dm_bufio_get_block_data(page->buffer),
+ &request->record_name, volume->geometry,
+ &request->old_metadata))
+ location = UDS_LOCATION_RECORD_PAGE_LOOKUP;
+ else
+ location = UDS_LOCATION_UNAVAILABLE;
+ } else {
+ result = uds_search_chapter_index_page(&page->index_page,
+ volume->geometry,
+ &request->record_name,
+ &record_page_number);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (record_page_number == NO_CHAPTER_INDEX_ENTRY) {
+ location = UDS_LOCATION_UNAVAILABLE;
+ } else {
+ location = UDS_LOCATION_INDEX_PAGE_LOOKUP;
+ *((u16 *) &request->old_metadata) = record_page_number;
+ }
+ }
+
+ request->location = location;
+ request->found = false;
+ return UDS_SUCCESS;
+}
+
+static int process_entry(struct volume *volume, struct queued_read *entry)
+{
+ u32 page_number = entry->physical_page;
+ struct uds_request *request;
+ struct cached_page *page = NULL;
+ u8 *page_data;
+ int result;
+
+ if (entry->invalid) {
+ vdo_log_debug("Requeuing requests for invalid page");
+ return UDS_SUCCESS;
+ }
+
+ page = select_victim_in_cache(&volume->page_cache);
+
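+ /*
+ * Release the mutex across the (possibly blocking) read so that other
+ * threads are not stalled on the page cache.
+ */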
+ mutex_unlock(&volume->read_threads_mutex);
+ page_data = dm_bufio_read(volume->client, page_number, &page->buffer);
+ mutex_lock(&volume->read_threads_mutex);
+ if (IS_ERR(page_data)) {
+ result = -PTR_ERR(page_data);
+ vdo_log_warning_strerror(result,
+ "error reading physical page %u from volume",
+ page_number);
+ cancel_page_in_cache(&volume->page_cache, page_number, page);
+ return result;
+ }
+
+ if (entry->invalid) {
+ vdo_log_warning("Page %u invalidated after read", page_number);
+ cancel_page_in_cache(&volume->page_cache, page_number, page);
+ return UDS_SUCCESS;
+ }
+
+ if (!is_record_page(volume->geometry, page_number)) {
+ result = initialize_index_page(volume, page_number, page);
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning("Error initializing chapter index page");
+ cancel_page_in_cache(&volume->page_cache, page_number, page);
+ return result;
+ }
+ }
+
+ result = put_page_in_cache(&volume->page_cache, page_number, page);
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning("Error putting page %u in cache", page_number);
+ cancel_page_in_cache(&volume->page_cache, page_number, page);
+ return result;
+ }
+
+ request = entry->first_request;
+ while ((request != NULL) && (result == UDS_SUCCESS)) {
+ result = search_page(page, volume, request, page_number);
+ request = request->next_request;
+ }
+
+ return result;
+}
+
+static void release_queued_requests(struct volume *volume, struct queued_read *entry,
+ int result)
+{
+ struct page_cache *cache = &volume->page_cache;
+ u16 next_read = cache->read_queue_next_read;
+ struct uds_request *request;
+ struct uds_request *next;
+
+ for (request = entry->first_request; request != NULL; request = next) {
+ next = request->next_request;
+ request->status = result;
+ request->requeued = true;
+ uds_enqueue_request(request, STAGE_INDEX);
+ }
+
+ entry->reserved = false;
+
+ /* Move the read_queue_first pointer as far as we can. */
+ while ((cache->read_queue_first != next_read) &&
+ (!cache->read_queue[cache->read_queue_first].reserved))
+ advance_queue_position(&cache->read_queue_first);
+ uds_broadcast_cond(&volume->read_threads_read_done_cond);
+}
+
+static void read_thread_function(void *arg)
+{
+ struct volume *volume = arg;
+
+ vdo_log_debug("reader starting");
+ mutex_lock(&volume->read_threads_mutex);
+ while (true) {
+ struct queued_read *queue_entry;
+ int result;
+
+ queue_entry = wait_to_reserve_read_queue_entry(volume);
+ if (volume->read_threads_exiting)
+ break;
+
+ result = process_entry(volume, queue_entry);
+ release_queued_requests(volume, queue_entry, result);
+ }
+ mutex_unlock(&volume->read_threads_mutex);
+ vdo_log_debug("reader done");
+}
+
+static void get_page_and_index(struct page_cache *cache, u32 physical_page,
+ int *queue_index, struct cached_page **page_ptr)
+{
+ u16 index_value;
+ u16 index;
+ bool queued;
+
+ /*
+ * ASSERTION: We are either a zone thread holding a search_pending_counter, or we are any
+ * thread holding the read_threads_mutex.
+ *
+ * Holding only a search_pending_counter is the most frequent case.
+ */
+ /*
+ * It would be unlikely for the compiler to turn the usage of index_value into two reads of
+ * cache->index, but it would be possible and very bad if those reads did not return the
+ * same bits.
+ */
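+	/*
+	 * An index entry encodes either a cache slot number or, when
+	 * VOLUME_CACHE_QUEUED_FLAG is set, a position in the read queue. For
+	 * example (illustrative values), an entry of (VOLUME_CACHE_QUEUED_FLAG | 3)
+	 * means the page is not cached but has a pending read at queue entry 3.
+	 */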
+ index_value = READ_ONCE(cache->index[physical_page]);
+ queued = (index_value & VOLUME_CACHE_QUEUED_FLAG) != 0;
+ index = index_value & ~VOLUME_CACHE_QUEUED_FLAG;
+
+ if (!queued && (index < cache->cache_slots)) {
+ *page_ptr = &cache->cache[index];
+ /*
+ * We have acquired access to the cached page, but unless we hold the
+ * read_threads_mutex, we need a read memory barrier now. The corresponding write
+ * memory barrier is in put_page_in_cache().
+ */
+ smp_rmb();
+ } else {
+ *page_ptr = NULL;
+ }
+
+ *queue_index = queued ? index : -1;
+}
+
+static void get_page_from_cache(struct page_cache *cache, u32 physical_page,
+ struct cached_page **page)
+{
+ /*
+ * ASSERTION: We are in a zone thread.
+	 * ASSERTION: We are holding a search_pending_counter or the read_threads_mutex.
+ */
+ int queue_index = -1;
+
+ get_page_and_index(cache, physical_page, &queue_index, page);
+}
+
+static int read_page_locked(struct volume *volume, u32 physical_page,
+ struct cached_page **page_ptr)
+{
+ int result = UDS_SUCCESS;
+ struct cached_page *page = NULL;
+ u8 *page_data;
+
+ page = select_victim_in_cache(&volume->page_cache);
+ page_data = dm_bufio_read(volume->client, physical_page, &page->buffer);
+ if (IS_ERR(page_data)) {
+ result = -PTR_ERR(page_data);
+ vdo_log_warning_strerror(result,
+ "error reading physical page %u from volume",
+ physical_page);
+ cancel_page_in_cache(&volume->page_cache, physical_page, page);
+ return result;
+ }
+
+ if (!is_record_page(volume->geometry, physical_page)) {
+ result = initialize_index_page(volume, physical_page, page);
+ if (result != UDS_SUCCESS) {
+ if (volume->lookup_mode != LOOKUP_FOR_REBUILD)
+ vdo_log_warning("Corrupt index page %u", physical_page);
+ cancel_page_in_cache(&volume->page_cache, physical_page, page);
+ return result;
+ }
+ }
+
+ result = put_page_in_cache(&volume->page_cache, physical_page, page);
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning("Error putting page %u in cache", physical_page);
+ cancel_page_in_cache(&volume->page_cache, physical_page, page);
+ return result;
+ }
+
+ *page_ptr = page;
+ return UDS_SUCCESS;
+}
+
+/* Retrieve a page from the cache while holding the read threads mutex. */
+static int get_volume_page_locked(struct volume *volume, u32 physical_page,
+ struct cached_page **page_ptr)
+{
+ int result;
+ struct cached_page *page = NULL;
+
+ get_page_from_cache(&volume->page_cache, physical_page, &page);
+ if (page == NULL) {
+ result = read_page_locked(volume, physical_page, &page);
+ if (result != UDS_SUCCESS)
+ return result;
+ } else {
+ make_page_most_recent(&volume->page_cache, page);
+ }
+
+ *page_ptr = page;
+ return UDS_SUCCESS;
+}
+
+/* Retrieve a page from the cache while holding a search_pending lock. */
+static int get_volume_page_protected(struct volume *volume, struct uds_request *request,
+ u32 physical_page, struct cached_page **page_ptr)
+{
+ struct cached_page *page;
+
+ get_page_from_cache(&volume->page_cache, physical_page, &page);
+ if (page != NULL) {
+ if (request->zone_number == 0) {
+ /* Only one zone is allowed to update the LRU. */
+ make_page_most_recent(&volume->page_cache, page);
+ }
+
+ *page_ptr = page;
+ return UDS_SUCCESS;
+ }
+
+ /* Prepare to enqueue a read for the page. */
+ end_pending_search(&volume->page_cache, request->zone_number);
+ mutex_lock(&volume->read_threads_mutex);
+
+ /*
+ * Do the lookup again while holding the read mutex (no longer the fast case so this should
+ * be fine to repeat). We need to do this because a page may have been added to the cache
+ * by a reader thread between the time we searched above and the time we went to actually
+ * try to enqueue it below. This could result in us enqueuing another read for a page which
+ * is already in the cache, which would mean we end up with two entries in the cache for
+ * the same page.
+ */
+ get_page_from_cache(&volume->page_cache, physical_page, &page);
+ if (page == NULL) {
+ enqueue_page_read(volume, request, physical_page);
+ /*
+ * The performance gain from unlocking first, while "search pending" mode is off,
+		 * turns out to be significant in some cases. Since the page is not available yet,
+		 * the ordering does not matter for correctness here, unlike the case below.
+ */
+ mutex_unlock(&volume->read_threads_mutex);
+ begin_pending_search(&volume->page_cache, physical_page,
+ request->zone_number);
+ return UDS_QUEUED;
+ }
+
+ /*
+ * Now that the page is loaded, the volume needs to switch to "reader thread unlocked" and
+ * "search pending" state in careful order so no other thread can mess with the data before
+ * the caller gets to look at it.
+ */
+ begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+ mutex_unlock(&volume->read_threads_mutex);
+ *page_ptr = page;
+ return UDS_SUCCESS;
+}
+
+static int get_volume_page(struct volume *volume, u32 chapter, u32 page_number,
+ struct cached_page **page_ptr)
+{
+ int result;
+ u32 physical_page = map_to_physical_page(volume->geometry, chapter, page_number);
+
+ mutex_lock(&volume->read_threads_mutex);
+ result = get_volume_page_locked(volume, physical_page, page_ptr);
+ mutex_unlock(&volume->read_threads_mutex);
+ return result;
+}
+
+int uds_get_volume_record_page(struct volume *volume, u32 chapter, u32 page_number,
+ u8 **data_ptr)
+{
+ int result;
+ struct cached_page *page = NULL;
+
+ result = get_volume_page(volume, chapter, page_number, &page);
+ if (result == UDS_SUCCESS)
+ *data_ptr = dm_bufio_get_block_data(page->buffer);
+ return result;
+}
+
+int uds_get_volume_index_page(struct volume *volume, u32 chapter, u32 page_number,
+ struct delta_index_page **index_page_ptr)
+{
+ int result;
+ struct cached_page *page = NULL;
+
+ result = get_volume_page(volume, chapter, page_number, &page);
+ if (result == UDS_SUCCESS)
+ *index_page_ptr = &page->index_page;
+ return result;
+}
+
+/*
+ * Find the record page associated with a name in a given index page. This will return UDS_QUEUED
+ * if the page in question must be read from storage.
+ */
+static int search_cached_index_page(struct volume *volume, struct uds_request *request,
+ u32 chapter, u32 index_page_number,
+ u16 *record_page_number)
+{
+ int result;
+ struct cached_page *page = NULL;
+ u32 physical_page = map_to_physical_page(volume->geometry, chapter,
+ index_page_number);
+
+ /*
+ * Make sure the invalidate counter is updated before we try and read the mapping. This
+ * prevents this thread from reading a page in the cache which has already been marked for
+ * invalidation by the reader thread, before the reader thread has noticed that the
+ * invalidate_counter has been incremented.
+ */
+ begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+
+ result = get_volume_page_protected(volume, request, physical_page, &page);
+ if (result != UDS_SUCCESS) {
+ end_pending_search(&volume->page_cache, request->zone_number);
+ return result;
+ }
+
+ result = uds_search_chapter_index_page(&page->index_page, volume->geometry,
+ &request->record_name,
+ record_page_number);
+ end_pending_search(&volume->page_cache, request->zone_number);
+ return result;
+}
+
+/*
+ * Find the metadata associated with a name in a given record page. This will return UDS_QUEUED if
+ * the page in question must be read from storage.
+ */
+int uds_search_cached_record_page(struct volume *volume, struct uds_request *request,
+ u32 chapter, u16 record_page_number, bool *found)
+{
+ struct cached_page *record_page;
+ struct index_geometry *geometry = volume->geometry;
+ int result;
+ u32 physical_page, page_number;
+
+ *found = false;
+ if (record_page_number == NO_CHAPTER_INDEX_ENTRY)
+ return UDS_SUCCESS;
+
+ result = VDO_ASSERT(record_page_number < geometry->record_pages_per_chapter,
+ "0 <= %d < %u", record_page_number,
+ geometry->record_pages_per_chapter);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ page_number = geometry->index_pages_per_chapter + record_page_number;
+
+ physical_page = map_to_physical_page(volume->geometry, chapter, page_number);
+
+ /*
+ * Make sure the invalidate counter is updated before we try and read the mapping. This
+ * prevents this thread from reading a page in the cache which has already been marked for
+ * invalidation by the reader thread, before the reader thread has noticed that the
+ * invalidate_counter has been incremented.
+ */
+ begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
+
+ result = get_volume_page_protected(volume, request, physical_page, &record_page);
+ if (result != UDS_SUCCESS) {
+ end_pending_search(&volume->page_cache, request->zone_number);
+ return result;
+ }
+
+ if (search_record_page(dm_bufio_get_block_data(record_page->buffer),
+ &request->record_name, geometry, &request->old_metadata))
+ *found = true;
+
+ end_pending_search(&volume->page_cache, request->zone_number);
+ return UDS_SUCCESS;
+}
+
+void uds_prefetch_volume_chapter(const struct volume *volume, u32 chapter)
+{
+ const struct index_geometry *geometry = volume->geometry;
+ u32 physical_page = map_to_physical_page(geometry, chapter, 0);
+
+ dm_bufio_prefetch(volume->client, physical_page, geometry->pages_per_chapter);
+}
+
+int uds_read_chapter_index_from_volume(const struct volume *volume, u64 virtual_chapter,
+ struct dm_buffer *volume_buffers[],
+ struct delta_index_page index_pages[])
+{
+ int result;
+ u32 i;
+ const struct index_geometry *geometry = volume->geometry;
+ u32 physical_chapter = uds_map_to_physical_chapter(geometry, virtual_chapter);
+ u32 physical_page = map_to_physical_page(geometry, physical_chapter, 0);
+
+ dm_bufio_prefetch(volume->client, physical_page, geometry->index_pages_per_chapter);
+ for (i = 0; i < geometry->index_pages_per_chapter; i++) {
+ u8 *index_page;
+
+ index_page = dm_bufio_read(volume->client, physical_page + i,
+ &volume_buffers[i]);
+ if (IS_ERR(index_page)) {
+ result = -PTR_ERR(index_page);
+ vdo_log_warning_strerror(result,
+ "error reading physical page %u",
+ physical_page);
+ return result;
+ }
+
+ result = init_chapter_index_page(volume, index_page, physical_chapter, i,
+ &index_pages[i]);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ return UDS_SUCCESS;
+}
+
+int uds_search_volume_page_cache(struct volume *volume, struct uds_request *request,
+ bool *found)
+{
+ int result;
+ u32 physical_chapter =
+ uds_map_to_physical_chapter(volume->geometry, request->virtual_chapter);
+ u32 index_page_number;
+ u16 record_page_number;
+
+ index_page_number = uds_find_index_page_number(volume->index_page_map,
+ &request->record_name,
+ physical_chapter);
+
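+	/*
+	 * A previous index-page lookup (UDS_LOCATION_INDEX_PAGE_LOOKUP) stashed
+	 * the record page number in old_metadata, so the chapter index search
+	 * can be skipped in that case.
+	 */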
+ if (request->location == UDS_LOCATION_INDEX_PAGE_LOOKUP) {
+ record_page_number = *((u16 *) &request->old_metadata);
+ } else {
+ result = search_cached_index_page(volume, request, physical_chapter,
+ index_page_number,
+ &record_page_number);
+ if (result != UDS_SUCCESS)
+ return result;
+ }
+
+ return uds_search_cached_record_page(volume, request, physical_chapter,
+ record_page_number, found);
+}
+
+int uds_search_volume_page_cache_for_rebuild(struct volume *volume,
+ const struct uds_record_name *name,
+ u64 virtual_chapter, bool *found)
+{
+ int result;
+ struct index_geometry *geometry = volume->geometry;
+ struct cached_page *page;
+ u32 physical_chapter = uds_map_to_physical_chapter(geometry, virtual_chapter);
+ u32 index_page_number;
+ u16 record_page_number;
+ u32 page_number;
+
+ *found = false;
+ index_page_number =
+ uds_find_index_page_number(volume->index_page_map, name,
+ physical_chapter);
+ result = get_volume_page(volume, physical_chapter, index_page_number, &page);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = uds_search_chapter_index_page(&page->index_page, geometry, name,
+ &record_page_number);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ if (record_page_number == NO_CHAPTER_INDEX_ENTRY)
+ return UDS_SUCCESS;
+
+ page_number = geometry->index_pages_per_chapter + record_page_number;
+ result = get_volume_page(volume, physical_chapter, page_number, &page);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ *found = search_record_page(dm_bufio_get_block_data(page->buffer), name,
+ geometry, NULL);
+ return UDS_SUCCESS;
+}
+
+static void invalidate_page(struct page_cache *cache, u32 physical_page)
+{
+ struct cached_page *page;
+ int queue_index = -1;
+
+ /* We hold the read_threads_mutex. */
+ get_page_and_index(cache, physical_page, &queue_index, &page);
+ if (page != NULL) {
+ WRITE_ONCE(cache->index[page->physical_page], cache->cache_slots);
+ wait_for_pending_searches(cache, page->physical_page);
+ clear_cache_page(cache, page);
+ } else if (queue_index > -1) {
+ vdo_log_debug("setting pending read to invalid");
+ cache->read_queue[queue_index].invalid = true;
+ }
+}
+
+void uds_forget_chapter(struct volume *volume, u64 virtual_chapter)
+{
+ u32 physical_chapter =
+ uds_map_to_physical_chapter(volume->geometry, virtual_chapter);
+ u32 first_page = map_to_physical_page(volume->geometry, physical_chapter, 0);
+ u32 i;
+
+ vdo_log_debug("forgetting chapter %llu", (unsigned long long) virtual_chapter);
+ mutex_lock(&volume->read_threads_mutex);
+ for (i = 0; i < volume->geometry->pages_per_chapter; i++)
+ invalidate_page(&volume->page_cache, first_page + i);
+ mutex_unlock(&volume->read_threads_mutex);
+}
+
+/*
+ * Donate an index page from a newly written chapter to the page cache, since it is likely to be
+ * used again soon. The caller must already hold the reader thread mutex.
+ */
+static int donate_index_page_locked(struct volume *volume, u32 physical_chapter,
+ u32 index_page_number, struct dm_buffer *page_buffer)
+{
+ int result;
+ struct cached_page *page = NULL;
+ u32 physical_page =
+ map_to_physical_page(volume->geometry, physical_chapter,
+ index_page_number);
+
+ page = select_victim_in_cache(&volume->page_cache);
+ page->buffer = page_buffer;
+ result = init_chapter_index_page(volume, dm_bufio_get_block_data(page_buffer),
+ physical_chapter, index_page_number,
+ &page->index_page);
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning("Error initialize chapter index page");
+ cancel_page_in_cache(&volume->page_cache, physical_page, page);
+ return result;
+ }
+
+ result = put_page_in_cache(&volume->page_cache, physical_page, page);
+ if (result != UDS_SUCCESS) {
+ vdo_log_warning("Error putting page %u in cache", physical_page);
+ cancel_page_in_cache(&volume->page_cache, physical_page, page);
+ return result;
+ }
+
+ return UDS_SUCCESS;
+}
+
+static int write_index_pages(struct volume *volume, u32 physical_chapter_number,
+ struct open_chapter_index *chapter_index)
+{
+ struct index_geometry *geometry = volume->geometry;
+ struct dm_buffer *page_buffer;
+ u32 first_index_page = map_to_physical_page(geometry, physical_chapter_number, 0);
+ u32 delta_list_number = 0;
+ u32 index_page_number;
+
+ for (index_page_number = 0;
+ index_page_number < geometry->index_pages_per_chapter;
+ index_page_number++) {
+ u8 *page_data;
+ u32 physical_page = first_index_page + index_page_number;
+ u32 lists_packed;
+ bool last_page;
+ int result;
+
+ page_data = dm_bufio_new(volume->client, physical_page, &page_buffer);
+ if (IS_ERR(page_data)) {
+ return vdo_log_warning_strerror(-PTR_ERR(page_data),
+ "failed to prepare index page");
+ }
+
+ last_page = ((index_page_number + 1) == geometry->index_pages_per_chapter);
+ result = uds_pack_open_chapter_index_page(chapter_index, page_data,
+ delta_list_number, last_page,
+ &lists_packed);
+ if (result != UDS_SUCCESS) {
+ dm_bufio_release(page_buffer);
+ return vdo_log_warning_strerror(result,
+ "failed to pack index page");
+ }
+
+ dm_bufio_mark_buffer_dirty(page_buffer);
+
+ if (lists_packed == 0) {
+ vdo_log_debug("no delta lists packed on chapter %u page %u",
+ physical_chapter_number, index_page_number);
+ } else {
+ delta_list_number += lists_packed;
+ }
+
+ uds_update_index_page_map(volume->index_page_map,
+ chapter_index->virtual_chapter_number,
+ physical_chapter_number, index_page_number,
+ delta_list_number - 1);
+
+ mutex_lock(&volume->read_threads_mutex);
+ result = donate_index_page_locked(volume, physical_chapter_number,
+ index_page_number, page_buffer);
+ mutex_unlock(&volume->read_threads_mutex);
+ if (result != UDS_SUCCESS) {
+ dm_bufio_release(page_buffer);
+ return result;
+ }
+ }
+
+ return UDS_SUCCESS;
+}
+
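+/*
+ * Lay out the sorted records as an implicit binary search tree stored in the
+ * record page. An in-order traversal of the node slots consumes the sorted
+ * records in order; for example (an illustrative three-record page), with
+ * sorted records A < B < C, node 0 receives B, its left child (node 1)
+ * receives A, and its right child (node 2) receives C.
+ */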
+static u32 encode_tree(u8 record_page[],
+ const struct uds_volume_record *sorted_pointers[],
+ u32 next_record, u32 node, u32 node_count)
+{
+ if (node < node_count) {
+ u32 child = (2 * node) + 1;
+
+ next_record = encode_tree(record_page, sorted_pointers, next_record,
+ child, node_count);
+
+ /*
+ * In-order traversal: copy the contents of the next record into the page at the
+ * node offset.
+ */
+ memcpy(&record_page[node * BYTES_PER_RECORD],
+ sorted_pointers[next_record++], BYTES_PER_RECORD);
+
+ next_record = encode_tree(record_page, sorted_pointers, next_record,
+ child + 1, node_count);
+ }
+
+ return next_record;
+}
+
+static int encode_record_page(const struct volume *volume,
+ const struct uds_volume_record records[], u8 record_page[])
+{
+ int result;
+ u32 i;
+ u32 records_per_page = volume->geometry->records_per_page;
+ const struct uds_volume_record **record_pointers = volume->record_pointers;
+
+ for (i = 0; i < records_per_page; i++)
+ record_pointers[i] = &records[i];
+
+ /*
+ * Sort the record pointers by using just the names in the records, which is less work than
+ * sorting the entire record values.
+ */
+ BUILD_BUG_ON(offsetof(struct uds_volume_record, name) != 0);
+ result = uds_radix_sort(volume->radix_sorter, (const u8 **) record_pointers,
+ records_per_page, UDS_RECORD_NAME_SIZE);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ encode_tree(record_page, record_pointers, 0, 0, records_per_page);
+ return UDS_SUCCESS;
+}
+
+static int write_record_pages(struct volume *volume, u32 physical_chapter_number,
+ const struct uds_volume_record *records)
+{
+ u32 record_page_number;
+ struct index_geometry *geometry = volume->geometry;
+ struct dm_buffer *page_buffer;
+ const struct uds_volume_record *next_record = records;
+ u32 first_record_page = map_to_physical_page(geometry, physical_chapter_number,
+ geometry->index_pages_per_chapter);
+
+ for (record_page_number = 0;
+ record_page_number < geometry->record_pages_per_chapter;
+ record_page_number++) {
+ u8 *page_data;
+ u32 physical_page = first_record_page + record_page_number;
+ int result;
+
+ page_data = dm_bufio_new(volume->client, physical_page, &page_buffer);
+ if (IS_ERR(page_data)) {
+ return vdo_log_warning_strerror(-PTR_ERR(page_data),
+ "failed to prepare record page");
+ }
+
+ result = encode_record_page(volume, next_record, page_data);
+ if (result != UDS_SUCCESS) {
+ dm_bufio_release(page_buffer);
+ return vdo_log_warning_strerror(result,
+ "failed to encode record page %u",
+ record_page_number);
+ }
+
+ next_record += geometry->records_per_page;
+ dm_bufio_mark_buffer_dirty(page_buffer);
+ dm_bufio_release(page_buffer);
+ }
+
+ return UDS_SUCCESS;
+}
+
+int uds_write_chapter(struct volume *volume, struct open_chapter_index *chapter_index,
+ const struct uds_volume_record *records)
+{
+ int result;
+ u32 physical_chapter_number =
+ uds_map_to_physical_chapter(volume->geometry,
+ chapter_index->virtual_chapter_number);
+
+ result = write_index_pages(volume, physical_chapter_number, chapter_index);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = write_record_pages(volume, physical_chapter_number, records);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ result = -dm_bufio_write_dirty_buffers(volume->client);
+ if (result != UDS_SUCCESS)
+ vdo_log_error_strerror(result, "cannot sync chapter to volume");
+
+ return result;
+}
+
+static void probe_chapter(struct volume *volume, u32 chapter_number,
+ u64 *virtual_chapter_number)
+{
+ const struct index_geometry *geometry = volume->geometry;
+ u32 expected_list_number = 0;
+ u32 i;
+ u64 vcn = BAD_CHAPTER;
+
+ *virtual_chapter_number = BAD_CHAPTER;
+ dm_bufio_prefetch(volume->client,
+ map_to_physical_page(geometry, chapter_number, 0),
+ geometry->index_pages_per_chapter);
+
+ for (i = 0; i < geometry->index_pages_per_chapter; i++) {
+ struct delta_index_page *page;
+ int result;
+
+ result = uds_get_volume_index_page(volume, chapter_number, i, &page);
+ if (result != UDS_SUCCESS)
+ return;
+
+ if (page->virtual_chapter_number == BAD_CHAPTER) {
+ vdo_log_error("corrupt index page in chapter %u",
+ chapter_number);
+ return;
+ }
+
+ if (vcn == BAD_CHAPTER) {
+ vcn = page->virtual_chapter_number;
+ } else if (page->virtual_chapter_number != vcn) {
+ vdo_log_error("inconsistent chapter %u index page %u: expected vcn %llu, got vcn %llu",
+ chapter_number, i, (unsigned long long) vcn,
+ (unsigned long long) page->virtual_chapter_number);
+ return;
+ }
+
+ if (expected_list_number != page->lowest_list_number) {
+ vdo_log_error("inconsistent chapter %u index page %u: expected list number %u, got list number %u",
+ chapter_number, i, expected_list_number,
+ page->lowest_list_number);
+ return;
+ }
+ expected_list_number = page->highest_list_number + 1;
+
+ result = uds_validate_chapter_index_page(page, geometry);
+ if (result != UDS_SUCCESS)
+ return;
+ }
+
+ if (chapter_number != uds_map_to_physical_chapter(geometry, vcn)) {
+ vdo_log_error("chapter %u vcn %llu is out of phase (%u)", chapter_number,
+ (unsigned long long) vcn, geometry->chapters_per_volume);
+ return;
+ }
+
+ *virtual_chapter_number = vcn;
+}
+
+/* Find the last valid physical chapter in the volume. */
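+/*
+ * The search probes backward from the limit: the span between probes doubles
+ * after consecutive failures and halves after each success, converging on the
+ * last valid chapter. As an illustrative trace, with a limit of 100 and
+ * chapters 90-99 unreadable, the probes visit chapters 99, 98, 96, 92, 84,
+ * 88, 90, 88, and 89, leaving the limit at 90.
+ */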
+static void find_real_end_of_volume(struct volume *volume, u32 limit, u32 *limit_ptr)
+{
+ u32 span = 1;
+ u32 tries = 0;
+
+ while (limit > 0) {
+ u32 chapter = (span > limit) ? 0 : limit - span;
+ u64 vcn = 0;
+
+ probe_chapter(volume, chapter, &vcn);
+ if (vcn == BAD_CHAPTER) {
+ limit = chapter;
+ if (++tries > 1)
+ span *= 2;
+ } else {
+ if (span == 1)
+ break;
+ span /= 2;
+ tries = 0;
+ }
+ }
+
+ *limit_ptr = limit;
+}
+
+static int find_chapter_limits(struct volume *volume, u32 chapter_limit, u64 *lowest_vcn,
+ u64 *highest_vcn)
+{
+ struct index_geometry *geometry = volume->geometry;
+ u64 zero_vcn;
+ u64 lowest = BAD_CHAPTER;
+ u64 highest = BAD_CHAPTER;
+ u64 moved_chapter = BAD_CHAPTER;
+ u32 left_chapter = 0;
+ u32 right_chapter = 0;
+ u32 bad_chapters = 0;
+
+ /*
+ * This method assumes there is at most one run of contiguous bad chapters caused by
+ * unflushed writes. Either the bad spot is at the beginning and end, or somewhere in the
+ * middle. Wherever it is, the highest and lowest VCNs are adjacent to it. Otherwise the
+ * volume is cleanly saved and somewhere in the middle of it the highest VCN immediately
+ * precedes the lowest one.
+ */
+
+ /* It doesn't matter if this results in a bad spot (BAD_CHAPTER). */
+ probe_chapter(volume, 0, &zero_vcn);
+
+ /*
+ * Binary search for end of the discontinuity in the monotonically increasing virtual
+ * chapter numbers; bad spots are treated as a span of BAD_CHAPTER values. In effect we're
+	 * searching for the index of the smallest value less than zero_vcn. If the search goes off
+	 * the end, chapter 0 has the lowest vcn.
+ *
+ * If a virtual chapter is out-of-order, it will be the one moved by conversion. Always
+ * skip over the moved chapter when searching, adding it to the range at the end if
+ * necessary.
+ */
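+	/*
+	 * As an illustrative example, with eight chapters holding vcns
+	 * [8, 9, 10, 3, 4, 5, 6, 7], zero_vcn is 8. The binary search converges
+	 * on chapter 3 (vcn 3), the first chapter whose vcn is less than
+	 * zero_vcn, so the lowest vcn is 3; the backward scan from that
+	 * boundary then finds the highest vcn, 10, in chapter 2.
+	 */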
+ if (geometry->remapped_physical > 0) {
+ u64 remapped_vcn;
+
+ probe_chapter(volume, geometry->remapped_physical, &remapped_vcn);
+ if (remapped_vcn == geometry->remapped_virtual)
+ moved_chapter = geometry->remapped_physical;
+ }
+
+ left_chapter = 0;
+ right_chapter = chapter_limit;
+
+ while (left_chapter < right_chapter) {
+ u64 probe_vcn;
+ u32 chapter = (left_chapter + right_chapter) / 2;
+
+ if (chapter == moved_chapter)
+ chapter--;
+
+ probe_chapter(volume, chapter, &probe_vcn);
+ if (zero_vcn <= probe_vcn) {
+ left_chapter = chapter + 1;
+ if (left_chapter == moved_chapter)
+ left_chapter++;
+ } else {
+ right_chapter = chapter;
+ }
+ }
+
+	/* If left_chapter goes off the end, chapter 0 has the lowest virtual chapter number. */
+ if (left_chapter >= chapter_limit)
+ left_chapter = 0;
+
+ /* At this point, left_chapter is the chapter with the lowest virtual chapter number. */
+ probe_chapter(volume, left_chapter, &lowest);
+
+ /* The moved chapter might be the lowest in the range. */
+ if ((moved_chapter != BAD_CHAPTER) && (lowest == geometry->remapped_virtual + 1))
+ lowest = geometry->remapped_virtual;
+
+ /*
+ * Circularly scan backwards, moving over any bad chapters until encountering a good one,
+ * which is the chapter with the highest vcn.
+ */
+ while (highest == BAD_CHAPTER) {
+ right_chapter = (right_chapter + chapter_limit - 1) % chapter_limit;
+ if (right_chapter == moved_chapter)
+ continue;
+
+ probe_chapter(volume, right_chapter, &highest);
+ if (bad_chapters++ >= MAX_BAD_CHAPTERS) {
+ vdo_log_error("too many bad chapters in volume: %u",
+ bad_chapters);
+ return UDS_CORRUPT_DATA;
+ }
+ }
+
+ *lowest_vcn = lowest;
+ *highest_vcn = highest;
+ return UDS_SUCCESS;
+}
+
+/*
+ * Find the highest and lowest contiguous chapters present in the volume and determine their
+ * virtual chapter numbers. This is used by rebuild.
+ */
+int uds_find_volume_chapter_boundaries(struct volume *volume, u64 *lowest_vcn,
+ u64 *highest_vcn, bool *is_empty)
+{
+ u32 chapter_limit = volume->geometry->chapters_per_volume;
+
+ find_real_end_of_volume(volume, chapter_limit, &chapter_limit);
+ if (chapter_limit == 0) {
+ *lowest_vcn = 0;
+ *highest_vcn = 0;
+ *is_empty = true;
+ return UDS_SUCCESS;
+ }
+
+ *is_empty = false;
+ return find_chapter_limits(volume, chapter_limit, lowest_vcn, highest_vcn);
+}
+
+int __must_check uds_replace_volume_storage(struct volume *volume,
+ struct index_layout *layout,
+ struct block_device *bdev)
+{
+ int result;
+ u32 i;
+
+ result = uds_replace_index_layout_storage(layout, bdev);
+ if (result != UDS_SUCCESS)
+ return result;
+
+ /* Release all outstanding dm_bufio objects */
+ for (i = 0; i < volume->page_cache.indexable_pages; i++)
+ volume->page_cache.index[i] = volume->page_cache.cache_slots;
+ for (i = 0; i < volume->page_cache.cache_slots; i++)
+ clear_cache_page(&volume->page_cache, &volume->page_cache.cache[i]);
+ if (volume->sparse_cache != NULL)
+ uds_invalidate_sparse_cache(volume->sparse_cache);
+ if (volume->client != NULL)
+ dm_bufio_client_destroy(vdo_forget(volume->client));
+
+ return uds_open_volume_bufio(layout, volume->geometry->bytes_per_page,
+ volume->reserved_buffers, &volume->client);
+}
+
+static int __must_check initialize_page_cache(struct page_cache *cache,
+ const struct index_geometry *geometry,
+ u32 chapters_in_cache,
+ unsigned int zone_count)
+{
+ int result;
+ u32 i;
+
+ cache->indexable_pages = geometry->pages_per_volume + 1;
+ cache->cache_slots = chapters_in_cache * geometry->record_pages_per_chapter;
+ cache->zone_count = zone_count;
+ atomic64_set(&cache->clock, 1);
+
+ result = VDO_ASSERT((cache->cache_slots <= VOLUME_CACHE_MAX_ENTRIES),
+ "requested cache size, %u, within limit %u",
+ cache->cache_slots, VOLUME_CACHE_MAX_ENTRIES);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(VOLUME_CACHE_MAX_QUEUED_READS, struct queued_read,
+ "volume read queue", &cache->read_queue);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(cache->zone_count, struct search_pending_counter,
+ "Volume Cache Zones", &cache->search_pending_counters);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(cache->indexable_pages, u16, "page cache index",
+ &cache->index);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(cache->cache_slots, struct cached_page, "page cache cache",
+ &cache->cache);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Initialize index values to invalid values. */
+ for (i = 0; i < cache->indexable_pages; i++)
+ cache->index[i] = cache->cache_slots;
+
+ for (i = 0; i < cache->cache_slots; i++)
+ clear_cache_page(cache, &cache->cache[i]);
+
+ return UDS_SUCCESS;
+}
+
+int uds_make_volume(const struct uds_configuration *config, struct index_layout *layout,
+ struct volume **new_volume)
+{
+ unsigned int i;
+ struct volume *volume = NULL;
+ struct index_geometry *geometry;
+ unsigned int reserved_buffers;
+ int result;
+
+ result = vdo_allocate(1, struct volume, "volume", &volume);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ volume->nonce = uds_get_volume_nonce(layout);
+
+ result = uds_copy_index_geometry(config->geometry, &volume->geometry);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume(volume);
+ return vdo_log_warning_strerror(result,
+ "failed to allocate geometry: error");
+ }
+ geometry = volume->geometry;
+
+ /*
+ * Reserve a buffer for each entry in the page cache, one for the chapter writer, and one
+ * for each entry in the sparse cache.
+ */
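+	/*
+	 * For example (illustrative numbers): caching 10 chapters with 7 record
+	 * pages per chapter reserves 10 * 7 + 1 = 71 buffers, plus one buffer
+	 * per cached index page if the geometry is sparse.
+	 */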
+ reserved_buffers = config->cache_chapters * geometry->record_pages_per_chapter;
+ reserved_buffers += 1;
+ if (uds_is_sparse_index_geometry(geometry))
+ reserved_buffers += (config->cache_chapters * geometry->index_pages_per_chapter);
+ volume->reserved_buffers = reserved_buffers;
+ result = uds_open_volume_bufio(layout, geometry->bytes_per_page,
+ volume->reserved_buffers, &volume->client);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ result = uds_make_radix_sorter(geometry->records_per_page,
+ &volume->radix_sorter);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ result = vdo_allocate(geometry->records_per_page,
+ const struct uds_volume_record *, "record pointers",
+ &volume->record_pointers);
+ if (result != VDO_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ if (uds_is_sparse_index_geometry(geometry)) {
+ size_t page_size = sizeof(struct delta_index_page) + geometry->bytes_per_page;
+
+ result = uds_make_sparse_cache(geometry, config->cache_chapters,
+ config->zone_count,
+ &volume->sparse_cache);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ volume->cache_size =
+ page_size * geometry->index_pages_per_chapter * config->cache_chapters;
+ }
+
+ result = initialize_page_cache(&volume->page_cache, geometry,
+ config->cache_chapters, config->zone_count);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ volume->cache_size += volume->page_cache.cache_slots * sizeof(struct delta_index_page);
+ result = uds_make_index_page_map(geometry, &volume->index_page_map);
+ if (result != UDS_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ mutex_init(&volume->read_threads_mutex);
+ uds_init_cond(&volume->read_threads_read_done_cond);
+ uds_init_cond(&volume->read_threads_cond);
+
+ result = vdo_allocate(config->read_threads, struct thread *, "reader threads",
+ &volume->reader_threads);
+ if (result != VDO_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ for (i = 0; i < config->read_threads; i++) {
+ result = vdo_create_thread(read_thread_function, (void *) volume,
+ "reader", &volume->reader_threads[i]);
+ if (result != VDO_SUCCESS) {
+ uds_free_volume(volume);
+ return result;
+ }
+
+ volume->read_thread_count = i + 1;
+ }
+
+ *new_volume = volume;
+ return UDS_SUCCESS;
+}
+
+static void uninitialize_page_cache(struct page_cache *cache)
+{
+ u16 i;
+
+ if (cache->cache != NULL) {
+ for (i = 0; i < cache->cache_slots; i++)
+ release_page_buffer(&cache->cache[i]);
+ }
+ vdo_free(cache->index);
+ vdo_free(cache->cache);
+ vdo_free(cache->search_pending_counters);
+ vdo_free(cache->read_queue);
+}
+
+void uds_free_volume(struct volume *volume)
+{
+ if (volume == NULL)
+ return;
+
+ if (volume->reader_threads != NULL) {
+ unsigned int i;
+
+ /* This works even if some threads weren't started. */
+ mutex_lock(&volume->read_threads_mutex);
+ volume->read_threads_exiting = true;
+ uds_broadcast_cond(&volume->read_threads_cond);
+ mutex_unlock(&volume->read_threads_mutex);
+ for (i = 0; i < volume->read_thread_count; i++)
+ vdo_join_threads(volume->reader_threads[i]);
+ vdo_free(volume->reader_threads);
+ volume->reader_threads = NULL;
+ }
+
+ /* Must destroy the client AFTER freeing the cached pages. */
+ uninitialize_page_cache(&volume->page_cache);
+ uds_free_sparse_cache(volume->sparse_cache);
+ if (volume->client != NULL)
+ dm_bufio_client_destroy(vdo_forget(volume->client));
+
+ uds_free_index_page_map(volume->index_page_map);
+ uds_free_radix_sorter(volume->radix_sorter);
+ vdo_free(volume->geometry);
+ vdo_free(volume->record_pointers);
+ vdo_free(volume);
+}
diff --git a/drivers/md/dm-vdo/indexer/volume.h b/drivers/md/dm-vdo/indexer/volume.h
new file mode 100644
index 000000000000..8679a5e55347
--- /dev/null
+++ b/drivers/md/dm-vdo/indexer/volume.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_VOLUME_H
+#define UDS_VOLUME_H
+
+#include <linux/atomic.h>
+#include <linux/cache.h>
+#include <linux/dm-bufio.h>
+#include <linux/limits.h>
+
+#include "permassert.h"
+#include "thread-utils.h"
+
+#include "chapter-index.h"
+#include "config.h"
+#include "geometry.h"
+#include "indexer.h"
+#include "index-layout.h"
+#include "index-page-map.h"
+#include "radix-sort.h"
+#include "sparse-cache.h"
+
+/*
+ * The volume manages deduplication records on permanent storage. The term "volume" can also refer
+ * to the region of permanent storage where the records (and the chapters containing them) are
+ * stored. The volume handles all I/O to this region by reading, caching, and writing chapter pages
+ * as necessary.
+ */
+
+enum index_lookup_mode {
+ /* Always do lookups in all chapters normally */
+ LOOKUP_NORMAL,
+ /* Only do a subset of lookups needed when rebuilding an index */
+ LOOKUP_FOR_REBUILD,
+};
+
+struct queued_read {
+ bool invalid;
+ bool reserved;
+ u32 physical_page;
+ struct uds_request *first_request;
+ struct uds_request *last_request;
+};
+
+struct __aligned(L1_CACHE_BYTES) search_pending_counter {
+ u64 atomic_value;
+};
+
+struct cached_page {
+ /* Whether this page is currently being read asynchronously */
+ bool read_pending;
+ /* The physical page stored in this cache entry */
+ u32 physical_page;
+ /* The value of the volume clock when this page was last used */
+ s64 last_used;
+ /* The cached page buffer */
+ struct dm_buffer *buffer;
+ /* The chapter index page, meaningless for record pages */
+ struct delta_index_page index_page;
+};
+
+struct page_cache {
+ /* The number of zones */
+ unsigned int zone_count;
+ /* The number of volume pages that can be cached */
+ u32 indexable_pages;
+ /* The maximum number of simultaneously cached pages */
+ u16 cache_slots;
+ /* An index for each physical page noting where it is in the cache */
+ u16 *index;
+ /* The array of cached pages */
+ struct cached_page *cache;
+ /* A counter for each zone tracking if a search is occurring there */
+ struct search_pending_counter *search_pending_counters;
+ /* The read queue entries as a circular array */
+ struct queued_read *read_queue;
+
+ /* All entries above this point are constant after initialization. */
+
+ /*
+ * These values are all indexes into the array of read queue entries. New entries in the
+ * read queue are enqueued at read_queue_last. To dequeue entries, a reader thread gets the
+ * lock and then claims the entry pointed to by read_queue_next_read and increments that
+ * value. After the read is completed, the reader thread calls release_read_queue_entry(),
+ * which increments read_queue_first until it points to a pending read, or is equal to
+ * read_queue_next_read. This means that if multiple reads are outstanding,
+ * read_queue_first might not advance until the last of the reads finishes.
+ */
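+	/*
+	 * For example (illustrative): if the entries at positions 5 and 6 have
+	 * both been claimed and the read for entry 6 finishes first,
+	 * read_queue_first remains at 5 until that entry also completes, and
+	 * then advances past both.
+	 */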
+ u16 read_queue_first;
+ u16 read_queue_next_read;
+ u16 read_queue_last;
+
+ atomic64_t clock;
+};
+
+struct volume {
+ struct index_geometry *geometry;
+ struct dm_bufio_client *client;
+ u64 nonce;
+ size_t cache_size;
+
+ /* A single page worth of records, for sorting */
+ const struct uds_volume_record **record_pointers;
+ /* Sorter for sorting records within each page */
+ struct radix_sorter *radix_sorter;
+
+ struct sparse_cache *sparse_cache;
+ struct page_cache page_cache;
+ struct index_page_map *index_page_map;
+
+ struct mutex read_threads_mutex;
+ struct cond_var read_threads_cond;
+ struct cond_var read_threads_read_done_cond;
+ struct thread **reader_threads;
+ unsigned int read_thread_count;
+ bool read_threads_exiting;
+
+ enum index_lookup_mode lookup_mode;
+ unsigned int reserved_buffers;
+};
+
+int __must_check uds_make_volume(const struct uds_configuration *config,
+ struct index_layout *layout,
+ struct volume **new_volume);
+
+void uds_free_volume(struct volume *volume);
+
+int __must_check uds_replace_volume_storage(struct volume *volume,
+ struct index_layout *layout,
+ struct block_device *bdev);
+
+int __must_check uds_find_volume_chapter_boundaries(struct volume *volume,
+ u64 *lowest_vcn, u64 *highest_vcn,
+ bool *is_empty);
+
+int __must_check uds_search_volume_page_cache(struct volume *volume,
+ struct uds_request *request,
+ bool *found);
+
+int __must_check uds_search_volume_page_cache_for_rebuild(struct volume *volume,
+ const struct uds_record_name *name,
+ u64 virtual_chapter,
+ bool *found);
+
+int __must_check uds_search_cached_record_page(struct volume *volume,
+ struct uds_request *request, u32 chapter,
+ u16 record_page_number, bool *found);
+
+void uds_forget_chapter(struct volume *volume, u64 chapter);
+
+int __must_check uds_write_chapter(struct volume *volume,
+ struct open_chapter_index *chapter_index,
+ const struct uds_volume_record records[]);
+
+void uds_prefetch_volume_chapter(const struct volume *volume, u32 chapter);
+
+int __must_check uds_read_chapter_index_from_volume(const struct volume *volume,
+ u64 virtual_chapter,
+ struct dm_buffer *volume_buffers[],
+ struct delta_index_page index_pages[]);
+
+int __must_check uds_get_volume_record_page(struct volume *volume, u32 chapter,
+ u32 page_number, u8 **data_ptr);
+
+int __must_check uds_get_volume_index_page(struct volume *volume, u32 chapter,
+ u32 page_number,
+ struct delta_index_page **page_ptr);
+
+#endif /* UDS_VOLUME_H */
diff --git a/drivers/md/dm-vdo/int-map.c b/drivers/md/dm-vdo/int-map.c
new file mode 100644
index 000000000000..3aa438f84ea1
--- /dev/null
+++ b/drivers/md/dm-vdo/int-map.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+/**
+ * DOC:
+ *
+ * Hash table implementation of a map from integers to pointers, implemented using the Hopscotch
+ * Hashing algorithm by Herlihy, Shavit, and Tzafrir (see
+ * http://en.wikipedia.org/wiki/Hopscotch_hashing). This implementation does not contain any of the
+ * locking/concurrency features of the algorithm, just the collision resolution scheme.
+ *
+ * Hopscotch Hashing is based on hashing with open addressing and linear probing. All the entries
+ * are stored in a fixed array of buckets, with no dynamic allocation for collisions. Unlike linear
+ * probing, all the entries that hash to a given bucket are stored within a fixed neighborhood
+ * starting at that bucket. Chaining is effectively represented as a bit vector relative to each
+ * bucket instead of as pointers or explicit offsets.
+ *
+ * When an empty bucket cannot be found within a given neighborhood, subsequent neighborhoods are
+ * searched, and one or more entries will "hop" into those neighborhoods. When this process works,
+ * an empty bucket will move into the desired neighborhood, allowing the entry to be added. When
+ * that process fails (typically when the buckets are around 90% full), the table must be resized
+ * and all the entries rehashed and added to the expanded table.
+ *
+ * Unlike linear probing, the number of buckets that must be searched in the worst case has a fixed
+ * upper bound (the size of the neighborhood). Those entries occupy a small number of memory cache
+ * lines, leading to improved use of the cache (fewer misses on both successful and unsuccessful
+ * searches). Hopscotch hashing outperforms linear probing at much higher load factors, so even
+ * with the increased memory burden for maintaining the hop vectors, less memory is needed to
+ * achieve that performance. Hopscotch is also immune to "contamination" from deleting entries
+ * since entries are genuinely removed instead of being replaced by a placeholder.
+ *
+ * The published description of the algorithm used a bit vector, but the paper alludes to an offset
+ * scheme which is used by this implementation. Since the entries in the neighborhood are within N
+ * entries of the hash bucket at the start of the neighborhood, a pair of small offset fields each
+ * log2(N) bits wide is all that's needed to maintain the hops as a linked list. In order to encode
+ * "no next hop" (i.e. NULL) as the natural initial value of zero, the offsets are biased by one
+ * (i.e. 0 => NULL, 1 => offset=0, 2 => offset=1, etc.) We can represent neighborhoods of up to 255
+ * entries with just 8+8=16 bits per entry. The hop list is sorted by hop offset so the first entry
+ * in the list is always the bucket closest to the start of the neighborhood.
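+ *
+ * For example (illustrative): if the neighborhood starting at bucket 10 has
+ * entries in buckets 10 and 13, then bucket 10 has first_hop == 1 (bucket 10
+ * itself, offset 0) and next_hop == 4 (bucket 13, offset 3), and bucket 13
+ * has next_hop == 0, terminating the hop list.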
+ *
+ * While individual accesses tend to be very fast, the table resize operations are very, very
+ * expensive. If an upper bound on the latency of adding an entry to the table is needed, we either
+ * need to ensure the table is pre-sized to be large enough so no resize is ever needed, or we'll
+ * need to develop an approach to incrementally resize the table.
+ */
+
+#include "int-map.h"
+
+#include <linux/minmax.h>
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+
+#define DEFAULT_CAPACITY 16 /* the number of neighborhoods in a new table */
+#define NEIGHBORHOOD 255 /* the number of buckets in each neighborhood */
+#define MAX_PROBES 1024 /* limit on the number of probes for a free bucket */
+#define NULL_HOP_OFFSET 0 /* the hop offset value terminating the hop list */
+#define DEFAULT_LOAD 75 /* a compromise between memory use and performance */
+
+/**
+ * struct bucket - hash bucket
+ *
+ * Buckets are packed together to reduce memory usage and improve cache efficiency. It would be
+ * tempting to encode the hop offsets separately and maintain alignment of key/value pairs, but
+ * it's crucial to keep the hop fields near the buckets that use them so they'll tend to share
+ * cache lines.
+ */
+struct __packed bucket {
+ /**
+ * @first_hop: The biased offset of the first entry in the hop list of the neighborhood
+ * that hashes to this bucket.
+ */
+ u8 first_hop;
+ /** @next_hop: The biased offset of the next bucket in the hop list. */
+ u8 next_hop;
+ /** @key: The key stored in this bucket. */
+ u64 key;
+ /** @value: The value stored in this bucket (NULL if empty). */
+ void *value;
+};
+
+/**
+ * struct int_map - The concrete definition of the opaque int_map type.
+ *
+ * To avoid having to wrap the neighborhoods of the last entries back around to the start of the
+ * bucket array, we allocate a few more buckets at the end of the array instead, which is why
+ * capacity and bucket_count are different.
+ */
+struct int_map {
+ /** @size: The number of entries stored in the map. */
+ size_t size;
+ /** @capacity: The number of neighborhoods in the map. */
+ size_t capacity;
+	/** @bucket_count: The number of buckets in the bucket array. */
+ size_t bucket_count;
+ /** @buckets: The array of hash buckets. */
+ struct bucket *buckets;
+};
+
+/**
+ * mix() - The Google CityHash 16-byte hash mixing function.
+ * @input1: The first input value.
+ * @input2: The second input value.
+ *
+ * Return: A hash of the two inputs.
+ */
+static u64 mix(u64 input1, u64 input2)
+{
+ static const u64 CITY_MULTIPLIER = 0x9ddfea08eb382d69ULL;
+ u64 hash = (input1 ^ input2);
+
+ hash *= CITY_MULTIPLIER;
+ hash ^= (hash >> 47);
+ hash ^= input2;
+ hash *= CITY_MULTIPLIER;
+ hash ^= (hash >> 47);
+ hash *= CITY_MULTIPLIER;
+ return hash;
+}
+
+/**
+ * hash_key() - Calculate a 64-bit non-cryptographic hash value for the provided 64-bit integer
+ * key.
+ * @key: The mapping key.
+ *
+ * The implementation is based on Google's CityHash, only handling the specific case of an 8-byte
+ * input.
+ *
+ * Return: The hash of the mapping key.
+ */
+static u64 hash_key(u64 key)
+{
+ /*
+ * Aliasing restrictions forbid us from casting pointer types, so use a union to convert a
+ * single u64 to two u32 values.
+ */
+ union {
+ u64 u64;
+ u32 u32[2];
+ } pun = {.u64 = key};
+
+ return mix(sizeof(key) + (((u64) pun.u32[0]) << 3), pun.u32[1]);
+}
+
+/**
+ * allocate_buckets() - Initialize an int_map.
+ * @map: The map to initialize.
+ * @capacity: The initial capacity of the map.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int allocate_buckets(struct int_map *map, size_t capacity)
+{
+ map->size = 0;
+ map->capacity = capacity;
+
+ /*
+ * Allocate NEIGHBORHOOD - 1 extra buckets so the last bucket can have a full neighborhood
+	 * without having to wrap back around to element zero.
+ */
+ map->bucket_count = capacity + (NEIGHBORHOOD - 1);
+ return vdo_allocate(map->bucket_count, struct bucket,
+ "struct int_map buckets", &map->buckets);
+}
+
+/**
+ * vdo_int_map_create() - Allocate and initialize an int_map.
+ * @initial_capacity: The number of entries the map should initially be capable of holding (zero
+ * tells the map to use its own small default).
+ * @map_ptr: Output, a pointer to hold the new int_map.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_int_map_create(size_t initial_capacity, struct int_map **map_ptr)
+{
+ struct int_map *map;
+ int result;
+ size_t capacity;
+
+ result = vdo_allocate(1, struct int_map, "struct int_map", &map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Use the default capacity if the caller did not specify one. */
+ capacity = (initial_capacity > 0) ? initial_capacity : DEFAULT_CAPACITY;
+
+ /*
+	 * Scale up the capacity to account for the desired load factor (e.g., to hold 1000 entries
+	 * at 80% load we need a capacity of 1250).
+ */
+ capacity = capacity * 100 / DEFAULT_LOAD;
+
+ result = allocate_buckets(map, capacity);
+ if (result != VDO_SUCCESS) {
+ vdo_int_map_free(vdo_forget(map));
+ return result;
+ }
+
+ *map_ptr = map;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_int_map_free() - Free an int_map.
+ * @map: The int_map to free.
+ *
+ * NOTE: The map does not own the pointer values stored in the map and they are not freed by this
+ * call.
+ */
+void vdo_int_map_free(struct int_map *map)
+{
+ if (map == NULL)
+ return;
+
+ vdo_free(vdo_forget(map->buckets));
+ vdo_free(vdo_forget(map));
+}
+
+/**
+ * vdo_int_map_size() - Get the number of entries stored in an int_map.
+ * @map: The int_map to query.
+ *
+ * Return: The number of entries in the map.
+ */
+size_t vdo_int_map_size(const struct int_map *map)
+{
+ return map->size;
+}
+
+/**
+ * dereference_hop() - Convert a biased hop offset within a neighborhood to a pointer to the bucket
+ * it references.
+ * @neighborhood: The first bucket in the neighborhood.
+ * @hop_offset: The biased hop offset to the desired bucket.
+ *
+ * Return: NULL if hop_offset is zero, otherwise a pointer to the bucket in the neighborhood at
+ * hop_offset - 1.
+ */
+static struct bucket *dereference_hop(struct bucket *neighborhood, unsigned int hop_offset)
+{
+ BUILD_BUG_ON(NULL_HOP_OFFSET != 0);
+ if (hop_offset == NULL_HOP_OFFSET)
+ return NULL;
+
+ return &neighborhood[hop_offset - 1];
+}
+
+/**
+ * insert_in_hop_list() - Add a bucket into the hop list for the neighborhood.
+ * @neighborhood: The first bucket in the neighborhood.
+ * @new_bucket: The bucket to add to the hop list.
+ *
+ * The bucket is inserted into the list so that the hop list remains sorted by hop offset.
+ */
+static void insert_in_hop_list(struct bucket *neighborhood, struct bucket *new_bucket)
+{
+ /* Zero indicates a NULL hop offset, so bias the hop offset by one. */
+ int hop_offset = 1 + (new_bucket - neighborhood);
+
+ /* Handle the special case of adding a bucket at the start of the list. */
+ int next_hop = neighborhood->first_hop;
+
+ if ((next_hop == NULL_HOP_OFFSET) || (next_hop > hop_offset)) {
+ new_bucket->next_hop = next_hop;
+ neighborhood->first_hop = hop_offset;
+ return;
+ }
+
+ /* Search the hop list for the insertion point that maintains the sort order. */
+ for (;;) {
+ struct bucket *bucket = dereference_hop(neighborhood, next_hop);
+
+ next_hop = bucket->next_hop;
+
+ if ((next_hop == NULL_HOP_OFFSET) || (next_hop > hop_offset)) {
+ new_bucket->next_hop = next_hop;
+ bucket->next_hop = hop_offset;
+ return;
+ }
+ }
+}
+
+/**
+ * select_bucket() - Select and return the hash bucket for a given search key.
+ * @map: The map to search.
+ * @key: The mapping key.
+ */
+static struct bucket *select_bucket(const struct int_map *map, u64 key)
+{
+ /*
+ * Calculate a good hash value for the provided key. We want exactly 32 bits, so mask the
+ * result.
+ */
+ u64 hash = hash_key(key) & 0xFFFFFFFF;
+
+ /*
+ * Scale the 32-bit hash to a bucket index by treating it as a binary fraction and
+ * multiplying that by the capacity. If the hash is uniformly distributed over [0 ..
+ * 2^32-1], then (hash * capacity / 2^32) should be uniformly distributed over [0 ..
+ * capacity-1]. The multiply and shift is much faster than a divide (modulus) on X86 CPUs.
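+	 *
+	 * For example (illustrative): with a capacity of 1250, a hash of
+	 * 0x80000000 (half of the 32-bit range) maps to bucket
+	 * (0x80000000 * 1250) >> 32 = 625.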
+ */
+ return &map->buckets[(hash * map->capacity) >> 32];
+}
+
+/**
+ * search_hop_list() - Search the hop list associated with given hash bucket for a given search
+ * key.
+ * @map: The map being searched.
+ * @bucket: The map bucket to search for the key.
+ * @key: The mapping key.
+ * @previous_ptr: Output. If not NULL, a pointer in which to store the bucket in the list preceding
+ * the one that had the matching key.
+ *
+ * If the key is found, returns a pointer to the entry (bucket or collision), otherwise returns
+ * NULL.
+ *
+ * Return: An entry that matches the key, or NULL if not found.
+ */
+static struct bucket *search_hop_list(struct int_map *map __always_unused,
+ struct bucket *bucket,
+ u64 key,
+ struct bucket **previous_ptr)
+{
+ struct bucket *previous = NULL;
+ unsigned int next_hop = bucket->first_hop;
+
+ while (next_hop != NULL_HOP_OFFSET) {
+ /*
+ * Check the neighboring bucket indexed by the offset for the
+ * desired key.
+ */
+ struct bucket *entry = dereference_hop(bucket, next_hop);
+
+ if ((key == entry->key) && (entry->value != NULL)) {
+ if (previous_ptr != NULL)
+ *previous_ptr = previous;
+ return entry;
+ }
+ next_hop = entry->next_hop;
+ previous = entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * vdo_int_map_get() - Retrieve the value associated with a given key from the int_map.
+ * @map: The int_map to query.
+ * @key: The key to look up.
+ *
+ * Return: The value associated with the given key, or NULL if the key is not mapped to any value.
+ */
+void *vdo_int_map_get(struct int_map *map, u64 key)
+{
+ struct bucket *match = search_hop_list(map, select_bucket(map, key), key, NULL);
+
+ return ((match != NULL) ? match->value : NULL);
+}
+
+/**
+ * resize_buckets() - Increase the number of hash buckets.
+ * @map: The map to resize.
+ *
+ * Resizes and rehashes all the existing entries, storing them in the new buckets.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int resize_buckets(struct int_map *map)
+{
+ int result;
+ size_t i;
+
+ /* Copy the top-level map data to the stack. */
+ struct int_map old_map = *map;
+
+ /* Re-initialize the map to be empty and 50% larger. */
+ size_t new_capacity = map->capacity / 2 * 3;
+
+ vdo_log_info("%s: attempting resize from %zu to %zu, current size=%zu",
+ __func__, map->capacity, new_capacity, map->size);
+ result = allocate_buckets(map, new_capacity);
+ if (result != VDO_SUCCESS) {
+ *map = old_map;
+ return result;
+ }
+
+ /* Populate the new hash table from the entries in the old bucket array. */
+ for (i = 0; i < old_map.bucket_count; i++) {
+ struct bucket *entry = &old_map.buckets[i];
+
+ if (entry->value == NULL)
+ continue;
+
+ result = vdo_int_map_put(map, entry->key, entry->value, true, NULL);
+ if (result != VDO_SUCCESS) {
+ /* Destroy the new partial map and restore the map from the stack. */
+ vdo_free(vdo_forget(map->buckets));
+ *map = old_map;
+ return result;
+ }
+ }
+
+ /* Destroy the old bucket array. */
+ vdo_free(vdo_forget(old_map.buckets));
+ return VDO_SUCCESS;
+}
+
+/**
+ * find_empty_bucket() - Probe the bucket array starting at the given bucket for the next empty
+ * bucket, returning a pointer to it.
+ * @map: The map containing the buckets to search.
+ * @bucket: The bucket at which to start probing.
+ * @max_probes: The maximum number of buckets to search.
+ *
+ * NULL will be returned if the search reaches the end of the bucket array or if the number of
+ * linear probes exceeds a specified limit.
+ *
+ * Return: The next empty bucket, or NULL if the search failed.
+ */
+static struct bucket *
+find_empty_bucket(struct int_map *map, struct bucket *bucket, unsigned int max_probes)
+{
+ /*
+ * Limit the search to either the nearer of the end of the bucket array or a fixed distance
+ * beyond the initial bucket.
+ */
+ ptrdiff_t remaining = &map->buckets[map->bucket_count] - bucket;
+ struct bucket *sentinel = &bucket[min_t(ptrdiff_t, remaining, max_probes)];
+ struct bucket *entry;
+
+ for (entry = bucket; entry < sentinel; entry++) {
+ if (entry->value == NULL)
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * move_empty_bucket() - Move an empty bucket closer to the start of the bucket array.
+ * @map: The map containing the bucket.
+ * @hole: The empty bucket to fill with an entry that precedes it in one of its enclosing
+ * neighborhoods.
+ *
+ * This searches the neighborhoods that contain the empty bucket for a non-empty bucket closer to
+ * the start of the array. If such a bucket is found, this swaps the two buckets by moving the
+ * entry to the empty bucket.
+ *
+ * Return: The bucket that was vacated by moving its entry to the provided hole, or NULL if no
+ * entry could be moved.
+ */
+static struct bucket *move_empty_bucket(struct int_map *map __always_unused,
+ struct bucket *hole)
+{
+ /*
+ * Examine every neighborhood that the empty bucket is part of, starting with the one in
+ * which it is the last bucket. No boundary check is needed for the negative array
+ * arithmetic since this function is only called when hole is at least NEIGHBORHOOD cells
+ * deeper into the array than a valid bucket.
+ */
+ struct bucket *bucket;
+
+ for (bucket = &hole[1 - NEIGHBORHOOD]; bucket < hole; bucket++) {
+ /*
+ * Find the entry that is nearest to the bucket, which means it will be nearest to
+ * the hash bucket whose neighborhood is full.
+ */
+ struct bucket *new_hole = dereference_hop(bucket, bucket->first_hop);
+
+ if (new_hole == NULL) {
+ /*
+ * There are no buckets in this neighborhood that are in use by this one
+ * (they must all be owned by overlapping neighborhoods).
+ */
+ continue;
+ }
+
+ /*
+ * Skip this bucket if its first entry is actually further away than the hole that
+ * we're already trying to fill.
+ */
+ if (hole < new_hole)
+ continue;
+
+ /*
+ * We've found an entry in this neighborhood that we can "hop" further away, moving
+ * the hole closer to the hash bucket, if not all the way into its neighborhood.
+ */
+
+ /*
+ * The entry that will be the new hole is the first bucket in the list, so setting
+		 * first_hop is all that's needed to remove it from the list.
+ */
+ bucket->first_hop = new_hole->next_hop;
+ new_hole->next_hop = NULL_HOP_OFFSET;
+
+ /* Move the entry into the original hole. */
+ hole->key = new_hole->key;
+ hole->value = new_hole->value;
+ new_hole->value = NULL;
+
+ /* Insert the filled hole into the hop list for the neighborhood. */
+ insert_in_hop_list(bucket, hole);
+ return new_hole;
+ }
+
+ /* We couldn't find an entry to relocate to the hole. */
+ return NULL;
+}
+
+/**
+ * update_mapping() - Find and update any existing mapping for a given key, returning the value
+ * associated with the key in the provided pointer.
+ * @map: The int_map to attempt to modify.
+ * @neighborhood: The first bucket in the neighborhood that would contain the search key.
+ * @key: The key with which to associate the new value.
+ * @new_value: The value to be associated with the key.
+ * @update: Whether to overwrite an existing value.
+ * @old_value_ptr: A pointer in which to store the old value (unmodified if no mapping was found).
+ *
+ * Return: true if the map contains a mapping for the key, false if it does not.
+ */
+static bool update_mapping(struct int_map *map, struct bucket *neighborhood,
+ u64 key, void *new_value, bool update, void **old_value_ptr)
+{
+ struct bucket *bucket = search_hop_list(map, neighborhood, key, NULL);
+
+ if (bucket == NULL) {
+ /* There is no bucket containing the key in the neighborhood. */
+ return false;
+ }
+
+ /*
+ * Return the value of the current mapping (if desired) and update the mapping with the new
+ * value (if desired).
+ */
+ if (old_value_ptr != NULL)
+ *old_value_ptr = bucket->value;
+ if (update)
+ bucket->value = new_value;
+ return true;
+}
+
+/**
+ * find_or_make_vacancy() - Find an empty bucket.
+ * @map: The int_map to search or modify.
+ * @neighborhood: The first bucket in the neighborhood in which an empty bucket is needed for a new
+ * mapping.
+ *
+ * Find an empty bucket in a specified neighborhood for a new mapping or attempt to re-arrange
+ * mappings so there is such a bucket. This operation may fail (returning NULL) if an empty bucket
+ * is not available or could not be relocated to the neighborhood.
+ *
+ * Return: a pointer to an empty bucket in the desired neighborhood, or NULL if a vacancy could not
+ * be found or arranged.
+ */
+static struct bucket *find_or_make_vacancy(struct int_map *map,
+ struct bucket *neighborhood)
+{
+ /* Probe within and beyond the neighborhood for the first empty bucket. */
+ struct bucket *hole = find_empty_bucket(map, neighborhood, MAX_PROBES);
+
+ /*
+ * Keep trying until the empty bucket is in the bucket's neighborhood or we are unable to
+ * move it any closer by swapping it with a filled bucket.
+ */
+ while (hole != NULL) {
+ int distance = hole - neighborhood;
+
+ if (distance < NEIGHBORHOOD) {
+ /*
+ * We've found or relocated an empty bucket close enough to the initial
+ * hash bucket to be referenced by its hop vector.
+ */
+ return hole;
+ }
+
+ /*
+ * The nearest empty bucket isn't within the neighborhood that must contain the new
+		 * entry, so try to swap it with a bucket that is closer.
+ */
+ hole = move_empty_bucket(map, hole);
+ }
+
+ return NULL;
+}
+
+/**
+ * vdo_int_map_put() - Try to associate a value with an integer.
+ * @map: The int_map to attempt to modify.
+ * @key: The key with which to associate the new value.
+ * @new_value: The value to be associated with the key.
+ * @update: Whether to overwrite an existing value.
+ * @old_value_ptr: A pointer in which to store either the old value (if the key was already mapped)
+ * or NULL if the map did not contain the key; NULL may be provided if the caller
+ * does not need to know the old value
+ *
+ * Try to associate a value (a pointer) with an integer in an int_map. If the map already contains
+ * a mapping for the provided key, the old value is only replaced with the specified value if
+ * update is true. In either case the old value is returned. If the map does not already contain a
+ * value for the specified key, the new value is added regardless of the value of update.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_int_map_put(struct int_map *map, u64 key, void *new_value, bool update,
+ void **old_value_ptr)
+{
+ struct bucket *neighborhood, *bucket;
+
+ if (unlikely(new_value == NULL))
+ return -EINVAL;
+
+ /*
+ * Select the bucket at the start of the neighborhood that must contain any entry for the
+ * provided key.
+ */
+ neighborhood = select_bucket(map, key);
+
+ /*
+ * Check whether the neighborhood already contains an entry for the key, in which case we
+ * optionally update it, returning the old value.
+ */
+ if (update_mapping(map, neighborhood, key, new_value, update, old_value_ptr))
+ return VDO_SUCCESS;
+
+ /*
+ * Find an empty bucket in the desired neighborhood for the new entry or re-arrange entries
+ * in the map so there is such a bucket. This operation will usually succeed; the loop body
+ * will only be executed on the rare occasions that we have to resize the map.
+ */
+ while ((bucket = find_or_make_vacancy(map, neighborhood)) == NULL) {
+ int result;
+
+ /*
+ * There is no empty bucket in which to put the new entry in the current map, so
+ * we're forced to allocate a new bucket array with a larger capacity, re-hash all
+ * the entries into those buckets, and try again (a very expensive operation for
+ * large maps).
+ */
+ result = resize_buckets(map);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /*
+ * Resizing the map invalidates all pointers to buckets, so recalculate the
+ * neighborhood pointer.
+ */
+ neighborhood = select_bucket(map, key);
+ }
+
+ /* Put the new entry in the empty bucket, adding it to the neighborhood. */
+ bucket->key = key;
+ bucket->value = new_value;
+ insert_in_hop_list(neighborhood, bucket);
+ map->size += 1;
+
+ /* There was no existing entry, so there was no old value to be returned. */
+ if (old_value_ptr != NULL)
+ *old_value_ptr = NULL;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_int_map_remove() - Remove the mapping for a given key from the int_map.
+ * @map: The int_map from which to remove the mapping.
+ * @key: The key whose mapping is to be removed.
+ *
+ * Return: the value that was associated with the key, or NULL if it was not mapped.
+ */
+void *vdo_int_map_remove(struct int_map *map, u64 key)
+{
+ void *value;
+
+ /* Select the bucket to search and search it for an existing entry. */
+ struct bucket *bucket = select_bucket(map, key);
+ struct bucket *previous;
+ struct bucket *victim = search_hop_list(map, bucket, key, &previous);
+
+ if (victim == NULL) {
+ /* There is no matching entry to remove. */
+ return NULL;
+ }
+
+ /*
+ * We found an entry to remove. Save the mapped value to return later and empty the bucket.
+ */
+ map->size -= 1;
+ value = victim->value;
+ victim->value = NULL;
+ victim->key = 0;
+
+ /* The victim bucket is now empty, but it still needs to be spliced out of the hop list. */
+ if (previous == NULL) {
+ /* The victim is the head of the list, so swing first_hop. */
+ bucket->first_hop = victim->next_hop;
+ } else {
+ previous->next_hop = victim->next_hop;
+ }
+
+ victim->next_hop = NULL_HOP_OFFSET;
+ return value;
+}
diff --git a/drivers/md/dm-vdo/int-map.h b/drivers/md/dm-vdo/int-map.h
new file mode 100644
index 000000000000..1858ad799887
--- /dev/null
+++ b/drivers/md/dm-vdo/int-map.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_INT_MAP_H
+#define VDO_INT_MAP_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/**
+ * DOC: int_map
+ *
+ * An int_map associates pointers (void *) with integer keys (u64). NULL pointer values are
+ * not supported.
+ *
+ * The map is implemented as a hash table, which should provide constant-time insert, query, and
+ * remove operations, although the insert may occasionally grow the table, which is linear in the
+ * number of entries in the map. The table will grow as needed to hold new entries, but will not
+ * shrink as entries are removed.
+ */
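+
+/*
+ * A minimal usage sketch (illustrative only; error checking elided, and first and second stand
+ * for arbitrary non-NULL pointers):
+ *
+ *	struct int_map *map;
+ *	void *old = NULL;
+ *
+ *	vdo_int_map_create(16, &map);
+ *	vdo_int_map_put(map, 42, first, true, NULL);	map 42 -> first
+ *	vdo_int_map_put(map, 42, second, false, &old);	old == first; mapping unchanged
+ *	vdo_int_map_get(map, 42);			returns first
+ *	vdo_int_map_remove(map, 42);			returns first
+ *	vdo_int_map_free(map);
+ */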
+
+struct int_map;
+
+int __must_check vdo_int_map_create(size_t initial_capacity, struct int_map **map_ptr);
+
+void vdo_int_map_free(struct int_map *map);
+
+size_t vdo_int_map_size(const struct int_map *map);
+
+void *vdo_int_map_get(struct int_map *map, u64 key);
+
+int __must_check vdo_int_map_put(struct int_map *map, u64 key, void *new_value,
+ bool update, void **old_value_ptr);
+
+void *vdo_int_map_remove(struct int_map *map, u64 key);
+
+#endif /* VDO_INT_MAP_H */
diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c
new file mode 100644
index 000000000000..9a3716bb3c05
--- /dev/null
+++ b/drivers/md/dm-vdo/io-submitter.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "io-submitter.h"
+
+#include <linux/bio.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "data-vio.h"
+#include "logger.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+
+/*
+ * Submission of bio operations to the underlying storage device will go through a separate work
+ * queue thread (or more than one) to prevent blocking in other threads if the storage device has a
+ * full queue. The plug structure allows that thread to do better batching of requests to make the
+ * I/O more efficient.
+ *
+ * When multiple worker threads are used, a thread is chosen for an I/O operation submission based
+ * on the PBN, so a given PBN will consistently wind up on the same thread. Flush operations are
+ * assigned round-robin.
+ *
+ * The map (protected by the mutex) collects pending I/O operations so that the worker thread can
+ * reorder them to try to encourage I/O request merging in the request queue underneath.
+ */
+struct bio_queue_data {
+ struct vdo_work_queue *queue;
+ struct blk_plug plug;
+ struct int_map *map;
+ struct mutex lock;
+ unsigned int queue_number;
+};
+
+struct io_submitter {
+ unsigned int num_bio_queues_used;
+ unsigned int bio_queue_rotation_interval;
+ struct bio_queue_data bio_queue_data[];
+};
+
+static void start_bio_queue(void *ptr)
+{
+ struct bio_queue_data *bio_queue_data = ptr;
+
+ blk_start_plug(&bio_queue_data->plug);
+}
+
+static void finish_bio_queue(void *ptr)
+{
+ struct bio_queue_data *bio_queue_data = ptr;
+
+ blk_finish_plug(&bio_queue_data->plug);
+}
+
+static const struct vdo_work_queue_type bio_queue_type = {
+ .start = start_bio_queue,
+ .finish = finish_bio_queue,
+ .max_priority = BIO_Q_MAX_PRIORITY,
+ .default_priority = BIO_Q_DATA_PRIORITY,
+};
+
+/**
+ * count_all_bios() - Determine which bio counter to use and count the bio in it.
+ * @vio: The vio associated with the bio.
+ * @bio: The bio to count.
+ */
+static void count_all_bios(struct vio *vio, struct bio *bio)
+{
+ struct atomic_statistics *stats = &vio->completion.vdo->stats;
+
+ if (is_data_vio(vio)) {
+ vdo_count_bios(&stats->bios_out, bio);
+ return;
+ }
+
+ vdo_count_bios(&stats->bios_meta, bio);
+ if (vio->type == VIO_TYPE_RECOVERY_JOURNAL)
+ vdo_count_bios(&stats->bios_journal, bio);
+ else if (vio->type == VIO_TYPE_BLOCK_MAP)
+ vdo_count_bios(&stats->bios_page_cache, bio);
+}
+
+/**
+ * assert_in_bio_zone() - Assert that a vio is in the correct bio zone and not in interrupt
+ * context.
+ * @vio: The vio to check.
+ */
+static void assert_in_bio_zone(struct vio *vio)
+{
+ VDO_ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context");
+ assert_vio_in_bio_zone(vio);
+}
+
+/**
+ * send_bio_to_device() - Update stats and tracing info, then submit the supplied bio to the OS for
+ * processing.
+ * @vio: The vio associated with the bio.
+ * @bio: The bio to submit to the OS.
+ */
+static void send_bio_to_device(struct vio *vio, struct bio *bio)
+{
+ struct vdo *vdo = vio->completion.vdo;
+
+ assert_in_bio_zone(vio);
+ atomic64_inc(&vdo->stats.bios_submitted);
+ count_all_bios(vio, bio);
+ bio_set_dev(bio, vdo_get_backing_device(vdo));
+ submit_bio_noacct(bio);
+}
+
+/**
+ * vdo_submit_vio() - Submit a vio's bio to the underlying block device.
+ * @completion: The completion of the vio to submit.
+ *
+ * Context: May block if the device is busy. This callback should be used by vios which did not
+ * attempt to merge.
+ */
+void vdo_submit_vio(struct vdo_completion *completion)
+{
+ struct vio *vio = as_vio(completion);
+
+ send_bio_to_device(vio, vio->bio);
+}
+
+/**
+ * get_bio_list() - Extract the list of bios to submit from a vio.
+ * @vio: The vio submitting I/O.
+ *
+ * The list will always contain at least one entry (the bio for the vio on which it is called), but
+ * other bios may have been merged with it as well.
+ *
+ * Return: The head of the bio list to submit.
+ */
+static struct bio *get_bio_list(struct vio *vio)
+{
+ struct bio *bio;
+ struct io_submitter *submitter = vio->completion.vdo->io_submitter;
+ struct bio_queue_data *bio_queue_data = &(submitter->bio_queue_data[vio->bio_zone]);
+
+ assert_in_bio_zone(vio);
+
+ mutex_lock(&bio_queue_data->lock);
+ vdo_int_map_remove(bio_queue_data->map,
+ vio->bios_merged.head->bi_iter.bi_sector);
+ vdo_int_map_remove(bio_queue_data->map,
+ vio->bios_merged.tail->bi_iter.bi_sector);
+ bio = vio->bios_merged.head;
+ bio_list_init(&vio->bios_merged);
+ mutex_unlock(&bio_queue_data->lock);
+
+ return bio;
+}
+
+/**
+ * submit_data_vio() - Submit a data_vio's bio to the storage below along with
+ * any bios that have been merged with it.
+ * @completion: The completion of the data_vio to submit.
+ *
+ * Context: This call may block and so should only be called from a bio thread.
+ */
+static void submit_data_vio(struct vdo_completion *completion)
+{
+ struct bio *bio, *next;
+ struct vio *vio = as_vio(completion);
+
+ assert_in_bio_zone(vio);
+ for (bio = get_bio_list(vio); bio != NULL; bio = next) {
+ next = bio->bi_next;
+ bio->bi_next = NULL;
+ send_bio_to_device((struct vio *) bio->bi_private, bio);
+ }
+}
+
+/**
+ * get_mergeable_locked() - Attempt to find an already queued bio that the current bio can be
+ * merged with.
+ * @map: The bio map to use for merging.
+ * @vio: The vio we want to merge.
+ * @back_merge: Set to true for a back merge, false for a front merge.
+ *
+ * There are two types of merging possible, forward and backward, which are distinguished by a flag
+ * that uses kernel elevator terminology.
+ *
+ * Return: the vio to merge to, NULL if no merging is possible.
+ */
+static struct vio *get_mergeable_locked(struct int_map *map, struct vio *vio,
+ bool back_merge)
+{
+ struct bio *bio = vio->bio;
+ sector_t merge_sector = bio->bi_iter.bi_sector;
+ struct vio *vio_merge;
+
+ if (back_merge)
+ merge_sector -= VDO_SECTORS_PER_BLOCK;
+ else
+ merge_sector += VDO_SECTORS_PER_BLOCK;
+
+ vio_merge = vdo_int_map_get(map, merge_sector);
+
+ if (vio_merge == NULL)
+ return NULL;
+
+ if (vio->completion.priority != vio_merge->completion.priority)
+ return NULL;
+
+ if (bio_data_dir(bio) != bio_data_dir(vio_merge->bio))
+ return NULL;
+
+ if (bio_list_empty(&vio_merge->bios_merged))
+ return NULL;
+
+ if (back_merge) {
+ return (vio_merge->bios_merged.tail->bi_iter.bi_sector == merge_sector ?
+ vio_merge : NULL);
+ }
+
+ return (vio_merge->bios_merged.head->bi_iter.bi_sector == merge_sector ?
+ vio_merge : NULL);
+}
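+
+/*
+ * For example (illustrative sector numbers, assuming 4KB blocks so that
+ * VDO_SECTORS_PER_BLOCK == 8): a vio whose bio starts at sector 64 back-merges against a queued
+ * vio whose merged list ends at sector 56 (64 - 8), and front-merges against one whose merged
+ * list begins at sector 72 (64 + 8). The priority and data-direction checks above ensure that
+ * reads and writes, or bios of different priorities, are never combined.
+ */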
+
+static int map_merged_vio(struct int_map *bio_map, struct vio *vio)
+{
+ int result;
+ sector_t bio_sector;
+
+ bio_sector = vio->bios_merged.head->bi_iter.bi_sector;
+ result = vdo_int_map_put(bio_map, bio_sector, vio, true, NULL);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ bio_sector = vio->bios_merged.tail->bi_iter.bi_sector;
+ return vdo_int_map_put(bio_map, bio_sector, vio, true, NULL);
+}
+
+static int merge_to_prev_tail(struct int_map *bio_map, struct vio *vio,
+ struct vio *prev_vio)
+{
+ vdo_int_map_remove(bio_map, prev_vio->bios_merged.tail->bi_iter.bi_sector);
+ bio_list_merge(&prev_vio->bios_merged, &vio->bios_merged);
+ return map_merged_vio(bio_map, prev_vio);
+}
+
+static int merge_to_next_head(struct int_map *bio_map, struct vio *vio,
+ struct vio *next_vio)
+{
+ /*
+ * Handle "next merge" and "gap fill" cases the same way so as to reorder bios in a way
+ * that's compatible with using funnel queues in work queues. This avoids removing an
+ * existing completion.
+ */
+ vdo_int_map_remove(bio_map, next_vio->bios_merged.head->bi_iter.bi_sector);
+ bio_list_merge_head(&next_vio->bios_merged, &vio->bios_merged);
+ return map_merged_vio(bio_map, next_vio);
+}
+
+/**
+ * try_bio_map_merge() - Attempt to merge a vio's bio with other pending I/Os.
+ * @vio: The vio to merge.
+ *
+ * Currently this is only used for data_vios, but is broken out for future use with metadata vios.
+ *
+ * Return: whether or not the vio was merged.
+ */
+static bool try_bio_map_merge(struct vio *vio)
+{
+ int result;
+ bool merged = true;
+ struct bio *bio = vio->bio;
+ struct vio *prev_vio, *next_vio;
+ struct vdo *vdo = vio->completion.vdo;
+ struct bio_queue_data *bio_queue_data =
+ &vdo->io_submitter->bio_queue_data[vio->bio_zone];
+
+ bio->bi_next = NULL;
+ bio_list_init(&vio->bios_merged);
+ bio_list_add(&vio->bios_merged, bio);
+
+ mutex_lock(&bio_queue_data->lock);
+ prev_vio = get_mergeable_locked(bio_queue_data->map, vio, true);
+ next_vio = get_mergeable_locked(bio_queue_data->map, vio, false);
+ if (prev_vio == next_vio)
+ next_vio = NULL;
+
+ if ((prev_vio == NULL) && (next_vio == NULL)) {
+ /* no merge. just add to bio_queue */
+ merged = false;
+ result = vdo_int_map_put(bio_queue_data->map,
+ bio->bi_iter.bi_sector,
+ vio, true, NULL);
+ } else if (next_vio == NULL) {
+ /* Only prev. merge to prev's tail */
+ result = merge_to_prev_tail(bio_queue_data->map, vio, prev_vio);
+ } else {
+ /* Only next. merge to next's head */
+ result = merge_to_next_head(bio_queue_data->map, vio, next_vio);
+ }
+ mutex_unlock(&bio_queue_data->lock);
+
+ /* We don't care about failure of int_map_put in this case. */
+ VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds");
+ return merged;
+}
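+
+/*
+ * A worked example of the three outcomes above (illustrative sectors, again assuming 8 sectors
+ * per block): with queued vios whose merged lists end at sector 8 and begin at sector 24, a new
+ * vio at sector 16 finds both a prev and a next candidate and is merged onto next's head,
+ * filling the gap. With only the first vio queued, the new vio merges to prev's tail. A new vio
+ * at sector 40 matches neither and is simply added to the map.
+ */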
+
+/**
+ * vdo_submit_data_vio() - Submit I/O for a data_vio.
+ * @data_vio: the data_vio for which to issue I/O.
+ *
+ * If possible, this I/O will be merged with other pending I/Os. Otherwise, the data_vio will be
+ * sent to the appropriate bio zone directly.
+ */
+void vdo_submit_data_vio(struct data_vio *data_vio)
+{
+ if (try_bio_map_merge(&data_vio->vio))
+ return;
+
+ launch_data_vio_bio_zone_callback(data_vio, submit_data_vio);
+}
+
+/**
+ * __submit_metadata_vio() - Submit I/O for a metadata vio.
+ * @vio: the vio for which to issue I/O
+ * @physical: the physical block number to read or write
+ * @callback: the bio endio function which will be called after the I/O completes
+ * @error_handler: the handler for submission or I/O errors (may be NULL)
+ * @operation: the type of I/O to perform
+ * @data: the buffer to read or write (may be NULL)
+ *
+ * The vio is enqueued on a vdo bio queue so that bio submission (which may block) does not block
+ * other vdo threads.
+ *
+ * The error handler is only guaranteed to run on the correct thread as long as the thread
+ * calling this function and the thread set in the endio callback are the same, and no error can
+ * occur on the bio queue. Currently this is true for all callers, but additional care will be
+ * needed if this ever changes.
+ */
+void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
+ bio_end_io_t callback, vdo_action_fn error_handler,
+ blk_opf_t operation, char *data)
+{
+ int result;
+ struct vdo_completion *completion = &vio->completion;
+ const struct admin_state_code *code = vdo_get_admin_state(completion->vdo);
+
+ VDO_ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name);
+ VDO_ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio");
+
+ vdo_reset_completion(completion);
+ completion->error_handler = error_handler;
+ result = vio_reset_bio(vio, data, callback, operation | REQ_META, physical);
+ if (result != VDO_SUCCESS) {
+ continue_vio(vio, result);
+ return;
+ }
+
+ vdo_set_completion_callback(completion, vdo_submit_vio,
+ get_vio_bio_zone_thread_id(vio));
+ vdo_launch_completion_with_priority(completion, get_metadata_priority(vio));
+}
+
+/**
+ * vdo_make_io_submitter() - Create an io_submitter structure.
+ * @thread_count: Number of bio-submission threads to set up.
+ * @rotation_interval: Interval to use when rotating between bio-submission threads when enqueuing
+ * completions.
+ * @max_requests_active: Number of bios for merge tracking.
+ * @vdo: The vdo which will use this submitter.
+ * @io_submitter_ptr: A pointer to hold the new io_submitter.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_interval,
+ unsigned int max_requests_active, struct vdo *vdo,
+ struct io_submitter **io_submitter_ptr)
+{
+ unsigned int i;
+ struct io_submitter *io_submitter;
+ int result;
+
+ result = vdo_allocate_extended(struct io_submitter, thread_count,
+ struct bio_queue_data, "bio submission data",
+ &io_submitter);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ io_submitter->bio_queue_rotation_interval = rotation_interval;
+
+ /* Setup for each bio-submission work queue */
+ for (i = 0; i < thread_count; i++) {
+ struct bio_queue_data *bio_queue_data = &io_submitter->bio_queue_data[i];
+
+ mutex_init(&bio_queue_data->lock);
+ /*
+ * One I/O operation per request, but both first & last sector numbers.
+ *
+ * If requests are assigned to threads round-robin, they should be distributed
+ * quite evenly. But if they're assigned based on PBN, things can sometimes be very
+ * uneven. So for now, we'll assume that all requests *may* wind up on one thread,
+ * and thus all in the same map.
+ */
+ result = vdo_int_map_create(max_requests_active * 2,
+ &bio_queue_data->map);
+ if (result != VDO_SUCCESS) {
+ /*
+ * Clean up the partially initialized bio-queue entirely and indicate that
+ * initialization failed.
+ */
+ vdo_log_error("bio map initialization failed %d", result);
+ vdo_cleanup_io_submitter(io_submitter);
+ vdo_free_io_submitter(io_submitter);
+ return result;
+ }
+
+ bio_queue_data->queue_number = i;
+ result = vdo_make_thread(vdo, vdo->thread_config.bio_threads[i],
+ &bio_queue_type, 1, (void **) &bio_queue_data);
+ if (result != VDO_SUCCESS) {
+ /*
+ * Clean up the partially initialized bio-queue entirely and indicate that
+ * initialization failed.
+ */
+ vdo_int_map_free(vdo_forget(bio_queue_data->map));
+ vdo_log_error("bio queue initialization failed %d", result);
+ vdo_cleanup_io_submitter(io_submitter);
+ vdo_free_io_submitter(io_submitter);
+ return result;
+ }
+
+ bio_queue_data->queue = vdo->threads[vdo->thread_config.bio_threads[i]].queue;
+ io_submitter->num_bio_queues_used++;
+ }
+
+ *io_submitter_ptr = io_submitter;
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_cleanup_io_submitter() - Tear down the io_submitter fields as needed for a physical layer.
+ * @io_submitter: The I/O submitter data to tear down (may be NULL).
+ */
+void vdo_cleanup_io_submitter(struct io_submitter *io_submitter)
+{
+ int i;
+
+ if (io_submitter == NULL)
+ return;
+
+ for (i = io_submitter->num_bio_queues_used - 1; i >= 0; i--)
+ vdo_finish_work_queue(io_submitter->bio_queue_data[i].queue);
+}
+
+/**
+ * vdo_free_io_submitter() - Free the io_submitter fields and structure as needed.
+ * @io_submitter: The I/O submitter data to destroy.
+ *
+ * This must be called after vdo_cleanup_io_submitter(). It is used to release resources late in
+ * the shutdown process to avoid or reduce the chance of race conditions.
+ */
+void vdo_free_io_submitter(struct io_submitter *io_submitter)
+{
+ int i;
+
+ if (io_submitter == NULL)
+ return;
+
+ for (i = io_submitter->num_bio_queues_used - 1; i >= 0; i--) {
+ io_submitter->num_bio_queues_used--;
+ /* vdo_destroy() will free the work queue, so just give up our reference to it. */
+ vdo_forget(io_submitter->bio_queue_data[i].queue);
+ vdo_int_map_free(vdo_forget(io_submitter->bio_queue_data[i].map));
+ }
+ vdo_free(io_submitter);
+}
diff --git a/drivers/md/dm-vdo/io-submitter.h b/drivers/md/dm-vdo/io-submitter.h
new file mode 100644
index 000000000000..80748699496f
--- /dev/null
+++ b/drivers/md/dm-vdo/io-submitter.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_IO_SUBMITTER_H
+#define VDO_IO_SUBMITTER_H
+
+#include <linux/bio.h>
+
+#include "types.h"
+
+struct io_submitter;
+
+int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_interval,
+ unsigned int max_requests_active, struct vdo *vdo,
+ struct io_submitter **io_submitter);
+
+void vdo_cleanup_io_submitter(struct io_submitter *io_submitter);
+
+void vdo_free_io_submitter(struct io_submitter *io_submitter);
+
+void vdo_submit_vio(struct vdo_completion *completion);
+
+void vdo_submit_data_vio(struct data_vio *data_vio);
+
+void __submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
+ bio_end_io_t callback, vdo_action_fn error_handler,
+ blk_opf_t operation, char *data);
+
+static inline void vdo_submit_metadata_vio(struct vio *vio, physical_block_number_t physical,
+ bio_end_io_t callback, vdo_action_fn error_handler,
+ blk_opf_t operation)
+{
+ __submit_metadata_vio(vio, physical, callback, error_handler,
+ operation, vio->data);
+}
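+
+/*
+ * A hypothetical caller (the names read_endio and handle_read_error are invented for
+ * illustration): read the metadata block at pbn into vio->data, completing via read_endio and
+ * routing failures to handle_read_error:
+ *
+ *	vdo_submit_metadata_vio(vio, pbn, read_endio, handle_read_error, REQ_OP_READ);
+ */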
+
+static inline void vdo_submit_flush_vio(struct vio *vio, bio_end_io_t callback,
+ vdo_action_fn error_handler)
+{
+ /* FIXME: Can we just use REQ_OP_FLUSH? */
+ __submit_metadata_vio(vio, 0, callback, error_handler,
+ REQ_OP_WRITE | REQ_PREFLUSH, NULL);
+}
+
+#endif /* VDO_IO_SUBMITTER_H */
diff --git a/drivers/md/dm-vdo/logger.c b/drivers/md/dm-vdo/logger.c
new file mode 100644
index 000000000000..3f7dc2cb6b98
--- /dev/null
+++ b/drivers/md/dm-vdo/logger.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "logger.h"
+
+#include <asm/current.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+
+#include "errors.h"
+#include "thread-device.h"
+#include "thread-utils.h"
+
+int vdo_log_level = VDO_LOG_DEFAULT;
+
+int vdo_get_log_level(void)
+{
+ int log_level_latch = READ_ONCE(vdo_log_level);
+
+ if (unlikely(log_level_latch > VDO_LOG_MAX)) {
+ log_level_latch = VDO_LOG_DEFAULT;
+ WRITE_ONCE(vdo_log_level, log_level_latch);
+ }
+ return log_level_latch;
+}
+
+static const char *get_current_interrupt_type(void)
+{
+ if (in_nmi())
+ return "NMI";
+
+ if (in_irq())
+ return "HI";
+
+ if (in_softirq())
+ return "SI";
+
+ return "INTR";
+}
+
+/**
+ * emit_log_message_to_kernel() - Emit a log message to the kernel at the specified priority.
+ *
+ * @priority: The priority at which to log the message
+ * @fmt: The format string of the message
+ */
+static void emit_log_message_to_kernel(int priority, const char *fmt, ...)
+{
+ va_list args;
+ struct va_format vaf;
+
+ if (priority > vdo_get_log_level())
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ switch (priority) {
+ case VDO_LOG_EMERG:
+ case VDO_LOG_ALERT:
+ case VDO_LOG_CRIT:
+ pr_crit("%pV", &vaf);
+ break;
+ case VDO_LOG_ERR:
+ pr_err("%pV", &vaf);
+ break;
+ case VDO_LOG_WARNING:
+ pr_warn("%pV", &vaf);
+ break;
+ case VDO_LOG_NOTICE:
+ case VDO_LOG_INFO:
+ pr_info("%pV", &vaf);
+ break;
+ case VDO_LOG_DEBUG:
+ pr_debug("%pV", &vaf);
+ break;
+ default:
+ printk(KERN_DEFAULT "%pV", &vaf);
+ break;
+ }
+
+ va_end(args);
+}
+
+/**
+ * emit_log_message() - Emit a log message to the kernel log in a format suited to the current
+ * thread context.
+ *
+ * Context info formats:
+ *
+ * interrupt: uds[NMI]: blah
+ * kvdo thread: kvdo12:foobarQ: blah
+ * thread w/device id: kvdo12:myprog: blah
+ * other thread: uds: myprog: blah
+ *
+ * Fields: module name, interrupt level, process name, device ID.
+ *
+ * @priority: the priority at which to log the message
+ * @module: The name of the module doing the logging
+ * @prefix: The prefix of the log message
+ * @vaf1: The first message format descriptor
+ * @vaf2: The second message format descriptor
+ */
+static void emit_log_message(int priority, const char *module, const char *prefix,
+ const struct va_format *vaf1, const struct va_format *vaf2)
+{
+ int device_instance;
+
+ /*
+ * In interrupt context, identify the interrupt type and module. Ignore the process/thread
+ * since it could be anything.
+ */
+ if (in_interrupt()) {
+ const char *type = get_current_interrupt_type();
+
+ emit_log_message_to_kernel(priority, "%s[%s]: %s%pV%pV\n", module, type,
+ prefix, vaf1, vaf2);
+ return;
+ }
+
+ /* Not at interrupt level; we have a process we can look at, and might have a device ID. */
+ device_instance = vdo_get_thread_device_id();
+ if (device_instance >= 0) {
+ emit_log_message_to_kernel(priority, "%s%u:%s: %s%pV%pV\n", module,
+ device_instance, current->comm, prefix, vaf1,
+ vaf2);
+ return;
+ }
+
+ /*
+ * If it's a kernel thread and the module name is a prefix of its name, assume it is ours
+ * and only identify the thread.
+ */
+ if (((current->flags & PF_KTHREAD) != 0) &&
+ (strncmp(module, current->comm, strlen(module)) == 0)) {
+ emit_log_message_to_kernel(priority, "%s: %s%pV%pV\n", current->comm,
+ prefix, vaf1, vaf2);
+ return;
+ }
+
+ /* Identify the module and the process. */
+ emit_log_message_to_kernel(priority, "%s: %s: %s%pV%pV\n", module, current->comm,
+ prefix, vaf1, vaf2);
+}
+
+/*
+ * vdo_log_embedded_message() - Log a message embedded within another message.
+ * @priority: the priority at which to log the message
+ * @module: the name of the module doing the logging
+ * @prefix: optional string prefix to message, may be NULL
+ * @fmt1: format of message first part (required)
+ * @args1: arguments for message first part (required)
+ * @fmt2: format of message second part
+ */
+void vdo_log_embedded_message(int priority, const char *module, const char *prefix,
+ const char *fmt1, va_list args1, const char *fmt2, ...)
+{
+ va_list args1_copy;
+ va_list args2;
+ struct va_format vaf1, vaf2;
+
+ va_start(args2, fmt2);
+
+ if (module == NULL)
+ module = VDO_LOGGING_MODULE_NAME;
+
+ if (prefix == NULL)
+ prefix = "";
+
+ /*
+	 * It is implementation dependent whether va_list is defined as an array type that decays
+	 * to a pointer when passed as an argument. Copy args1 with va_copy so that vaf1 gets a
+	 * proper va_list pointer irrespective of how va_list is defined; args2 was started in
+	 * this function, so its address can be used directly.
+ */
+ va_copy(args1_copy, args1);
+ vaf1.fmt = fmt1;
+ vaf1.va = &args1_copy;
+
+ vaf2.fmt = fmt2;
+ vaf2.va = &args2;
+
+ emit_log_message(priority, module, prefix, &vaf1, &vaf2);
+
+ va_end(args1_copy);
+ va_end(args2);
+}
+
+int vdo_vlog_strerror(int priority, int errnum, const char *module, const char *format,
+ va_list args)
+{
+ char errbuf[VDO_MAX_ERROR_MESSAGE_SIZE];
+ const char *message = uds_string_error(errnum, errbuf, sizeof(errbuf));
+
+ vdo_log_embedded_message(priority, module, NULL, format, args, ": %s (%d)",
+ message, errnum);
+ return errnum;
+}
+
+int __vdo_log_strerror(int priority, int errnum, const char *module, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vdo_vlog_strerror(priority, errnum, module, format, args);
+ va_end(args);
+ return errnum;
+}
+
+void vdo_log_backtrace(int priority)
+{
+ if (priority > vdo_get_log_level())
+ return;
+
+ dump_stack();
+}
+
+void __vdo_log_message(int priority, const char *module, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ vdo_log_embedded_message(priority, module, NULL, format, args, "%s", "");
+ va_end(args);
+}
+
+/*
+ * Sleep or delay a few milliseconds in an attempt to allow the log buffers to be flushed lest they
+ * be overrun.
+ */
+void vdo_pause_for_logger(void)
+{
+ fsleep(4000);
+}
diff --git a/drivers/md/dm-vdo/logger.h b/drivers/md/dm-vdo/logger.h
new file mode 100644
index 000000000000..ae6ad691c027
--- /dev/null
+++ b/drivers/md/dm-vdo/logger.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_LOGGER_H
+#define VDO_LOGGER_H
+
+#include <linux/kern_levels.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <linux/device-mapper.h>
+
+/* Custom logging utilities for UDS */
+
+enum {
+ VDO_LOG_EMERG = LOGLEVEL_EMERG,
+ VDO_LOG_ALERT = LOGLEVEL_ALERT,
+ VDO_LOG_CRIT = LOGLEVEL_CRIT,
+ VDO_LOG_ERR = LOGLEVEL_ERR,
+ VDO_LOG_WARNING = LOGLEVEL_WARNING,
+ VDO_LOG_NOTICE = LOGLEVEL_NOTICE,
+ VDO_LOG_INFO = LOGLEVEL_INFO,
+ VDO_LOG_DEBUG = LOGLEVEL_DEBUG,
+
+ VDO_LOG_MAX = VDO_LOG_DEBUG,
+ VDO_LOG_DEFAULT = VDO_LOG_INFO,
+};
+
+extern int vdo_log_level;
+
+#define DM_MSG_PREFIX "vdo"
+#define VDO_LOGGING_MODULE_NAME DM_NAME ": " DM_MSG_PREFIX
+
+/* Apply a rate limiter to a log method call. */
+#define vdo_log_ratelimit(log_fn, ...) \
+ do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) { \
+ log_fn(__VA_ARGS__); \
+ } \
+ } while (0)
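+
+/*
+ * For example (illustrative), to keep a hot error path from flooding the kernel log:
+ *
+ *	vdo_log_ratelimit(vdo_log_error, "read failed: %d", result);
+ */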
+
+int vdo_get_log_level(void);
+
+void vdo_log_embedded_message(int priority, const char *module, const char *prefix,
+ const char *fmt1, va_list args1, const char *fmt2, ...)
+ __printf(4, 0) __printf(6, 7);
+
+void vdo_log_backtrace(int priority);
+
+/* All log functions will preserve the caller's value of errno. */
+
+#define vdo_log_strerror(priority, errnum, ...) \
+ __vdo_log_strerror(priority, errnum, VDO_LOGGING_MODULE_NAME, __VA_ARGS__)
+
+int __vdo_log_strerror(int priority, int errnum, const char *module,
+ const char *format, ...)
+ __printf(4, 5);
+
+int vdo_vlog_strerror(int priority, int errnum, const char *module, const char *format,
+ va_list args)
+ __printf(4, 0);
+
+/* Log an error prefixed with the string associated with the errnum. */
+#define vdo_log_error_strerror(errnum, ...) \
+ vdo_log_strerror(VDO_LOG_ERR, errnum, __VA_ARGS__)
+
+#define vdo_log_debug_strerror(errnum, ...) \
+ vdo_log_strerror(VDO_LOG_DEBUG, errnum, __VA_ARGS__)
+
+#define vdo_log_info_strerror(errnum, ...) \
+ vdo_log_strerror(VDO_LOG_INFO, errnum, __VA_ARGS__)
+
+#define vdo_log_warning_strerror(errnum, ...) \
+ vdo_log_strerror(VDO_LOG_WARNING, errnum, __VA_ARGS__)
+
+#define vdo_log_fatal_strerror(errnum, ...) \
+ vdo_log_strerror(VDO_LOG_CRIT, errnum, __VA_ARGS__)
+
+#define vdo_log_message(priority, ...) \
+ __vdo_log_message(priority, VDO_LOGGING_MODULE_NAME, __VA_ARGS__)
+
+void __vdo_log_message(int priority, const char *module, const char *format, ...)
+ __printf(3, 4);
+
+#define vdo_log_debug(...) vdo_log_message(VDO_LOG_DEBUG, __VA_ARGS__)
+
+#define vdo_log_info(...) vdo_log_message(VDO_LOG_INFO, __VA_ARGS__)
+
+#define vdo_log_warning(...) vdo_log_message(VDO_LOG_WARNING, __VA_ARGS__)
+
+#define vdo_log_error(...) vdo_log_message(VDO_LOG_ERR, __VA_ARGS__)
+
+#define vdo_log_fatal(...) vdo_log_message(VDO_LOG_CRIT, __VA_ARGS__)
+
void vdo_pause_for_logger(void);
+
+#endif /* VDO_LOGGER_H */
diff --git a/drivers/md/dm-vdo/logical-zone.c b/drivers/md/dm-vdo/logical-zone.c
new file mode 100644
index 000000000000..026f031ffc9e
--- /dev/null
+++ b/drivers/md/dm-vdo/logical-zone.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "logical-zone.h"
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "action-manager.h"
+#include "admin-state.h"
+#include "block-map.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "flush.h"
+#include "int-map.h"
+#include "physical-zone.h"
+#include "vdo.h"
+
+#define ALLOCATIONS_PER_ZONE 128
+
+/**
+ * as_logical_zone() - Convert a generic vdo_completion to a logical_zone.
+ * @completion: The completion to convert.
+ *
+ * Return: The completion as a logical_zone.
+ */
+static struct logical_zone *as_logical_zone(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_GENERATION_FLUSHED_COMPLETION);
+ return container_of(completion, struct logical_zone, completion);
+}
+
+/* get_thread_id_for_zone() - Implements vdo_zone_thread_getter_fn. */
+static thread_id_t get_thread_id_for_zone(void *context, zone_count_t zone_number)
+{
+ struct logical_zones *zones = context;
+
+ return zones->zones[zone_number].thread_id;
+}
+
+/**
+ * initialize_zone() - Initialize a logical zone.
+ * @zones: The logical_zones to which this zone belongs.
+ * @zone_number: The logical_zone's index.
+ */
+static int initialize_zone(struct logical_zones *zones, zone_count_t zone_number)
+{
+ int result;
+ struct vdo *vdo = zones->vdo;
+ struct logical_zone *zone = &zones->zones[zone_number];
+ zone_count_t allocation_zone_number;
+
+ result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->lbn_operations);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (zone_number < vdo->thread_config.logical_zone_count - 1)
+ zone->next = &zones->zones[zone_number + 1];
+
+ vdo_initialize_completion(&zone->completion, vdo,
+ VDO_GENERATION_FLUSHED_COMPLETION);
+ zone->zones = zones;
+ zone->zone_number = zone_number;
+ zone->thread_id = vdo->thread_config.logical_threads[zone_number];
+ zone->block_map_zone = &vdo->block_map->zones[zone_number];
+ INIT_LIST_HEAD(&zone->write_vios);
+ vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+
+ allocation_zone_number = zone->thread_id % vdo->thread_config.physical_zone_count;
+ zone->allocation_zone = &vdo->physical_zones->zones[allocation_zone_number];
+
+ return vdo_make_default_thread(vdo, zone->thread_id);
+}
+
+/**
+ * vdo_make_logical_zones() - Create a set of logical zones.
+ * @vdo: The vdo to which the zones will belong.
+ * @zones_ptr: A pointer to hold the new zones.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_make_logical_zones(struct vdo *vdo, struct logical_zones **zones_ptr)
+{
+ struct logical_zones *zones;
+ int result;
+ zone_count_t zone;
+ zone_count_t zone_count = vdo->thread_config.logical_zone_count;
+
+ if (zone_count == 0)
+ return VDO_SUCCESS;
+
+ result = vdo_allocate_extended(struct logical_zones, zone_count,
+ struct logical_zone, __func__, &zones);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ zones->vdo = vdo;
+ zones->zone_count = zone_count;
+ for (zone = 0; zone < zone_count; zone++) {
+ result = initialize_zone(zones, zone);
+ if (result != VDO_SUCCESS) {
+ vdo_free_logical_zones(zones);
+ return result;
+ }
+ }
+
+ result = vdo_make_action_manager(zones->zone_count, get_thread_id_for_zone,
+ vdo->thread_config.admin_thread, zones, NULL,
+ vdo, &zones->manager);
+ if (result != VDO_SUCCESS) {
+ vdo_free_logical_zones(zones);
+ return result;
+ }
+
+ *zones_ptr = zones;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_free_logical_zones() - Free a set of logical zones.
+ * @zones: The set of zones to free.
+ */
+void vdo_free_logical_zones(struct logical_zones *zones)
+{
+ zone_count_t index;
+
+ if (zones == NULL)
+ return;
+
+ vdo_free(vdo_forget(zones->manager));
+
+ for (index = 0; index < zones->zone_count; index++)
+ vdo_int_map_free(vdo_forget(zones->zones[index].lbn_operations));
+
+ vdo_free(zones);
+}
+
+static inline void assert_on_zone_thread(struct logical_zone *zone, const char *what)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
+ "%s() called on correct thread", what);
+}
+
+/**
+ * check_for_drain_complete() - Check whether this zone has drained.
+ * @zone: The zone to check.
+ */
+static void check_for_drain_complete(struct logical_zone *zone)
+{
+ if (!vdo_is_state_draining(&zone->state) || zone->notifying ||
+ !list_empty(&zone->write_vios))
+ return;
+
+ vdo_finish_draining(&zone->state);
+}
+
+/**
+ * initiate_drain() - Initiate a drain.
+ *
+ * Implements vdo_admin_initiator_fn.
+ */
+static void initiate_drain(struct admin_state *state)
+{
+ check_for_drain_complete(container_of(state, struct logical_zone, state));
+}
+
+/**
+ * drain_logical_zone() - Drain a logical zone.
+ *
+ * Implements vdo_zone_action_fn.
+ */
+static void drain_logical_zone(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct logical_zones *zones = context;
+
+ vdo_start_draining(&zones->zones[zone_number].state,
+ vdo_get_current_manager_operation(zones->manager), parent,
+ initiate_drain);
+}
+
+void vdo_drain_logical_zones(struct logical_zones *zones,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent)
+{
+ vdo_schedule_operation(zones->manager, operation, NULL, drain_logical_zone, NULL,
+ parent);
+}
+
+/**
+ * resume_logical_zone() - Resume a logical zone.
+ *
+ * Implements vdo_zone_action_fn.
+ */
+static void resume_logical_zone(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct logical_zone *zone = &(((struct logical_zones *) context)->zones[zone_number]);
+
+ vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state));
+}
+
+/**
+ * vdo_resume_logical_zones() - Resume a set of logical zones.
+ * @zones: The logical zones to resume.
+ * @parent: The object to notify when the zones have resumed.
+ */
+void vdo_resume_logical_zones(struct logical_zones *zones, struct vdo_completion *parent)
+{
+ vdo_schedule_operation(zones->manager, VDO_ADMIN_STATE_RESUMING, NULL,
+ resume_logical_zone, NULL, parent);
+}
+
+/**
+ * update_oldest_active_generation() - Update the oldest active generation.
+ * @zone: The zone.
+ *
+ * Return: true if the oldest active generation has changed.
+ */
+static bool update_oldest_active_generation(struct logical_zone *zone)
+{
+ struct data_vio *data_vio =
+ list_first_entry_or_null(&zone->write_vios, struct data_vio,
+ write_entry);
+ sequence_number_t oldest =
+ (data_vio == NULL) ? zone->flush_generation : data_vio->flush_generation;
+
+ if (oldest == zone->oldest_active_generation)
+ return false;
+
+ WRITE_ONCE(zone->oldest_active_generation, oldest);
+ return true;
+}
+
+/**
+ * vdo_increment_logical_zone_flush_generation() - Increment the flush generation in a logical
+ * zone.
+ * @zone: The logical zone.
+ * @expected_generation: The expected value of the flush generation before the increment.
+ */
+void vdo_increment_logical_zone_flush_generation(struct logical_zone *zone,
+ sequence_number_t expected_generation)
+{
+ assert_on_zone_thread(zone, __func__);
+ VDO_ASSERT_LOG_ONLY((zone->flush_generation == expected_generation),
+ "logical zone %u flush generation %llu should be %llu before increment",
+ zone->zone_number, (unsigned long long) zone->flush_generation,
+ (unsigned long long) expected_generation);
+
+ zone->flush_generation++;
+ zone->ios_in_flush_generation = 0;
+ update_oldest_active_generation(zone);
+}
+
+/**
+ * vdo_acquire_flush_generation_lock() - Acquire the shared lock on a flush generation by a write
+ * data_vio.
+ * @data_vio: The data_vio.
+ */
+void vdo_acquire_flush_generation_lock(struct data_vio *data_vio)
+{
+ struct logical_zone *zone = data_vio->logical.zone;
+
+ assert_on_zone_thread(zone, __func__);
+ VDO_ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal");
+
+ data_vio->flush_generation = zone->flush_generation;
+ list_add_tail(&data_vio->write_entry, &zone->write_vios);
+ zone->ios_in_flush_generation++;
+}
+
+static void attempt_generation_complete_notification(struct vdo_completion *completion);
+
+/**
+ * notify_flusher() - Notify the flusher that at least one generation no longer has active VIOs.
+ * @completion: The zone completion.
+ *
+ * This callback is registered in attempt_generation_complete_notification().
+ */
+static void notify_flusher(struct vdo_completion *completion)
+{
+ struct logical_zone *zone = as_logical_zone(completion);
+
+ vdo_complete_flushes(zone->zones->vdo->flusher);
+ vdo_launch_completion_callback(completion,
+ attempt_generation_complete_notification,
+ zone->thread_id);
+}
+
+/**
+ * attempt_generation_complete_notification() - Notify the flusher if some generation no
+ * longer has active VIOs.
+ * @completion: The zone completion.
+ */
+static void attempt_generation_complete_notification(struct vdo_completion *completion)
+{
+ struct logical_zone *zone = as_logical_zone(completion);
+
+ assert_on_zone_thread(zone, __func__);
+ if (zone->oldest_active_generation <= zone->notification_generation) {
+ zone->notifying = false;
+ check_for_drain_complete(zone);
+ return;
+ }
+
+ zone->notifying = true;
+ zone->notification_generation = zone->oldest_active_generation;
+ vdo_launch_completion_callback(&zone->completion, notify_flusher,
+ vdo_get_flusher_thread_id(zone->zones->vdo->flusher));
+}
+
+/**
+ * vdo_release_flush_generation_lock() - Release the shared lock on a flush generation held by a
+ * write data_vio.
+ * @data_vio: The data_vio whose lock is to be released.
+ *
+ * If there are pending flushes, and this data_vio completes the oldest generation active in this
+ * zone, an attempt will be made to finish any flushes which may now be complete.
+ */
+void vdo_release_flush_generation_lock(struct data_vio *data_vio)
+{
+ struct logical_zone *zone = data_vio->logical.zone;
+
+ assert_on_zone_thread(zone, __func__);
+
+ if (!data_vio_has_flush_generation_lock(data_vio))
+ return;
+
+ list_del_init(&data_vio->write_entry);
+ VDO_ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation),
+ "data_vio releasing lock on generation %llu is not older than oldest active generation %llu",
+ (unsigned long long) data_vio->flush_generation,
+ (unsigned long long) zone->oldest_active_generation);
+
+ if (!update_oldest_active_generation(zone) || zone->notifying)
+ return;
+
+ attempt_generation_complete_notification(&zone->completion);
+}
+
+struct physical_zone *vdo_get_next_allocation_zone(struct logical_zone *zone)
+{
+ if (zone->allocation_count == ALLOCATIONS_PER_ZONE) {
+ zone->allocation_count = 0;
+ zone->allocation_zone = zone->allocation_zone->next;
+ }
+
+ zone->allocation_count++;
+ return zone->allocation_zone;
+}
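+
+/*
+ * For example, with ALLOCATIONS_PER_ZONE == 128, the first 128 calls after a rotation all return
+ * the same physical zone; the 129th call resets the count and advances to the next zone in the
+ * ring, spreading allocations across the physical zones in coarse round-robin batches.
+ */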
+
+/**
+ * vdo_dump_logical_zone() - Dump information about a logical zone to the log for debugging.
+ * @zone: The zone to dump.
+ *
+ * Context: the information is dumped in a thread-unsafe fashion.
+ */
+void vdo_dump_logical_zone(const struct logical_zone *zone)
+{
+ vdo_log_info("logical_zone %u", zone->zone_number);
+ vdo_log_info(" flush_generation=%llu oldest_active_generation=%llu notification_generation=%llu notifying=%s ios_in_flush_generation=%llu",
+ (unsigned long long) READ_ONCE(zone->flush_generation),
+ (unsigned long long) READ_ONCE(zone->oldest_active_generation),
+ (unsigned long long) READ_ONCE(zone->notification_generation),
+ vdo_bool_to_string(READ_ONCE(zone->notifying)),
+ (unsigned long long) READ_ONCE(zone->ios_in_flush_generation));
+}
diff --git a/drivers/md/dm-vdo/logical-zone.h b/drivers/md/dm-vdo/logical-zone.h
new file mode 100644
index 000000000000..1b666c84a193
--- /dev/null
+++ b/drivers/md/dm-vdo/logical-zone.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_LOGICAL_ZONE_H
+#define VDO_LOGICAL_ZONE_H
+
+#include <linux/list.h>
+
+#include "admin-state.h"
+#include "int-map.h"
+#include "types.h"
+
+struct physical_zone;
+
+struct logical_zone {
+ /* The completion for flush notifications */
+ struct vdo_completion completion;
+ /* The owner of this zone */
+ struct logical_zones *zones;
+ /* Which logical zone this is */
+ zone_count_t zone_number;
+ /* The thread id for this zone */
+ thread_id_t thread_id;
+ /* In progress operations keyed by LBN */
+ struct int_map *lbn_operations;
+ /* The logical to physical map */
+ struct block_map_zone *block_map_zone;
+ /* The current flush generation */
+ sequence_number_t flush_generation;
+ /*
+ * The oldest active generation in this zone. This is mutated only on the logical zone
+ * thread but is queried from the flusher thread.
+ */
+ sequence_number_t oldest_active_generation;
+ /* The number of IOs in the current flush generation */
+ block_count_t ios_in_flush_generation;
+ /* The youngest generation of the current notification */
+ sequence_number_t notification_generation;
+ /* Whether a notification is in progress */
+ bool notifying;
+ /* The queue of active data write VIOs */
+ struct list_head write_vios;
+ /* The administrative state of the zone */
+ struct admin_state state;
+ /* The physical zone from which to allocate */
+ struct physical_zone *allocation_zone;
+ /* The number of allocations done from the current allocation_zone */
+ block_count_t allocation_count;
+ /* The next zone */
+ struct logical_zone *next;
+};
+
+struct logical_zones {
+ /* The vdo whose zones these are */
+ struct vdo *vdo;
+ /* The manager for administrative actions */
+ struct action_manager *manager;
+ /* The number of zones */
+ zone_count_t zone_count;
+ /* The logical zones themselves */
+ struct logical_zone zones[];
+};
+
+int __must_check vdo_make_logical_zones(struct vdo *vdo,
+ struct logical_zones **zones_ptr);
+
+void vdo_free_logical_zones(struct logical_zones *zones);
+
+void vdo_drain_logical_zones(struct logical_zones *zones,
+ const struct admin_state_code *operation,
+ struct vdo_completion *completion);
+
+void vdo_resume_logical_zones(struct logical_zones *zones,
+ struct vdo_completion *parent);
+
+void vdo_increment_logical_zone_flush_generation(struct logical_zone *zone,
+ sequence_number_t expected_generation);
+
+void vdo_acquire_flush_generation_lock(struct data_vio *data_vio);
+
+void vdo_release_flush_generation_lock(struct data_vio *data_vio);
+
+struct physical_zone * __must_check vdo_get_next_allocation_zone(struct logical_zone *zone);
+
+void vdo_dump_logical_zone(const struct logical_zone *zone);
+
+#endif /* VDO_LOGICAL_ZONE_H */
diff --git a/drivers/md/dm-vdo/memory-alloc.c b/drivers/md/dm-vdo/memory-alloc.c
new file mode 100644
index 000000000000..185f259c7245
--- /dev/null
+++ b/drivers/md/dm-vdo/memory-alloc.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+/*
+ * UDS and VDO keep track of which threads are allowed to allocate memory freely, and which threads
+ * must be careful to not do a memory allocation that does an I/O request. The 'allocating_threads'
+ * thread_registry and its associated methods implement this tracking.
+ */
+static struct thread_registry allocating_threads;
+
+static inline bool allocations_allowed(void)
+{
+ return vdo_lookup_thread(&allocating_threads) != NULL;
+}
+
+/*
+ * Register the current thread as an allocating thread.
+ *
+ * An optional flag location can be supplied indicating whether, at any given point in time, the
+ * threads associated with that flag should be allocating storage. If the flag is false, a message
+ * will be logged.
+ *
+ * If no flag is supplied, the thread is always allowed to allocate storage without complaint.
+ *
+ * @new_thread: registered_thread structure to use for the current thread
+ * @flag_ptr: Location of the allocation-allowed flag
+ */
+void vdo_register_allocating_thread(struct registered_thread *new_thread,
+ const bool *flag_ptr)
+{
+ if (flag_ptr == NULL) {
+ static const bool allocation_always_allowed = true;
+
+ flag_ptr = &allocation_always_allowed;
+ }
+
+ vdo_register_thread(&allocating_threads, new_thread, flag_ptr);
+}
+
+/* Unregister the current thread as an allocating thread. */
+void vdo_unregister_allocating_thread(void)
+{
+ vdo_unregister_thread(&allocating_threads);
+}
+
+/*
+ * We track how much memory has been allocated and freed. When we unload the module, we log an
+ * error if we have not freed all the memory that we allocated. Nearly all memory allocation and
+ * freeing is done using this module.
+ *
+ * We do not use kernel functions like the kvasprintf() method, which allocate memory indirectly
+ * using kmalloc.
+ *
+ * These data structures and methods are used to track the amount of memory used.
+ */
+
+/*
+ * We allocate very few large objects, and allocation/deallocation isn't done in a
+ * performance-critical stage for us, so a linked list should be fine.
+ */
+struct vmalloc_block_info {
+ void *ptr;
+ size_t size;
+ struct vmalloc_block_info *next;
+};
+
+static struct {
+ spinlock_t lock;
+ size_t kmalloc_blocks;
+ size_t kmalloc_bytes;
+ size_t vmalloc_blocks;
+ size_t vmalloc_bytes;
+ size_t peak_bytes;
+ struct vmalloc_block_info *vmalloc_list;
+} memory_stats __cacheline_aligned;
+
+static void update_peak_usage(void)
+{
+ size_t total_bytes = memory_stats.kmalloc_bytes + memory_stats.vmalloc_bytes;
+
+ if (total_bytes > memory_stats.peak_bytes)
+ memory_stats.peak_bytes = total_bytes;
+}
+
+static void add_kmalloc_block(size_t size)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&memory_stats.lock, flags);
+ memory_stats.kmalloc_blocks++;
+ memory_stats.kmalloc_bytes += size;
+ update_peak_usage();
+ spin_unlock_irqrestore(&memory_stats.lock, flags);
+}
+
+static void remove_kmalloc_block(size_t size)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&memory_stats.lock, flags);
+ memory_stats.kmalloc_blocks--;
+ memory_stats.kmalloc_bytes -= size;
+ spin_unlock_irqrestore(&memory_stats.lock, flags);
+}
+
+static void add_vmalloc_block(struct vmalloc_block_info *block)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&memory_stats.lock, flags);
+ block->next = memory_stats.vmalloc_list;
+ memory_stats.vmalloc_list = block;
+ memory_stats.vmalloc_blocks++;
+ memory_stats.vmalloc_bytes += block->size;
+ update_peak_usage();
+ spin_unlock_irqrestore(&memory_stats.lock, flags);
+}
+
+static void remove_vmalloc_block(void *ptr)
+{
+ struct vmalloc_block_info *block;
+ struct vmalloc_block_info **block_ptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&memory_stats.lock, flags);
+ for (block_ptr = &memory_stats.vmalloc_list;
+ (block = *block_ptr) != NULL;
+ block_ptr = &block->next) {
+ if (block->ptr == ptr) {
+ *block_ptr = block->next;
+ memory_stats.vmalloc_blocks--;
+ memory_stats.vmalloc_bytes -= block->size;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&memory_stats.lock, flags);
+ if (block != NULL)
+ vdo_free(block);
+ else
+ vdo_log_info("attempting to remove ptr %px not found in vmalloc list", ptr);
+}
+
+/*
+ * Determine whether allocating a memory block should use kmalloc or __vmalloc.
+ *
+ * vmalloc can allocate any integral number of pages.
+ *
+ * kmalloc can allocate any number of bytes up to a configured limit, which defaults to 8 megabytes
+ * on some systems. kmalloc is especially good when memory is being both allocated and freed, and
+ * it does this efficiently in a multi CPU environment.
+ *
+ * kmalloc usually rounds the size of the block up to the next power of two, so when the requested
+ * block is bigger than PAGE_SIZE / 2 bytes, kmalloc will never give you less space than the
+ * corresponding vmalloc allocation. Sometimes vmalloc will use less overhead than kmalloc.
+ *
+ * The advantages of kmalloc do not help out UDS or VDO, because we allocate all our memory up
+ * front and do not free and reallocate it. Sometimes we have problems using kmalloc, because the
+ * Linux memory page map can become so fragmented that kmalloc will not give us a 32KB chunk. We
+ * have used vmalloc as a backup to kmalloc in the past, and a follow-up vmalloc of 32KB will work.
+ * But there is no strong case to be made for using kmalloc over vmalloc for these size chunks.
+ *
+ * The kmalloc/vmalloc boundary is set at 4KB, and kmalloc gets the 4KB requests. There is no
+ * strong reason for favoring either kmalloc or vmalloc for 4KB requests, except that tracking
+ * vmalloc statistics uses a linked list implementation. Using a simple test, this choice of
+ * boundary results in 132 vmalloc calls. Using vmalloc for requests of exactly 4KB results in an
+ * additional 6374 vmalloc calls, which is much less efficient for tracking.
+ *
+ * @size: How many bytes to allocate
+ */
+static inline bool use_kmalloc(size_t size)
+{
+ return size <= PAGE_SIZE;
+}
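+
+/*
+ * For example, with 4KB pages a 4096-byte request (and anything smaller) is satisfied by
+ * kmalloc, while a 4097-byte request goes to __vmalloc, per the boundary discussed above.
+ */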
+
+/*
+ * Allocate storage based on memory size and alignment, logging an error if the allocation fails.
+ * The memory will be zeroed.
+ *
+ * @size: The size of an object
+ * @align: The required alignment
+ * @what: What is being allocated (for error logging)
+ * @ptr: A pointer to hold the allocated memory
+ *
+ * Return: VDO_SUCCESS or an error code
+ */
+int vdo_allocate_memory(size_t size, size_t align, const char *what, void *ptr)
+{
+ /*
+ * The __GFP_RETRY_MAYFAIL flag means the VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication that progress has
+ * been made elsewhere. It can wait for other tasks to attempt high level approaches to
+ * freeing memory such as compaction (which removes fragmentation) and page-out. There is
+ * still a definite limit to the number of retries, but it is a larger limit than with
+ * __GFP_NORETRY. Allocations with this flag may fail, but only when there is genuinely
+ * little unused memory. While these allocations do not directly trigger the OOM killer,
+ * their failure indicates that the system is likely to need to use the OOM killer soon.
+ * The caller must handle failure, but can reasonably do so by failing a higher-level
+ * request, or completing it only in a much less efficient manner.
+ */
+ const gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL;
+ unsigned int noio_flags;
+ bool allocations_restricted = !allocations_allowed();
+ unsigned long start_time;
+ void *p = NULL;
+
+ if (unlikely(ptr == NULL))
+ return -EINVAL;
+
+ if (size == 0) {
+ *((void **) ptr) = NULL;
+ return VDO_SUCCESS;
+ }
+
+ if (allocations_restricted)
+ noio_flags = memalloc_noio_save();
+
+ start_time = jiffies;
+ if (use_kmalloc(size) && (align < PAGE_SIZE)) {
+ p = kmalloc(size, gfp_flags | __GFP_NOWARN);
+ if (p == NULL) {
+ /*
+ * It is possible for kmalloc to fail to allocate memory because there is
+ * no page available. A short sleep may allow the page reclaimer to
+ * free a page.
+ */
+ fsleep(1000);
+ p = kmalloc(size, gfp_flags);
+ }
+
+ if (p != NULL)
+ add_kmalloc_block(ksize(p));
+ } else {
+ struct vmalloc_block_info *block;
+
+ if (vdo_allocate(1, struct vmalloc_block_info, __func__, &block) == VDO_SUCCESS) {
+ /*
+ * It is possible for __vmalloc to fail to allocate memory because there
+ * are no pages available. A short sleep may allow the page reclaimer
+ * to free enough pages for a small allocation.
+ *
+ * For larger allocations, the page_alloc code is racing against the page
+ * reclaimer. If the page reclaimer can stay ahead of page_alloc, the
+ * __vmalloc will succeed. But if page_alloc overtakes the page reclaimer,
+ * the allocation fails. It is possible that more retries will succeed.
+ */
+ for (;;) {
+ p = __vmalloc(size, gfp_flags | __GFP_NOWARN);
+ if (p != NULL)
+ break;
+
+ if (jiffies_to_msecs(jiffies - start_time) > 1000) {
+ /* Try one more time, logging a failure for this call. */
+ p = __vmalloc(size, gfp_flags);
+ break;
+ }
+
+ fsleep(1000);
+ }
+
+ if (p == NULL) {
+ vdo_free(block);
+ } else {
+ block->ptr = p;
+ block->size = PAGE_ALIGN(size);
+ add_vmalloc_block(block);
+ }
+ }
+ }
+
+ if (allocations_restricted)
+ memalloc_noio_restore(noio_flags);
+
+ if (unlikely(p == NULL)) {
+ vdo_log_error("Could not allocate %zu bytes for %s in %u msecs",
+ size, what, jiffies_to_msecs(jiffies - start_time));
+ return -ENOMEM;
+ }
+
+ *((void **) ptr) = p;
+ return VDO_SUCCESS;
+}
+
+/*
+ * Allocate storage based on memory size, failing immediately if the required memory is not
+ * available. The memory will be zeroed.
+ *
+ * @size: The size of an object.
+ * @what: What is being allocated (for error logging)
+ *
+ * Return: pointer to the allocated memory, or NULL if the required space is not available.
+ */
+void *vdo_allocate_memory_nowait(size_t size, const char *what __maybe_unused)
+{
+ void *p = kmalloc(size, GFP_NOWAIT | __GFP_ZERO);
+
+ if (p != NULL)
+ add_kmalloc_block(ksize(p));
+
+ return p;
+}
+
+void vdo_free(void *ptr)
+{
+ if (ptr != NULL) {
+ if (is_vmalloc_addr(ptr)) {
+ remove_vmalloc_block(ptr);
+ vfree(ptr);
+ } else {
+ remove_kmalloc_block(ksize(ptr));
+ kfree(ptr);
+ }
+ }
+}
+
+/*
+ * Reallocate dynamically allocated memory. There are no alignment guarantees for the reallocated
+ * memory. If the new memory is larger than the old memory, the new space will be zeroed.
+ *
+ * @ptr: The memory to reallocate.
+ * @old_size: The old size of the memory
+ * @size: The new size to allocate
+ * @what: What is being allocated (for error logging)
+ * @new_ptr: A pointer to hold the reallocated pointer
+ *
+ * Return: VDO_SUCCESS or an error code
+ */
+int vdo_reallocate_memory(void *ptr, size_t old_size, size_t size, const char *what,
+ void *new_ptr)
+{
+ int result;
+
+ if (size == 0) {
+ vdo_free(ptr);
+ *(void **) new_ptr = NULL;
+ return VDO_SUCCESS;
+ }
+
+ result = vdo_allocate(size, char, what, new_ptr);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (ptr != NULL) {
+ if (old_size < size)
+ size = old_size;
+
+ memcpy(*((void **) new_ptr), ptr, size);
+ vdo_free(ptr);
+ }
+
+ return VDO_SUCCESS;
+}
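+
+/*
+ * For example (a sketch with hypothetical sizes), growing a 128-byte buffer to 256 bytes:
+ *
+ *     char *buffer;    // currently holds a 128-byte allocation
+ *     char *bigger;
+ *     int result = vdo_reallocate_memory(buffer, 128, 256, "bigger buffer", &bigger);
+ *
+ * On VDO_SUCCESS, the first 128 bytes are copied into the new allocation, the remaining
+ * 128 bytes are zeroed, and the old allocation is freed, so "buffer" must not be used again.
+ */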
+
+int vdo_duplicate_string(const char *string, const char *what, char **new_string)
+{
+ int result;
+ u8 *dup;
+
+ result = vdo_allocate(strlen(string) + 1, u8, what, &dup);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ memcpy(dup, string, strlen(string) + 1);
+ *new_string = dup;
+ return VDO_SUCCESS;
+}
+
+void vdo_memory_init(void)
+{
+ spin_lock_init(&memory_stats.lock);
+ vdo_initialize_thread_registry(&allocating_threads);
+}
+
+void vdo_memory_exit(void)
+{
+ VDO_ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0,
+ "kmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
+ memory_stats.kmalloc_bytes, memory_stats.kmalloc_blocks);
+ VDO_ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0,
+ "vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
+ memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks);
+ vdo_log_debug("peak usage %zd bytes", memory_stats.peak_bytes);
+}
+
+void vdo_get_memory_stats(u64 *bytes_used, u64 *peak_bytes_used)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&memory_stats.lock, flags);
+ *bytes_used = memory_stats.kmalloc_bytes + memory_stats.vmalloc_bytes;
+ *peak_bytes_used = memory_stats.peak_bytes;
+ spin_unlock_irqrestore(&memory_stats.lock, flags);
+}
+
+/*
+ * Report stats on any allocated memory that we're tracking. Not all allocation types are
+ * guaranteed to be tracked in bytes (e.g., bios).
+ */
+void vdo_report_memory_usage(void)
+{
+ unsigned long flags;
+ u64 kmalloc_blocks;
+ u64 kmalloc_bytes;
+ u64 vmalloc_blocks;
+ u64 vmalloc_bytes;
+ u64 peak_usage;
+ u64 total_bytes;
+
+ spin_lock_irqsave(&memory_stats.lock, flags);
+ kmalloc_blocks = memory_stats.kmalloc_blocks;
+ kmalloc_bytes = memory_stats.kmalloc_bytes;
+ vmalloc_blocks = memory_stats.vmalloc_blocks;
+ vmalloc_bytes = memory_stats.vmalloc_bytes;
+ peak_usage = memory_stats.peak_bytes;
+ spin_unlock_irqrestore(&memory_stats.lock, flags);
+ total_bytes = kmalloc_bytes + vmalloc_bytes;
+ vdo_log_info("current module memory tracking (actual allocation sizes, not requested):");
+ vdo_log_info(" %llu bytes in %llu kmalloc blocks",
+ (unsigned long long) kmalloc_bytes,
+ (unsigned long long) kmalloc_blocks);
+ vdo_log_info(" %llu bytes in %llu vmalloc blocks",
+ (unsigned long long) vmalloc_bytes,
+ (unsigned long long) vmalloc_blocks);
+ vdo_log_info(" total %llu bytes, peak usage %llu bytes",
+ (unsigned long long) total_bytes, (unsigned long long) peak_usage);
+}
diff --git a/drivers/md/dm-vdo/memory-alloc.h b/drivers/md/dm-vdo/memory-alloc.h
new file mode 100644
index 000000000000..0093d9f940d9
--- /dev/null
+++ b/drivers/md/dm-vdo/memory-alloc.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_MEMORY_ALLOC_H
+#define VDO_MEMORY_ALLOC_H
+
+#include <linux/cache.h>
+#include <linux/io.h> /* for PAGE_SIZE */
+
+#include "permassert.h"
+#include "thread-registry.h"
+
+/* Custom memory allocation function that tracks memory usage */
+int __must_check vdo_allocate_memory(size_t size, size_t align, const char *what, void *ptr);
+
+/*
+ * Allocate storage based on element counts, sizes, and alignment.
+ *
+ * This is a generalized form of our allocation use case: It allocates an array of objects,
+ * optionally preceded by one object of another type (i.e., a struct with trailing variable-length
+ * array), with the alignment indicated.
+ *
+ * Why is this inline? The sizes and alignment will always be constant when invoked through the
+ * macros below, and often the count will be a compile-time constant 1 or the number of extra
+ * bytes will be a compile-time constant 0. So at least some of the arithmetic can usually be
+ * optimized away, and the run-time selection between allocation functions always can. In many
+ * cases, it'll boil down to just a function call with a constant size.
+ *
+ * @count: The number of objects to allocate
+ * @size: The size of an object
+ * @extra: The number of additional bytes to allocate
+ * @align: The required alignment
+ * @what: What is being allocated (for error logging)
+ * @ptr: A pointer to hold the allocated memory
+ *
+ * Return: VDO_SUCCESS or an error code
+ */
+static inline int __vdo_do_allocation(size_t count, size_t size, size_t extra,
+ size_t align, const char *what, void *ptr)
+{
+ size_t total_size = count * size + extra;
+
+ /* Overflow check: */
+ if ((size > 0) && (count > ((SIZE_MAX - extra) / size))) {
+ /*
+ * This is kind of a hack: We rely on the fact that SIZE_MAX would cover the entire
+ * address space (minus one byte) and thus the system can never allocate that much
+ * and the call will always fail. So we can report an overflow as "out of memory"
+ * by asking for "merely" SIZE_MAX bytes.
+ */
+ total_size = SIZE_MAX;
+ }
+
+ return vdo_allocate_memory(total_size, align, what, ptr);
+}
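+
+/*
+ * For example (hypothetical arguments): on a 64-bit system, a call with count = SIZE_MAX / 4,
+ * size = 8, and extra = 0 would overflow the multiplication. The check above catches this and
+ * requests SIZE_MAX bytes instead, which is guaranteed to fail cleanly as an ordinary
+ * out-of-memory error.
+ */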
+
+/*
+ * Allocate one or more elements of the indicated type, logging an error if the allocation fails.
+ * The memory will be zeroed.
+ *
+ * @COUNT: The number of objects to allocate
+ * @TYPE: The type of objects to allocate. This type determines the alignment of the allocation.
+ * @WHAT: What is being allocated (for error logging)
+ * @PTR: A pointer to hold the allocated memory
+ *
+ * Return: VDO_SUCCESS or an error code
+ */
+#define vdo_allocate(COUNT, TYPE, WHAT, PTR) \
+ __vdo_do_allocation(COUNT, sizeof(TYPE), 0, __alignof__(TYPE), WHAT, PTR)
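+
+/*
+ * For example (a sketch with hypothetical names), allocating a zeroed array of 16 u32 values:
+ *
+ *     u32 *values;
+ *     int result = vdo_allocate(16, u32, "value array", &values);
+ *
+ *     if (result != VDO_SUCCESS)
+ *         return result;
+ *
+ * The array is later released with vdo_free(values).
+ */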
+
+/*
+ * Allocate one object of an indicated type, followed by one or more elements of a second type,
+ * logging an error if the allocation fails. The memory will be zeroed.
+ *
+ * @TYPE1: The type of the primary object to allocate. This type determines the alignment of the
+ * allocated memory.
+ * @COUNT: The number of objects to allocate
+ * @TYPE2: The type of array objects to allocate
+ * @WHAT: What is being allocated (for error logging)
+ * @PTR: A pointer to hold the allocated memory
+ *
+ * Return: VDO_SUCCESS or an error code
+ */
+#define vdo_allocate_extended(TYPE1, COUNT, TYPE2, WHAT, PTR) \
+ __extension__({ \
+ int _result; \
+ TYPE1 **_ptr = (PTR); \
+ BUILD_BUG_ON(__alignof__(TYPE1) < __alignof__(TYPE2)); \
+ _result = __vdo_do_allocation(COUNT, \
+ sizeof(TYPE2), \
+ sizeof(TYPE1), \
+ __alignof__(TYPE1), \
+ WHAT, \
+ _ptr); \
+ _result; \
+ })
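+
+/*
+ * For example (a sketch with a hypothetical type), allocating a header struct with a trailing
+ * flexible array of "count" entries in a single call:
+ *
+ *     struct table {
+ *         size_t entry_count;
+ *         u64 entries[];
+ *     };
+ *
+ *     struct table *table;
+ *     int result = vdo_allocate_extended(struct table, count, u64, __func__, &table);
+ *
+ * The allocation is sizeof(struct table) + count * sizeof(u64) bytes, zeroed, and aligned for
+ * struct table; make_bin() in packer.c uses this same pattern.
+ */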
+
+/*
+ * Allocate memory starting on a cache line boundary, logging an error if the allocation fails. The
+ * memory will be zeroed.
+ *
+ * @size: The number of bytes to allocate
+ * @what: What is being allocated (for error logging)
+ * @ptr: A pointer to hold the allocated memory
+ *
+ * Return: VDO_SUCCESS or an error code
+ */
+static inline int __must_check vdo_allocate_cache_aligned(size_t size, const char *what, void *ptr)
+{
+ return vdo_allocate_memory(size, L1_CACHE_BYTES, what, ptr);
+}
+
+/*
+ * Allocate storage of the indicated size immediately, failing if the required memory is not
+ * immediately available. The memory will be zeroed.
+ *
+ * @size: The number of bytes to allocate
+ * @what: What is being allocated (for error logging)
+ *
+ * Return: pointer to the memory, or NULL if the memory is not available.
+ */
+void *__must_check vdo_allocate_memory_nowait(size_t size, const char *what);
+
+int __must_check vdo_reallocate_memory(void *ptr, size_t old_size, size_t size,
+ const char *what, void *new_ptr);
+
+int __must_check vdo_duplicate_string(const char *string, const char *what,
+ char **new_string);
+
+/* Free memory allocated with vdo_allocate(). */
+void vdo_free(void *ptr);
+
+static inline void *__vdo_forget(void **ptr_ptr)
+{
+ void *ptr = *ptr_ptr;
+
+ *ptr_ptr = NULL;
+ return ptr;
+}
+
+/*
+ * Null out a pointer and return a copy of it. This macro should be used when passing a pointer to
+ * a function for which it is not safe to access the pointer once the function returns.
+ */
+#define vdo_forget(ptr) __vdo_forget((void **) &(ptr))
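+
+/*
+ * For example, freeing a structure member while ensuring no dangling pointer remains (this
+ * exact pattern appears in vdo_free_packer() in packer.c):
+ *
+ *     vdo_free(vdo_forget(packer->canceled_bin));
+ */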
+
+void vdo_memory_init(void);
+
+void vdo_memory_exit(void);
+
+void vdo_register_allocating_thread(struct registered_thread *new_thread,
+ const bool *flag_ptr);
+
+void vdo_unregister_allocating_thread(void);
+
+void vdo_get_memory_stats(u64 *bytes_used, u64 *peak_bytes_used);
+
+void vdo_report_memory_usage(void);
+
+#endif /* VDO_MEMORY_ALLOC_H */
diff --git a/drivers/md/dm-vdo/message-stats.c b/drivers/md/dm-vdo/message-stats.c
new file mode 100644
index 000000000000..2802cf92922b
--- /dev/null
+++ b/drivers/md/dm-vdo/message-stats.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "dedupe.h"
+#include "logger.h"
+#include "memory-alloc.h"
+#include "message-stats.h"
+#include "statistics.h"
+#include "thread-device.h"
+#include "vdo.h"
+
+static void write_u64(char *prefix, u64 value, char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ int count;
+
+ count = scnprintf(*buf, *maxlen, "%s%llu%s", prefix == NULL ? "" : prefix,
+ value, suffix == NULL ? "" : suffix);
+ *buf += count;
+ *maxlen -= count;
+}
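+
+/*
+ * For example, with 16 slabs, write_u64("slabCount : ", 16, ", ", buf, maxlen) appends
+ * "slabCount : 16, " to the output, advances *buf past the new text, and reduces *maxlen
+ * by the number of characters written.
+ */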
+
+static void write_u32(char *prefix, u32 value, char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ int count;
+
+ count = scnprintf(*buf, *maxlen, "%s%u%s", prefix == NULL ? "" : prefix,
+ value, suffix == NULL ? "" : suffix);
+ *buf += count;
+ *maxlen -= count;
+}
+
+static void write_block_count_t(char *prefix, block_count_t value, char *suffix,
+ char **buf, unsigned int *maxlen)
+{
+ int count;
+
+ count = scnprintf(*buf, *maxlen, "%s%llu%s", prefix == NULL ? "" : prefix,
+ value, suffix == NULL ? "" : suffix);
+ *buf += count;
+ *maxlen -= count;
+}
+
+static void write_string(char *prefix, char *value, char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ int count;
+
+ count = scnprintf(*buf, *maxlen, "%s%s%s", prefix == NULL ? "" : prefix,
+ value, suffix == NULL ? "" : suffix);
+ *buf += count;
+ *maxlen -= count;
+}
+
+static void write_bool(char *prefix, bool value, char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ int count;
+
+ count = scnprintf(*buf, *maxlen, "%s%d%s", prefix == NULL ? "" : prefix,
+ value, suffix == NULL ? "" : suffix);
+ *buf += count;
+ *maxlen -= count;
+}
+
+static void write_u8(char *prefix, u8 value, char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ int count;
+
+ count = scnprintf(*buf, *maxlen, "%s%u%s", prefix == NULL ? "" : prefix,
+ value, suffix == NULL ? "" : suffix);
+ *buf += count;
+ *maxlen -= count;
+}
+
+static void write_block_allocator_statistics(char *prefix,
+ struct block_allocator_statistics *stats,
+ char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* The total number of slabs from which blocks may be allocated */
+ write_u64("slabCount : ", stats->slab_count, ", ", buf, maxlen);
+ /* The total number of slabs from which blocks have ever been allocated */
+ write_u64("slabsOpened : ", stats->slabs_opened, ", ", buf, maxlen);
+ /* The number of times since loading that a slab has been re-opened */
+ write_u64("slabsReopened : ", stats->slabs_reopened, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_commit_statistics(char *prefix, struct commit_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* The total number of items on which processing has started */
+ write_u64("started : ", stats->started, ", ", buf, maxlen);
+ /* The total number of items for which a write operation has been issued */
+ write_u64("written : ", stats->written, ", ", buf, maxlen);
+ /* The total number of items for which a write operation has completed */
+ write_u64("committed : ", stats->committed, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_recovery_journal_statistics(char *prefix,
+ struct recovery_journal_statistics *stats,
+ char *suffix, char **buf,
+ unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of times the on-disk journal was full */
+ write_u64("diskFull : ", stats->disk_full, ", ", buf, maxlen);
+ /* Number of times the recovery journal requested slab journal commits. */
+ write_u64("slabJournalCommitsRequested : ",
+ stats->slab_journal_commits_requested, ", ", buf, maxlen);
+ /* Write/Commit totals for individual journal entries */
+ write_commit_statistics("entries : ", &stats->entries, ", ", buf, maxlen);
+ /* Write/Commit totals for journal blocks */
+ write_commit_statistics("blocks : ", &stats->blocks, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_packer_statistics(char *prefix, struct packer_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of compressed data items written since startup */
+ write_u64("compressedFragmentsWritten : ",
+ stats->compressed_fragments_written, ", ", buf, maxlen);
+ /* Number of blocks containing compressed items written since startup */
+ write_u64("compressedBlocksWritten : ",
+ stats->compressed_blocks_written, ", ", buf, maxlen);
+ /* Number of VIOs that are pending in the packer */
+ write_u64("compressedFragmentsInPacker : ",
+ stats->compressed_fragments_in_packer, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_slab_journal_statistics(char *prefix,
+ struct slab_journal_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of times the on-disk journal was full */
+ write_u64("diskFullCount : ", stats->disk_full_count, ", ", buf, maxlen);
+ /* Number of times an entry was added over the flush threshold */
+ write_u64("flushCount : ", stats->flush_count, ", ", buf, maxlen);
+ /* Number of times an entry was added over the block threshold */
+ write_u64("blockedCount : ", stats->blocked_count, ", ", buf, maxlen);
+ /* Number of times a tail block was written */
+ write_u64("blocksWritten : ", stats->blocks_written, ", ", buf, maxlen);
+ /* Number of times we had to wait for the tail to write */
+ write_u64("tailBusyCount : ", stats->tail_busy_count, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_slab_summary_statistics(char *prefix,
+ struct slab_summary_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of blocks written */
+ write_u64("blocksWritten : ", stats->blocks_written, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_ref_counts_statistics(char *prefix, struct ref_counts_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of reference blocks written */
+ write_u64("blocksWritten : ", stats->blocks_written, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_block_map_statistics(char *prefix, struct block_map_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* number of dirty (resident) pages */
+ write_u32("dirtyPages : ", stats->dirty_pages, ", ", buf, maxlen);
+ /* number of clean (resident) pages */
+ write_u32("cleanPages : ", stats->clean_pages, ", ", buf, maxlen);
+ /* number of free pages */
+ write_u32("freePages : ", stats->free_pages, ", ", buf, maxlen);
+ /* number of pages in failed state */
+ write_u32("failedPages : ", stats->failed_pages, ", ", buf, maxlen);
+ /* number of pages incoming */
+ write_u32("incomingPages : ", stats->incoming_pages, ", ", buf, maxlen);
+ /* number of pages outgoing */
+ write_u32("outgoingPages : ", stats->outgoing_pages, ", ", buf, maxlen);
+	/* number of times a free page was not available */
+ write_u32("cachePressure : ", stats->cache_pressure, ", ", buf, maxlen);
+ /* number of get_vdo_page() calls for read */
+ write_u64("readCount : ", stats->read_count, ", ", buf, maxlen);
+ /* number of get_vdo_page() calls for write */
+ write_u64("writeCount : ", stats->write_count, ", ", buf, maxlen);
+ /* number of times pages failed to read */
+ write_u64("failedReads : ", stats->failed_reads, ", ", buf, maxlen);
+ /* number of times pages failed to write */
+ write_u64("failedWrites : ", stats->failed_writes, ", ", buf, maxlen);
+ /* number of gets that are reclaimed */
+ write_u64("reclaimed : ", stats->reclaimed, ", ", buf, maxlen);
+ /* number of gets for outgoing pages */
+ write_u64("readOutgoing : ", stats->read_outgoing, ", ", buf, maxlen);
+ /* number of gets that were already there */
+ write_u64("foundInCache : ", stats->found_in_cache, ", ", buf, maxlen);
+ /* number of gets requiring discard */
+ write_u64("discardRequired : ", stats->discard_required, ", ", buf, maxlen);
+ /* number of gets enqueued for their page */
+ write_u64("waitForPage : ", stats->wait_for_page, ", ", buf, maxlen);
+ /* number of gets that have to fetch */
+ write_u64("fetchRequired : ", stats->fetch_required, ", ", buf, maxlen);
+ /* number of page fetches */
+ write_u64("pagesLoaded : ", stats->pages_loaded, ", ", buf, maxlen);
+ /* number of page saves */
+ write_u64("pagesSaved : ", stats->pages_saved, ", ", buf, maxlen);
+ /* the number of flushes issued */
+ write_u64("flushCount : ", stats->flush_count, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_hash_lock_statistics(char *prefix, struct hash_lock_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of times the UDS advice proved correct */
+ write_u64("dedupeAdviceValid : ", stats->dedupe_advice_valid, ", ", buf, maxlen);
+ /* Number of times the UDS advice proved incorrect */
+ write_u64("dedupeAdviceStale : ", stats->dedupe_advice_stale, ", ", buf, maxlen);
+ /* Number of writes with the same data as another in-flight write */
+ write_u64("concurrentDataMatches : ", stats->concurrent_data_matches,
+ ", ", buf, maxlen);
+ /* Number of writes whose hash collided with an in-flight write */
+ write_u64("concurrentHashCollisions : ",
+ stats->concurrent_hash_collisions, ", ", buf, maxlen);
+ /* Current number of dedupe queries that are in flight */
+ write_u32("currDedupeQueries : ", stats->curr_dedupe_queries, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_error_statistics(char *prefix, struct error_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* number of times VDO got an invalid dedupe advice PBN from UDS */
+ write_u64("invalidAdvicePBNCount : ", stats->invalid_advice_pbn_count,
+ ", ", buf, maxlen);
+ /* number of times a VIO completed with a VDO_NO_SPACE error */
+ write_u64("noSpaceErrorCount : ", stats->no_space_error_count, ", ",
+ buf, maxlen);
+ /* number of times a VIO completed with a VDO_READ_ONLY error */
+ write_u64("readOnlyErrorCount : ", stats->read_only_error_count, ", ",
+ buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_bio_stats(char *prefix, struct bio_stats *stats, char *suffix,
+ char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of REQ_OP_READ bios */
+ write_u64("read : ", stats->read, ", ", buf, maxlen);
+ /* Number of REQ_OP_WRITE bios with data */
+ write_u64("write : ", stats->write, ", ", buf, maxlen);
+ /* Number of bios tagged with REQ_PREFLUSH and containing no data */
+ write_u64("emptyFlush : ", stats->empty_flush, ", ", buf, maxlen);
+ /* Number of REQ_OP_DISCARD bios */
+ write_u64("discard : ", stats->discard, ", ", buf, maxlen);
+ /* Number of bios tagged with REQ_PREFLUSH */
+ write_u64("flush : ", stats->flush, ", ", buf, maxlen);
+ /* Number of bios tagged with REQ_FUA */
+ write_u64("fua : ", stats->fua, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_memory_usage(char *prefix, struct memory_usage *stats, char *suffix,
+ char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Tracked bytes currently allocated. */
+ write_u64("bytesUsed : ", stats->bytes_used, ", ", buf, maxlen);
+ /* Maximum tracked bytes allocated. */
+ write_u64("peakBytesUsed : ", stats->peak_bytes_used, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_index_statistics(char *prefix, struct index_statistics *stats,
+ char *suffix, char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ /* Number of records stored in the index */
+ write_u64("entriesIndexed : ", stats->entries_indexed, ", ", buf, maxlen);
+ /* Number of post calls that found an existing entry */
+ write_u64("postsFound : ", stats->posts_found, ", ", buf, maxlen);
+ /* Number of post calls that added a new entry */
+ write_u64("postsNotFound : ", stats->posts_not_found, ", ", buf, maxlen);
+ /* Number of query calls that found an existing entry */
+ write_u64("queriesFound : ", stats->queries_found, ", ", buf, maxlen);
+ /* Number of query calls that added a new entry */
+ write_u64("queriesNotFound : ", stats->queries_not_found, ", ", buf, maxlen);
+ /* Number of update calls that found an existing entry */
+ write_u64("updatesFound : ", stats->updates_found, ", ", buf, maxlen);
+ /* Number of update calls that added a new entry */
+ write_u64("updatesNotFound : ", stats->updates_not_found, ", ", buf, maxlen);
+ /* Number of entries discarded */
+ write_u64("entriesDiscarded : ", stats->entries_discarded, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+static void write_vdo_statistics(char *prefix, struct vdo_statistics *stats, char *suffix,
+ char **buf, unsigned int *maxlen)
+{
+ write_string(prefix, "{ ", NULL, buf, maxlen);
+ write_u32("version : ", stats->version, ", ", buf, maxlen);
+ /* Number of blocks used for data */
+ write_u64("dataBlocksUsed : ", stats->data_blocks_used, ", ", buf, maxlen);
+ /* Number of blocks used for VDO metadata */
+ write_u64("overheadBlocksUsed : ", stats->overhead_blocks_used, ", ",
+ buf, maxlen);
+ /* Number of logical blocks that are currently mapped to physical blocks */
+ write_u64("logicalBlocksUsed : ", stats->logical_blocks_used, ", ", buf, maxlen);
+ /* number of physical blocks */
+ write_block_count_t("physicalBlocks : ", stats->physical_blocks, ", ",
+ buf, maxlen);
+ /* number of logical blocks */
+ write_block_count_t("logicalBlocks : ", stats->logical_blocks, ", ",
+ buf, maxlen);
+ /* Size of the block map page cache, in bytes */
+ write_u64("blockMapCacheSize : ", stats->block_map_cache_size, ", ",
+ buf, maxlen);
+ /* The physical block size */
+ write_u64("blockSize : ", stats->block_size, ", ", buf, maxlen);
+ /* Number of times the VDO has successfully recovered */
+ write_u64("completeRecoveries : ", stats->complete_recoveries, ", ",
+ buf, maxlen);
+ /* Number of times the VDO has recovered from read-only mode */
+ write_u64("readOnlyRecoveries : ", stats->read_only_recoveries, ", ",
+ buf, maxlen);
+ /* String describing the operating mode of the VDO */
+ write_string("mode : ", stats->mode, ", ", buf, maxlen);
+ /* Whether the VDO is in recovery mode */
+ write_bool("inRecoveryMode : ", stats->in_recovery_mode, ", ", buf, maxlen);
+ /* What percentage of recovery mode work has been completed */
+ write_u8("recoveryPercentage : ", stats->recovery_percentage, ", ", buf, maxlen);
+ /* The statistics for the compressed block packer */
+ write_packer_statistics("packer : ", &stats->packer, ", ", buf, maxlen);
+ /* Counters for events in the block allocator */
+ write_block_allocator_statistics("allocator : ", &stats->allocator,
+ ", ", buf, maxlen);
+ /* Counters for events in the recovery journal */
+ write_recovery_journal_statistics("journal : ", &stats->journal, ", ",
+ buf, maxlen);
+ /* The statistics for the slab journals */
+ write_slab_journal_statistics("slabJournal : ", &stats->slab_journal,
+ ", ", buf, maxlen);
+ /* The statistics for the slab summary */
+ write_slab_summary_statistics("slabSummary : ", &stats->slab_summary,
+ ", ", buf, maxlen);
+ /* The statistics for the reference counts */
+ write_ref_counts_statistics("refCounts : ", &stats->ref_counts, ", ",
+ buf, maxlen);
+ /* The statistics for the block map */
+ write_block_map_statistics("blockMap : ", &stats->block_map, ", ", buf, maxlen);
+ /* The dedupe statistics from hash locks */
+ write_hash_lock_statistics("hashLock : ", &stats->hash_lock, ", ", buf, maxlen);
+ /* Counts of error conditions */
+ write_error_statistics("errors : ", &stats->errors, ", ", buf, maxlen);
+ /* The VDO instance */
+ write_u32("instance : ", stats->instance, ", ", buf, maxlen);
+ /* Current number of active VIOs */
+ write_u32("currentVIOsInProgress : ", stats->current_vios_in_progress,
+ ", ", buf, maxlen);
+ /* Maximum number of active VIOs */
+ write_u32("maxVIOs : ", stats->max_vios, ", ", buf, maxlen);
+ /* Number of times the UDS index was too slow in responding */
+ write_u64("dedupeAdviceTimeouts : ", stats->dedupe_advice_timeouts,
+ ", ", buf, maxlen);
+ /* Number of flush requests submitted to the storage device */
+ write_u64("flushOut : ", stats->flush_out, ", ", buf, maxlen);
+ /* Logical block size */
+ write_u64("logicalBlockSize : ", stats->logical_block_size, ", ", buf, maxlen);
+ /* Bios submitted into VDO from above */
+ write_bio_stats("biosIn : ", &stats->bios_in, ", ", buf, maxlen);
+ write_bio_stats("biosInPartial : ", &stats->bios_in_partial, ", ", buf, maxlen);
+ /* Bios submitted onward for user data */
+ write_bio_stats("biosOut : ", &stats->bios_out, ", ", buf, maxlen);
+ /* Bios submitted onward for metadata */
+ write_bio_stats("biosMeta : ", &stats->bios_meta, ", ", buf, maxlen);
+ write_bio_stats("biosJournal : ", &stats->bios_journal, ", ", buf, maxlen);
+ write_bio_stats("biosPageCache : ", &stats->bios_page_cache, ", ", buf, maxlen);
+ write_bio_stats("biosOutCompleted : ", &stats->bios_out_completed, ", ",
+ buf, maxlen);
+ write_bio_stats("biosMetaCompleted : ", &stats->bios_meta_completed,
+ ", ", buf, maxlen);
+ write_bio_stats("biosJournalCompleted : ",
+ &stats->bios_journal_completed, ", ", buf, maxlen);
+ write_bio_stats("biosPageCacheCompleted : ",
+ &stats->bios_page_cache_completed, ", ", buf, maxlen);
+ write_bio_stats("biosAcknowledged : ", &stats->bios_acknowledged, ", ",
+ buf, maxlen);
+ write_bio_stats("biosAcknowledgedPartial : ",
+ &stats->bios_acknowledged_partial, ", ", buf, maxlen);
+ /* Current number of bios in progress */
+ write_bio_stats("biosInProgress : ", &stats->bios_in_progress, ", ",
+ buf, maxlen);
+ /* Memory usage stats. */
+ write_memory_usage("memoryUsage : ", &stats->memory_usage, ", ", buf, maxlen);
+ /* The statistics for the UDS index */
+ write_index_statistics("index : ", &stats->index, ", ", buf, maxlen);
+ write_string(NULL, "}", suffix, buf, maxlen);
+}
+
+int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen)
+{
+ struct vdo_statistics *stats;
+ int result;
+
+ result = vdo_allocate(1, struct vdo_statistics, __func__, &stats);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error("Cannot allocate memory to write VDO statistics");
+ return result;
+ }
+
+ vdo_fetch_statistics(vdo, stats);
+ write_vdo_statistics(NULL, stats, NULL, &buf, &maxlen);
+ vdo_free(stats);
+ return VDO_SUCCESS;
+}
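+
+/*
+ * A usage sketch (assuming the target wires this to a "stats" device-mapper message, and
+ * with a placeholder device name): userspace would request the statistics with something
+ * like
+ *
+ *     dmsetup message <vdo-device> 0 stats
+ *
+ * and receive the brace-delimited key/value output built above.
+ */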
diff --git a/drivers/md/dm-vdo/message-stats.h b/drivers/md/dm-vdo/message-stats.h
new file mode 100644
index 000000000000..f7fceca9acab
--- /dev/null
+++ b/drivers/md/dm-vdo/message-stats.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_MESSAGE_STATS_H
+#define VDO_MESSAGE_STATS_H
+
+#include "types.h"
+
+int vdo_write_stats(struct vdo *vdo, char *buf, unsigned int maxlen);
+
+#endif /* VDO_MESSAGE_STATS_H */
diff --git a/drivers/md/dm-vdo/murmurhash3.c b/drivers/md/dm-vdo/murmurhash3.c
new file mode 100644
index 000000000000..00c9b9c05001
--- /dev/null
+++ b/drivers/md/dm-vdo/murmurhash3.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: LGPL-2.1+
+/*
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain. The author hereby disclaims copyright to this source code.
+ *
+ * Adapted by John Wiele (jwiele@redhat.com).
+ */
+
+#include "murmurhash3.h"
+
+static inline u64 rotl64(u64 x, s8 r)
+{
+ return (x << r) | (x >> (64 - r));
+}
+
+#define ROTL64(x, y) rotl64(x, y)
+static __always_inline u64 getblock64(const u64 *p, int i)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return p[i];
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return __builtin_bswap64(p[i]);
+#else
+#error "can't figure out byte order"
+#endif
+}
+
+static __always_inline void putblock64(u64 *p, int i, u64 value)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ p[i] = value;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ p[i] = __builtin_bswap64(value);
+#else
+#error "can't figure out byte order"
+#endif
+}
+
+/* Finalization mix - force all bits of a hash block to avalanche */
+
+static __always_inline u64 fmix64(u64 k)
+{
+ k ^= k >> 33;
+ k *= 0xff51afd7ed558ccdLLU;
+ k ^= k >> 33;
+ k *= 0xc4ceb9fe1a85ec53LLU;
+ k ^= k >> 33;
+
+ return k;
+}
+
+void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
+{
+ const u8 *data = key;
+ const int nblocks = len / 16;
+
+ u64 h1 = seed;
+ u64 h2 = seed;
+
+ const u64 c1 = 0x87c37b91114253d5LLU;
+ const u64 c2 = 0x4cf5ad432745937fLLU;
+
+ /* body */
+
+ const u64 *blocks = (const u64 *)(data);
+
+ int i;
+
+ for (i = 0; i < nblocks; i++) {
+ u64 k1 = getblock64(blocks, i * 2 + 0);
+ u64 k2 = getblock64(blocks, i * 2 + 1);
+
+ k1 *= c1;
+ k1 = ROTL64(k1, 31);
+ k1 *= c2;
+ h1 ^= k1;
+
+ h1 = ROTL64(h1, 27);
+ h1 += h2;
+ h1 = h1 * 5 + 0x52dce729;
+
+ k2 *= c2;
+ k2 = ROTL64(k2, 33);
+ k2 *= c1;
+ h2 ^= k2;
+
+ h2 = ROTL64(h2, 31);
+ h2 += h1;
+ h2 = h2 * 5 + 0x38495ab5;
+ }
+
+ /* tail */
+
+ {
+ const u8 *tail = (const u8 *)(data + nblocks * 16);
+
+ u64 k1 = 0;
+ u64 k2 = 0;
+
+ switch (len & 15) {
+ case 15:
+ k2 ^= ((u64)tail[14]) << 48;
+ fallthrough;
+ case 14:
+ k2 ^= ((u64)tail[13]) << 40;
+ fallthrough;
+ case 13:
+ k2 ^= ((u64)tail[12]) << 32;
+ fallthrough;
+ case 12:
+ k2 ^= ((u64)tail[11]) << 24;
+ fallthrough;
+ case 11:
+ k2 ^= ((u64)tail[10]) << 16;
+ fallthrough;
+ case 10:
+ k2 ^= ((u64)tail[9]) << 8;
+ fallthrough;
+ case 9:
+ k2 ^= ((u64)tail[8]) << 0;
+ k2 *= c2;
+ k2 = ROTL64(k2, 33);
+ k2 *= c1;
+ h2 ^= k2;
+ fallthrough;
+
+ case 8:
+ k1 ^= ((u64)tail[7]) << 56;
+ fallthrough;
+ case 7:
+ k1 ^= ((u64)tail[6]) << 48;
+ fallthrough;
+ case 6:
+ k1 ^= ((u64)tail[5]) << 40;
+ fallthrough;
+ case 5:
+ k1 ^= ((u64)tail[4]) << 32;
+ fallthrough;
+ case 4:
+ k1 ^= ((u64)tail[3]) << 24;
+ fallthrough;
+ case 3:
+ k1 ^= ((u64)tail[2]) << 16;
+ fallthrough;
+ case 2:
+ k1 ^= ((u64)tail[1]) << 8;
+ fallthrough;
+ case 1:
+ k1 ^= ((u64)tail[0]) << 0;
+ k1 *= c1;
+ k1 = ROTL64(k1, 31);
+ k1 *= c2;
+ h1 ^= k1;
+ break;
+ default:
+ break;
+		}
+ }
+ /* finalization */
+
+ h1 ^= len;
+ h2 ^= len;
+
+ h1 += h2;
+ h2 += h1;
+
+ h1 = fmix64(h1);
+ h2 = fmix64(h2);
+
+ h1 += h2;
+ h2 += h1;
+
+ putblock64((u64 *)out, 0, h1);
+ putblock64((u64 *)out, 1, h2);
+}
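+
+/*
+ * A usage sketch (the input bytes and seed value here are hypothetical): hashing a
+ * 16-byte record name into a 128-bit digest.
+ *
+ *     u64 digest[2];
+ *     const char name[16] = "example-record!";
+ *
+ *     murmurhash3_128(name, 16, 0x9747b28c, digest);
+ */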
diff --git a/drivers/md/dm-vdo/murmurhash3.h b/drivers/md/dm-vdo/murmurhash3.h
new file mode 100644
index 000000000000..d84711ddb659
--- /dev/null
+++ b/drivers/md/dm-vdo/murmurhash3.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/*
+ * MurmurHash3 was written by Austin Appleby, and is placed in the public
+ * domain. The author hereby disclaims copyright to this source code.
+ */
+
+#ifndef _MURMURHASH3_H_
+#define _MURMURHASH3_H_
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+void murmurhash3_128(const void *key, int len, u32 seed, void *out);
+
+#endif /* _MURMURHASH3_H_ */
diff --git a/drivers/md/dm-vdo/numeric.h b/drivers/md/dm-vdo/numeric.h
new file mode 100644
index 000000000000..dc8c400b21d2
--- /dev/null
+++ b/drivers/md/dm-vdo/numeric.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_NUMERIC_H
+#define UDS_NUMERIC_H
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * These utilities encode or decode a number from an offset in a larger data buffer and then
+ * advance the offset pointer to the next field in the buffer.
+ */
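+
+/*
+ * For example (a sketch with hypothetical values), encoding and then decoding two fields
+ * from the same buffer:
+ *
+ *     u8 buffer[12];
+ *     size_t offset = 0;
+ *     u64 big;
+ *     u32 small;
+ *
+ *     encode_u64_le(buffer, &offset, 0x1234);    // offset is now 8
+ *     encode_u32_le(buffer, &offset, 99);        // offset is now 12
+ *
+ *     offset = 0;
+ *     decode_u64_le(buffer, &offset, &big);      // big == 0x1234
+ *     decode_u32_le(buffer, &offset, &small);    // small == 99
+ */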
+
+static inline void decode_s64_le(const u8 *buffer, size_t *offset, s64 *decoded)
+{
+ *decoded = get_unaligned_le64(buffer + *offset);
+ *offset += sizeof(s64);
+}
+
+static inline void encode_s64_le(u8 *data, size_t *offset, s64 to_encode)
+{
+ put_unaligned_le64(to_encode, data + *offset);
+ *offset += sizeof(s64);
+}
+
+static inline void decode_u64_le(const u8 *buffer, size_t *offset, u64 *decoded)
+{
+ *decoded = get_unaligned_le64(buffer + *offset);
+ *offset += sizeof(u64);
+}
+
+static inline void encode_u64_le(u8 *data, size_t *offset, u64 to_encode)
+{
+ put_unaligned_le64(to_encode, data + *offset);
+ *offset += sizeof(u64);
+}
+
+static inline void decode_s32_le(const u8 *buffer, size_t *offset, s32 *decoded)
+{
+ *decoded = get_unaligned_le32(buffer + *offset);
+ *offset += sizeof(s32);
+}
+
+static inline void encode_s32_le(u8 *data, size_t *offset, s32 to_encode)
+{
+ put_unaligned_le32(to_encode, data + *offset);
+ *offset += sizeof(s32);
+}
+
+static inline void decode_u32_le(const u8 *buffer, size_t *offset, u32 *decoded)
+{
+ *decoded = get_unaligned_le32(buffer + *offset);
+ *offset += sizeof(u32);
+}
+
+static inline void encode_u32_le(u8 *data, size_t *offset, u32 to_encode)
+{
+ put_unaligned_le32(to_encode, data + *offset);
+ *offset += sizeof(u32);
+}
+
+static inline void decode_u16_le(const u8 *buffer, size_t *offset, u16 *decoded)
+{
+ *decoded = get_unaligned_le16(buffer + *offset);
+ *offset += sizeof(u16);
+}
+
+static inline void encode_u16_le(u8 *data, size_t *offset, u16 to_encode)
+{
+ put_unaligned_le16(to_encode, data + *offset);
+ *offset += sizeof(u16);
+}
+
+#endif /* UDS_NUMERIC_H */
diff --git a/drivers/md/dm-vdo/packer.c b/drivers/md/dm-vdo/packer.c
new file mode 100644
index 000000000000..16cf29b4c90a
--- /dev/null
+++ b/drivers/md/dm-vdo/packer.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "packer.h"
+
+#include <linux/atomic.h>
+#include <linux/blkdev.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "admin-state.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "dedupe.h"
+#include "encodings.h"
+#include "io-submitter.h"
+#include "physical-zone.h"
+#include "status-codes.h"
+#include "vdo.h"
+#include "vio.h"
+
+static const struct version_number COMPRESSED_BLOCK_1_0 = {
+ .major_version = 1,
+ .minor_version = 0,
+};
+
+#define COMPRESSED_BLOCK_1_0_SIZE (4 + 4 + (2 * VDO_MAX_COMPRESSION_SLOTS))
+
+/**
+ * vdo_get_compressed_block_fragment() - Get a reference to a compressed fragment from a compressed
+ * block.
+ * @mapping_state: [in] The mapping state for the lookup.
+ * @block: [in] The compressed block that was read from disk.
+ * @fragment_offset: [out] The offset of the fragment within the compressed block.
+ * @fragment_size: [out] The size of the fragment.
+ *
+ * Return: VDO_SUCCESS if a valid compressed fragment is found; otherwise VDO_INVALID_FRAGMENT.
+ */
+int vdo_get_compressed_block_fragment(enum block_mapping_state mapping_state,
+ struct compressed_block *block,
+ u16 *fragment_offset, u16 *fragment_size)
+{
+ u16 compressed_size;
+ u16 offset = 0;
+ unsigned int i;
+ u8 slot;
+ struct version_number version;
+
+ if (!vdo_is_state_compressed(mapping_state))
+ return VDO_INVALID_FRAGMENT;
+
+ version = vdo_unpack_version_number(block->header.version);
+ if (!vdo_are_same_version(version, COMPRESSED_BLOCK_1_0))
+ return VDO_INVALID_FRAGMENT;
+
+ slot = mapping_state - VDO_MAPPING_STATE_COMPRESSED_BASE;
+ if (slot >= VDO_MAX_COMPRESSION_SLOTS)
+ return VDO_INVALID_FRAGMENT;
+
+ compressed_size = __le16_to_cpu(block->header.sizes[slot]);
+ for (i = 0; i < slot; i++) {
+ offset += __le16_to_cpu(block->header.sizes[i]);
+ if (offset >= VDO_COMPRESSED_BLOCK_DATA_SIZE)
+ return VDO_INVALID_FRAGMENT;
+ }
+
+ if ((offset + compressed_size) > VDO_COMPRESSED_BLOCK_DATA_SIZE)
+ return VDO_INVALID_FRAGMENT;
+
+ *fragment_offset = offset;
+ *fragment_size = compressed_size;
+ return VDO_SUCCESS;
+}
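+
+/*
+ * For example (hypothetical sizes, all assumed to fit within VDO_COMPRESSED_BLOCK_DATA_SIZE):
+ * if the header records fragment sizes { 1000, 500, 2000, ... } and the mapping state selects
+ * slot 2, the fragment starts at offset 1000 + 500 = 1500 and is 2000 bytes long.
+ */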
+
+/**
+ * assert_on_packer_thread() - Check that we are on the packer thread.
+ * @packer: The packer.
+ * @caller: The function which is asserting.
+ */
+static inline void assert_on_packer_thread(struct packer *packer, const char *caller)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id),
+ "%s() called from packer thread", caller);
+}
+
+/**
+ * insert_in_sorted_list() - Insert a bin into the list.
+ * @packer: The packer.
+ * @bin: The bin to move to its sorted position.
+ *
+ * The list is in ascending order of free space. Since all bins are already in the list, this
+ * actually moves the bin to the correct position in the list.
+ */
+static void insert_in_sorted_list(struct packer *packer, struct packer_bin *bin)
+{
+ struct packer_bin *active_bin;
+
+ list_for_each_entry(active_bin, &packer->bins, list)
+ if (active_bin->free_space > bin->free_space) {
+ list_move_tail(&bin->list, &active_bin->list);
+ return;
+ }
+
+ list_move_tail(&bin->list, &packer->bins);
+}
+
+/**
+ * make_bin() - Allocate a bin and put it into the packer's list.
+ * @packer: The packer.
+ */
+static int __must_check make_bin(struct packer *packer)
+{
+ struct packer_bin *bin;
+ int result;
+
+ result = vdo_allocate_extended(struct packer_bin, VDO_MAX_COMPRESSION_SLOTS,
+ struct vio *, __func__, &bin);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ bin->free_space = VDO_COMPRESSED_BLOCK_DATA_SIZE;
+ INIT_LIST_HEAD(&bin->list);
+ list_add_tail(&bin->list, &packer->bins);
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_make_packer() - Make a new block packer.
+ *
+ * @vdo: The vdo to which this packer belongs.
+ * @bin_count: The number of partial bins to keep in memory.
+ * @packer_ptr: A pointer to hold the new packer.
+ *
+ * Return: VDO_SUCCESS or an error
+ */
+int vdo_make_packer(struct vdo *vdo, block_count_t bin_count, struct packer **packer_ptr)
+{
+ struct packer *packer;
+ block_count_t i;
+ int result;
+
+ result = vdo_allocate(1, struct packer, __func__, &packer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ packer->thread_id = vdo->thread_config.packer_thread;
+ packer->size = bin_count;
+ INIT_LIST_HEAD(&packer->bins);
+ vdo_set_admin_state_code(&packer->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+
+ for (i = 0; i < bin_count; i++) {
+ result = make_bin(packer);
+ if (result != VDO_SUCCESS) {
+ vdo_free_packer(packer);
+ return result;
+ }
+ }
+
+ /*
+ * The canceled bin can hold up to half the number of user vios. Every canceled vio in the
+ * bin must have a canceler for which it is waiting, and any canceler will only have
+ * canceled one lock holder at a time.
+ */
+ result = vdo_allocate_extended(struct packer_bin, MAXIMUM_VDO_USER_VIOS / 2,
+ struct vio *, __func__, &packer->canceled_bin);
+ if (result != VDO_SUCCESS) {
+ vdo_free_packer(packer);
+ return result;
+ }
+
+ result = vdo_make_default_thread(vdo, packer->thread_id);
+ if (result != VDO_SUCCESS) {
+ vdo_free_packer(packer);
+ return result;
+ }
+
+ *packer_ptr = packer;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_free_packer() - Free a block packer.
+ * @packer: The packer to free.
+ */
+void vdo_free_packer(struct packer *packer)
+{
+ struct packer_bin *bin, *tmp;
+
+ if (packer == NULL)
+ return;
+
+ list_for_each_entry_safe(bin, tmp, &packer->bins, list) {
+ list_del_init(&bin->list);
+ vdo_free(bin);
+ }
+
+ vdo_free(vdo_forget(packer->canceled_bin));
+ vdo_free(packer);
+}
+
+/**
+ * get_packer_from_data_vio() - Get the packer from a data_vio.
+ * @data_vio: The data_vio.
+ *
+ * Return: The packer from the VDO to which the data_vio belongs.
+ */
+static inline struct packer *get_packer_from_data_vio(struct data_vio *data_vio)
+{
+ return vdo_from_data_vio(data_vio)->packer;
+}
+
+/**
+ * vdo_get_packer_statistics() - Get the current statistics from the packer.
+ * @packer: The packer to query.
+ *
+ * Return: a copy of the current statistics for the packer.
+ */
+struct packer_statistics vdo_get_packer_statistics(const struct packer *packer)
+{
+ const struct packer_statistics *stats = &packer->statistics;
+
+ return (struct packer_statistics) {
+ .compressed_fragments_written = READ_ONCE(stats->compressed_fragments_written),
+ .compressed_blocks_written = READ_ONCE(stats->compressed_blocks_written),
+ .compressed_fragments_in_packer = READ_ONCE(stats->compressed_fragments_in_packer),
+ };
+}
+
+/**
+ * abort_packing() - Abort packing a data_vio.
+ * @data_vio: The data_vio to abort.
+ */
+static void abort_packing(struct data_vio *data_vio)
+{
+ struct packer *packer = get_packer_from_data_vio(data_vio);
+
+ WRITE_ONCE(packer->statistics.compressed_fragments_in_packer,
+ packer->statistics.compressed_fragments_in_packer - 1);
+
+ write_data_vio(data_vio);
+}
+
+/**
+ * release_compressed_write_waiter() - Update a data_vio for which a successful compressed write
+ * has completed and send it on its way.
+
+ *
+ * @allocation: The allocation to which the compressed block was written.
+ */
+static void release_compressed_write_waiter(struct data_vio *data_vio,
+ struct allocation *allocation)
+{
+ data_vio->new_mapped = (struct zoned_pbn) {
+ .pbn = allocation->pbn,
+ .zone = allocation->zone,
+ .state = data_vio->compression.slot + VDO_MAPPING_STATE_COMPRESSED_BASE,
+ };
+
+ vdo_share_compressed_write_lock(data_vio, allocation->lock);
+ update_metadata_for_data_vio_write(data_vio, allocation->lock);
+}
+
+/**
+ * finish_compressed_write() - Finish a compressed block write.
+ * @completion: The compressed write completion.
+ *
+ * This callback is registered in continue_after_allocation().
+ */
+static void finish_compressed_write(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct data_vio *client, *next;
+
+ assert_data_vio_in_allocated_zone(agent);
+
+ /*
+ * Process all the non-agent waiters first to ensure that the pbn lock can not be released
+ * until all of them have had a chance to journal their increfs.
+ */
+ for (client = agent->compression.next_in_batch; client != NULL; client = next) {
+ next = client->compression.next_in_batch;
+ release_compressed_write_waiter(client, &agent->allocation);
+ }
+
+ completion->error_handler = handle_data_vio_error;
+ release_compressed_write_waiter(agent, &agent->allocation);
+}
+
+static void handle_compressed_write_error(struct vdo_completion *completion)
+{
+ struct data_vio *agent = as_data_vio(completion);
+ struct allocation *allocation = &agent->allocation;
+ struct data_vio *client, *next;
+
+ if (vdo_requeue_completion_if_needed(completion, allocation->zone->thread_id))
+ return;
+
+ update_vio_error_stats(as_vio(completion),
+ "Completing compressed write vio for physical block %llu with error",
+ (unsigned long long) allocation->pbn);
+
+ for (client = agent->compression.next_in_batch; client != NULL; client = next) {
+ next = client->compression.next_in_batch;
+ write_data_vio(client);
+ }
+
+ /* Now that we've released the batch from the packer, forget the error and continue on. */
+ vdo_reset_completion(completion);
+ completion->error_handler = handle_data_vio_error;
+ write_data_vio(agent);
+}
+
+/**
+ * add_to_bin() - Put a data_vio in a specific packer_bin in which it will definitely fit.
+ * @bin: The bin in which to put the data_vio.
+ * @data_vio: The data_vio to add.
+ */
+static void add_to_bin(struct packer_bin *bin, struct data_vio *data_vio)
+{
+ data_vio->compression.bin = bin;
+ data_vio->compression.slot = bin->slots_used;
+ bin->incoming[bin->slots_used++] = data_vio;
+}
+
+/**
+ * remove_from_bin() - Get the next data_vio whose compression has not been canceled from a bin.
+ * @packer: The packer.
+ * @bin: The bin from which to get a data_vio.
+ *
+ * Any canceled data_vios will be moved to the canceled bin.
+ *
+ * Return: An uncanceled data_vio from the bin or NULL if there are none.
+ */
+static struct data_vio *remove_from_bin(struct packer *packer, struct packer_bin *bin)
+{
+ while (bin->slots_used > 0) {
+ struct data_vio *data_vio = bin->incoming[--bin->slots_used];
+
+ if (!advance_data_vio_compression_stage(data_vio).may_not_compress) {
+ data_vio->compression.bin = NULL;
+ return data_vio;
+ }
+
+ add_to_bin(packer->canceled_bin, data_vio);
+ }
+
+ /* The bin is now empty. */
+ bin->free_space = VDO_COMPRESSED_BLOCK_DATA_SIZE;
+ return NULL;
+}
+
+/**
+ * initialize_compressed_block() - Initialize a compressed block.
+ * @block: The compressed block to initialize.
+ * @size: The size of the agent's fragment.
+ *
+ * This method initializes the compressed block in the compressed write agent. Because the
+ * compressor already put the agent's compressed fragment at the start of the compressed block's
+ * data field, it needn't be copied. So all we need do is initialize the header and set the size of
+ * the agent's fragment.
+ */
+static void initialize_compressed_block(struct compressed_block *block, u16 size)
+{
+ /*
+ * Make sure the block layout isn't accidentally changed by changing the length of the
+ * block header.
+ */
+ BUILD_BUG_ON(sizeof(struct compressed_block_header) != COMPRESSED_BLOCK_1_0_SIZE);
+
+ block->header.version = vdo_pack_version_number(COMPRESSED_BLOCK_1_0);
+ block->header.sizes[0] = __cpu_to_le16(size);
+}
+
+/**
+ * pack_fragment() - Pack a data_vio's fragment into the compressed block in which it is already
+ * known to fit.
+ * @compression: The agent's compression_state to pack into.
+ * @data_vio: The data_vio to pack.
+ * @offset: The offset into the compressed block at which to pack the fragment.
+ * @slot: The slot in the compressed block to assign to the fragment.
+ * @block: The compressed block which will be written out when the batch is fully packed.
+ *
+ * Return: The new amount of space used.
+ */
+static block_size_t __must_check pack_fragment(struct compression_state *compression,
+ struct data_vio *data_vio,
+ block_size_t offset, slot_number_t slot,
+ struct compressed_block *block)
+{
+ struct compression_state *to_pack = &data_vio->compression;
+ char *fragment = to_pack->block->data;
+
+ to_pack->next_in_batch = compression->next_in_batch;
+ compression->next_in_batch = data_vio;
+ to_pack->slot = slot;
+ block->header.sizes[slot] = __cpu_to_le16(to_pack->size);
+ memcpy(&block->data[offset], fragment, to_pack->size);
+ return (offset + to_pack->size);
+}
+
+/**
+ * compressed_write_end_io() - The bio_end_io for a compressed block write.
+ * @bio: The bio for the compressed write.
+ */
+static void compressed_write_end_io(struct bio *bio)
+{
+ struct data_vio *data_vio = vio_as_data_vio(bio->bi_private);
+
+ vdo_count_completed_bios(bio);
+ set_data_vio_allocated_zone_callback(data_vio, finish_compressed_write);
+ continue_data_vio_with_error(data_vio, blk_status_to_errno(bio->bi_status));
+}
+
+/**
+ * write_bin() - Write out a bin.
+ * @packer: The packer.
+ * @bin: The bin to write.
+ */
+static void write_bin(struct packer *packer, struct packer_bin *bin)
+{
+ int result;
+ block_size_t offset;
+ slot_number_t slot = 1;
+ struct compression_state *compression;
+ struct compressed_block *block;
+ struct data_vio *agent = remove_from_bin(packer, bin);
+ struct data_vio *client;
+ struct packer_statistics *stats;
+
+ if (agent == NULL)
+ return;
+
+ compression = &agent->compression;
+ compression->slot = 0;
+ block = compression->block;
+ initialize_compressed_block(block, compression->size);
+ offset = compression->size;
+
+ while ((client = remove_from_bin(packer, bin)) != NULL)
+ offset = pack_fragment(compression, client, offset, slot++, block);
+
+ /*
+	 * If the batch contains only a single vio, then we gain nothing by writing the compressed
+	 * form. Continue processing the single vio in the batch.
+ */
+ if (slot == 1) {
+ abort_packing(agent);
+ return;
+ }
+
+ if (slot < VDO_MAX_COMPRESSION_SLOTS) {
+ /* Clear out the sizes of the unused slots */
+ memset(&block->header.sizes[slot], 0,
+ (VDO_MAX_COMPRESSION_SLOTS - slot) * sizeof(__le16));
+ }
+
+ agent->vio.completion.error_handler = handle_compressed_write_error;
+ if (vdo_is_read_only(vdo_from_data_vio(agent))) {
+ continue_data_vio_with_error(agent, VDO_READ_ONLY);
+ return;
+ }
+
+ result = vio_reset_bio(&agent->vio, (char *) block, compressed_write_end_io,
+ REQ_OP_WRITE, agent->allocation.pbn);
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(agent, result);
+ return;
+ }
+
+ /*
+ * Once the compressed write is submitted, the fragments are no longer in the packer, so
+ * update stats now.
+ */
+ stats = &packer->statistics;
+ WRITE_ONCE(stats->compressed_fragments_in_packer,
+ (stats->compressed_fragments_in_packer - slot));
+ WRITE_ONCE(stats->compressed_fragments_written,
+ (stats->compressed_fragments_written + slot));
+ WRITE_ONCE(stats->compressed_blocks_written,
+ stats->compressed_blocks_written + 1);
+
+ vdo_submit_data_vio(agent);
+}
+
+/**
+ * add_data_vio_to_packer_bin() - Add a data_vio to a bin's incoming queue.
+ * @packer: The packer.
+ * @bin: The bin to which to add the data_vio.
+ * @data_vio: The data_vio to add to the bin's queue.
+ *
+ * Writes out the bin first if it lacks room for the data_vio's fragment, writes it again if
+ * the addition fills it exactly, and then restores the bin's position in the sorted list.
+ */
+static void add_data_vio_to_packer_bin(struct packer *packer, struct packer_bin *bin,
+ struct data_vio *data_vio)
+{
+ /* If the selected bin doesn't have room, start a new batch to make room. */
+ if (bin->free_space < data_vio->compression.size)
+ write_bin(packer, bin);
+
+ add_to_bin(bin, data_vio);
+ bin->free_space -= data_vio->compression.size;
+
+ /* If we happen to exactly fill the bin, start a new batch. */
+ if ((bin->slots_used == VDO_MAX_COMPRESSION_SLOTS) ||
+ (bin->free_space == 0))
+ write_bin(packer, bin);
+
+ /* Now that we've finished changing the free space, restore the sort order. */
+ insert_in_sorted_list(packer, bin);
+}
+
+/**
+ * select_bin() - Select the bin that should be used to pack the compressed data in a data_vio with
+ * other data_vios.
+ * @packer: The packer.
+ * @data_vio: The data_vio.
+ */
+static struct packer_bin * __must_check select_bin(struct packer *packer,
+ struct data_vio *data_vio)
+{
+ /*
+ * First best fit: select the bin with the least free space that has enough room for the
+ * compressed data in the data_vio.
+ */
+ struct packer_bin *bin, *fullest_bin;
+
+ list_for_each_entry(bin, &packer->bins, list) {
+ if (bin->free_space >= data_vio->compression.size)
+ return bin;
+ }
+
+ /*
+ * None of the bins have enough space for the data_vio. We're not allowed to create new
+ * bins, so we have to overflow one of the existing bins. It's pretty intuitive to select
+ * the fullest bin, since that "wastes" the least amount of free space in the compressed
+ * block. But if the space currently used in the fullest bin is smaller than the compressed
+ * size of the incoming block, it seems wrong to force that bin to write when giving up on
+ * compressing the incoming data_vio would likewise "waste" the least amount of free space.
+ */
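+	/*
+	 * A numeric sketch (hypothetical sizes): suppose no bin has room for an incoming
+	 * 3500-byte fragment. If the fullest bin already holds 3800 bytes of fragments, we
+	 * overflow it, since writing it out wastes less space than writing the incoming data
+	 * uncompressed. But if the fullest bin holds only 3000 bytes, we return NULL and give
+	 * up on compressing the incoming data_vio instead.
+	 */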
+ fullest_bin = list_first_entry(&packer->bins, struct packer_bin, list);
+ if (data_vio->compression.size >=
+ (VDO_COMPRESSED_BLOCK_DATA_SIZE - fullest_bin->free_space))
+ return NULL;
+
+ /*
+ * The fullest bin doesn't have room, but writing it out and starting a new batch with the
+ * incoming data_vio will increase the packer's free space.
+ */
+ return fullest_bin;
+}
+
+/**
+ * vdo_attempt_packing() - Attempt to rewrite the data in this data_vio as part of a compressed
+ * block.
+ * @data_vio: The data_vio to pack.
+ */
+void vdo_attempt_packing(struct data_vio *data_vio)
+{
+ int result;
+ struct packer_bin *bin;
+ struct data_vio_compression_status status = get_data_vio_compression_status(data_vio);
+ struct packer *packer = get_packer_from_data_vio(data_vio);
+
+ assert_on_packer_thread(packer, __func__);
+
+ result = VDO_ASSERT((status.stage == DATA_VIO_COMPRESSING),
+ "attempt to pack data_vio not ready for packing, stage: %u",
+ status.stage);
+ if (result != VDO_SUCCESS)
+ return;
+
+ /*
+	 * Increment the counter whether or not this data_vio will be packed, since
+	 * abort_packing() always decrements it.
+ */
+ WRITE_ONCE(packer->statistics.compressed_fragments_in_packer,
+ packer->statistics.compressed_fragments_in_packer + 1);
+
+ /*
+ * If packing of this data_vio is disallowed for administrative reasons, give up before
+ * making any state changes.
+ */
+ if (!vdo_is_state_normal(&packer->state) ||
+ (data_vio->flush_generation < packer->flush_generation)) {
+ abort_packing(data_vio);
+ return;
+ }
+
+ /*
+ * The advance_data_vio_compression_stage() check here verifies that the data_vio is
+ * allowed to be compressed (if it has already been canceled, we'll fall out here). Once
+ * the data_vio is in the DATA_VIO_PACKING state, it must be guaranteed to be put in a bin
+ * before any more requests can be processed by the packer thread. Otherwise, a canceling
+ * data_vio could attempt to remove the canceled data_vio from the packer and fail to
+ * rendezvous with it. Thus, we must call select_bin() first to ensure that we will
+ * actually add the data_vio to a bin before advancing to the DATA_VIO_PACKING stage.
+ */
+ bin = select_bin(packer, data_vio);
+ if ((bin == NULL) ||
+ (advance_data_vio_compression_stage(data_vio).stage != DATA_VIO_PACKING)) {
+ abort_packing(data_vio);
+ return;
+ }
+
+ add_data_vio_to_packer_bin(packer, bin, data_vio);
+}
+
+/**
+ * check_for_drain_complete() - Check whether the packer has drained.
+ * @packer: The packer.
+ */
+static void check_for_drain_complete(struct packer *packer)
+{
+ if (vdo_is_state_draining(&packer->state) && (packer->canceled_bin->slots_used == 0))
+ vdo_finish_draining(&packer->state);
+}
+
+/**
+ * write_all_non_empty_bins() - Write out all non-empty bins on behalf of a flush or suspend.
+ * @packer: The packer being flushed.
+ */
+static void write_all_non_empty_bins(struct packer *packer)
+{
+ struct packer_bin *bin;
+
+ list_for_each_entry(bin, &packer->bins, list)
+ write_bin(packer, bin);
+ /*
+	 * We don't need to re-sort the bins here: this loop leaves every bin with the same
+	 * amount of free space, so any ordering is sorted.
+ */
+
+ check_for_drain_complete(packer);
+}
+
+/**
+ * vdo_flush_packer() - Request that the packer flush asynchronously.
+ * @packer: The packer to flush.
+ *
+ * All bins with at least two compressed data blocks will be written out, and any solitary pending
+ * VIOs will be released from the packer. While flushing is in progress, any VIOs submitted to
+ * vdo_attempt_packing() will be continued immediately without attempting to pack them.
+ */
+void vdo_flush_packer(struct packer *packer)
+{
+ assert_on_packer_thread(packer, __func__);
+ if (vdo_is_state_normal(&packer->state))
+ write_all_non_empty_bins(packer);
+}
+
+/**
+ * vdo_remove_lock_holder_from_packer() - Remove a lock holder from the packer.
+ * @completion: The data_vio which needs a lock held by a data_vio in the packer. The data_vio's
+ * compression.lock_holder field will point to the data_vio to remove.
+ */
+void vdo_remove_lock_holder_from_packer(struct vdo_completion *completion)
+{
+ struct data_vio *data_vio = as_data_vio(completion);
+ struct packer *packer = get_packer_from_data_vio(data_vio);
+ struct data_vio *lock_holder;
+ struct packer_bin *bin;
+ slot_number_t slot;
+
+ assert_data_vio_in_packer_zone(data_vio);
+
+ lock_holder = vdo_forget(data_vio->compression.lock_holder);
+ bin = lock_holder->compression.bin;
+ VDO_ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin");
+
+ slot = lock_holder->compression.slot;
+ bin->slots_used--;
+ if (slot < bin->slots_used) {
+ bin->incoming[slot] = bin->incoming[bin->slots_used];
+ bin->incoming[slot]->compression.slot = slot;
+ }
+
+ lock_holder->compression.bin = NULL;
+ lock_holder->compression.slot = 0;
+
+ if (bin != packer->canceled_bin) {
+ bin->free_space += lock_holder->compression.size;
+ insert_in_sorted_list(packer, bin);
+ }
+
+ abort_packing(lock_holder);
+ check_for_drain_complete(packer);
+}
+
+/**
+ * vdo_increment_packer_flush_generation() - Increment the flush generation in the packer.
+ * @packer: The packer.
+ *
+ * This will also cause the packer to flush so that any VIOs from previous generations will exit
+ * the packer.
+ */
+void vdo_increment_packer_flush_generation(struct packer *packer)
+{
+ assert_on_packer_thread(packer, __func__);
+ packer->flush_generation++;
+ vdo_flush_packer(packer);
+}
+
+/**
+ * initiate_drain() - Initiate a drain.
+ *
+ * Implements vdo_admin_initiator_fn.
+ */
+static void initiate_drain(struct admin_state *state)
+{
+ struct packer *packer = container_of(state, struct packer, state);
+
+ write_all_non_empty_bins(packer);
+}
+
+/**
+ * vdo_drain_packer() - Drain the packer by preventing any more VIOs from entering the packer and
+ * then flushing.
+ * @packer: The packer to drain.
+ * @completion: The completion to finish when the packer has drained.
+ */
+void vdo_drain_packer(struct packer *packer, struct vdo_completion *completion)
+{
+ assert_on_packer_thread(packer, __func__);
+ vdo_start_draining(&packer->state, VDO_ADMIN_STATE_SUSPENDING, completion,
+ initiate_drain);
+}
+
+/**
+ * vdo_resume_packer() - Resume a packer which has been suspended.
+ * @packer: The packer to resume.
+ * @parent: The completion to finish when the packer has resumed.
+ */
+void vdo_resume_packer(struct packer *packer, struct vdo_completion *parent)
+{
+ assert_on_packer_thread(packer, __func__);
+ vdo_continue_completion(parent, vdo_resume_if_quiescent(&packer->state));
+}
+
+static void dump_packer_bin(const struct packer_bin *bin, bool canceled)
+{
+ if (bin->slots_used == 0)
+ /* Don't dump empty bins. */
+ return;
+
+ vdo_log_info(" %sBin slots_used=%u free_space=%zu",
+ (canceled ? "Canceled" : ""), bin->slots_used, bin->free_space);
+
+ /*
+ * FIXME: dump vios in bin->incoming? The vios should have been dumped from the vio pool.
+ * Maybe just dump their addresses so it's clear they're here?
+ */
+}
+
+/**
+ * vdo_dump_packer() - Dump the packer.
+ * @packer: The packer.
+ *
+ * Context: dumps in a thread-unsafe fashion.
+ */
+void vdo_dump_packer(const struct packer *packer)
+{
+ struct packer_bin *bin;
+
+ vdo_log_info("packer");
+ vdo_log_info(" flushGeneration=%llu state %s packer_bin_count=%llu",
+ (unsigned long long) packer->flush_generation,
+ vdo_get_admin_state_code(&packer->state)->name,
+ (unsigned long long) packer->size);
+
+ list_for_each_entry(bin, &packer->bins, list)
+ dump_packer_bin(bin, false);
+
+ dump_packer_bin(packer->canceled_bin, true);
+}
diff --git a/drivers/md/dm-vdo/packer.h b/drivers/md/dm-vdo/packer.h
new file mode 100644
index 000000000000..0f3be44710b5
--- /dev/null
+++ b/drivers/md/dm-vdo/packer.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_PACKER_H
+#define VDO_PACKER_H
+
+#include <linux/list.h>
+
+#include "admin-state.h"
+#include "constants.h"
+#include "encodings.h"
+#include "statistics.h"
+#include "types.h"
+#include "wait-queue.h"
+
+enum {
+ DEFAULT_PACKER_BINS = 16,
+};
+
+/* The header of a compressed block. */
+struct compressed_block_header {
+ /* Unsigned 32-bit major and minor versions, little-endian */
+ struct packed_version_number version;
+
+ /* List of unsigned 16-bit compressed block sizes, little-endian */
+ __le16 sizes[VDO_MAX_COMPRESSION_SLOTS];
+} __packed;
+
+enum {
+ VDO_COMPRESSED_BLOCK_DATA_SIZE = VDO_BLOCK_SIZE - sizeof(struct compressed_block_header),
+
+ /*
+ * A compressed block is only written if we can pack at least two fragments into it, so a
+ * fragment which fills the entire data portion of a compressed block is too big.
+ */
+ VDO_MAX_COMPRESSED_FRAGMENT_SIZE = VDO_COMPRESSED_BLOCK_DATA_SIZE - 1,
+};
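+
+/*
+ * For illustration, assuming the usual 4 KB VDO_BLOCK_SIZE and 14 compression
+ * slots: the header is 8 + (14 * 2) = 36 bytes, so
+ * VDO_COMPRESSED_BLOCK_DATA_SIZE is 4096 - 36 = 4060 bytes.
+ */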
+
+/* The compressed block overlay. */
+struct compressed_block {
+ struct compressed_block_header header;
+ char data[VDO_COMPRESSED_BLOCK_DATA_SIZE];
+} __packed;
+
+/*
+ * Each packer_bin holds an incomplete batch of data_vios that only partially fill a compressed
+ * block. The bins are kept in a ring sorted by the amount of unused space so the first bin with
+ * enough space to hold a newly-compressed data_vio can easily be found. When the bin fills up or
+ * is flushed, the first uncanceled data_vio in the bin is selected to be the agent for that bin.
+ * Upon entering the packer, each data_vio already has its compressed data in the first slot of the
+ * data_vio's compressed_block (overlaid on the data_vio's scratch_block). So the agent's fragment
+ * is already in place. The fragments for the other uncanceled data_vios in the bin are packed into
+ * the agent's compressed block. The agent then writes out the compressed block. If the write is
+ * successful, the agent shares its pbn lock with each of the other data_vios in its compressed
+ * block and sends each on its way. Finally, the agent itself continues on the write path as before.
+ *
+ * There is one special bin which is used to hold data_vios which have been canceled and removed
+ * from their bin by the packer. These data_vios need to wait for the canceller to rendezvous with
+ * them and so they sit in this special bin.
+ */
+struct packer_bin {
+ /* List links for packer.packer_bins */
+ struct list_head list;
+ /* The number of items in the bin */
+ slot_number_t slots_used;
+ /* The number of compressed block bytes remaining in the current batch */
+ size_t free_space;
+ /* The current partial batch of data_vios, waiting for more */
+ struct data_vio *incoming[];
+};
+
+struct packer {
+ /* The ID of the packer's callback thread */
+ thread_id_t thread_id;
+ /* The number of bins */
+ block_count_t size;
+ /* A list of all packer_bins, kept sorted by free_space */
+ struct list_head bins;
+ /*
+ * A bin to hold data_vios which were canceled out of the packer and are waiting to
+ * rendezvous with the canceling data_vio.
+ */
+ struct packer_bin *canceled_bin;
+
+ /* The current flush generation */
+ sequence_number_t flush_generation;
+
+ /* The administrative state of the packer */
+ struct admin_state state;
+
+ /* Statistics are only updated on the packer thread, but are accessed from other threads */
+ struct packer_statistics statistics;
+};
+
+int vdo_get_compressed_block_fragment(enum block_mapping_state mapping_state,
+ struct compressed_block *block,
+ u16 *fragment_offset, u16 *fragment_size);
+
+int __must_check vdo_make_packer(struct vdo *vdo, block_count_t bin_count,
+ struct packer **packer_ptr);
+
+void vdo_free_packer(struct packer *packer);
+
+struct packer_statistics __must_check vdo_get_packer_statistics(const struct packer *packer);
+
+void vdo_attempt_packing(struct data_vio *data_vio);
+
+void vdo_flush_packer(struct packer *packer);
+
+void vdo_remove_lock_holder_from_packer(struct vdo_completion *completion);
+
+void vdo_increment_packer_flush_generation(struct packer *packer);
+
+void vdo_drain_packer(struct packer *packer, struct vdo_completion *completion);
+
+void vdo_resume_packer(struct packer *packer, struct vdo_completion *parent);
+
+void vdo_dump_packer(const struct packer *packer);
+
+#endif /* VDO_PACKER_H */
diff --git a/drivers/md/dm-vdo/permassert.c b/drivers/md/dm-vdo/permassert.c
new file mode 100644
index 000000000000..bf9eccea1cb3
--- /dev/null
+++ b/drivers/md/dm-vdo/permassert.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "permassert.h"
+
+#include "errors.h"
+#include "logger.h"
+
+int vdo_assertion_failed(const char *expression_string, const char *file_name,
+ int line_number, const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+
+ vdo_log_embedded_message(VDO_LOG_ERR, VDO_LOGGING_MODULE_NAME, "assertion \"",
+ format, args, "\" (%s) failed at %s:%d",
+ expression_string, file_name, line_number);
+ vdo_log_backtrace(VDO_LOG_ERR);
+
+ va_end(args);
+
+ return UDS_ASSERTION_FAILED;
+}
diff --git a/drivers/md/dm-vdo/permassert.h b/drivers/md/dm-vdo/permassert.h
new file mode 100644
index 000000000000..c34f2ba650e1
--- /dev/null
+++ b/drivers/md/dm-vdo/permassert.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef PERMASSERT_H
+#define PERMASSERT_H
+
+#include <linux/compiler.h>
+
+#include "errors.h"
+
+/* Utilities for asserting that certain conditions are met */
+
+#define STRINGIFY(X) #X
+
+/*
+ * A hack to apply the "warn if unused" attribute to an integral expression.
+ *
+ * Since GCC doesn't propagate the warn_unused_result attribute to conditional expressions
+ * incorporating calls to functions with that attribute, this function can be used to wrap such an
+ * expression. With optimization enabled, this function contributes no additional instructions, but
+ * the warn_unused_result attribute still applies to the code calling it.
+ */
+static inline int __must_check vdo_must_use(int value)
+{
+ return value;
+}
+
+/* Assert that an expression is true and return an error if it is not. */
+#define VDO_ASSERT(expr, ...) vdo_must_use(__VDO_ASSERT(expr, __VA_ARGS__))
+
+/* Log a message if the expression is not true. */
+#define VDO_ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)
+
+#define __VDO_ASSERT(expr, ...) \
+ (likely(expr) ? VDO_SUCCESS \
+ : vdo_assertion_failed(STRINGIFY(expr), __FILE__, __LINE__, __VA_ARGS__))
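+
+/*
+ * Illustrative usage, following the pattern used elsewhere in this driver:
+ *
+ *	result = VDO_ASSERT((count > 0), "count %u must be positive", count);
+ *	if (result != VDO_SUCCESS)
+ *		return result;
+ */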
+
+/* Log an assertion failure message. */
+int vdo_assertion_failed(const char *expression_string, const char *file_name,
+ int line_number, const char *format, ...)
+ __printf(4, 5);
+
+#endif /* PERMASSERT_H */
diff --git a/drivers/md/dm-vdo/physical-zone.c b/drivers/md/dm-vdo/physical-zone.c
new file mode 100644
index 000000000000..2fee3a7c1191
--- /dev/null
+++ b/drivers/md/dm-vdo/physical-zone.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "physical-zone.h"
+
+#include <linux/list.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "block-map.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "dedupe.h"
+#include "encodings.h"
+#include "flush.h"
+#include "int-map.h"
+#include "slab-depot.h"
+#include "status-codes.h"
+#include "vdo.h"
+
+/* Each user data_vio needs a PBN read lock and write lock. */
+#define LOCK_POOL_CAPACITY (2 * MAXIMUM_VDO_USER_VIOS)
+
+struct pbn_lock_implementation {
+ enum pbn_lock_type type;
+ const char *name;
+ const char *release_reason;
+};
+
+/* This array must have an entry for every pbn_lock_type value. */
+static const struct pbn_lock_implementation LOCK_IMPLEMENTATIONS[] = {
+ [VIO_READ_LOCK] = {
+ .type = VIO_READ_LOCK,
+ .name = "read",
+ .release_reason = "candidate duplicate",
+ },
+ [VIO_WRITE_LOCK] = {
+ .type = VIO_WRITE_LOCK,
+ .name = "write",
+ .release_reason = "newly allocated",
+ },
+ [VIO_BLOCK_MAP_WRITE_LOCK] = {
+ .type = VIO_BLOCK_MAP_WRITE_LOCK,
+ .name = "block map write",
+ .release_reason = "block map write",
+ },
+};
+
+static inline bool has_lock_type(const struct pbn_lock *lock, enum pbn_lock_type type)
+{
+ return (lock->implementation == &LOCK_IMPLEMENTATIONS[type]);
+}
+
+/**
+ * vdo_is_pbn_read_lock() - Check whether a pbn_lock is a read lock.
+ * @lock: The lock to check.
+ *
+ * Return: true if the lock is a read lock.
+ */
+bool vdo_is_pbn_read_lock(const struct pbn_lock *lock)
+{
+ return has_lock_type(lock, VIO_READ_LOCK);
+}
+
+static inline void set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type type)
+{
+ lock->implementation = &LOCK_IMPLEMENTATIONS[type];
+}
+
+/**
+ * vdo_downgrade_pbn_write_lock() - Downgrade a PBN write lock to a PBN read lock.
+ * @lock: The PBN write lock to downgrade.
+ *
+ * The lock holder count is cleared and the caller is responsible for setting the new count.
+ */
+void vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write)
+{
+ VDO_ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock),
+ "PBN lock must not already have been downgraded");
+ VDO_ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK),
+ "must not downgrade block map write locks");
+ VDO_ASSERT_LOG_ONLY(lock->holder_count == 1,
+ "PBN write lock should have one holder but has %u",
+ lock->holder_count);
+ /*
+	 * data_vio write locks are downgraded in place; the writer retains its hold on the lock.
+	 * If this was a compressed write, the holder has not yet journaled its own reference
+	 * increment; otherwise, it has.
+ */
+ lock->increment_limit =
+ (compressed_write ? MAXIMUM_REFERENCE_COUNT : MAXIMUM_REFERENCE_COUNT - 1);
+ set_pbn_lock_type(lock, VIO_READ_LOCK);
+}
+
+/**
+ * vdo_claim_pbn_lock_increment() - Try to claim one of the available reference count increments on
+ * a read lock.
+ * @lock: The PBN read lock from which to claim an increment.
+ *
+ * Claims may be attempted from any thread. A claim is only valid until the PBN lock is released.
+ *
+ * Return: true if the claim succeeded, guaranteeing one increment can be made without overflowing
+ * the PBN's reference count.
+ */
+bool vdo_claim_pbn_lock_increment(struct pbn_lock *lock)
+{
+ /*
+ * Claim the next free reference atomically since hash locks from multiple hash zone
+ * threads might be concurrently deduplicating against a single PBN lock on compressed
+	 * threads might be concurrently deduplicating against a single PBN lock on a compressed
+ * in a sane time-frame, we won't overflow a 32-bit claim counter, allowing a simple add
+ * instead of a compare-and-swap.
+ */
+ u32 claim_number = (u32) atomic_add_return(1, &lock->increments_claimed);
+
+ return (claim_number <= lock->increment_limit);
+}
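+
+/*
+ * Sketch of the intended use (illustrative only; abandon_deduplication() is a
+ * hypothetical helper, not part of this patch):
+ *
+ *	if (!vdo_claim_pbn_lock_increment(lock))
+ *		abandon_deduplication(data_vio);
+ */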
+
+/**
+ * vdo_assign_pbn_lock_provisional_reference() - Inform a PBN lock that it is responsible for a
+ * provisional reference.
+ * @lock: The PBN lock.
+ */
+void vdo_assign_pbn_lock_provisional_reference(struct pbn_lock *lock)
+{
+ VDO_ASSERT_LOG_ONLY(!lock->has_provisional_reference,
+ "lock does not have a provisional reference");
+ lock->has_provisional_reference = true;
+}
+
+/**
+ * vdo_unassign_pbn_lock_provisional_reference() - Inform a PBN lock that it is no longer
+ * responsible for a provisional reference.
+ * @lock: The PBN lock.
+ */
+void vdo_unassign_pbn_lock_provisional_reference(struct pbn_lock *lock)
+{
+ lock->has_provisional_reference = false;
+}
+
+/**
+ * release_pbn_lock_provisional_reference() - If the lock is responsible for a provisional
+ * reference, release that reference.
+ * @lock: The lock.
+ * @locked_pbn: The PBN covered by the lock.
+ * @allocator: The block allocator from which to release the reference.
+ *
+ * This method is called when the lock is released.
+ */
+static void release_pbn_lock_provisional_reference(struct pbn_lock *lock,
+ physical_block_number_t locked_pbn,
+ struct block_allocator *allocator)
+{
+ int result;
+
+ if (!vdo_pbn_lock_has_provisional_reference(lock))
+ return;
+
+ result = vdo_release_block_reference(allocator, locked_pbn);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error_strerror(result,
+ "Failed to release reference to %s physical block %llu",
+ lock->implementation->release_reason,
+ (unsigned long long) locked_pbn);
+ }
+
+ vdo_unassign_pbn_lock_provisional_reference(lock);
+}
+
+/**
+ * union idle_pbn_lock - PBN lock list entries.
+ *
+ * Unused (idle) PBN locks are kept in a list. Just like in a malloc implementation, the lock
+ * structure is unused memory, so we can save a bit of space (and not pollute the lock structure
+ * proper) by using a union to overlay the lock structure with the free list.
+ */
+typedef union {
+ /** @entry: Only used while locks are in the pool. */
+ struct list_head entry;
+ /** @lock: Only used while locks are not in the pool. */
+ struct pbn_lock lock;
+} idle_pbn_lock;
+
+/**
+ * struct pbn_lock_pool - list of PBN locks.
+ *
+ * The lock pool is little more than the memory allocated for the locks.
+ */
+struct pbn_lock_pool {
+ /** @capacity: The number of locks allocated for the pool. */
+ size_t capacity;
+ /** @borrowed: The number of locks currently borrowed from the pool. */
+ size_t borrowed;
+ /** @idle_list: A list containing all idle PBN lock instances. */
+ struct list_head idle_list;
+ /** @locks: The memory for all the locks allocated by this pool. */
+ idle_pbn_lock locks[];
+};
+
+/**
+ * return_pbn_lock_to_pool() - Return a pbn lock to its pool.
+ * @pool: The pool from which the lock was borrowed.
+ * @lock: The last reference to the lock being returned.
+ *
+ * It must be the last live reference, as if the memory were being freed (the lock memory will
+ * be re-initialized or zeroed).
+ */
+static void return_pbn_lock_to_pool(struct pbn_lock_pool *pool, struct pbn_lock *lock)
+{
+ idle_pbn_lock *idle;
+
+ /* A bit expensive, but will promptly catch some use-after-free errors. */
+ memset(lock, 0, sizeof(*lock));
+
+ idle = container_of(lock, idle_pbn_lock, lock);
+ INIT_LIST_HEAD(&idle->entry);
+ list_add_tail(&idle->entry, &pool->idle_list);
+
+ VDO_ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed");
+ pool->borrowed -= 1;
+}
+
+/**
+ * make_pbn_lock_pool() - Create a new PBN lock pool and all the lock instances it can loan out.
+ *
+ * @capacity: The number of PBN locks to allocate for the pool.
+ * @pool_ptr: A pointer to receive the new pool.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int make_pbn_lock_pool(size_t capacity, struct pbn_lock_pool **pool_ptr)
+{
+ size_t i;
+ struct pbn_lock_pool *pool;
+ int result;
+
+ result = vdo_allocate_extended(struct pbn_lock_pool, capacity, idle_pbn_lock,
+ __func__, &pool);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ pool->capacity = capacity;
+ pool->borrowed = capacity;
+ INIT_LIST_HEAD(&pool->idle_list);
+
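+	/*
+	 * Every lock starts out "borrowed"; returning each one seeds idle_list and
+	 * brings the borrowed count back to zero.
+	 */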
+ for (i = 0; i < capacity; i++)
+ return_pbn_lock_to_pool(pool, &pool->locks[i].lock);
+
+ *pool_ptr = pool;
+ return VDO_SUCCESS;
+}
+
+/**
+ * free_pbn_lock_pool() - Free a PBN lock pool.
+ * @pool: The lock pool to free.
+ *
+ * This also frees all the PBN locks it allocated, so the caller must ensure that all locks have
+ * been returned to the pool.
+ */
+static void free_pbn_lock_pool(struct pbn_lock_pool *pool)
+{
+ if (pool == NULL)
+ return;
+
+ VDO_ASSERT_LOG_ONLY(pool->borrowed == 0,
+ "All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan",
+ pool->borrowed);
+ vdo_free(pool);
+}
+
+/**
+ * borrow_pbn_lock_from_pool() - Borrow a PBN lock from the pool and initialize it with the
+ * provided type.
+ * @pool: The pool from which to borrow.
+ * @type: The type with which to initialize the lock.
+ * @lock_ptr: A pointer to receive the borrowed lock.
+ *
+ * Pools do not grow on demand or allocate memory, so this will fail if the pool is empty. Borrowed
+ * locks are still associated with this pool and must be returned to only this pool.
+ *
+ * Return: VDO_SUCCESS, or VDO_LOCK_ERROR if the pool is empty.
+ */
+static int __must_check borrow_pbn_lock_from_pool(struct pbn_lock_pool *pool,
+ enum pbn_lock_type type,
+ struct pbn_lock **lock_ptr)
+{
+ int result;
+ struct list_head *idle_entry;
+ idle_pbn_lock *idle;
+
+ if (pool->borrowed >= pool->capacity)
+ return vdo_log_error_strerror(VDO_LOCK_ERROR,
+ "no free PBN locks left to borrow");
+ pool->borrowed += 1;
+
+ result = VDO_ASSERT(!list_empty(&pool->idle_list),
+ "idle list should not be empty if pool not at capacity");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ idle_entry = pool->idle_list.prev;
+ list_del(idle_entry);
+ memset(idle_entry, 0, sizeof(*idle_entry));
+
+ idle = list_entry(idle_entry, idle_pbn_lock, entry);
+ idle->lock.holder_count = 0;
+ set_pbn_lock_type(&idle->lock, type);
+
+ *lock_ptr = &idle->lock;
+ return VDO_SUCCESS;
+}
+
+/**
+ * initialize_zone() - Initialize a physical zone.
+ * @vdo: The vdo to which the zone will belong.
+ * @zones: The physical_zones to which the zone being initialized belongs
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int initialize_zone(struct vdo *vdo, struct physical_zones *zones)
+{
+ int result;
+ zone_count_t zone_number = zones->zone_count;
+ struct physical_zone *zone = &zones->zones[zone_number];
+
+ result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->pbn_operations);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = make_pbn_lock_pool(LOCK_POOL_CAPACITY, &zone->lock_pool);
+ if (result != VDO_SUCCESS) {
+ vdo_int_map_free(zone->pbn_operations);
+ return result;
+ }
+
+ zone->zone_number = zone_number;
+ zone->thread_id = vdo->thread_config.physical_threads[zone_number];
+ zone->allocator = &vdo->depot->allocators[zone_number];
+ zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count];
+ result = vdo_make_default_thread(vdo, zone->thread_id);
+ if (result != VDO_SUCCESS) {
+ free_pbn_lock_pool(vdo_forget(zone->lock_pool));
+ vdo_int_map_free(zone->pbn_operations);
+ return result;
+ }
+ return result;
+}
+
+/**
+ * vdo_make_physical_zones() - Make the physical zones for a vdo.
+ * @vdo: The vdo being constructed
+ * @zones_ptr: A pointer to hold the zones
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_make_physical_zones(struct vdo *vdo, struct physical_zones **zones_ptr)
+{
+ struct physical_zones *zones;
+ int result;
+ zone_count_t zone_count = vdo->thread_config.physical_zone_count;
+
+ if (zone_count == 0)
+ return VDO_SUCCESS;
+
+ result = vdo_allocate_extended(struct physical_zones, zone_count,
+ struct physical_zone, __func__, &zones);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (zones->zone_count = 0; zones->zone_count < zone_count; zones->zone_count++) {
+ result = initialize_zone(vdo, zones);
+ if (result != VDO_SUCCESS) {
+ vdo_free_physical_zones(zones);
+ return result;
+ }
+ }
+
+ *zones_ptr = zones;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_free_physical_zones() - Destroy the physical zones.
+ * @zones: The zones to free.
+ */
+void vdo_free_physical_zones(struct physical_zones *zones)
+{
+ zone_count_t index;
+
+ if (zones == NULL)
+ return;
+
+ for (index = 0; index < zones->zone_count; index++) {
+ struct physical_zone *zone = &zones->zones[index];
+
+ free_pbn_lock_pool(vdo_forget(zone->lock_pool));
+ vdo_int_map_free(vdo_forget(zone->pbn_operations));
+ }
+
+ vdo_free(zones);
+}
+
+/**
+ * vdo_get_physical_zone_pbn_lock() - Get the lock on a PBN if one exists.
+ * @zone: The physical zone responsible for the PBN.
+ * @pbn: The physical block number whose lock is desired.
+ *
+ * Return: The lock or NULL if the PBN is not locked.
+ */
+struct pbn_lock *vdo_get_physical_zone_pbn_lock(struct physical_zone *zone,
+ physical_block_number_t pbn)
+{
+ return ((zone == NULL) ? NULL : vdo_int_map_get(zone->pbn_operations, pbn));
+}
+
+/**
+ * vdo_attempt_physical_zone_pbn_lock() - Attempt to lock a physical block in the zone responsible
+ * for it.
+ * @zone: The physical zone responsible for the PBN.
+ * @pbn: The physical block number to lock.
+ * @type: The type with which to initialize a new lock.
+ * @lock_ptr: A pointer to receive the lock, existing or new.
+ *
+ * If the PBN is already locked, the existing lock will be returned. Otherwise, a new lock instance
+ * will be borrowed from the pool, initialized, and returned. The lock owner will be NULL for a new
+ * lock acquired by the caller, who is responsible for setting that field promptly. The lock owner
+ * will be non-NULL when there is already an existing lock on the PBN.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone,
+ physical_block_number_t pbn,
+ enum pbn_lock_type type,
+ struct pbn_lock **lock_ptr)
+{
+ /*
+ * Borrow and prepare a lock from the pool so we don't have to do two int_map accesses in
+ * the common case of no lock contention.
+ */
+ struct pbn_lock *lock, *new_lock = NULL;
+ int result;
+
+ result = borrow_pbn_lock_from_pool(zone->lock_pool, type, &new_lock);
+ if (result != VDO_SUCCESS) {
+ VDO_ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock");
+ return result;
+ }
+
+ result = vdo_int_map_put(zone->pbn_operations, pbn, new_lock, false,
+ (void **) &lock);
+ if (result != VDO_SUCCESS) {
+ return_pbn_lock_to_pool(zone->lock_pool, new_lock);
+ return result;
+ }
+
+ if (lock != NULL) {
+ /* The lock is already held, so we don't need the borrowed one. */
+ return_pbn_lock_to_pool(zone->lock_pool, vdo_forget(new_lock));
+ result = VDO_ASSERT(lock->holder_count > 0, "physical block %llu lock held",
+ (unsigned long long) pbn);
+ if (result != VDO_SUCCESS)
+ return result;
+ *lock_ptr = lock;
+ } else {
+ *lock_ptr = new_lock;
+ }
+ return VDO_SUCCESS;
+}
+
+/**
+ * allocate_and_lock_block() - Attempt to allocate a block from this zone.
+ * @allocation: The struct allocation of the data_vio attempting to allocate.
+ *
+ * If a block is allocated, the recipient will also hold a lock on it.
+ *
+ * Return: VDO_SUCCESS if a block was allocated, or an error code.
+ */
+static int allocate_and_lock_block(struct allocation *allocation)
+{
+ int result;
+ struct pbn_lock *lock;
+
+ VDO_ASSERT_LOG_ONLY(allocation->lock == NULL,
+ "must not allocate a block while already holding a lock on one");
+
+ result = vdo_allocate_block(allocation->zone->allocator, &allocation->pbn);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_attempt_physical_zone_pbn_lock(allocation->zone, allocation->pbn,
+ allocation->write_lock_type, &lock);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (lock->holder_count > 0) {
+ /* This block is already locked, which should be impossible. */
+ return vdo_log_error_strerror(VDO_LOCK_ERROR,
+ "Newly allocated block %llu was spuriously locked (holder_count=%u)",
+ (unsigned long long) allocation->pbn,
+ lock->holder_count);
+ }
+
+ /* We've successfully acquired a new lock, so mark it as ours. */
+ lock->holder_count += 1;
+ allocation->lock = lock;
+ vdo_assign_pbn_lock_provisional_reference(lock);
+ return VDO_SUCCESS;
+}
+
+/**
+ * retry_allocation() - Retry allocating a block now that we're done waiting for scrubbing.
+ * @waiter: The allocating_vio that was waiting to allocate.
+ * @context: The context (unused).
+ */
+static void retry_allocation(struct vdo_waiter *waiter, void *context __always_unused)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+
+ /* Now that some slab has scrubbed, restart the allocation process. */
+ data_vio->allocation.wait_for_clean_slab = false;
+ data_vio->allocation.first_allocation_zone = data_vio->allocation.zone->zone_number;
+ continue_data_vio(data_vio);
+}
+
+/**
+ * continue_allocating() - Continue searching for an allocation by enqueuing to wait for scrubbing
+ * or switching to the next zone.
+ * @data_vio: The data_vio attempting to get an allocation.
+ *
+ * This method should only be called from the error handler set in data_vio_allocate_data_block.
+ *
+ * Return: true if the allocation process has continued in another zone.
+ */
+static bool continue_allocating(struct data_vio *data_vio)
+{
+ struct allocation *allocation = &data_vio->allocation;
+ struct physical_zone *zone = allocation->zone;
+ struct vdo_completion *completion = &data_vio->vio.completion;
+ int result = VDO_SUCCESS;
+ bool was_waiting = allocation->wait_for_clean_slab;
+ bool tried_all = (allocation->first_allocation_zone == zone->next->zone_number);
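+	/* tried_all is true when the next zone to try is the one this allocation started in. */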
+
+ vdo_reset_completion(completion);
+
+ if (tried_all && !was_waiting) {
+ /*
+ * We've already looked in all the zones, and found nothing. So go through the
+ * zones again, and wait for each to scrub before trying to allocate.
+ */
+ allocation->wait_for_clean_slab = true;
+ allocation->first_allocation_zone = zone->zone_number;
+ }
+
+ if (allocation->wait_for_clean_slab) {
+ data_vio->waiter.callback = retry_allocation;
+ result = vdo_enqueue_clean_slab_waiter(zone->allocator,
+ &data_vio->waiter);
+ if (result == VDO_SUCCESS) {
+ /* We've enqueued to wait for a slab to be scrubbed. */
+ return true;
+ }
+
+ if ((result != VDO_NO_SPACE) || (was_waiting && tried_all)) {
+ vdo_set_completion_result(completion, result);
+ return false;
+ }
+ }
+
+ allocation->zone = zone->next;
+ completion->callback_thread_id = allocation->zone->thread_id;
+ vdo_launch_completion(completion);
+ return true;
+}
+
+/**
+ * vdo_allocate_block_in_zone() - Attempt to allocate a block in the current physical zone, and if
+ * that fails try the next if possible.
+ * @data_vio: The data_vio needing an allocation.
+ *
+ * Return: true if a block was allocated; if not, the data_vio will have been dispatched, so the
+ * caller must not touch it.
+ */
+bool vdo_allocate_block_in_zone(struct data_vio *data_vio)
+{
+ int result = allocate_and_lock_block(&data_vio->allocation);
+
+ if (result == VDO_SUCCESS)
+ return true;
+
+ if ((result != VDO_NO_SPACE) || !continue_allocating(data_vio))
+ continue_data_vio_with_error(data_vio, result);
+
+ return false;
+}
+
+/**
+ * vdo_release_physical_zone_pbn_lock() - Release a physical block lock if it is held and return it
+ * to the lock pool.
+ * @zone: The physical zone in which the lock was obtained.
+ * @locked_pbn: The physical block number to unlock.
+ * @lock: The lock being released.
+ *
+ * It must be the last live reference, as if the memory were being freed (the
+ * lock memory will be re-initialized or zeroed).
+ */
+void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone,
+ physical_block_number_t locked_pbn,
+ struct pbn_lock *lock)
+{
+ struct pbn_lock *holder;
+
+ if (lock == NULL)
+ return;
+
+ VDO_ASSERT_LOG_ONLY(lock->holder_count > 0,
+ "should not be releasing a lock that is not held");
+
+ lock->holder_count -= 1;
+ if (lock->holder_count > 0) {
+ /* The lock was shared and is still referenced, so don't release it yet. */
+ return;
+ }
+
+ holder = vdo_int_map_remove(zone->pbn_operations, locked_pbn);
+ VDO_ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu",
+ (unsigned long long) locked_pbn);
+
+ release_pbn_lock_provisional_reference(lock, locked_pbn, zone->allocator);
+ return_pbn_lock_to_pool(zone->lock_pool, lock);
+}
+
+/**
+ * vdo_dump_physical_zone() - Dump information about a physical zone to the log for debugging.
+ * @zone: The zone to dump.
+ */
+void vdo_dump_physical_zone(const struct physical_zone *zone)
+{
+ vdo_dump_block_allocator(zone->allocator);
+}
diff --git a/drivers/md/dm-vdo/physical-zone.h b/drivers/md/dm-vdo/physical-zone.h
new file mode 100644
index 000000000000..47d874fd5a0b
--- /dev/null
+++ b/drivers/md/dm-vdo/physical-zone.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_PHYSICAL_ZONE_H
+#define VDO_PHYSICAL_ZONE_H
+
+#include <linux/atomic.h>
+
+#include "types.h"
+
+/*
+ * The type of a PBN lock.
+ */
+enum pbn_lock_type {
+ VIO_READ_LOCK,
+ VIO_WRITE_LOCK,
+ VIO_BLOCK_MAP_WRITE_LOCK,
+};
+
+struct pbn_lock_implementation;
+
+/*
+ * A PBN lock.
+ */
+struct pbn_lock {
+ /* The implementation of the lock */
+ const struct pbn_lock_implementation *implementation;
+
+ /* The number of VIOs holding or sharing this lock */
+ data_vio_count_t holder_count;
+ /*
+ * The number of compressed block writers holding a share of this lock while they are
+ * acquiring a reference to the PBN.
+ */
+ u8 fragment_locks;
+
+ /* Whether the locked PBN has been provisionally referenced on behalf of the lock holder. */
+ bool has_provisional_reference;
+
+ /*
+ * For read locks, the number of references that were known to be available on the locked
+ * block at the time the lock was acquired.
+ */
+ u8 increment_limit;
+
+ /*
+ * For read locks, the number of data_vios that have tried to claim one of the available
+ * increments during the lifetime of the lock. Each claim will first increment this
+ * counter, so it can exceed the increment limit.
+ */
+ atomic_t increments_claimed;
+};
+
+struct physical_zone {
+ /* Which physical zone this is */
+ zone_count_t zone_number;
+ /* The thread ID for this zone */
+ thread_id_t thread_id;
+ /* In progress operations keyed by PBN */
+ struct int_map *pbn_operations;
+ /* Pool of unused pbn_lock instances */
+ struct pbn_lock_pool *lock_pool;
+ /* The block allocator for this zone */
+ struct block_allocator *allocator;
+ /* The next zone from which to attempt an allocation */
+ struct physical_zone *next;
+};
+
+struct physical_zones {
+ /* The number of zones */
+ zone_count_t zone_count;
+ /* The physical zones themselves */
+ struct physical_zone zones[];
+};
+
+bool __must_check vdo_is_pbn_read_lock(const struct pbn_lock *lock);
+void vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write);
+bool __must_check vdo_claim_pbn_lock_increment(struct pbn_lock *lock);
+
+/**
+ * vdo_pbn_lock_has_provisional_reference() - Check whether a PBN lock has a provisional reference.
+ * @lock: The PBN lock.
+ */
+static inline bool vdo_pbn_lock_has_provisional_reference(struct pbn_lock *lock)
+{
+ return ((lock != NULL) && lock->has_provisional_reference);
+}
+
+void vdo_assign_pbn_lock_provisional_reference(struct pbn_lock *lock);
+void vdo_unassign_pbn_lock_provisional_reference(struct pbn_lock *lock);
+
+int __must_check vdo_make_physical_zones(struct vdo *vdo,
+ struct physical_zones **zones_ptr);
+
+void vdo_free_physical_zones(struct physical_zones *zones);
+
+struct pbn_lock * __must_check vdo_get_physical_zone_pbn_lock(struct physical_zone *zone,
+ physical_block_number_t pbn);
+
+int __must_check vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone,
+ physical_block_number_t pbn,
+ enum pbn_lock_type type,
+ struct pbn_lock **lock_ptr);
+
+bool __must_check vdo_allocate_block_in_zone(struct data_vio *data_vio);
+
+void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone,
+ physical_block_number_t locked_pbn,
+ struct pbn_lock *lock);
+
+void vdo_dump_physical_zone(const struct physical_zone *zone);
+
+#endif /* VDO_PHYSICAL_ZONE_H */
diff --git a/drivers/md/dm-vdo/priority-table.c b/drivers/md/dm-vdo/priority-table.c
new file mode 100644
index 000000000000..42d3d8d0e4b5
--- /dev/null
+++ b/drivers/md/dm-vdo/priority-table.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "priority-table.h"
+
+#include <linux/log2.h>
+
+#include "errors.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "status-codes.h"
+
+/* We use a single 64-bit search vector, so the maximum priority is 63 */
+#define MAX_PRIORITY 63
+
+/*
+ * All the entries with the same priority are queued in a circular list in a bucket for that
+ * priority. The table is essentially an array of buckets.
+ */
+struct bucket {
+	/* The head of a queue of table entries, all having the same priority */
+ struct list_head queue;
+ /* The priority of all the entries in this bucket */
+ unsigned int priority;
+};
+
+/*
+ * A priority table is an array of buckets, indexed by priority. New entries are added to the end
+ * of the queue in the appropriate bucket. The dequeue operation finds the highest-priority
+ * non-empty bucket by searching a bit vector represented as a single 8-byte word, which is very
+ * fast with compiler and CPU support.
+ */
+struct priority_table {
+ /* The maximum priority of entries that may be stored in this table */
+ unsigned int max_priority;
+ /* A bit vector flagging all buckets that are currently non-empty */
+ u64 search_vector;
+ /* The array of all buckets, indexed by priority */
+ struct bucket buckets[];
+};
+
+/**
+ * vdo_make_priority_table() - Allocate and initialize a new priority_table.
+ * @max_priority: The maximum priority value for table entries.
+ * @table_ptr: A pointer to hold the new table.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+int vdo_make_priority_table(unsigned int max_priority, struct priority_table **table_ptr)
+{
+ struct priority_table *table;
+ int result;
+ unsigned int priority;
+
+ if (max_priority > MAX_PRIORITY)
+ return UDS_INVALID_ARGUMENT;
+
+ result = vdo_allocate_extended(struct priority_table, max_priority + 1,
+ struct bucket, __func__, &table);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (priority = 0; priority <= max_priority; priority++) {
+ struct bucket *bucket = &table->buckets[priority];
+
+ bucket->priority = priority;
+ INIT_LIST_HEAD(&bucket->queue);
+ }
+
+ table->max_priority = max_priority;
+ table->search_vector = 0;
+
+ *table_ptr = table;
+ return VDO_SUCCESS;
+}
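+
+/*
+ * A minimal usage sketch (illustrative; 'item' is a hypothetical structure
+ * with an embedded list_head named 'entry', already initialized):
+ *
+ *	struct priority_table *table;
+ *
+ *	if (vdo_make_priority_table(7, &table) == VDO_SUCCESS) {
+ *		vdo_priority_table_enqueue(table, 5, &item->entry);
+ *		entry = vdo_priority_table_dequeue(table);
+ *		vdo_free_priority_table(table);
+ *	}
+ */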
+
+/**
+ * vdo_free_priority_table() - Free a priority_table.
+ * @table: The table to free.
+ *
+ * The table does not own the entries stored in it and they are not freed by this call.
+ */
+void vdo_free_priority_table(struct priority_table *table)
+{
+ if (table == NULL)
+ return;
+
+ /*
+ * Unlink the buckets from any entries still in the table so the entries won't be left with
+ * dangling pointers to freed memory.
+ */
+ vdo_reset_priority_table(table);
+
+ vdo_free(table);
+}
+
+/**
+ * vdo_reset_priority_table() - Reset a priority table, leaving it in the same empty state as when
+ * newly constructed.
+ * @table: The table to reset.
+ *
+ * The table does not own the entries stored in it and they are not freed (or even unlinked from
+ * each other) by this call.
+ */
+void vdo_reset_priority_table(struct priority_table *table)
+{
+ unsigned int priority;
+
+ table->search_vector = 0;
+ for (priority = 0; priority <= table->max_priority; priority++)
+ list_del_init(&table->buckets[priority].queue);
+}
+
+/**
+ * vdo_priority_table_enqueue() - Add a new entry to the priority table, appending it to the queue
+ * for entries with the specified priority.
+ * @table: The table in which to store the entry.
+ * @priority: The priority of the entry.
+ * @entry: The list_head embedded in the entry to store in the table (the caller must have
+ * initialized it).
+ */
+void vdo_priority_table_enqueue(struct priority_table *table, unsigned int priority,
+ struct list_head *entry)
+{
+ VDO_ASSERT_LOG_ONLY((priority <= table->max_priority),
+ "entry priority must be valid for the table");
+
+ /* Append the entry to the queue in the specified bucket. */
+ list_move_tail(entry, &table->buckets[priority].queue);
+
+ /* Flag the bucket in the search vector since it must be non-empty. */
+ table->search_vector |= (1ULL << priority);
+}
+
+static inline void mark_bucket_empty(struct priority_table *table, struct bucket *bucket)
+{
+ table->search_vector &= ~(1ULL << bucket->priority);
+}
+
+/**
+ * vdo_priority_table_dequeue() - Find the highest-priority entry in the table, remove it from the
+ * table, and return it.
+ * @table: The priority table from which to remove an entry.
+ *
+ * If there are multiple entries with the same priority, the one that has been in the table with
+ * that priority the longest will be returned.
+ *
+ * Return: The dequeued entry, or NULL if the table is currently empty.
+ */
+struct list_head *vdo_priority_table_dequeue(struct priority_table *table)
+{
+ struct bucket *bucket;
+ struct list_head *entry;
+ int top_priority;
+
+ if (table->search_vector == 0) {
+ /* All buckets are empty. */
+ return NULL;
+ }
+
+ /*
+ * Find the highest priority non-empty bucket by finding the highest-order non-zero bit in
+ * the search vector.
+ */
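+	/*
+	 * For example, with entries queued only at priorities 3 and 7, the search
+	 * vector is 0x88 and ilog2(0x88) selects bucket 7.
+	 */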
+ top_priority = ilog2(table->search_vector);
+
+ /* Dequeue the first entry in the bucket. */
+ bucket = &table->buckets[top_priority];
+ entry = bucket->queue.next;
+ list_del_init(entry);
+
+ /* Clear the bit in the search vector if the bucket has been emptied. */
+ if (list_empty(&bucket->queue))
+ mark_bucket_empty(table, bucket);
+
+ return entry;
+}
+
+/**
+ * vdo_priority_table_remove() - Remove a specified entry from its priority table.
+ * @table: The table from which to remove the entry.
+ * @entry: The entry to remove from the table.
+ */
+void vdo_priority_table_remove(struct priority_table *table, struct list_head *entry)
+{
+ struct list_head *next_entry;
+
+ /*
+ * We can't guard against calls where the entry is on a list for a different table, but
+ * it's easy to deal with an entry not in any table or list.
+ */
+ if (list_empty(entry))
+ return;
+
+ /*
+ * Remove the entry from the bucket list, remembering a pointer to another entry in the
+ * ring.
+ */
+ next_entry = entry->next;
+ list_del_init(entry);
+
+ /*
+ * If the rest of the list is now empty, the next node must be the list head in the bucket
+ * and we can use it to update the search vector.
+ */
+ if (list_empty(next_entry))
+ mark_bucket_empty(table, list_entry(next_entry, struct bucket, queue));
+}
+
+/**
+ * vdo_is_priority_table_empty() - Return whether the priority table is empty.
+ * @table: The table to check.
+ *
+ * Return: true if the table is empty.
+ */
+bool vdo_is_priority_table_empty(struct priority_table *table)
+{
+ return (table->search_vector == 0);
+}
diff --git a/drivers/md/dm-vdo/priority-table.h b/drivers/md/dm-vdo/priority-table.h
new file mode 100644
index 000000000000..8b060462e3e4
--- /dev/null
+++ b/drivers/md/dm-vdo/priority-table.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_PRIORITY_TABLE_H
+#define VDO_PRIORITY_TABLE_H
+
+#include <linux/list.h>
+
+/*
+ * A priority_table is a simple implementation of a priority queue for entries with priorities that
+ * are small non-negative integer values. It implements the obvious priority queue operations of
+ * enqueuing an entry and dequeuing an entry with the maximum priority. It also supports removing
+ * an arbitrary entry. The priority of an entry already in the table can be changed by removing it
+ * and re-enqueuing it with a different priority. All operations have O(1) complexity.
+ *
+ * The links for the table entries must be embedded in the entries themselves. Lists are used to
+ * link entries in the table and no wrapper type is declared, so an existing list entry in an
+ * object can also be used to queue it in a priority_table, assuming the field is not used for
+ * anything else while so queued.
+ *
+ * The table is implemented as an array of queues (circular lists) indexed by priority, along with
+ * a hint for which queues are non-empty. Steven Skiena calls a very similar structure a "bounded
+ * height priority queue", but given the resemblance to a hash table, "priority table" seems both
+ * shorter and more apt, if somewhat novel.
+ */
+
+struct priority_table;
+
+int __must_check vdo_make_priority_table(unsigned int max_priority,
+ struct priority_table **table_ptr);
+
+void vdo_free_priority_table(struct priority_table *table);
+
+void vdo_priority_table_enqueue(struct priority_table *table, unsigned int priority,
+ struct list_head *entry);
+
+void vdo_reset_priority_table(struct priority_table *table);
+
+struct list_head * __must_check vdo_priority_table_dequeue(struct priority_table *table);
+
+void vdo_priority_table_remove(struct priority_table *table, struct list_head *entry);
+
+bool __must_check vdo_is_priority_table_empty(struct priority_table *table);
+
+#endif /* VDO_PRIORITY_TABLE_H */
diff --git a/drivers/md/dm-vdo/recovery-journal.c b/drivers/md/dm-vdo/recovery-journal.c
new file mode 100644
index 000000000000..ee6321a3e523
--- /dev/null
+++ b/drivers/md/dm-vdo/recovery-journal.c
@@ -0,0 +1,1762 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "recovery-journal.h"
+
+#include <linux/atomic.h>
+#include <linux/bio.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "block-map.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "encodings.h"
+#include "io-submitter.h"
+#include "slab-depot.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+static const u64 RECOVERY_COUNT_MASK = 0xff;
+
+/*
+ * The number of reserved blocks must be large enough to prevent a new recovery journal
+ * block write from overwriting a block which appears to still be a valid head block of the
+ * journal. Currently, that means reserving enough space for all 2048 data_vios.
+ */
+#define RECOVERY_JOURNAL_RESERVED_BLOCKS \
+ ((MAXIMUM_VDO_USER_VIOS / RECOVERY_JOURNAL_ENTRIES_PER_BLOCK) + 2)
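+
+/*
+ * The division rounds down; the "+ 2" covers the remainder and adds one
+ * block of margin.
+ */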
+
+/**
+ * DOC: Lock Counters.
+ *
+ * A lock_counter is intended to keep all of the locks for the blocks in the recovery journal. The
+ * per-zone counters are all kept in a single array which is arranged by zone (i.e. zone 0's lock 0
+ * is at index 0, zone 0's lock 1 is at index 1, and zone 1's lock 0 is at index 'locks'). This
+ * arrangement is intended to minimize cache-line contention for counters from different zones.
+ *
+ * The locks are implemented as a single object instead of as a lock counter per lock, both to
+ * reduce cache-line contention and to eliminate the need for a completion per lock.
+ *
+ * Lock sets are laid out with the set for recovery journal first, followed by the logical zones,
+ * and then the physical zones.
+ */
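+
+/*
+ * For example, with 8 locks per zone, logical zone 2's counter for lock 5 is
+ * logical_counters[(2 * 8) + 5]; see get_counter() below.
+ */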
+
+enum lock_counter_state {
+ LOCK_COUNTER_STATE_NOT_NOTIFYING,
+ LOCK_COUNTER_STATE_NOTIFYING,
+ LOCK_COUNTER_STATE_SUSPENDED,
+};
+
+/**
+ * get_zone_count_ptr() - Get a pointer to the zone count for a given lock on a given zone.
+ * @journal: The recovery journal.
+ * @lock_number: The lock to get.
+ * @zone_type: The zone type whose count is desired.
+ *
+ * Return: A pointer to the zone count for the given lock and zone.
+ */
+static inline atomic_t *get_zone_count_ptr(struct recovery_journal *journal,
+ block_count_t lock_number,
+ enum vdo_zone_type zone_type)
+{
+ return ((zone_type == VDO_ZONE_TYPE_LOGICAL)
+ ? &journal->lock_counter.logical_zone_counts[lock_number]
+ : &journal->lock_counter.physical_zone_counts[lock_number]);
+}
+
+/**
+ * get_counter() - Get the zone counter for a given lock on a given zone.
+ * @journal: The recovery journal.
+ * @lock_number: The lock to get.
+ * @zone_type: The zone type whose count is desired.
+ * @zone_id: The zone index whose count is desired.
+ *
+ * Return: The counter for the given lock and zone.
+ */
+static inline u16 *get_counter(struct recovery_journal *journal,
+ block_count_t lock_number, enum vdo_zone_type zone_type,
+ zone_count_t zone_id)
+{
+ struct lock_counter *counter = &journal->lock_counter;
+ block_count_t zone_counter = (counter->locks * zone_id) + lock_number;
+
+ if (zone_type == VDO_ZONE_TYPE_JOURNAL)
+ return &counter->journal_counters[zone_counter];
+
+ if (zone_type == VDO_ZONE_TYPE_LOGICAL)
+ return &counter->logical_counters[zone_counter];
+
+ return &counter->physical_counters[zone_counter];
+}
+
+static atomic_t *get_decrement_counter(struct recovery_journal *journal,
+ block_count_t lock_number)
+{
+ return &journal->lock_counter.journal_decrement_counts[lock_number];
+}
+
+/**
+ * is_journal_zone_locked() - Check whether the journal zone is locked for a given lock.
+ * @journal: The recovery journal.
+ * @lock_number: The lock to check.
+ *
+ * Return: true if the journal zone is locked.
+ */
+static bool is_journal_zone_locked(struct recovery_journal *journal,
+ block_count_t lock_number)
+{
+ u16 journal_value = *get_counter(journal, lock_number, VDO_ZONE_TYPE_JOURNAL, 0);
+ u32 decrements = atomic_read(get_decrement_counter(journal, lock_number));
+
+ /* Pairs with barrier in vdo_release_journal_entry_lock() */
+ smp_rmb();
+ VDO_ASSERT_LOG_ONLY((decrements <= journal_value),
+ "journal zone lock counter must not underflow");
+ return (journal_value != decrements);
+}
+
+/**
+ * vdo_release_recovery_journal_block_reference() - Release a reference to a recovery journal
+ * block.
+ * @journal: The recovery journal.
+ * @sequence_number: The journal sequence number of the referenced block.
+ * @zone_type: The type of the zone making the adjustment.
+ * @zone_id: The ID of the zone making the adjustment.
+ *
+ * If this is the last reference for a given zone type, an attempt will be made to reap the
+ * journal.
+ */
+void vdo_release_recovery_journal_block_reference(struct recovery_journal *journal,
+ sequence_number_t sequence_number,
+ enum vdo_zone_type zone_type,
+ zone_count_t zone_id)
+{
+ u16 *current_value;
+ block_count_t lock_number;
+ int prior_state;
+
+ if (sequence_number == 0)
+ return;
+
+ lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number);
+ current_value = get_counter(journal, lock_number, zone_type, zone_id);
+
+ VDO_ASSERT_LOG_ONLY((*current_value >= 1),
+ "decrement of lock counter must not underflow");
+ *current_value -= 1;
+
+ if (zone_type == VDO_ZONE_TYPE_JOURNAL) {
+ if (is_journal_zone_locked(journal, lock_number))
+ return;
+ } else {
+ atomic_t *zone_count;
+
+ if (*current_value != 0)
+ return;
+
+ zone_count = get_zone_count_ptr(journal, lock_number, zone_type);
+
+ if (atomic_add_return(-1, zone_count) > 0)
+ return;
+ }
+
+ /*
+	 * Extra barriers because this was originally developed using a CAS operation that implicitly
+ * had them.
+ */
+ smp_mb__before_atomic();
+ prior_state = atomic_cmpxchg(&journal->lock_counter.state,
+ LOCK_COUNTER_STATE_NOT_NOTIFYING,
+ LOCK_COUNTER_STATE_NOTIFYING);
+ /* same as before_atomic */
+ smp_mb__after_atomic();
+
+ if (prior_state != LOCK_COUNTER_STATE_NOT_NOTIFYING)
+ return;
+
+ vdo_launch_completion(&journal->lock_counter.completion);
+}
+
+static inline struct recovery_journal_block * __must_check get_journal_block(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct recovery_journal_block, list_node);
+}
+
+/**
+ * pop_free_list() - Get a block from the end of the free list.
+ * @journal: The journal.
+ *
+ * Return: The block or NULL if the list is empty.
+ */
+static struct recovery_journal_block * __must_check pop_free_list(struct recovery_journal *journal)
+{
+ struct recovery_journal_block *block;
+
+ if (list_empty(&journal->free_tail_blocks))
+ return NULL;
+
+ block = list_last_entry(&journal->free_tail_blocks,
+ struct recovery_journal_block, list_node);
+ list_del_init(&block->list_node);
+ return block;
+}
+
+/**
+ * is_block_dirty() - Check whether a recovery block is dirty.
+ * @block: The block to check.
+ *
+ * A block is dirty if it has any uncommitted entries, which includes both entries not yet
+ * written and entries written but not yet acknowledged.
+ *
+ * Return: true if the block has any uncommitted entries.
+ */
+static inline bool __must_check is_block_dirty(const struct recovery_journal_block *block)
+{
+ return (block->uncommitted_entry_count > 0);
+}
+
+/**
+ * is_block_empty() - Check whether a journal block is empty.
+ * @block: The block to check.
+ *
+ * Return: true if the block has no entries.
+ */
+static inline bool __must_check is_block_empty(const struct recovery_journal_block *block)
+{
+ return (block->entry_count == 0);
+}
+
+/**
+ * is_block_full() - Check whether a journal block is full.
+ * @block: The block to check.
+ *
+ * Return: true if the block is full (or there is no block).
+ */
+static inline bool __must_check is_block_full(const struct recovery_journal_block *block)
+{
+ return ((block == NULL) || (block->journal->entries_per_block == block->entry_count));
+}
+
+/**
+ * assert_on_journal_thread() - Assert that we are running on the journal thread.
+ * @journal: The journal.
+ * @function_name: The function doing the check (for logging).
+ */
+static void assert_on_journal_thread(struct recovery_journal *journal,
+ const char *function_name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == journal->thread_id),
+ "%s() called on journal thread", function_name);
+}
+
+/**
+ * continue_waiter() - Release a data_vio from the journal.
+ *
+ * Invoked whenever a data_vio is to be released from the journal, either because its entry was
+ * committed to disk, or because there was an error. Implements waiter_callback_fn.
+ */
+static void continue_waiter(struct vdo_waiter *waiter, void *context)
+{
+ continue_data_vio_with_error(vdo_waiter_as_data_vio(waiter), *((int *) context));
+}
+
+/**
+ * has_block_waiters() - Check whether the journal has any waiters on any blocks.
+ * @journal: The journal in question.
+ *
+ * Return: true if any block has a waiter.
+ */
+static inline bool has_block_waiters(struct recovery_journal *journal)
+{
+ struct recovery_journal_block *block = get_journal_block(&journal->active_tail_blocks);
+
+ /*
+ * Either the first active tail block (if it exists) has waiters, or no active tail block
+ * has waiters.
+ */
+ return ((block != NULL) &&
+ (vdo_waitq_has_waiters(&block->entry_waiters) ||
+ vdo_waitq_has_waiters(&block->commit_waiters)));
+}
+
+static void recycle_journal_blocks(struct recovery_journal *journal);
+static void recycle_journal_block(struct recovery_journal_block *block);
+static void notify_commit_waiters(struct recovery_journal *journal);
+
+/**
+ * suspend_lock_counter() - Prevent the lock counter from notifying.
+ * @counter: The counter.
+ *
+ * Return: true if the lock counter was not notifying and hence the suspend was efficacious.
+ */
+static bool suspend_lock_counter(struct lock_counter *counter)
+{
+ int prior_state;
+
+ /*
+ * Extra barriers because this was originally developed using a CAS operation that
+ * implicitly had them.
+ */
+ smp_mb__before_atomic();
+ prior_state = atomic_cmpxchg(&counter->state, LOCK_COUNTER_STATE_NOT_NOTIFYING,
+ LOCK_COUNTER_STATE_SUSPENDED);
+ /* same as before_atomic */
+ smp_mb__after_atomic();
+
+ return ((prior_state == LOCK_COUNTER_STATE_SUSPENDED) ||
+ (prior_state == LOCK_COUNTER_STATE_NOT_NOTIFYING));
+}
+
+static inline bool is_read_only(struct recovery_journal *journal)
+{
+ return vdo_is_read_only(journal->flush_vio->completion.vdo);
+}
+
+/**
+ * check_for_drain_complete() - Check whether the journal has drained.
+ * @journal: The journal which may have just drained.
+ */
+static void check_for_drain_complete(struct recovery_journal *journal)
+{
+ int result = VDO_SUCCESS;
+
+ if (is_read_only(journal)) {
+ result = VDO_READ_ONLY;
+ /*
+ * Clean up any full active blocks which were not written due to read-only mode.
+ *
+ * FIXME: This would probably be better as a short-circuit in write_block().
+ */
+ notify_commit_waiters(journal);
+ recycle_journal_blocks(journal);
+
+ /* Release any data_vios waiting to be assigned entries. */
+ vdo_waitq_notify_all_waiters(&journal->entry_waiters,
+ continue_waiter, &result);
+ }
+
+ if (!vdo_is_state_draining(&journal->state) ||
+ journal->reaping ||
+ has_block_waiters(journal) ||
+ vdo_waitq_has_waiters(&journal->entry_waiters) ||
+ !suspend_lock_counter(&journal->lock_counter))
+ return;
+
+ if (vdo_is_state_saving(&journal->state)) {
+ if (journal->active_block != NULL) {
+ VDO_ASSERT_LOG_ONLY(((result == VDO_READ_ONLY) ||
+ !is_block_dirty(journal->active_block)),
+ "journal being saved has clean active block");
+ recycle_journal_block(journal->active_block);
+ }
+
+ VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
+ "all blocks in a journal being saved must be inactive");
+ }
+
+ vdo_finish_draining_with_result(&journal->state, result);
+}
+
+/**
+ * notify_recovery_journal_of_read_only_mode() - Notify a recovery journal that the VDO has gone
+ * read-only.
+ * @listener: The journal.
+ * @parent: The completion to notify in order to acknowledge the notification.
+ *
+ * Implements vdo_read_only_notification_fn.
+ */
+static void notify_recovery_journal_of_read_only_mode(void *listener,
+ struct vdo_completion *parent)
+{
+ check_for_drain_complete(listener);
+ vdo_finish_completion(parent);
+}
+
+/**
+ * enter_journal_read_only_mode() - Put the journal in read-only mode.
+ * @journal: The journal which has failed.
+ * @error_code: The error result triggering this call.
+ *
+ * All attempts to add entries after this function is called will fail. All VIOs waiting for
+ * commits will be awakened with an error.
+ */
+static void enter_journal_read_only_mode(struct recovery_journal *journal,
+ int error_code)
+{
+ vdo_enter_read_only_mode(journal->flush_vio->completion.vdo, error_code);
+ check_for_drain_complete(journal);
+}
+
+/**
+ * vdo_get_recovery_journal_current_sequence_number() - Obtain the recovery journal's current
+ * sequence number.
+ * @journal: The journal in question.
+ *
+ * Exposed only so the block map can be initialized from it.
+ *
+ * Return: The sequence number of the tail block.
+ */
+sequence_number_t vdo_get_recovery_journal_current_sequence_number(struct recovery_journal *journal)
+{
+ return journal->tail;
+}
+
+/**
+ * get_recovery_journal_head() - Get the head of the recovery journal.
+ * @journal: The journal.
+ *
+ * The head is the lesser of the block map head and the slab journal head sequence numbers.
+ *
+ * Return: the head of the journal.
+ */
+static inline sequence_number_t get_recovery_journal_head(const struct recovery_journal *journal)
+{
+ return min(journal->block_map_head, journal->slab_journal_head);
+}
+
+/**
+ * compute_recovery_count_byte() - Compute the recovery count byte for a given recovery count.
+ * @recovery_count: The recovery count.
+ *
+ * Return: The byte corresponding to the recovery count.
+ */
+static inline u8 __must_check compute_recovery_count_byte(u64 recovery_count)
+{
+ return (u8)(recovery_count & RECOVERY_COUNT_MASK);
+}
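+
+/*
+ * For example, assuming RECOVERY_COUNT_MASK is 0xff, recovery counts 4 and 260
+ * yield the same byte; presumably only the low byte is stored because sectors
+ * need only distinguish the current recovery epoch from stale data.
+ */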
+
+/**
+ * check_slab_journal_commit_threshold() - Check whether the journal is over the threshold, and if
+ * so, force the oldest slab journal tail block to commit.
+ * @journal: The journal.
+ */
+static void check_slab_journal_commit_threshold(struct recovery_journal *journal)
+{
+ block_count_t current_length = journal->tail - journal->slab_journal_head;
+
+ if (current_length > journal->slab_journal_commit_threshold) {
+ journal->events.slab_journal_commits_requested++;
+ vdo_commit_oldest_slab_journal_tail_blocks(journal->depot,
+ journal->slab_journal_head);
+ }
+}
+
+static void reap_recovery_journal(struct recovery_journal *journal);
+static void assign_entries(struct recovery_journal *journal);
+
+/**
+ * finish_reaping() - Finish reaping the journal.
+ * @journal: The journal being reaped.
+ */
+static void finish_reaping(struct recovery_journal *journal)
+{
+ block_count_t blocks_reaped;
+ sequence_number_t old_head = get_recovery_journal_head(journal);
+
+ journal->block_map_head = journal->block_map_reap_head;
+ journal->slab_journal_head = journal->slab_journal_reap_head;
+ blocks_reaped = get_recovery_journal_head(journal) - old_head;
+ journal->available_space += blocks_reaped * journal->entries_per_block;
+ journal->reaping = false;
+ check_slab_journal_commit_threshold(journal);
+ assign_entries(journal);
+ check_for_drain_complete(journal);
+}
+
+/**
+ * complete_reaping() - Finish reaping the journal after flushing the lower layer.
+ * @completion: The journal's flush VIO.
+ *
+ * This is the callback registered in reap_recovery_journal().
+ */
+static void complete_reaping(struct vdo_completion *completion)
+{
+ struct recovery_journal *journal = completion->parent;
+
+ finish_reaping(journal);
+
+ /* Try reaping again in case more locks were released while flush was out. */
+ reap_recovery_journal(journal);
+}
+
+/**
+ * handle_flush_error() - Handle an error when flushing the lower layer due to reaping.
+ * @completion: The journal's flush VIO.
+ */
+static void handle_flush_error(struct vdo_completion *completion)
+{
+ struct recovery_journal *journal = completion->parent;
+
+ vio_record_metadata_io_error(as_vio(completion));
+ journal->reaping = false;
+ enter_journal_read_only_mode(journal, completion->result);
+}
+
+static void flush_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct recovery_journal *journal = vio->completion.parent;
+
+ continue_vio_after_io(vio, complete_reaping, journal->thread_id);
+}
+
+/**
+ * initialize_journal_state() - Set all journal fields appropriately to start journaling from the
+ * current active block.
+ * @journal: The journal to be reset based on its active block.
+ */
+static void initialize_journal_state(struct recovery_journal *journal)
+{
+ journal->append_point.sequence_number = journal->tail;
+ journal->last_write_acknowledged = journal->tail;
+ journal->block_map_head = journal->tail;
+ journal->slab_journal_head = journal->tail;
+ journal->block_map_reap_head = journal->tail;
+ journal->slab_journal_reap_head = journal->tail;
+ journal->block_map_head_block_number =
+ vdo_get_recovery_journal_block_number(journal, journal->block_map_head);
+ journal->slab_journal_head_block_number =
+ vdo_get_recovery_journal_block_number(journal,
+ journal->slab_journal_head);
+ journal->available_space =
+ (journal->entries_per_block * vdo_get_recovery_journal_length(journal->size));
+}
+
+/**
+ * vdo_get_recovery_journal_length() - Get the number of usable recovery journal blocks.
+ * @journal_size: The size of the recovery journal in blocks.
+ *
+ * Return: the number of recovery journal blocks usable for entries.
+ */
+block_count_t vdo_get_recovery_journal_length(block_count_t journal_size)
+{
+ block_count_t reserved_blocks = journal_size / 4;
+
+ if (reserved_blocks > RECOVERY_JOURNAL_RESERVED_BLOCKS)
+ reserved_blocks = RECOVERY_JOURNAL_RESERVED_BLOCKS;
+ return (journal_size - reserved_blocks);
+}
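+
+/*
+ * A worked example: a 64-block journal reserves 64 / 4 = 16 blocks (assuming
+ * that does not exceed RECOVERY_JOURNAL_RESERVED_BLOCKS), leaving 48 blocks
+ * usable for entries; larger journals are capped at the reserved-block
+ * constant instead.
+ */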
+
+/**
+ * reap_recovery_journal_callback() - Attempt to reap the journal.
+ * @completion: The lock counter completion.
+ *
+ * Attempts to reap the journal now that all the locks on some journal block have been released.
+ * This is the callback registered with the lock counter.
+ */
+static void reap_recovery_journal_callback(struct vdo_completion *completion)
+{
+ struct recovery_journal *journal = (struct recovery_journal *) completion->parent;
+ /*
+ * The acknowledgment must be done before reaping so that there is no race between
+ * acknowledging the notification and unlocks wishing to notify.
+ */
+ smp_wmb();
+ atomic_set(&journal->lock_counter.state, LOCK_COUNTER_STATE_NOT_NOTIFYING);
+
+ if (vdo_is_state_quiescing(&journal->state)) {
+ /*
+ * Don't start reaping when the journal is trying to quiesce. Do check whether this
+ * notification is the last thing the drain is waiting on.
+ */
+ check_for_drain_complete(journal);
+ return;
+ }
+
+ reap_recovery_journal(journal);
+ check_slab_journal_commit_threshold(journal);
+}
+
+/**
+ * initialize_lock_counter() - Initialize a lock counter.
+ *
+ * @journal: The recovery journal.
+ * @vdo: The vdo.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check initialize_lock_counter(struct recovery_journal *journal,
+ struct vdo *vdo)
+{
+ int result;
+ struct thread_config *config = &vdo->thread_config;
+ struct lock_counter *counter = &journal->lock_counter;
+
+ result = vdo_allocate(journal->size, u16, __func__, &counter->journal_counters);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(journal->size, atomic_t, __func__,
+ &counter->journal_decrement_counts);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(journal->size * config->logical_zone_count, u16, __func__,
+ &counter->logical_counters);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(journal->size, atomic_t, __func__,
+ &counter->logical_zone_counts);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(journal->size * config->physical_zone_count, u16, __func__,
+ &counter->physical_counters);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(journal->size, atomic_t, __func__,
+ &counter->physical_zone_counts);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_initialize_completion(&counter->completion, vdo,
+ VDO_LOCK_COUNTER_COMPLETION);
+ vdo_prepare_completion(&counter->completion, reap_recovery_journal_callback,
+ reap_recovery_journal_callback, config->journal_thread,
+ journal);
+ counter->logical_zones = config->logical_zone_count;
+ counter->physical_zones = config->physical_zone_count;
+ counter->locks = journal->size;
+ return VDO_SUCCESS;
+}
+
+/**
+ * set_journal_tail() - Set the journal's tail sequence number.
+ * @journal: The journal whose tail is to be set.
+ * @tail: The new tail value.
+ */
+static void set_journal_tail(struct recovery_journal *journal, sequence_number_t tail)
+{
+ /* VDO does not support sequence numbers above 1 << 48 in the slab journal. */
+ if (tail >= (1ULL << 48))
+ enter_journal_read_only_mode(journal, VDO_JOURNAL_OVERFLOW);
+
+ journal->tail = tail;
+}
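+
+/*
+ * The 1 << 48 limit presumably comes from the packed journal point encoding,
+ * which packs an entry count and a sequence number into a single 64-bit field,
+ * leaving 48 bits for the sequence number.
+ */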
+
+/**
+ * initialize_recovery_block() - Initialize a journal block.
+ * @vdo: The vdo from which to construct vios.
+ * @journal: The journal to which the block will belong.
+ * @block: The block to initialize.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int initialize_recovery_block(struct vdo *vdo, struct recovery_journal *journal,
+ struct recovery_journal_block *block)
+{
+ char *data;
+ int result;
+
+ /*
+ * Ensure that a block is large enough to store RECOVERY_JOURNAL_ENTRIES_PER_BLOCK entries.
+ */
+ BUILD_BUG_ON(RECOVERY_JOURNAL_ENTRIES_PER_BLOCK >
+ ((VDO_BLOCK_SIZE - sizeof(struct packed_journal_header)) /
+ sizeof(struct packed_recovery_journal_entry)));
+
+ /*
+ * Allocate a full block for the journal block even though not all of the space is used
+ * since the VIO needs to write a full disk block.
+ */
+ result = vdo_allocate(VDO_BLOCK_SIZE, char, __func__, &data);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = allocate_vio_components(vdo, VIO_TYPE_RECOVERY_JOURNAL,
+ VIO_PRIORITY_HIGH, block, 1, data, &block->vio);
+ if (result != VDO_SUCCESS) {
+ vdo_free(data);
+ return result;
+ }
+
+ list_add_tail(&block->list_node, &journal->free_tail_blocks);
+ block->journal = journal;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_decode_recovery_journal() - Make a recovery journal and initialize it with the state that
+ * was decoded from the super block.
+ *
+ * @state: The decoded state of the journal.
+ * @nonce: The nonce of the VDO.
+ * @vdo: The VDO.
+ * @partition: The partition for the journal.
+ * @recovery_count: The VDO's number of completed recoveries.
+ * @journal_size: The number of blocks in the journal on disk.
+ * @journal_ptr: The pointer to hold the new recovery journal.
+ *
+ * Return: A success or error code.
+ */
+int vdo_decode_recovery_journal(struct recovery_journal_state_7_0 state, nonce_t nonce,
+ struct vdo *vdo, struct partition *partition,
+ u64 recovery_count, block_count_t journal_size,
+ struct recovery_journal **journal_ptr)
+{
+ block_count_t i;
+ struct recovery_journal *journal;
+ int result;
+
+ result = vdo_allocate_extended(struct recovery_journal,
+ RECOVERY_JOURNAL_RESERVED_BLOCKS,
+ struct recovery_journal_block, __func__,
+ &journal);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ INIT_LIST_HEAD(&journal->free_tail_blocks);
+ INIT_LIST_HEAD(&journal->active_tail_blocks);
+ vdo_waitq_init(&journal->pending_writes);
+
+ journal->thread_id = vdo->thread_config.journal_thread;
+ journal->origin = partition->offset;
+ journal->nonce = nonce;
+ journal->recovery_count = compute_recovery_count_byte(recovery_count);
+ journal->size = journal_size;
+ journal->slab_journal_commit_threshold = (journal_size * 2) / 3;
+ journal->logical_blocks_used = state.logical_blocks_used;
+ journal->block_map_data_blocks = state.block_map_data_blocks;
+ journal->entries_per_block = RECOVERY_JOURNAL_ENTRIES_PER_BLOCK;
+ set_journal_tail(journal, state.journal_start);
+ initialize_journal_state(journal);
+ /* TODO: this will have to change if we make initial resume of a VDO a real resume */
+ vdo_set_admin_state_code(&journal->state, VDO_ADMIN_STATE_SUSPENDED);
+
+ for (i = 0; i < RECOVERY_JOURNAL_RESERVED_BLOCKS; i++) {
+ struct recovery_journal_block *block = &journal->blocks[i];
+
+ result = initialize_recovery_block(vdo, journal, block);
+ if (result != VDO_SUCCESS) {
+ vdo_free_recovery_journal(journal);
+ return result;
+ }
+ }
+
+ result = initialize_lock_counter(journal, vdo);
+ if (result != VDO_SUCCESS) {
+ vdo_free_recovery_journal(journal);
+ return result;
+ }
+
+ result = create_metadata_vio(vdo, VIO_TYPE_RECOVERY_JOURNAL, VIO_PRIORITY_HIGH,
+ journal, NULL, &journal->flush_vio);
+ if (result != VDO_SUCCESS) {
+ vdo_free_recovery_journal(journal);
+ return result;
+ }
+
+ result = vdo_register_read_only_listener(vdo, journal,
+ notify_recovery_journal_of_read_only_mode,
+ journal->thread_id);
+ if (result != VDO_SUCCESS) {
+ vdo_free_recovery_journal(journal);
+ return result;
+ }
+
+ result = vdo_make_default_thread(vdo, journal->thread_id);
+ if (result != VDO_SUCCESS) {
+ vdo_free_recovery_journal(journal);
+ return result;
+ }
+
+ journal->flush_vio->completion.callback_thread_id = journal->thread_id;
+ *journal_ptr = journal;
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_free_recovery_journal() - Free a recovery journal.
+ * @journal: The recovery journal to free.
+ */
+void vdo_free_recovery_journal(struct recovery_journal *journal)
+{
+ block_count_t i;
+
+ if (journal == NULL)
+ return;
+
+ vdo_free(vdo_forget(journal->lock_counter.logical_zone_counts));
+ vdo_free(vdo_forget(journal->lock_counter.physical_zone_counts));
+ vdo_free(vdo_forget(journal->lock_counter.journal_counters));
+ vdo_free(vdo_forget(journal->lock_counter.journal_decrement_counts));
+ vdo_free(vdo_forget(journal->lock_counter.logical_counters));
+ vdo_free(vdo_forget(journal->lock_counter.physical_counters));
+ free_vio(vdo_forget(journal->flush_vio));
+
+ /*
+ * FIXME: eventually, the journal should be constructed in a quiescent state which
+ * requires opening before use.
+ */
+ if (!vdo_is_state_quiescent(&journal->state)) {
+ VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
+ "journal being freed has no active tail blocks");
+ } else if (!vdo_is_state_saved(&journal->state) &&
+ !list_empty(&journal->active_tail_blocks)) {
+ vdo_log_warning("journal being freed has uncommitted entries");
+ }
+
+ for (i = 0; i < RECOVERY_JOURNAL_RESERVED_BLOCKS; i++) {
+ struct recovery_journal_block *block = &journal->blocks[i];
+
+ vdo_free(vdo_forget(block->vio.data));
+ free_vio_components(&block->vio);
+ }
+
+ vdo_free(journal);
+}
+
+/**
+ * vdo_initialize_recovery_journal_post_repair() - Initialize the journal after a repair.
+ * @journal: The journal in question.
+ * @recovery_count: The number of completed recoveries.
+ * @tail: The new tail block sequence number.
+ * @logical_blocks_used: The new number of logical blocks used.
+ * @block_map_data_blocks: The new number of block map data blocks.
+ */
+void vdo_initialize_recovery_journal_post_repair(struct recovery_journal *journal,
+ u64 recovery_count,
+ sequence_number_t tail,
+ block_count_t logical_blocks_used,
+ block_count_t block_map_data_blocks)
+{
+ set_journal_tail(journal, tail + 1);
+ journal->recovery_count = compute_recovery_count_byte(recovery_count);
+ initialize_journal_state(journal);
+ journal->logical_blocks_used = logical_blocks_used;
+ journal->block_map_data_blocks = block_map_data_blocks;
+}
+
+/**
+ * vdo_get_journal_block_map_data_blocks_used() - Get the number of block map pages, allocated from
+ * data blocks, currently in use.
+ * @journal: The journal in question.
+ *
+ * Return: The number of block map pages allocated from slabs.
+ */
+block_count_t vdo_get_journal_block_map_data_blocks_used(struct recovery_journal *journal)
+{
+ return journal->block_map_data_blocks;
+}
+
+/**
+ * vdo_get_recovery_journal_thread_id() - Get the ID of a recovery journal's thread.
+ * @journal: The journal to query.
+ *
+ * Return: The ID of the journal's thread.
+ */
+thread_id_t vdo_get_recovery_journal_thread_id(struct recovery_journal *journal)
+{
+ return journal->thread_id;
+}
+
+/**
+ * vdo_open_recovery_journal() - Prepare the journal for new entries.
+ * @journal: The journal in question.
+ * @depot: The slab depot for this VDO.
+ * @block_map: The block map for this VDO.
+ */
+void vdo_open_recovery_journal(struct recovery_journal *journal,
+ struct slab_depot *depot, struct block_map *block_map)
+{
+ journal->depot = depot;
+ journal->block_map = block_map;
+ WRITE_ONCE(journal->state.current_state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+}
+
+/**
+ * vdo_record_recovery_journal() - Record the state of a recovery journal for encoding in the super
+ * block.
+ * @journal: the recovery journal.
+ *
+ * Return: the state of the journal.
+ */
+struct recovery_journal_state_7_0
+vdo_record_recovery_journal(const struct recovery_journal *journal)
+{
+ struct recovery_journal_state_7_0 state = {
+ .logical_blocks_used = journal->logical_blocks_used,
+ .block_map_data_blocks = journal->block_map_data_blocks,
+ };
+
+ if (vdo_is_state_saved(&journal->state)) {
+ /*
+ * If the journal is saved, we should start one past the active block (since the
+ * active block is not guaranteed to be empty).
+ */
+ state.journal_start = journal->tail;
+ } else {
+ /*
+ * When we're merely suspended or have gone read-only, we must record the first
+ * block that might have entries that need to be applied.
+ */
+ state.journal_start = get_recovery_journal_head(journal);
+ }
+
+ return state;
+}
+
+/**
+ * get_block_header() - Get a pointer to the packed journal block header in the block buffer.
+ * @block: The recovery block.
+ *
+ * Return: The block's header.
+ */
+static inline struct packed_journal_header *
+get_block_header(const struct recovery_journal_block *block)
+{
+ return (struct packed_journal_header *) block->vio.data;
+}
+
+/**
+ * set_active_sector() - Set the current sector of the current block and initialize it.
+ * @block: The block to update.
+ * @sector: A pointer to the first byte of the new sector.
+ */
+static void set_active_sector(struct recovery_journal_block *block, void *sector)
+{
+ block->sector = sector;
+ block->sector->check_byte = get_block_header(block)->check_byte;
+ block->sector->recovery_count = block->journal->recovery_count;
+ block->sector->entry_count = 0;
+}
+
+/**
+ * advance_tail() - Advance the tail of the journal.
+ * @journal: The journal whose tail should be advanced.
+ *
+ * Return: true if the tail was advanced.
+ */
+static bool advance_tail(struct recovery_journal *journal)
+{
+ struct recovery_block_header unpacked;
+ struct packed_journal_header *header;
+ struct recovery_journal_block *block;
+
+ block = journal->active_block = pop_free_list(journal);
+ if (block == NULL)
+ return false;
+
+ list_move_tail(&block->list_node, &journal->active_tail_blocks);
+
+ unpacked = (struct recovery_block_header) {
+ .metadata_type = VDO_METADATA_RECOVERY_JOURNAL_2,
+ .block_map_data_blocks = journal->block_map_data_blocks,
+ .logical_blocks_used = journal->logical_blocks_used,
+ .nonce = journal->nonce,
+ .recovery_count = journal->recovery_count,
+ .sequence_number = journal->tail,
+ .check_byte = vdo_compute_recovery_journal_check_byte(journal,
+ journal->tail),
+ };
+
+ header = get_block_header(block);
+ memset(block->vio.data, 0x0, VDO_BLOCK_SIZE);
+ block->sequence_number = journal->tail;
+ block->entry_count = 0;
+ block->uncommitted_entry_count = 0;
+ block->block_number = vdo_get_recovery_journal_block_number(journal,
+ journal->tail);
+
+ vdo_pack_recovery_block_header(&unpacked, header);
+ set_active_sector(block, vdo_get_journal_block_sector(header, 1));
+ set_journal_tail(journal, journal->tail + 1);
+ vdo_advance_block_map_era(journal->block_map, journal->tail);
+ return true;
+}
+
+/**
+ * initialize_lock_count() - Initialize the value of the journal zone's counter for a given lock.
+ * @journal: The recovery journal.
+ *
+ * Context: This must be called from the journal zone.
+ */
+static void initialize_lock_count(struct recovery_journal *journal)
+{
+ u16 *journal_value;
+ block_count_t lock_number = journal->active_block->block_number;
+ atomic_t *decrement_counter = get_decrement_counter(journal, lock_number);
+
+ journal_value = get_counter(journal, lock_number, VDO_ZONE_TYPE_JOURNAL, 0);
+ VDO_ASSERT_LOG_ONLY((*journal_value == atomic_read(decrement_counter)),
+ "count to be initialized not in use");
+ *journal_value = journal->entries_per_block + 1;
+ atomic_set(decrement_counter, 0);
+}
+
+/**
+ * prepare_to_assign_entry() - Prepare the currently active block to receive an entry and check
+ * whether an entry of the given type may be assigned at this time.
+ * @journal: The journal receiving an entry.
+ *
+ * Return: true if there is space in the journal to store an entry of the specified type.
+ */
+static bool prepare_to_assign_entry(struct recovery_journal *journal)
+{
+ if (journal->available_space == 0)
+ return false;
+
+ if (is_block_full(journal->active_block) && !advance_tail(journal))
+ return false;
+
+ if (!is_block_empty(journal->active_block))
+ return true;
+
+ if ((journal->tail - get_recovery_journal_head(journal)) > journal->size) {
+ /* Cannot use this block since the journal is full. */
+ journal->events.disk_full++;
+ return false;
+ }
+
+ /*
+ * Don't allow the new block to be reaped until all of its entries have been committed to
+ * the block map and until the journal block has been fully committed as well. Because the
+ * block map update is done only after any slab journal entries have been made, the
+ * per-entry lock for the block map entry serves to protect those as well.
+ */
+ initialize_lock_count(journal);
+ return true;
+}
+
+static void write_blocks(struct recovery_journal *journal);
+
+/**
+ * schedule_block_write() - Queue a block for writing.
+ * @journal: The journal in question.
+ * @block: The block which is now ready to write.
+ *
+ * The block is expected to be full. If the block is currently writing, this is a noop as the block
+ * will be queued for writing when the write finishes. The block must not currently be queued for
+ * writing.
+ */
+static void schedule_block_write(struct recovery_journal *journal,
+ struct recovery_journal_block *block)
+{
+ if (!block->committing)
+ vdo_waitq_enqueue_waiter(&journal->pending_writes, &block->write_waiter);
+ /*
+ * At the end of adding entries, or on discovering that this partial block is now full and
+ * ready to rewrite, we will call write_blocks() and write a whole batch.
+ */
+}
+
+/**
+ * release_journal_block_reference() - Release a reference to a journal block.
+ * @block: The journal block from which to release a reference.
+ */
+static void release_journal_block_reference(struct recovery_journal_block *block)
+{
+ vdo_release_recovery_journal_block_reference(block->journal,
+ block->sequence_number,
+ VDO_ZONE_TYPE_JOURNAL, 0);
+}
+
+static void update_usages(struct recovery_journal *journal, struct data_vio *data_vio)
+{
+ if (data_vio->increment_updater.operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) {
+ journal->block_map_data_blocks++;
+ return;
+ }
+
+ if (data_vio->new_mapped.state != VDO_MAPPING_STATE_UNMAPPED)
+ journal->logical_blocks_used++;
+
+ if (data_vio->mapped.state != VDO_MAPPING_STATE_UNMAPPED)
+ journal->logical_blocks_used--;
+}
+
+/**
+ * assign_entry() - Assign an entry waiter to the active block.
+ *
+ * Implements waiter_callback_fn.
+ */
+static void assign_entry(struct vdo_waiter *waiter, void *context)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+ struct recovery_journal_block *block = context;
+ struct recovery_journal *journal = block->journal;
+
+ /* Record the point at which we will make the journal entry. */
+ data_vio->recovery_journal_point = (struct journal_point) {
+ .sequence_number = block->sequence_number,
+ .entry_count = block->entry_count,
+ };
+
+ update_usages(journal, data_vio);
+ journal->available_space--;
+
+ if (!vdo_waitq_has_waiters(&block->entry_waiters))
+ journal->events.blocks.started++;
+
+ vdo_waitq_enqueue_waiter(&block->entry_waiters, &data_vio->waiter);
+ block->entry_count++;
+ block->uncommitted_entry_count++;
+ journal->events.entries.started++;
+
+ if (is_block_full(block)) {
+ /*
+ * The block is full, so we can write it at any time from now on. If it is already
+ * committing, we'll queue it for writing when it comes back.
+ */
+ schedule_block_write(journal, block);
+ }
+
+ /* Force out slab journal tail blocks when threshold is reached. */
+ check_slab_journal_commit_threshold(journal);
+}
+
+static void assign_entries(struct recovery_journal *journal)
+{
+ if (journal->adding_entries) {
+ /* Protect against re-entrancy. */
+ return;
+ }
+
+ journal->adding_entries = true;
+ while (vdo_waitq_has_waiters(&journal->entry_waiters) &&
+ prepare_to_assign_entry(journal)) {
+ vdo_waitq_notify_next_waiter(&journal->entry_waiters,
+ assign_entry, journal->active_block);
+ }
+
+ /* Now that we've finished with entries, see if we have a batch of blocks to write. */
+ write_blocks(journal);
+ journal->adding_entries = false;
+}
+
+/**
+ * recycle_journal_block() - Prepare an in-memory journal block to be reused now that it has been
+ * fully committed.
+ * @block: The block to be recycled.
+ */
+static void recycle_journal_block(struct recovery_journal_block *block)
+{
+ struct recovery_journal *journal = block->journal;
+ block_count_t i;
+
+ list_move_tail(&block->list_node, &journal->free_tail_blocks);
+
+ /* Release any unused entry locks. */
+ for (i = block->entry_count; i < journal->entries_per_block; i++)
+ release_journal_block_reference(block);
+
+ /*
+ * Release our own lock against reaping now that the block is completely committed, or
+ * we're giving up because we're in read-only mode.
+ */
+ if (block->entry_count > 0)
+ release_journal_block_reference(block);
+
+ if (block == journal->active_block)
+ journal->active_block = NULL;
+}
+
+/**
+ * continue_committed_waiter() - Invoked whenever a VIO is to be released from the journal
+ *                               because its entry was committed to disk.
+ *
+ * Implements waiter_callback_fn.
+ */
+static void continue_committed_waiter(struct vdo_waiter *waiter, void *context)
+{
+ struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);
+ struct recovery_journal *journal = context;
+ int result = (is_read_only(journal) ? VDO_READ_ONLY : VDO_SUCCESS);
+ bool has_decrement;
+
+ VDO_ASSERT_LOG_ONLY(vdo_before_journal_point(&journal->commit_point,
+ &data_vio->recovery_journal_point),
+ "DataVIOs released from recovery journal in order. Recovery journal point is (%llu, %u), but commit waiter point is (%llu, %u)",
+ (unsigned long long) journal->commit_point.sequence_number,
+ journal->commit_point.entry_count,
+ (unsigned long long) data_vio->recovery_journal_point.sequence_number,
+ data_vio->recovery_journal_point.entry_count);
+
+ journal->commit_point = data_vio->recovery_journal_point;
+ data_vio->last_async_operation = VIO_ASYNC_OP_UPDATE_REFERENCE_COUNTS;
+ if (result != VDO_SUCCESS) {
+ continue_data_vio_with_error(data_vio, result);
+ return;
+ }
+
+ /*
+ * The increment must be launched first since it must come before the
+ * decrement if they are in the same slab.
+ */
+ has_decrement = (data_vio->decrement_updater.zpbn.pbn != VDO_ZERO_BLOCK);
+ if ((data_vio->increment_updater.zpbn.pbn != VDO_ZERO_BLOCK) || !has_decrement)
+ continue_data_vio(data_vio);
+
+ if (has_decrement)
+ vdo_launch_completion(&data_vio->decrement_completion);
+}
+
+/**
+ * notify_commit_waiters() - Notify any VIOs whose entries have now committed.
+ * @journal: The recovery journal to update.
+ */
+static void notify_commit_waiters(struct recovery_journal *journal)
+{
+ struct recovery_journal_block *block;
+
+ list_for_each_entry(block, &journal->active_tail_blocks, list_node) {
+ if (block->committing)
+ return;
+
+ vdo_waitq_notify_all_waiters(&block->commit_waiters,
+ continue_committed_waiter, journal);
+ if (is_read_only(journal)) {
+ vdo_waitq_notify_all_waiters(&block->entry_waiters,
+ continue_committed_waiter,
+ journal);
+ } else if (is_block_dirty(block) || !is_block_full(block)) {
+ /* Stop at partially-committed or partially-filled blocks. */
+ return;
+ }
+ }
+}
+
+/**
+ * recycle_journal_blocks() - Recycle any journal blocks which have been fully committed.
+ * @journal: The recovery journal to update.
+ */
+static void recycle_journal_blocks(struct recovery_journal *journal)
+{
+ struct recovery_journal_block *block, *tmp;
+
+ list_for_each_entry_safe(block, tmp, &journal->active_tail_blocks, list_node) {
+ if (block->committing) {
+ /* Don't recycle committing blocks. */
+ return;
+ }
+
+ if (!is_read_only(journal) &&
+ (is_block_dirty(block) || !is_block_full(block))) {
+ /*
+ * Don't recycle partially written or partially full blocks, except in
+ * read-only mode.
+ */
+ return;
+ }
+
+ recycle_journal_block(block);
+ }
+}
+
+/**
+ * complete_write() - Handle post-commit processing.
+ * @completion: The completion of the VIO writing this block.
+ *
+ * This is the callback registered by write_block(). If more entries accumulated in the block being
+ * committed while the commit was in progress, another commit will be initiated.
+ */
+static void complete_write(struct vdo_completion *completion)
+{
+ struct recovery_journal_block *block = completion->parent;
+ struct recovery_journal *journal = block->journal;
+ struct recovery_journal_block *last_active_block;
+
+ assert_on_journal_thread(journal, __func__);
+
+ journal->pending_write_count -= 1;
+ journal->events.blocks.committed += 1;
+ journal->events.entries.committed += block->entries_in_commit;
+ block->uncommitted_entry_count -= block->entries_in_commit;
+ block->entries_in_commit = 0;
+ block->committing = false;
+
+ /* If this block is the latest block to be acknowledged, record that fact. */
+ if (block->sequence_number > journal->last_write_acknowledged)
+ journal->last_write_acknowledged = block->sequence_number;
+
+ last_active_block = get_journal_block(&journal->active_tail_blocks);
+ VDO_ASSERT_LOG_ONLY((block->sequence_number >= last_active_block->sequence_number),
+ "completed journal write is still active");
+
+ notify_commit_waiters(journal);
+
+ /*
+ * Is this block now full? Reaping and adding entries might have already sent it off for
+ * rewriting; otherwise, queue it for rewriting.
+ */
+ if (is_block_dirty(block) && is_block_full(block))
+ schedule_block_write(journal, block);
+
+ recycle_journal_blocks(journal);
+ write_blocks(journal);
+
+ check_for_drain_complete(journal);
+}
+
+static void handle_write_error(struct vdo_completion *completion)
+{
+ struct recovery_journal_block *block = completion->parent;
+ struct recovery_journal *journal = block->journal;
+
+ vio_record_metadata_io_error(as_vio(completion));
+ vdo_log_error_strerror(completion->result,
+ "cannot write recovery journal block %llu",
+ (unsigned long long) block->sequence_number);
+ enter_journal_read_only_mode(journal, completion->result);
+ complete_write(completion);
+}
+
+static void complete_write_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct recovery_journal_block *block = vio->completion.parent;
+ struct recovery_journal *journal = block->journal;
+
+ continue_vio_after_io(vio, complete_write, journal->thread_id);
+}
+
+/**
+ * add_queued_recovery_entries() - Actually add entries from the queue to the given block.
+ * @block: The journal block.
+ */
+static void add_queued_recovery_entries(struct recovery_journal_block *block)
+{
+ while (vdo_waitq_has_waiters(&block->entry_waiters)) {
+ struct data_vio *data_vio =
+ vdo_waiter_as_data_vio(vdo_waitq_dequeue_waiter(&block->entry_waiters));
+ struct tree_lock *lock = &data_vio->tree_lock;
+ struct packed_recovery_journal_entry *packed_entry;
+ struct recovery_journal_entry new_entry;
+
+ if (block->sector->entry_count == RECOVERY_JOURNAL_ENTRIES_PER_SECTOR)
+ set_active_sector(block,
+ (char *) block->sector + VDO_SECTOR_SIZE);
+
+ /* Compose and encode the entry. */
+ packed_entry = &block->sector->entries[block->sector->entry_count++];
+ new_entry = (struct recovery_journal_entry) {
+ .mapping = {
+ .pbn = data_vio->increment_updater.zpbn.pbn,
+ .state = data_vio->increment_updater.zpbn.state,
+ },
+ .unmapping = {
+ .pbn = data_vio->decrement_updater.zpbn.pbn,
+ .state = data_vio->decrement_updater.zpbn.state,
+ },
+ .operation = data_vio->increment_updater.operation,
+ .slot = lock->tree_slots[lock->height].block_map_slot,
+ };
+ *packed_entry = vdo_pack_recovery_journal_entry(&new_entry);
+ data_vio->recovery_sequence_number = block->sequence_number;
+
+ /* Enqueue the data_vio to wait for its entry to commit. */
+ vdo_waitq_enqueue_waiter(&block->commit_waiters, &data_vio->waiter);
+ }
+}
+
+/**
+ * write_block() - Issue a block for writing.
+ *
+ * Implements waiter_callback_fn.
+ */
+static void write_block(struct vdo_waiter *waiter, void *context __always_unused)
+{
+ struct recovery_journal_block *block =
+ container_of(waiter, struct recovery_journal_block, write_waiter);
+ struct recovery_journal *journal = block->journal;
+ struct packed_journal_header *header = get_block_header(block);
+
+ if (block->committing || !vdo_waitq_has_waiters(&block->entry_waiters) ||
+ is_read_only(journal))
+ return;
+
+ block->entries_in_commit = vdo_waitq_num_waiters(&block->entry_waiters);
+ add_queued_recovery_entries(block);
+
+ journal->pending_write_count += 1;
+ journal->events.blocks.written += 1;
+ journal->events.entries.written += block->entries_in_commit;
+
+ header->block_map_head = __cpu_to_le64(journal->block_map_head);
+ header->slab_journal_head = __cpu_to_le64(journal->slab_journal_head);
+ header->entry_count = __cpu_to_le16(block->entry_count);
+
+ block->committing = true;
+
+ /*
+ * We must issue a flush and a FUA for every commit. The flush is necessary to ensure that
+ * the data being referenced is stable. The FUA is necessary to ensure that the journal
+ * block itself is stable before allowing overwrites of the lbn's previous data.
+ */
+ vdo_submit_metadata_vio(&block->vio, journal->origin + block->block_number,
+ complete_write_endio, handle_write_error,
+ REQ_OP_WRITE | REQ_PRIO | REQ_PREFLUSH | REQ_SYNC | REQ_FUA);
+}
+
+/**
+ * write_blocks() - Attempt to commit blocks, according to write policy.
+ * @journal: The recovery journal.
+ */
+static void write_blocks(struct recovery_journal *journal)
+{
+ assert_on_journal_thread(journal, __func__);
+ /*
+ * We call this function after adding entries to the journal and after finishing a block
+ * write. Thus, when this function terminates we must either have no VIOs waiting in the
+ * journal or have some outstanding IO to provide a future wakeup.
+ *
+ * We want to only issue full blocks if there are no pending writes. However, if there are
+ * no outstanding writes and some unwritten entries, we must issue a block, even if it's
+ * the active block and it isn't full.
+ */
+ if (journal->pending_write_count > 0)
+ return;
+
+ /* Write all the full blocks. */
+ vdo_waitq_notify_all_waiters(&journal->pending_writes, write_block, NULL);
+
+ /*
+ * Do we need to write the active block? Only if we have no outstanding writes, even after
+ * issuing all of the full writes.
+ */
+ if ((journal->pending_write_count == 0) && (journal->active_block != NULL))
+ write_block(&journal->active_block->write_waiter, NULL);
+}
+
+/**
+ * vdo_add_recovery_journal_entry() - Add an entry to a recovery journal.
+ * @journal: The journal in which to make an entry.
+ * @data_vio: The data_vio for which to add the entry. The entry will be taken
+ * from the logical and new_mapped fields of the data_vio. The
+ * data_vio's recovery_sequence_number field will be set to the
+ * sequence number of the journal block in which the entry was
+ * made.
+ *
+ * This method is asynchronous. The data_vio will not be called back until the entry is committed
+ * to the on-disk journal.
+ */
+void vdo_add_recovery_journal_entry(struct recovery_journal *journal,
+ struct data_vio *data_vio)
+{
+ assert_on_journal_thread(journal, __func__);
+ if (!vdo_is_state_normal(&journal->state)) {
+ continue_data_vio_with_error(data_vio, VDO_INVALID_ADMIN_STATE);
+ return;
+ }
+
+ if (is_read_only(journal)) {
+ continue_data_vio_with_error(data_vio, VDO_READ_ONLY);
+ return;
+ }
+
+ VDO_ASSERT_LOG_ONLY(data_vio->recovery_sequence_number == 0,
+ "journal lock not held for new entry");
+
+ vdo_advance_journal_point(&journal->append_point, journal->entries_per_block);
+ vdo_waitq_enqueue_waiter(&journal->entry_waiters, &data_vio->waiter);
+ assign_entries(journal);
+}
+
+/**
+ * is_lock_locked() - Check whether a lock is locked for a zone type.
+ * @journal: The recovery journal.
+ * @lock_number: The lock to check.
+ * @zone_type: The type of the zone.
+ *
+ * If the recovery journal has a lock on the lock number, both logical and physical zones are
+ * considered locked.
+ *
+ * Return: true if the specified lock has references (is locked).
+ */
+static bool is_lock_locked(struct recovery_journal *journal, block_count_t lock_number,
+ enum vdo_zone_type zone_type)
+{
+ atomic_t *zone_count;
+ bool locked;
+
+ if (is_journal_zone_locked(journal, lock_number))
+ return true;
+
+ zone_count = get_zone_count_ptr(journal, lock_number, zone_type);
+ locked = (atomic_read(zone_count) != 0);
+ /* Pairs with implicit barrier in vdo_release_recovery_journal_block_reference() */
+ smp_rmb();
+ return locked;
+}
+
+/**
+ * reap_recovery_journal() - Conduct a sweep on a recovery journal to reclaim unreferenced blocks.
+ * @journal: The recovery journal.
+ */
+static void reap_recovery_journal(struct recovery_journal *journal)
+{
+ if (journal->reaping) {
+ /*
+ * We already have an outstanding reap in progress. We need to wait for it to
+ * finish.
+ */
+ return;
+ }
+
+ if (vdo_is_state_quiescent(&journal->state)) {
+ /* We are not supposed to do IO right now. Don't botch it by reaping. */
+ return;
+ }
+
+ /*
+ * Start reclaiming blocks only when the journal head has no references. Then stop when a
+ * block is referenced.
+ */
+ while ((journal->block_map_reap_head < journal->last_write_acknowledged) &&
+ !is_lock_locked(journal, journal->block_map_head_block_number,
+ VDO_ZONE_TYPE_LOGICAL)) {
+ journal->block_map_reap_head++;
+ if (++journal->block_map_head_block_number == journal->size)
+ journal->block_map_head_block_number = 0;
+ }
+
+ while ((journal->slab_journal_reap_head < journal->last_write_acknowledged) &&
+ !is_lock_locked(journal, journal->slab_journal_head_block_number,
+ VDO_ZONE_TYPE_PHYSICAL)) {
+ journal->slab_journal_reap_head++;
+ if (++journal->slab_journal_head_block_number == journal->size)
+ journal->slab_journal_head_block_number = 0;
+ }
+
+ if ((journal->block_map_reap_head == journal->block_map_head) &&
+ (journal->slab_journal_reap_head == journal->slab_journal_head)) {
+ /* Nothing happened. */
+ return;
+ }
+
+ /*
+ * If the block map head will advance, we must flush any block map page modified by the
+ * entries we are reaping. If the slab journal head will advance, we must flush the slab
+ * summary update covering the slab journal that just released some lock.
+ */
+ journal->reaping = true;
+ vdo_submit_flush_vio(journal->flush_vio, flush_endio, handle_flush_error);
+}
+
+/**
+ * vdo_acquire_recovery_journal_block_reference() - Acquire a reference to a recovery journal block
+ * from somewhere other than the journal itself.
+ * @journal: The recovery journal.
+ * @sequence_number: The journal sequence number of the referenced block.
+ * @zone_type: The type of the zone making the adjustment.
+ * @zone_id: The ID of the zone making the adjustment.
+ */
+void vdo_acquire_recovery_journal_block_reference(struct recovery_journal *journal,
+ sequence_number_t sequence_number,
+ enum vdo_zone_type zone_type,
+ zone_count_t zone_id)
+{
+ block_count_t lock_number;
+ u16 *current_value;
+
+ if (sequence_number == 0)
+ return;
+
+ VDO_ASSERT_LOG_ONLY((zone_type != VDO_ZONE_TYPE_JOURNAL),
+ "invalid lock count increment from journal zone");
+
+ lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number);
+ current_value = get_counter(journal, lock_number, zone_type, zone_id);
+ VDO_ASSERT_LOG_ONLY(*current_value < U16_MAX,
+ "increment of lock counter must not overflow");
+
+ if (*current_value == 0) {
+ /*
+ * This zone is acquiring this lock for the first time. Extra barriers because this
+ * was originally developed using an atomic add operation that implicitly had them.
+ */
+ smp_mb__before_atomic();
+ atomic_inc(get_zone_count_ptr(journal, lock_number, zone_type));
+ /* same as before_atomic */
+ smp_mb__after_atomic();
+ }
+
+ *current_value += 1;
+}
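+
+/*
+ * The pattern above in miniature (an illustrative model, not used by the
+ * driver): a zone-local u16 counts references cheaply, while a shared atomic
+ * counts only how many zones hold at least one reference, so cross-zone
+ * traffic occurs only on the first acquire in each zone.
+ */
+static void __maybe_unused model_first_reference_acquire(u16 *local_count,
+							  atomic_t *zones_holding)
+{
+ if (*local_count == 0) {
+ /* First reference in this zone: take the shared zone count. */
+ smp_mb__before_atomic();
+ atomic_inc(zones_holding);
+ smp_mb__after_atomic();
+ }
+
+ *local_count += 1;
+}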
+
+/**
+ * vdo_release_journal_entry_lock() - Release a single per-entry reference count for a recovery
+ * journal block.
+ * @journal: The recovery journal.
+ * @sequence_number: The journal sequence number of the referenced block.
+ */
+void vdo_release_journal_entry_lock(struct recovery_journal *journal,
+ sequence_number_t sequence_number)
+{
+ block_count_t lock_number;
+
+ if (sequence_number == 0)
+ return;
+
+ lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number);
+ /*
+ * Extra barriers because this was originally developed using an atomic add operation that
+ * implicitly had them.
+ */
+ smp_mb__before_atomic();
+ atomic_inc(get_decrement_counter(journal, lock_number));
+ /* same as before_atomic */
+ smp_mb__after_atomic();
+}
+
+/**
+ * initiate_drain() - Initiate a drain.
+ *
+ * Implements vdo_admin_initiator_fn.
+ */
+static void initiate_drain(struct admin_state *state)
+{
+ check_for_drain_complete(container_of(state, struct recovery_journal, state));
+}
+
+/**
+ * vdo_drain_recovery_journal() - Drain recovery journal I/O.
+ * @journal: The journal to drain.
+ * @operation: The drain operation (suspend or save).
+ * @parent: The completion to notify once the journal is drained.
+ *
+ * All uncommitted entries will be written out.
+ */
+void vdo_drain_recovery_journal(struct recovery_journal *journal,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent)
+{
+ assert_on_journal_thread(journal, __func__);
+ vdo_start_draining(&journal->state, operation, parent, initiate_drain);
+}
+
+/**
+ * resume_lock_counter() - Re-allow notifications from a suspended lock counter.
+ * @counter: The counter.
+ *
+ * Return: true if the lock counter was suspended.
+ */
+static bool resume_lock_counter(struct lock_counter *counter)
+{
+ int prior_state;
+
+ /*
+ * Extra barriers because this was originally developed using a CAS operation that implicitly
+ * had them.
+ */
+ smp_mb__before_atomic();
+ prior_state = atomic_cmpxchg(&counter->state, LOCK_COUNTER_STATE_SUSPENDED,
+ LOCK_COUNTER_STATE_NOT_NOTIFYING);
+ /* same as before_atomic */
+ smp_mb__after_atomic();
+
+ return (prior_state == LOCK_COUNTER_STATE_SUSPENDED);
+}
+
+/**
+ * vdo_resume_recovery_journal() - Resume a recovery journal which has been drained.
+ * @journal: The journal to resume.
+ * @parent: The completion to finish once the journal is resumed.
+ */
+void vdo_resume_recovery_journal(struct recovery_journal *journal,
+ struct vdo_completion *parent)
+{
+ bool saved;
+
+ assert_on_journal_thread(journal, __func__);
+ saved = vdo_is_state_saved(&journal->state);
+ vdo_set_completion_result(parent, vdo_resume_if_quiescent(&journal->state));
+ if (is_read_only(journal)) {
+ vdo_continue_completion(parent, VDO_READ_ONLY);
+ return;
+ }
+
+ if (saved)
+ initialize_journal_state(journal);
+
+ if (resume_lock_counter(&journal->lock_counter)) {
+ /* We might have missed a notification. */
+ reap_recovery_journal(journal);
+ }
+
+ vdo_launch_completion(parent);
+}
+
+/**
+ * vdo_get_recovery_journal_logical_blocks_used() - Get the number of logical blocks in use by the
+ * VDO.
+ * @journal: The journal.
+ *
+ * Return: The number of logical blocks in use by the VDO.
+ */
+block_count_t vdo_get_recovery_journal_logical_blocks_used(const struct recovery_journal *journal)
+{
+ return journal->logical_blocks_used;
+}
+
+/**
+ * vdo_get_recovery_journal_statistics() - Get the current statistics from the recovery journal.
+ * @journal: The recovery journal to query.
+ *
+ * Return: A copy of the current statistics for the journal.
+ */
+struct recovery_journal_statistics
+vdo_get_recovery_journal_statistics(const struct recovery_journal *journal)
+{
+ return journal->events;
+}
+
+/**
+ * dump_recovery_block() - Dump the contents of the recovery block to the log.
+ * @block: The block to dump.
+ */
+static void dump_recovery_block(const struct recovery_journal_block *block)
+{
+ vdo_log_info(" sequence number %llu; entries %u; %s; %zu entry waiters; %zu commit waiters",
+ (unsigned long long) block->sequence_number, block->entry_count,
+ (block->committing ? "committing" : "waiting"),
+ vdo_waitq_num_waiters(&block->entry_waiters),
+ vdo_waitq_num_waiters(&block->commit_waiters));
+}
+
+/**
+ * vdo_dump_recovery_journal_statistics() - Dump some current statistics and other debug info from
+ * the recovery journal.
+ * @journal: The recovery journal to dump.
+ */
+void vdo_dump_recovery_journal_statistics(const struct recovery_journal *journal)
+{
+ const struct recovery_journal_block *block;
+ struct recovery_journal_statistics stats = vdo_get_recovery_journal_statistics(journal);
+
+ vdo_log_info("Recovery Journal");
+ vdo_log_info(" block_map_head=%llu slab_journal_head=%llu last_write_acknowledged=%llu tail=%llu block_map_reap_head=%llu slab_journal_reap_head=%llu disk_full=%llu slab_journal_commits_requested=%llu entry_waiters=%zu",
+ (unsigned long long) journal->block_map_head,
+ (unsigned long long) journal->slab_journal_head,
+ (unsigned long long) journal->last_write_acknowledged,
+ (unsigned long long) journal->tail,
+ (unsigned long long) journal->block_map_reap_head,
+ (unsigned long long) journal->slab_journal_reap_head,
+ (unsigned long long) stats.disk_full,
+ (unsigned long long) stats.slab_journal_commits_requested,
+ vdo_waitq_num_waiters(&journal->entry_waiters));
+ vdo_log_info(" entries: started=%llu written=%llu committed=%llu",
+ (unsigned long long) stats.entries.started,
+ (unsigned long long) stats.entries.written,
+ (unsigned long long) stats.entries.committed);
+ vdo_log_info(" blocks: started=%llu written=%llu committed=%llu",
+ (unsigned long long) stats.blocks.started,
+ (unsigned long long) stats.blocks.written,
+ (unsigned long long) stats.blocks.committed);
+
+ vdo_log_info(" active blocks:");
+ list_for_each_entry(block, &journal->active_tail_blocks, list_node)
+ dump_recovery_block(block);
+}
diff --git a/drivers/md/dm-vdo/recovery-journal.h b/drivers/md/dm-vdo/recovery-journal.h
new file mode 100644
index 000000000000..899071173015
--- /dev/null
+++ b/drivers/md/dm-vdo/recovery-journal.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_RECOVERY_JOURNAL_H
+#define VDO_RECOVERY_JOURNAL_H
+
+#include <linux/list.h>
+
+#include "numeric.h"
+
+#include "admin-state.h"
+#include "constants.h"
+#include "encodings.h"
+#include "flush.h"
+#include "statistics.h"
+#include "types.h"
+#include "wait-queue.h"
+
+/**
+ * DOC: recovery journal.
+ *
+ * The recovery_journal provides a log of all block mapping and reference count changes which have
+ * not yet been stably written to the block map or slab journals. This log reduces write
+ * amplification by amortizing slab journal and block map page updates across many data
+ * writes.
+ *
+ * The recovery journal has a single dedicated queue and thread for performing all journal updates.
+ * The concurrency guarantees of this single-threaded model allow the code to omit more
+ * fine-grained locking for recovery journal structures.
+ *
+ * The journal consists of a set of on-disk blocks arranged as a circular log with monotonically
+ * increasing sequence numbers. Three sequence numbers serve to define the active extent of the
+ * journal. The 'head' is the oldest active block in the journal. The 'tail' is the end of the
+ * half-open interval containing the active blocks. 'active' is the number of the block actively
+ * receiving entries. In an empty journal, head == active == tail. Once any entries are added,
+ * tail == active + 1, and head may be any value in the interval [tail - size, active].
+ *
+ * The journal also contains a set of in-memory blocks which are used to buffer up entries until
+ * they can be committed. In general the number of in-memory blocks ('tail_buffer_count') will be
+ * less than the on-disk size. Each in-memory block is also a vdo_completion. Each in-memory block
+ * has a vio which is used to commit that block to disk. The vio's data is the on-disk
+ * representation of the journal block. In addition each in-memory block has a buffer which is used
+ * to accumulate entries while a partial commit of the block is in progress. In-memory blocks are
+ * kept on two rings. Free blocks live on the 'free_tail_blocks' ring. When a block becomes active
+ * (see below) it is moved to the 'active_tail_blocks' ring. When a block is fully committed, it is
+ * moved back to the 'free_tail_blocks' ring.
+ *
+ * When entries are added to the journal, they are added to the active in-memory block, as
+ * indicated by the 'active_block' field. If the caller wishes to wait for the entry to be
+ * committed, the requesting VIO will be attached to the in-memory block to which the caller's
+ * entry was added. If the caller does wish to wait, or if the entry filled the active block, an
+ * attempt will be made to commit that block to disk. If there is already another commit in
+ * progress, the attempt will be ignored and then automatically retried when the in-progress commit
+ * completes. If there is no commit in progress, any data_vios waiting on the block are transferred
+ * to the block's vio which is then written, automatically waking all of the waiters when it
+ * completes. When the write completes, any entries which accumulated in the block are copied to
+ * the vio's data buffer.
+ *
+ * Finally, the journal maintains a set of counters, one for each on-disk journal block. These
+ * counters are used as locks to prevent premature reaping of journal blocks. Each time a new
+ * sequence number is used, the counter for the corresponding block is incremented. The counter is
+ * subsequently decremented when that block is filled and then committed for the last time. This
+ * prevents blocks from being reaped while they are still being updated. The counter is also
+ * incremented once for each entry added to a block, and decremented once each time the block map
+ * is updated in memory for that request. This prevents blocks from being reaped while their VIOs
+ * are still active. Finally, each in-memory block map page tracks the oldest journal block that
+ * contains entries corresponding to uncommitted updates to that block map page. Each time an
+ * in-memory block map page is updated, it checks if the journal block for the VIO is earlier than
+ * the one it references, in which case it increments the count on the earlier journal block and
+ * decrements the count on the later journal block, maintaining a lock on the oldest journal block
+ * containing entries for that page. When a block map page has been flushed from the cache, the
+ * counter for the journal block it references is decremented. Whenever the counter for the head
+ * block goes to 0, the head is advanced until it comes to a block whose counter is not 0 or until
+ * it reaches the active block. This is the mechanism for reclaiming journal space on disk.
+ *
+ * If there is no in-memory space when a VIO attempts to add an entry, the VIO will be held on
+ * the journal's 'entry_waiters' queue and will be woken the next time a full block has
+ * committed. If there is no on-disk space when a VIO attempts to add an entry, the VIO will
+ * likewise wait on that queue and will be woken the next time a journal block is reaped.
+ */
+
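+/*
+ * An illustrative model (not used by the driver) of the head/tail arithmetic
+ * described above: sequence numbers only increase, and the on-disk window is
+ * over-full when the active extent spans more than 'size' blocks, mirroring
+ * the check made in prepare_to_assign_entry() after advancing the tail.
+ */
+struct journal_extent_model {
+ sequence_number_t head; /* The oldest block still holding locks */
+ sequence_number_t tail; /* One past the newest active block */
+ block_count_t size; /* The number of on-disk blocks in the log */
+};
+
+static inline bool model_journal_window_full(const struct journal_extent_model *model)
+{
+ return ((model->tail - model->head) > model->size);
+}
+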
+enum vdo_zone_type {
+ VDO_ZONE_TYPE_ADMIN,
+ VDO_ZONE_TYPE_JOURNAL,
+ VDO_ZONE_TYPE_LOGICAL,
+ VDO_ZONE_TYPE_PHYSICAL,
+};
+
+struct lock_counter {
+ /* The completion for notifying the owner of a lock release */
+ struct vdo_completion completion;
+ /* The number of logical zones which may hold locks */
+ zone_count_t logical_zones;
+ /* The number of physical zones which may hold locks */
+ zone_count_t physical_zones;
+ /* The number of locks */
+ block_count_t locks;
+ /* Whether the lock release notification is in flight */
+ atomic_t state;
+ /* The number of logical zones which hold each lock */
+ atomic_t *logical_zone_counts;
+ /* The number of physical zones which hold each lock */
+ atomic_t *physical_zone_counts;
+ /* The per-lock counts for the journal zone */
+ u16 *journal_counters;
+ /* The per-lock decrement counts for the journal zone */
+ atomic_t *journal_decrement_counts;
+ /* The per-zone, per-lock reference counts for logical zones */
+ u16 *logical_counters;
+ /* The per-zone, per-lock reference counts for physical zones */
+ u16 *physical_counters;
+};
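+
+/*
+ * The per-zone counter arrays above are flat allocations with one u16 per
+ * (zone, lock) pair. A hypothetical accessor for that layout (the driver's
+ * actual accessor lives in the .c file; zone-major order is assumed here):
+ */
+static inline u16 *model_zone_lock_counter(u16 *counters, block_count_t locks,
+					    zone_count_t zone, block_count_t lock_number)
+{
+ /* Zone-major: each zone's counters are contiguous. */
+ return &counters[(zone * locks) + lock_number];
+}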
+
+struct recovery_journal_block {
+ /* The doubly linked pointers for the free or active lists */
+ struct list_head list_node;
+ /* The waiter for the pending full block list */
+ struct vdo_waiter write_waiter;
+ /* The journal to which this block belongs */
+ struct recovery_journal *journal;
+ /* A pointer to the current sector in the packed block buffer */
+ struct packed_journal_sector *sector;
+ /* The vio for writing this block */
+ struct vio vio;
+ /* The sequence number for this block */
+ sequence_number_t sequence_number;
+ /* The location of this block in the on-disk journal */
+ physical_block_number_t block_number;
+ /* Whether this block is being committed */
+ bool committing;
+ /* The total number of entries in this block */
+ journal_entry_count_t entry_count;
+ /* The total number of uncommitted entries (queued or committing) */
+ journal_entry_count_t uncommitted_entry_count;
+ /* The number of new entries in the current commit */
+ journal_entry_count_t entries_in_commit;
+ /* The queue of vios which will make entries for the next commit */
+ struct vdo_wait_queue entry_waiters;
+ /* The queue of vios waiting for the current commit */
+ struct vdo_wait_queue commit_waiters;
+};
+
+struct recovery_journal {
+ /* The thread ID of the journal zone */
+ thread_id_t thread_id;
+ /* The slab depot which can hold locks on this journal */
+ struct slab_depot *depot;
+ /* The block map which can hold locks on this journal */
+ struct block_map *block_map;
+ /* The queue of vios waiting to make entries */
+ struct vdo_wait_queue entry_waiters;
+ /* The number of free entries in the journal */
+ u64 available_space;
+ /* The number of decrement entries which need to be made */
+ data_vio_count_t pending_decrement_count;
+ /* Whether the journal is adding entries from the increment or decrement waiters queues */
+ bool adding_entries;
+ /* The administrative state of the journal */
+ struct admin_state state;
+ /* Whether a reap is in progress */
+ bool reaping;
+ /* The location of the first journal block */
+ physical_block_number_t origin;
+ /* The oldest active block in the journal on disk for block map rebuild */
+ sequence_number_t block_map_head;
+ /* The oldest active block in the journal on disk for slab journal replay */
+ sequence_number_t slab_journal_head;
+ /* The newest block in the journal on disk to which a write has finished */
+ sequence_number_t last_write_acknowledged;
+ /* The end of the half-open interval of the active journal */
+ sequence_number_t tail;
+ /* The point at which the last entry will have been added */
+ struct journal_point append_point;
+ /* The journal point of the vio most recently released from the journal */
+ struct journal_point commit_point;
+ /* The nonce of the VDO */
+ nonce_t nonce;
+ /* The number of recoveries completed by the VDO */
+ u8 recovery_count;
+ /* The number of entries which fit in a single block */
+ journal_entry_count_t entries_per_block;
+ /* Unused in-memory journal blocks */
+ struct list_head free_tail_blocks;
+ /* In-memory journal blocks with records */
+ struct list_head active_tail_blocks;
+ /* A pointer to the active block (the one we are adding entries to now) */
+ struct recovery_journal_block *active_block;
+ /* Journal blocks that need writing */
+ struct vdo_wait_queue pending_writes;
+ /* The new block map reap head after reaping */
+ sequence_number_t block_map_reap_head;
+ /* The head block number for the block map rebuild range */
+ block_count_t block_map_head_block_number;
+ /* The new slab journal reap head after reaping */
+ sequence_number_t slab_journal_reap_head;
+ /* The head block number for the slab journal replay range */
+ block_count_t slab_journal_head_block_number;
+ /* The data-less vio, usable only for flushing */
+ struct vio *flush_vio;
+ /* The number of blocks in the on-disk journal */
+ block_count_t size;
+ /* The number of logical blocks that are in-use */
+ block_count_t logical_blocks_used;
+ /* The number of block map pages that are allocated */
+ block_count_t block_map_data_blocks;
+ /* The number of journal blocks written but not yet acknowledged */
+ block_count_t pending_write_count;
+ /* The threshold at which slab journal tail blocks will be written out */
+ block_count_t slab_journal_commit_threshold;
+ /* Counters for events in the journal that are reported as statistics */
+ struct recovery_journal_statistics events;
+ /* The locks for each on-disk block */
+ struct lock_counter lock_counter;
+ /* The tail blocks */
+ struct recovery_journal_block blocks[];
+};
+
+/**
+ * vdo_get_recovery_journal_block_number() - Get the physical block number for a given sequence
+ * number.
+ * @journal: The journal.
+ * @sequence: The sequence number of the desired block.
+ *
+ * Return: The block number corresponding to the sequence number.
+ */
+static inline physical_block_number_t __must_check
+vdo_get_recovery_journal_block_number(const struct recovery_journal *journal,
+ sequence_number_t sequence)
+{
+ /*
+ * Since journal size is a power of two, the block number modulus can just be extracted
+ * from the low-order bits of the sequence.
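+	 * For example, with a 64-block journal, sequence numbers 3, 67, and 131 all
+	 * map to block 3, since the computation reduces to (sequence & 63).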
+ */
+ return vdo_compute_recovery_journal_block_number(journal->size, sequence);
+}
+
+/**
+ * vdo_compute_recovery_journal_check_byte() - Compute the check byte for a given sequence number.
+ * @journal: The journal.
+ * @sequence: The sequence number.
+ *
+ * Return: The check byte corresponding to the sequence number.
+ */
+static inline u8 __must_check
+vdo_compute_recovery_journal_check_byte(const struct recovery_journal *journal,
+ sequence_number_t sequence)
+{
+ /* The check byte must change with each trip around the journal. */
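+	/*
+	 * For example, the first trip around the journal yields 0x80, the second
+	 * 0x81, and so on, wrapping back to 0x80 after 128 trips.
+	 */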
+ return (((sequence / journal->size) & 0x7F) | 0x80);
+}
+
+int __must_check vdo_decode_recovery_journal(struct recovery_journal_state_7_0 state,
+ nonce_t nonce, struct vdo *vdo,
+ struct partition *partition,
+ u64 recovery_count,
+ block_count_t journal_size,
+ struct recovery_journal **journal_ptr);
+
+void vdo_free_recovery_journal(struct recovery_journal *journal);
+
+void vdo_initialize_recovery_journal_post_repair(struct recovery_journal *journal,
+ u64 recovery_count,
+ sequence_number_t tail,
+ block_count_t logical_blocks_used,
+ block_count_t block_map_data_blocks);
+
+block_count_t __must_check
+vdo_get_journal_block_map_data_blocks_used(struct recovery_journal *journal);
+
+thread_id_t __must_check vdo_get_recovery_journal_thread_id(struct recovery_journal *journal);
+
+void vdo_open_recovery_journal(struct recovery_journal *journal,
+ struct slab_depot *depot, struct block_map *block_map);
+
+sequence_number_t
+vdo_get_recovery_journal_current_sequence_number(struct recovery_journal *journal);
+
+block_count_t __must_check vdo_get_recovery_journal_length(block_count_t journal_size);
+
+struct recovery_journal_state_7_0 __must_check
+vdo_record_recovery_journal(const struct recovery_journal *journal);
+
+void vdo_add_recovery_journal_entry(struct recovery_journal *journal,
+ struct data_vio *data_vio);
+
+void vdo_acquire_recovery_journal_block_reference(struct recovery_journal *journal,
+ sequence_number_t sequence_number,
+ enum vdo_zone_type zone_type,
+ zone_count_t zone_id);
+
+void vdo_release_recovery_journal_block_reference(struct recovery_journal *journal,
+ sequence_number_t sequence_number,
+ enum vdo_zone_type zone_type,
+ zone_count_t zone_id);
+
+void vdo_release_journal_entry_lock(struct recovery_journal *journal,
+ sequence_number_t sequence_number);
+
+void vdo_drain_recovery_journal(struct recovery_journal *journal,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent);
+
+void vdo_resume_recovery_journal(struct recovery_journal *journal,
+ struct vdo_completion *parent);
+
+block_count_t __must_check
+vdo_get_recovery_journal_logical_blocks_used(const struct recovery_journal *journal);
+
+struct recovery_journal_statistics __must_check
+vdo_get_recovery_journal_statistics(const struct recovery_journal *journal);
+
+void vdo_dump_recovery_journal_statistics(const struct recovery_journal *journal);
+
+#endif /* VDO_RECOVERY_JOURNAL_H */
diff --git a/drivers/md/dm-vdo/repair.c b/drivers/md/dm-vdo/repair.c
new file mode 100644
index 000000000000..defc9359f10e
--- /dev/null
+++ b/drivers/md/dm-vdo/repair.c
@@ -0,0 +1,1756 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "repair.h"
+
+#include <linux/min_heap.h>
+#include <linux/minmax.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "block-map.h"
+#include "completion.h"
+#include "constants.h"
+#include "encodings.h"
+#include "int-map.h"
+#include "io-submitter.h"
+#include "recovery-journal.h"
+#include "slab-depot.h"
+#include "types.h"
+#include "vdo.h"
+#include "wait-queue.h"
+
+/*
+ * An explicitly numbered block mapping. Numbering the mappings allows them to be sorted by logical
+ * block number during repair while still preserving the relative order of journal entries with
+ * the same logical block number.
+ */
+struct numbered_block_mapping {
+ struct block_map_slot block_map_slot;
+ struct block_map_entry block_map_entry;
+ /* A serial number to use during replay */
+ u32 number;
+} __packed;
+
+/*
+ * The absolute position of an entry in the recovery journal, including the sector number and the
+ * entry number within the sector.
+ */
+struct recovery_point {
+ /* Block sequence number */
+ sequence_number_t sequence_number;
+ /* Sector number */
+ u8 sector_count;
+ /* Entry number */
+ journal_entry_count_t entry_count;
+ /* Whether or not the increment portion of the current entry has been applied */
+ bool increment_applied;
+};
+
+struct repair_completion {
+ /* The completion header */
+ struct vdo_completion completion;
+
+ /* A buffer to hold the data read off disk */
+ char *journal_data;
+
+ /* For loading the journal */
+ data_vio_count_t vio_count;
+ data_vio_count_t vios_complete;
+ struct vio *vios;
+
+ /* The number of entries to be applied to the block map */
+ size_t block_map_entry_count;
+ /* The sequence number of the first valid block for block map recovery */
+ sequence_number_t block_map_head;
+ /* The sequence number of the first valid block for slab journal replay */
+ sequence_number_t slab_journal_head;
+ /* The sequence number of the last valid block of the journal (if known) */
+ sequence_number_t tail;
+ /*
+	 * The highest sequence number found in the journal. During recovery (as opposed to
+	 * read-only rebuild), this may differ from the tail, since the tail ignores blocks
+	 * after the first hole.
+ */
+ sequence_number_t highest_tail;
+
+ /* The number of logical blocks currently known to be in use */
+ block_count_t logical_blocks_used;
+ /* The number of block map data blocks known to be allocated */
+ block_count_t block_map_data_blocks;
+
+ /* These fields are for playing the journal into the block map */
+ /* The entry data for the block map recovery */
+ struct numbered_block_mapping *entries;
+ /* The number of entries in the entry array */
+ size_t entry_count;
+	/* The number of pending (non-ready) page requests */
+ page_count_t outstanding;
+	/* The number of page completions */
+ page_count_t page_count;
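+	/* Whether pages are still being launched (page processing is deferred until they all are) */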
+ bool launching;
+ /*
+	 * A heap wrapping the journal entries. It sorts the entries in ascending LBN order,
+	 * then in original journal order, permitting efficient in-order iteration over them.
+ */
+ struct min_heap replay_heap;
+ /* Fields tracking progress through the journal entries. */
+ struct numbered_block_mapping *current_entry;
+ struct numbered_block_mapping *current_unfetched_entry;
+ /* Current requested page's PBN */
+ physical_block_number_t pbn;
+
+ /* These fields are only used during recovery. */
+ /* A location just beyond the last valid entry of the journal */
+ struct recovery_point tail_recovery_point;
+ /* The location of the next recovery journal entry to apply */
+ struct recovery_point next_recovery_point;
+ /* The journal point to give to the next synthesized decref */
+ struct journal_point next_journal_point;
+ /* The number of entries played into slab journals */
+ size_t entries_added_to_slab_journals;
+
+ /* These fields are only used during read-only rebuild */
+ page_count_t page_to_fetch;
+	/* The number of leaf pages in the block map */
+	page_count_t leaf_pages;
+	/* The last slot of the block map */
+ struct block_map_slot last_slot;
+
+ /*
+ * The page completions used for playing the journal into the block map, and, during
+ * read-only rebuild, for rebuilding the reference counts from the block map.
+ */
+ struct vdo_page_completion page_completions[];
+};
+
+/*
+ * This is a min_heap callback function that orders numbered_block_mappings using the
+ * 'block_map_slot' field as the primary key and the mapping 'number' field as the secondary key.
+ * Using the mapping number preserves the journal order of entries for the same slot, allowing us
+ * to sort by slot while still ensuring we replay all entries with the same slot in the exact order
+ * as they appeared in the journal.
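+ * For example, two mappings for the same slot numbered 7 and 12 always sort with 7 first, so
+ * replay applies the later entry last and its mapping wins.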
+ */
+static bool mapping_is_less_than(const void *item1, const void *item2)
+{
+ const struct numbered_block_mapping *mapping1 =
+ (const struct numbered_block_mapping *) item1;
+ const struct numbered_block_mapping *mapping2 =
+ (const struct numbered_block_mapping *) item2;
+
+ if (mapping1->block_map_slot.pbn != mapping2->block_map_slot.pbn)
+ return mapping1->block_map_slot.pbn < mapping2->block_map_slot.pbn;
+
+ if (mapping1->block_map_slot.slot != mapping2->block_map_slot.slot)
+ return mapping1->block_map_slot.slot < mapping2->block_map_slot.slot;
+
+ if (mapping1->number != mapping2->number)
+ return mapping1->number < mapping2->number;
+
+	return false;
+}
+
+static void swap_mappings(void *item1, void *item2)
+{
+ struct numbered_block_mapping *mapping1 = item1;
+ struct numbered_block_mapping *mapping2 = item2;
+
+ swap(*mapping1, *mapping2);
+}
+
+static const struct min_heap_callbacks repair_min_heap = {
+ .elem_size = sizeof(struct numbered_block_mapping),
+ .less = mapping_is_less_than,
+ .swp = swap_mappings,
+};
+
+static struct numbered_block_mapping *sort_next_heap_element(struct repair_completion *repair)
+{
+ struct min_heap *heap = &repair->replay_heap;
+ struct numbered_block_mapping *last;
+
+ if (heap->nr == 0)
+ return NULL;
+
+ /*
+	 * Swap the next heap element with the last one on the heap, popping it off
+	 * the heap, then restore the heap invariant and return a pointer to the
+	 * popped element.
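+	 * Successive calls thus deposit the sorted entries at the tail of the
+	 * array, which recover_block_map() consumes by walking backward from
+	 * the end.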
+ */
+ last = &repair->entries[--heap->nr];
+ swap_mappings(heap->data, last);
+ min_heapify(heap, 0, &repair_min_heap);
+ return last;
+}
+
+/**
+ * as_repair_completion() - Convert a generic completion to a repair_completion.
+ * @completion: The completion to convert.
+ *
+ * Return: The repair_completion.
+ */
+static inline struct repair_completion * __must_check
+as_repair_completion(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_REPAIR_COMPLETION);
+ return container_of(completion, struct repair_completion, completion);
+}
+
+static void prepare_repair_completion(struct repair_completion *repair,
+ vdo_action_fn callback, enum vdo_zone_type zone_type)
+{
+ struct vdo_completion *completion = &repair->completion;
+ const struct thread_config *thread_config = &completion->vdo->thread_config;
+ thread_id_t thread_id;
+
+	/* All block map access is done on a single thread, so use logical zone 0. */
+ thread_id = ((zone_type == VDO_ZONE_TYPE_LOGICAL) ?
+ thread_config->logical_threads[0] :
+ thread_config->admin_thread);
+ vdo_reset_completion(completion);
+ vdo_set_completion_callback(completion, callback, thread_id);
+}
+
+static void launch_repair_completion(struct repair_completion *repair,
+ vdo_action_fn callback, enum vdo_zone_type zone_type)
+{
+ prepare_repair_completion(repair, callback, zone_type);
+ vdo_launch_completion(&repair->completion);
+}
+
+static void uninitialize_vios(struct repair_completion *repair)
+{
+ while (repair->vio_count > 0)
+ free_vio_components(&repair->vios[--repair->vio_count]);
+
+ vdo_free(vdo_forget(repair->vios));
+}
+
+static void free_repair_completion(struct repair_completion *repair)
+{
+ if (repair == NULL)
+ return;
+
+ /*
+	 * We do this here because this function is the only common bottleneck for all cleanup
+ * paths.
+ */
+ repair->completion.vdo->block_map->zones[0].page_cache.rebuilding = false;
+
+ uninitialize_vios(repair);
+ vdo_free(vdo_forget(repair->journal_data));
+ vdo_free(vdo_forget(repair->entries));
+ vdo_free(repair);
+}
+
+static void finish_repair(struct vdo_completion *completion)
+{
+ struct vdo_completion *parent = completion->parent;
+ struct vdo *vdo = completion->vdo;
+ struct repair_completion *repair = as_repair_completion(completion);
+
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ if (vdo->load_state != VDO_REBUILD_FOR_UPGRADE)
+ vdo->states.vdo.complete_recoveries++;
+
+ vdo_initialize_recovery_journal_post_repair(vdo->recovery_journal,
+ vdo->states.vdo.complete_recoveries,
+ repair->highest_tail,
+ repair->logical_blocks_used,
+ repair->block_map_data_blocks);
+ free_repair_completion(vdo_forget(repair));
+
+ if (vdo_state_requires_read_only_rebuild(vdo->load_state)) {
+ vdo_log_info("Read-only rebuild complete");
+ vdo_launch_completion(parent);
+ return;
+ }
+
+ /* FIXME: shouldn't this say either "recovery" or "repair"? */
+ vdo_log_info("Rebuild complete");
+
+ /*
+ * Now that we've freed the repair completion and its vast array of journal entries, we
+ * can allocate refcounts.
+ */
+ vdo_continue_completion(parent, vdo_allocate_reference_counters(vdo->depot));
+}
+
+/**
+ * abort_repair() - Handle a repair error.
+ * @completion: The repair completion.
+ */
+static void abort_repair(struct vdo_completion *completion)
+{
+ struct vdo_completion *parent = completion->parent;
+ int result = completion->result;
+ struct repair_completion *repair = as_repair_completion(completion);
+
+ if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state))
+ vdo_log_info("Read-only rebuild aborted");
+ else
+ vdo_log_warning("Recovery aborted");
+
+ free_repair_completion(vdo_forget(repair));
+ vdo_continue_completion(parent, result);
+}
+
+/**
+ * abort_on_error() - Abort a repair if there is an error.
+ * @result: The result to check.
+ * @repair: The repair completion.
+ *
+ * Return: true if the result was an error.
+ */
+static bool __must_check abort_on_error(int result, struct repair_completion *repair)
+{
+ if (result == VDO_SUCCESS)
+ return false;
+
+ vdo_fail_completion(&repair->completion, result);
+ return true;
+}
+
+/**
+ * drain_slab_depot() - Flush out all dirty refcount blocks now that they have been rebuilt or
+ *                      recovered.
+ * @completion: The repair completion.
+ */
+static void drain_slab_depot(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ struct repair_completion *repair = as_repair_completion(completion);
+ const struct admin_state_code *operation;
+
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ prepare_repair_completion(repair, finish_repair, VDO_ZONE_TYPE_ADMIN);
+ if (vdo_state_requires_read_only_rebuild(vdo->load_state)) {
+ vdo_log_info("Saving rebuilt state");
+ operation = VDO_ADMIN_STATE_REBUILDING;
+ } else {
+ vdo_log_info("Replayed %zu journal entries into slab journals",
+ repair->entries_added_to_slab_journals);
+ operation = VDO_ADMIN_STATE_RECOVERING;
+ }
+
+ vdo_drain_slab_depot(vdo->depot, operation, completion);
+}
+
+/**
+ * flush_block_map_updates() - Flush the block map now that all the reference counts are rebuilt.
+ * @completion: The repair completion.
+ *
+ * This callback is registered in fetch_page().
+ */
+static void flush_block_map_updates(struct vdo_completion *completion)
+{
+ vdo_assert_on_admin_thread(completion->vdo, __func__);
+
+ vdo_log_info("Flushing block map changes");
+ prepare_repair_completion(as_repair_completion(completion), drain_slab_depot,
+ VDO_ZONE_TYPE_ADMIN);
+ vdo_drain_block_map(completion->vdo->block_map, VDO_ADMIN_STATE_RECOVERING,
+ completion);
+}
+
+static bool fetch_page(struct repair_completion *repair,
+ struct vdo_completion *completion);
+
+/**
+ * handle_page_load_error() - Handle an error loading a page.
+ * @completion: The vdo_page_completion.
+ */
+static void handle_page_load_error(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = completion->parent;
+
+ repair->outstanding--;
+ vdo_set_completion_result(&repair->completion, completion->result);
+ vdo_release_page_completion(completion);
+ fetch_page(repair, completion);
+}
+
+/**
+ * unmap_entry() - Unmap an invalid entry and indicate that its page must be written out.
+ * @page: The page containing the entries
+ * @completion: The page_completion for writing the page
+ * @slot: The slot to unmap
+ */
+static void unmap_entry(struct block_map_page *page, struct vdo_completion *completion,
+ slot_number_t slot)
+{
+ page->entries[slot] = UNMAPPED_BLOCK_MAP_ENTRY;
+ vdo_request_page_write(completion);
+}
+
+/**
+ * remove_out_of_bounds_entries() - Unmap entries which are outside the logical space.
+ * @page: The page containing the entries
+ * @completion: The page_completion for writing the page
+ * @start: The first slot to check
+ */
+static void remove_out_of_bounds_entries(struct block_map_page *page,
+ struct vdo_completion *completion,
+ slot_number_t start)
+{
+ slot_number_t slot;
+
+ for (slot = start; slot < VDO_BLOCK_MAP_ENTRIES_PER_PAGE; slot++) {
+ struct data_location mapping = vdo_unpack_block_map_entry(&page->entries[slot]);
+
+ if (vdo_is_mapped_location(&mapping))
+ unmap_entry(page, completion, slot);
+ }
+}
+
+/**
+ * process_slot() - Update the reference counts for a single entry.
+ * @page: The page containing the entries
+ * @completion: The page_completion for writing the page
+ * @slot: The slot to check
+ *
+ * Return: true if the entry was a valid mapping
+ */
+static bool process_slot(struct block_map_page *page, struct vdo_completion *completion,
+ slot_number_t slot)
+{
+ struct slab_depot *depot = completion->vdo->depot;
+ int result;
+ struct data_location mapping = vdo_unpack_block_map_entry(&page->entries[slot]);
+
+ if (!vdo_is_valid_location(&mapping)) {
+ /* This entry is invalid, so remove it from the page. */
+ unmap_entry(page, completion, slot);
+ return false;
+ }
+
+ if (!vdo_is_mapped_location(&mapping))
+ return false;
+
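+	/* Mappings to the zero block count as used logical blocks but have no reference count to adjust. */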
+ if (mapping.pbn == VDO_ZERO_BLOCK)
+ return true;
+
+ if (!vdo_is_physical_data_block(depot, mapping.pbn)) {
+ /*
+ * This is a nonsense mapping. Remove it from the map so we're at least consistent
+ * and mark the page dirty.
+ */
+ unmap_entry(page, completion, slot);
+ return false;
+ }
+
+ result = vdo_adjust_reference_count_for_rebuild(depot, mapping.pbn,
+ VDO_JOURNAL_DATA_REMAPPING);
+ if (result == VDO_SUCCESS)
+ return true;
+
+ vdo_log_error_strerror(result,
+ "Could not adjust reference count for PBN %llu, slot %u mapped to PBN %llu",
+ (unsigned long long) vdo_get_block_map_page_pbn(page),
+ slot, (unsigned long long) mapping.pbn);
+ unmap_entry(page, completion, slot);
+ return false;
+}
+
+/**
+ * rebuild_reference_counts_from_page() - Rebuild reference counts from a block map page.
+ * @repair: The repair completion.
+ * @completion: The page completion holding the page.
+ */
+static void rebuild_reference_counts_from_page(struct repair_completion *repair,
+ struct vdo_completion *completion)
+{
+ slot_number_t slot, last_slot;
+ struct block_map_page *page;
+ int result;
+
+ result = vdo_get_cached_page(completion, &page);
+ if (result != VDO_SUCCESS) {
+ vdo_set_completion_result(&repair->completion, result);
+ return;
+ }
+
+ if (!page->header.initialized)
+ return;
+
+ /* Remove any bogus entries which exist beyond the end of the logical space. */
+ if (vdo_get_block_map_page_pbn(page) == repair->last_slot.pbn) {
+ last_slot = repair->last_slot.slot;
+ remove_out_of_bounds_entries(page, completion, last_slot);
+ } else {
+ last_slot = VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+ }
+
+ /* Inform the slab depot of all entries on this page. */
+ for (slot = 0; slot < last_slot; slot++) {
+ if (process_slot(page, completion, slot))
+ repair->logical_blocks_used++;
+ }
+}
+
+/**
+ * page_loaded() - Process a page which has just been loaded.
+ * @completion: The vdo_page_completion for the fetched page.
+ *
+ * This callback is registered by fetch_page().
+ */
+static void page_loaded(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = completion->parent;
+
+ repair->outstanding--;
+ rebuild_reference_counts_from_page(repair, completion);
+ vdo_release_page_completion(completion);
+
+ /* Advance progress to the next page, and fetch the next page we haven't yet requested. */
+ fetch_page(repair, completion);
+}
+
+static physical_block_number_t get_pbn_to_fetch(struct repair_completion *repair,
+ struct block_map *block_map)
+{
+ physical_block_number_t pbn = VDO_ZERO_BLOCK;
+
+ if (repair->completion.result != VDO_SUCCESS)
+ return VDO_ZERO_BLOCK;
+
+ while ((pbn == VDO_ZERO_BLOCK) && (repair->page_to_fetch < repair->leaf_pages))
+ pbn = vdo_find_block_map_page_pbn(block_map, repair->page_to_fetch++);
+
+ if (vdo_is_physical_data_block(repair->completion.vdo->depot, pbn))
+ return pbn;
+
+ vdo_set_completion_result(&repair->completion, VDO_BAD_MAPPING);
+ return VDO_ZERO_BLOCK;
+}
+
+/**
+ * fetch_page() - Fetch a page from the block map.
+ * @repair: The repair_completion.
+ * @completion: The page completion to use.
+ *
+ * Return: true if the rebuild is complete.
+ */
+static bool fetch_page(struct repair_completion *repair,
+ struct vdo_completion *completion)
+{
+ struct vdo_page_completion *page_completion = (struct vdo_page_completion *) completion;
+ struct block_map *block_map = repair->completion.vdo->block_map;
+ physical_block_number_t pbn = get_pbn_to_fetch(repair, block_map);
+
+ if (pbn != VDO_ZERO_BLOCK) {
+ repair->outstanding++;
+ /*
+ * We must set the requeue flag here to ensure that we don't blow the stack if all
+ * the requested pages are already in the cache or get load errors.
+ */
+ vdo_get_page(page_completion, &block_map->zones[0], pbn, true, repair,
+ page_loaded, handle_page_load_error, true);
+ }
+
+ if (repair->outstanding > 0)
+ return false;
+
+ launch_repair_completion(repair, flush_block_map_updates, VDO_ZONE_TYPE_ADMIN);
+ return true;
+}
+
+/**
+ * rebuild_from_leaves() - Rebuild reference counts from the leaf block map pages.
+ * @completion: The repair completion.
+ *
+ * Rebuilds reference counts from the leaf block map pages now that reference counts have been
+ * rebuilt from the interior tree pages (which have been loaded in the process). This callback is
+ * registered in rebuild_reference_counts().
+ */
+static void rebuild_from_leaves(struct vdo_completion *completion)
+{
+ page_count_t i;
+ struct repair_completion *repair = as_repair_completion(completion);
+ struct block_map *map = completion->vdo->block_map;
+
+ repair->logical_blocks_used = 0;
+
+ /*
+ * The PBN calculation doesn't work until the tree pages have been loaded, so we can't set
+ * this value at the start of repair.
+ */
+ repair->leaf_pages = vdo_compute_block_map_page_count(map->entry_count);
+ repair->last_slot = (struct block_map_slot) {
+ .slot = map->entry_count % VDO_BLOCK_MAP_ENTRIES_PER_PAGE,
+ .pbn = vdo_find_block_map_page_pbn(map, repair->leaf_pages - 1),
+ };
+ if (repair->last_slot.slot == 0)
+ repair->last_slot.slot = VDO_BLOCK_MAP_ENTRIES_PER_PAGE;
+
+ for (i = 0; i < repair->page_count; i++) {
+ if (fetch_page(repair, &repair->page_completions[i].completion)) {
+ /*
+			 * The rebuild has already moved on, so it isn't safe, nor is
+			 * there any need, to launch more fetches.
+ */
+ return;
+ }
+ }
+}
+
+/**
+ * process_entry() - Process a single entry from the block map tree.
+ * @pbn: A pbn which holds a block map tree page.
+ * @completion: The parent completion of the traversal.
+ *
+ * Implements vdo_entry_callback_fn.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int process_entry(physical_block_number_t pbn, struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion);
+ struct slab_depot *depot = completion->vdo->depot;
+ int result;
+
+ if ((pbn == VDO_ZERO_BLOCK) || !vdo_is_physical_data_block(depot, pbn)) {
+ return vdo_log_error_strerror(VDO_BAD_CONFIGURATION,
+ "PBN %llu out of range",
+ (unsigned long long) pbn);
+ }
+
+ result = vdo_adjust_reference_count_for_rebuild(depot, pbn,
+ VDO_JOURNAL_BLOCK_MAP_REMAPPING);
+ if (result != VDO_SUCCESS) {
+ return vdo_log_error_strerror(result,
+ "Could not adjust reference count for block map tree PBN %llu",
+ (unsigned long long) pbn);
+ }
+
+ repair->block_map_data_blocks++;
+ return VDO_SUCCESS;
+}
+
+static void rebuild_reference_counts(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion);
+ struct vdo *vdo = completion->vdo;
+ struct vdo_page_cache *cache = &vdo->block_map->zones[0].page_cache;
+
+ /* We must allocate ref_counts before we can rebuild them. */
+ if (abort_on_error(vdo_allocate_reference_counters(vdo->depot), repair))
+ return;
+
+ /*
+ * Completion chaining from page cache hits can lead to stack overflow during the rebuild,
+ * so clear out the cache before this rebuild phase.
+ */
+ if (abort_on_error(vdo_invalidate_page_cache(cache), repair))
+ return;
+
+ prepare_repair_completion(repair, rebuild_from_leaves, VDO_ZONE_TYPE_LOGICAL);
+ vdo_traverse_forest(vdo->block_map, process_entry, completion);
+}
+
+/**
+ * increment_recovery_point() - Move the given recovery point forward by one entry.
+ * @point: The recovery point to advance.
+ */
+static void increment_recovery_point(struct recovery_point *point)
+{
+ if (++point->entry_count < RECOVERY_JOURNAL_ENTRIES_PER_SECTOR)
+ return;
+
+ point->entry_count = 0;
+ if (point->sector_count < (VDO_SECTORS_PER_BLOCK - 1)) {
+ point->sector_count++;
+ return;
+ }
+
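+	/* Sector 0 of each journal block holds the header, so a new block's entries start at sector 1. */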
+ point->sequence_number++;
+ point->sector_count = 1;
+}
+
+/**
+ * advance_points() - Advance the current recovery and journal points.
+ * @repair: The repair_completion whose points are to be advanced.
+ * @entries_per_block: The number of entries in a recovery journal block.
+ */
+static void advance_points(struct repair_completion *repair,
+ journal_entry_count_t entries_per_block)
+{
+ if (!repair->next_recovery_point.increment_applied) {
+ repair->next_recovery_point.increment_applied = true;
+ return;
+ }
+
+ increment_recovery_point(&repair->next_recovery_point);
+ vdo_advance_journal_point(&repair->next_journal_point, entries_per_block);
+ repair->next_recovery_point.increment_applied = false;
+}
+
+/**
+ * before_recovery_point() - Check whether the first point precedes the second point.
+ * @first: The first recovery point.
+ * @second: The second recovery point.
+ *
+ * Return: true if the first point precedes the second point.
+ */
+static bool __must_check before_recovery_point(const struct recovery_point *first,
+ const struct recovery_point *second)
+{
+ if (first->sequence_number < second->sequence_number)
+ return true;
+
+ if (first->sequence_number > second->sequence_number)
+ return false;
+
+ if (first->sector_count < second->sector_count)
+ return true;
+
+ return ((first->sector_count == second->sector_count) &&
+ (first->entry_count < second->entry_count));
+}
+
+static struct packed_journal_sector * __must_check get_sector(struct recovery_journal *journal,
+ char *journal_data,
+ sequence_number_t sequence,
+ u8 sector_number)
+{
+ off_t offset;
+
+ offset = ((vdo_get_recovery_journal_block_number(journal, sequence) * VDO_BLOCK_SIZE) +
+ (VDO_SECTOR_SIZE * sector_number));
+ return (struct packed_journal_sector *) (journal_data + offset);
+}
+
+/**
+ * get_entry() - Unpack the recovery journal entry associated with the given recovery point.
+ * @repair: The repair completion.
+ * @point: The recovery point.
+ *
+ * Return: The unpacked contents of the matching recovery journal entry.
+ */
+static struct recovery_journal_entry get_entry(const struct repair_completion *repair,
+ const struct recovery_point *point)
+{
+ struct packed_journal_sector *sector;
+
+ sector = get_sector(repair->completion.vdo->recovery_journal,
+ repair->journal_data, point->sequence_number,
+ point->sector_count);
+ return vdo_unpack_recovery_journal_entry(&sector->entries[point->entry_count]);
+}
+
+/**
+ * validate_recovery_journal_entry() - Validate a recovery journal entry.
+ * @vdo: The vdo.
+ * @entry: The entry to validate.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int validate_recovery_journal_entry(const struct vdo *vdo,
+ const struct recovery_journal_entry *entry)
+{
+ if ((entry->slot.pbn >= vdo->states.vdo.config.physical_blocks) ||
+ (entry->slot.slot >= VDO_BLOCK_MAP_ENTRIES_PER_PAGE) ||
+ !vdo_is_valid_location(&entry->mapping) ||
+ !vdo_is_valid_location(&entry->unmapping) ||
+ !vdo_is_physical_data_block(vdo->depot, entry->mapping.pbn) ||
+ !vdo_is_physical_data_block(vdo->depot, entry->unmapping.pbn)) {
+ return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
+ "Invalid entry: %s (%llu, %u) from %llu to %llu is not within bounds",
+ vdo_get_journal_operation_name(entry->operation),
+ (unsigned long long) entry->slot.pbn,
+ entry->slot.slot,
+ (unsigned long long) entry->unmapping.pbn,
+ (unsigned long long) entry->mapping.pbn);
+ }
+
+ if ((entry->operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) &&
+ (vdo_is_state_compressed(entry->mapping.state) ||
+ (entry->mapping.pbn == VDO_ZERO_BLOCK) ||
+ (entry->unmapping.state != VDO_MAPPING_STATE_UNMAPPED) ||
+ (entry->unmapping.pbn != VDO_ZERO_BLOCK))) {
+ return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
+ "Invalid entry: %s (%llu, %u) from %llu to %llu is not a valid tree mapping",
+ vdo_get_journal_operation_name(entry->operation),
+ (unsigned long long) entry->slot.pbn,
+ entry->slot.slot,
+ (unsigned long long) entry->unmapping.pbn,
+ (unsigned long long) entry->mapping.pbn);
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * add_slab_journal_entries() - Replay recovery journal entries into the slab journals of the
+ * allocator currently being recovered.
+ * @completion: The allocator completion.
+ *
+ * Waits for slab journal tail block space when necessary. This method is its own callback.
+ */
+static void add_slab_journal_entries(struct vdo_completion *completion)
+{
+ struct recovery_point *recovery_point;
+ struct repair_completion *repair = completion->parent;
+ struct vdo *vdo = completion->vdo;
+ struct recovery_journal *journal = vdo->recovery_journal;
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+
+ /* Get ready in case we need to enqueue again. */
+ vdo_prepare_completion(completion, add_slab_journal_entries,
+ vdo_notify_slab_journals_are_recovered,
+ completion->callback_thread_id, repair);
+ for (recovery_point = &repair->next_recovery_point;
+ before_recovery_point(recovery_point, &repair->tail_recovery_point);
+ advance_points(repair, journal->entries_per_block)) {
+ int result;
+ physical_block_number_t pbn;
+ struct vdo_slab *slab;
+ struct recovery_journal_entry entry = get_entry(repair, recovery_point);
+ bool increment = !repair->next_recovery_point.increment_applied;
+
+ if (increment) {
+ result = validate_recovery_journal_entry(vdo, &entry);
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(vdo, result);
+ vdo_fail_completion(completion, result);
+ return;
+ }
+
+ pbn = entry.mapping.pbn;
+ } else {
+ pbn = entry.unmapping.pbn;
+ }
+
+ if (pbn == VDO_ZERO_BLOCK)
+ continue;
+
+ slab = vdo_get_slab(vdo->depot, pbn);
+ if (slab->allocator != allocator)
+ continue;
+
+ if (!vdo_attempt_replay_into_slab(slab, pbn, entry.operation, increment,
+ &repair->next_journal_point,
+ completion))
+ return;
+
+ repair->entries_added_to_slab_journals++;
+ }
+
+ vdo_notify_slab_journals_are_recovered(completion);
+}
+
+/**
+ * vdo_replay_into_slab_journals() - Replay recovery journal entries in the slab journals of slabs
+ * owned by a given block_allocator.
+ * @allocator: The allocator whose slab journals are to be recovered.
+ * @context: The slab depot load context supplied by a recovery when it loads the depot.
+ */
+void vdo_replay_into_slab_journals(struct block_allocator *allocator, void *context)
+{
+ struct vdo_completion *completion = &allocator->completion;
+ struct repair_completion *repair = context;
+ struct vdo *vdo = completion->vdo;
+
+ vdo_assert_on_physical_zone_thread(vdo, allocator->zone_number, __func__);
+ if (repair->entry_count == 0) {
+ /* there's nothing to replay */
+ repair->logical_blocks_used = vdo->recovery_journal->logical_blocks_used;
+ repair->block_map_data_blocks = vdo->recovery_journal->block_map_data_blocks;
+ vdo_notify_slab_journals_are_recovered(completion);
+ return;
+ }
+
+ repair->next_recovery_point = (struct recovery_point) {
+ .sequence_number = repair->slab_journal_head,
+ .sector_count = 1,
+ .entry_count = 0,
+ };
+
+ repair->next_journal_point = (struct journal_point) {
+ .sequence_number = repair->slab_journal_head,
+ .entry_count = 0,
+ };
+
+ vdo_log_info("Replaying entries into slab journals for zone %u",
+ allocator->zone_number);
+ completion->parent = repair;
+ add_slab_journal_entries(completion);
+}
+
+static void load_slab_depot(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion);
+ const struct admin_state_code *operation;
+
+ vdo_assert_on_admin_thread(completion->vdo, __func__);
+
+ if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state)) {
+ prepare_repair_completion(repair, rebuild_reference_counts,
+ VDO_ZONE_TYPE_LOGICAL);
+ operation = VDO_ADMIN_STATE_LOADING_FOR_REBUILD;
+ } else {
+ prepare_repair_completion(repair, drain_slab_depot, VDO_ZONE_TYPE_ADMIN);
+ operation = VDO_ADMIN_STATE_LOADING_FOR_RECOVERY;
+ }
+
+ vdo_load_slab_depot(completion->vdo->depot, operation, completion, repair);
+}
+
+static void flush_block_map(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion);
+ const struct admin_state_code *operation;
+
+ vdo_assert_on_admin_thread(completion->vdo, __func__);
+
+ vdo_log_info("Flushing block map changes");
+ prepare_repair_completion(repair, load_slab_depot, VDO_ZONE_TYPE_ADMIN);
+ operation = (vdo_state_requires_read_only_rebuild(completion->vdo->load_state) ?
+ VDO_ADMIN_STATE_REBUILDING :
+ VDO_ADMIN_STATE_RECOVERING);
+ vdo_drain_block_map(completion->vdo->block_map, operation, completion);
+}
+
+static bool finish_if_done(struct repair_completion *repair)
+{
+ /* Pages are still being launched or there is still work to do */
+ if (repair->launching || (repair->outstanding > 0))
+ return false;
+
+ if (repair->completion.result != VDO_SUCCESS) {
+ page_count_t i;
+
+ for (i = 0; i < repair->page_count; i++) {
+ struct vdo_page_completion *page_completion =
+ &repair->page_completions[i];
+
+ if (page_completion->ready)
+ vdo_release_page_completion(&page_completion->completion);
+ }
+
+ vdo_launch_completion(&repair->completion);
+ return true;
+ }
+
+ if (repair->current_entry >= repair->entries)
+ return false;
+
+ launch_repair_completion(repair, flush_block_map, VDO_ZONE_TYPE_ADMIN);
+ return true;
+}
+
+static void abort_block_map_recovery(struct repair_completion *repair, int result)
+{
+ vdo_set_completion_result(&repair->completion, result);
+ finish_if_done(repair);
+}
+
+/**
+ * find_entry_starting_next_page() - Find the first journal entry after a given entry which is not
+ * on the same block map page.
+ * @repair: The repair completion.
+ * @current_entry: The entry to search from.
+ * @needs_sort: Whether sorting is needed to proceed.
+ *
+ * Return: Pointer to the first later journal entry on a different block map page, or a pointer to
+ * just before the journal entries if no subsequent entry is on a different block map page.
+ */
+static struct numbered_block_mapping *
+find_entry_starting_next_page(struct repair_completion *repair,
+ struct numbered_block_mapping *current_entry, bool needs_sort)
+{
+ size_t current_page;
+
+ /* If current_entry is invalid, return immediately. */
+ if (current_entry < repair->entries)
+ return current_entry;
+
+ current_page = current_entry->block_map_slot.pbn;
+
+ /* Decrement current_entry until it's out of bounds or on a different page. */
+ while ((current_entry >= repair->entries) &&
+ (current_entry->block_map_slot.pbn == current_page)) {
+ if (needs_sort) {
+ struct numbered_block_mapping *just_sorted_entry =
+ sort_next_heap_element(repair);
+ VDO_ASSERT_LOG_ONLY(just_sorted_entry < current_entry,
+ "heap is returning elements in an unexpected order");
+ }
+
+ current_entry--;
+ }
+
+ return current_entry;
+}
+
+/*
+ * Apply a range of journal entries [starting_entry, ending_entry) to a block
+ * map page.
+ */
+static void apply_journal_entries_to_page(struct block_map_page *page,
+ struct numbered_block_mapping *starting_entry,
+ struct numbered_block_mapping *ending_entry)
+{
+ struct numbered_block_mapping *current_entry = starting_entry;
+
+ while (current_entry != ending_entry) {
+ page->entries[current_entry->block_map_slot.slot] = current_entry->block_map_entry;
+ current_entry--;
+ }
+}
+
+static void recover_ready_pages(struct repair_completion *repair,
+ struct vdo_completion *completion);
+
+static void block_map_page_loaded(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion->parent);
+
+ repair->outstanding--;
+ if (!repair->launching)
+ recover_ready_pages(repair, completion);
+}
+
+static void handle_block_map_page_load_error(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion->parent);
+
+ repair->outstanding--;
+ abort_block_map_recovery(repair, completion->result);
+}
+
+static void fetch_block_map_page(struct repair_completion *repair,
+ struct vdo_completion *completion)
+{
+ physical_block_number_t pbn;
+
+ if (repair->current_unfetched_entry < repair->entries)
+ /* Nothing left to fetch. */
+ return;
+
+ /* Fetch the next page we haven't yet requested. */
+ pbn = repair->current_unfetched_entry->block_map_slot.pbn;
+ repair->current_unfetched_entry =
+ find_entry_starting_next_page(repair, repair->current_unfetched_entry,
+ true);
+ repair->outstanding++;
+ vdo_get_page(((struct vdo_page_completion *) completion),
+ &repair->completion.vdo->block_map->zones[0], pbn, true,
+ &repair->completion, block_map_page_loaded,
+ handle_block_map_page_load_error, false);
+}
+
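+/* Advance to the next page completion, wrapping back to the first at the end of the array. */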
+static struct vdo_page_completion *get_next_page_completion(struct repair_completion *repair,
+ struct vdo_page_completion *completion)
+{
+ completion++;
+ if (completion == (&repair->page_completions[repair->page_count]))
+ completion = &repair->page_completions[0];
+ return completion;
+}
+
+static void recover_ready_pages(struct repair_completion *repair,
+ struct vdo_completion *completion)
+{
+ struct vdo_page_completion *page_completion = (struct vdo_page_completion *) completion;
+
+ if (finish_if_done(repair))
+ return;
+
+ if (repair->pbn != page_completion->pbn)
+ return;
+
+ while (page_completion->ready) {
+ struct numbered_block_mapping *start_of_next_page;
+ struct block_map_page *page;
+ int result;
+
+ result = vdo_get_cached_page(completion, &page);
+ if (result != VDO_SUCCESS) {
+ abort_block_map_recovery(repair, result);
+ return;
+ }
+
+ start_of_next_page =
+ find_entry_starting_next_page(repair, repair->current_entry,
+ false);
+ apply_journal_entries_to_page(page, repair->current_entry,
+ start_of_next_page);
+ repair->current_entry = start_of_next_page;
+ vdo_request_page_write(completion);
+ vdo_release_page_completion(completion);
+
+ if (finish_if_done(repair))
+ return;
+
+ repair->pbn = repair->current_entry->block_map_slot.pbn;
+ fetch_block_map_page(repair, completion);
+ page_completion = get_next_page_completion(repair, page_completion);
+ completion = &page_completion->completion;
+ }
+}
+
+static void recover_block_map(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = as_repair_completion(completion);
+ struct vdo *vdo = completion->vdo;
+ struct numbered_block_mapping *first_sorted_entry;
+ page_count_t i;
+
+ vdo_assert_on_logical_zone_thread(vdo, 0, __func__);
+
+ /* Suppress block map errors. */
+ vdo->block_map->zones[0].page_cache.rebuilding =
+ vdo_state_requires_read_only_rebuild(vdo->load_state);
+
+ if (repair->block_map_entry_count == 0) {
+ vdo_log_info("Replaying 0 recovery entries into block map");
+ vdo_free(vdo_forget(repair->journal_data));
+ launch_repair_completion(repair, load_slab_depot, VDO_ZONE_TYPE_ADMIN);
+ return;
+ }
+
+ /*
+ * Organize the journal entries into a binary heap so we can iterate over them in sorted
+ * order incrementally, avoiding an expensive sort call.
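+	 * (Building the heap is O(n); the per-entry log(n) cost of sorting is
+	 * paid incrementally as entries are popped between page fetches.)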
+ */
+ repair->replay_heap = (struct min_heap) {
+ .data = repair->entries,
+ .nr = repair->block_map_entry_count,
+ .size = repair->block_map_entry_count,
+ };
+ min_heapify_all(&repair->replay_heap, &repair_min_heap);
+
+ vdo_log_info("Replaying %zu recovery entries into block map",
+ repair->block_map_entry_count);
+
+ repair->current_entry = &repair->entries[repair->block_map_entry_count - 1];
+ first_sorted_entry = sort_next_heap_element(repair);
+ VDO_ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry,
+ "heap is returning elements in an unexpected order");
+
+ /* Prevent any page from being processed until all pages have been launched. */
+ repair->launching = true;
+ repair->pbn = repair->current_entry->block_map_slot.pbn;
+ repair->current_unfetched_entry = repair->current_entry;
+ for (i = 0; i < repair->page_count; i++) {
+ if (repair->current_unfetched_entry < repair->entries)
+ break;
+
+ fetch_block_map_page(repair, &repair->page_completions[i].completion);
+ }
+ repair->launching = false;
+
+ /* Process any ready pages. */
+ recover_ready_pages(repair, &repair->page_completions[0].completion);
+}
+
+/**
+ * get_recovery_journal_block_header() - Get the block header for a block at a position in the
+ * journal data and unpack it.
+ * @journal: The recovery journal.
+ * @data: The recovery journal data.
+ * @sequence: The sequence number.
+ *
+ * Return: The unpacked header.
+ */
+static struct recovery_block_header __must_check
+get_recovery_journal_block_header(struct recovery_journal *journal, char *data,
+ sequence_number_t sequence)
+{
+ physical_block_number_t pbn =
+ vdo_get_recovery_journal_block_number(journal, sequence);
+ char *header = &data[pbn * VDO_BLOCK_SIZE];
+
+ return vdo_unpack_recovery_block_header((struct packed_journal_header *) header);
+}
+
+/**
+ * is_valid_recovery_journal_block() - Determine whether the given header describes a valid block
+ * for the given journal.
+ * @journal: The journal to use.
+ * @header: The unpacked block header to check.
+ * @old_ok: Whether an old format header is valid.
+ *
+ * A block is not valid if it is unformatted, or if it is older than the last successful recovery
+ * or reformat.
+ *
+ * Return: True if the header is valid.
+ */
+static bool __must_check is_valid_recovery_journal_block(const struct recovery_journal *journal,
+ const struct recovery_block_header *header,
+ bool old_ok)
+{
+ if ((header->nonce != journal->nonce) ||
+ (header->recovery_count != journal->recovery_count))
+ return false;
+
+ if (header->metadata_type == VDO_METADATA_RECOVERY_JOURNAL_2)
+ return (header->entry_count <= journal->entries_per_block);
+
+ return (old_ok &&
+ (header->metadata_type == VDO_METADATA_RECOVERY_JOURNAL) &&
+ (header->entry_count <= RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK));
+}
+
+/**
+ * is_exact_recovery_journal_block() - Determine whether the given header describes the exact block
+ * indicated.
+ * @journal: The journal to use.
+ * @header: The unpacked block header to check.
+ * @sequence: The expected sequence number.
+ * @type: The expected metadata type.
+ *
+ * Return: True if the block matches.
+ */
+static bool __must_check is_exact_recovery_journal_block(const struct recovery_journal *journal,
+ const struct recovery_block_header *header,
+ sequence_number_t sequence,
+ enum vdo_metadata_type type)
+{
+ return ((header->metadata_type == type) &&
+ (header->sequence_number == sequence) &&
+ (is_valid_recovery_journal_block(journal, header, true)));
+}
+
+/**
+ * find_recovery_journal_head_and_tail() - Find the tail and head of the journal.
+ * @repair: The repair completion.
+ *
+ * Return: True if there were valid journal blocks.
+ */
+static bool find_recovery_journal_head_and_tail(struct repair_completion *repair)
+{
+ struct recovery_journal *journal = repair->completion.vdo->recovery_journal;
+ bool found_entries = false;
+ physical_block_number_t i;
+
+ /*
+ * Ensure that we don't replay old entries since we know the tail recorded in the super
+ * block must be a lower bound. Not doing so can result in extra data loss by setting the
+ * tail too early.
+ */
+ repair->highest_tail = journal->tail;
+ for (i = 0; i < journal->size; i++) {
+ struct recovery_block_header header =
+ get_recovery_journal_block_header(journal, repair->journal_data, i);
+
+ if (!is_valid_recovery_journal_block(journal, &header, true)) {
+ /* This block is old or incorrectly formatted */
+ continue;
+ }
+
+ if (vdo_get_recovery_journal_block_number(journal, header.sequence_number) != i) {
+ /* This block is in the wrong location */
+ continue;
+ }
+
+ if (header.sequence_number >= repair->highest_tail) {
+ found_entries = true;
+ repair->highest_tail = header.sequence_number;
+ }
+
+ if (!found_entries)
+ continue;
+
+ if (header.block_map_head > repair->block_map_head)
+ repair->block_map_head = header.block_map_head;
+
+ if (header.slab_journal_head > repair->slab_journal_head)
+ repair->slab_journal_head = header.slab_journal_head;
+ }
+
+ return found_entries;
+}
+
+/**
+ * unpack_entry() - Unpack a recovery journal entry in either format.
+ * @vdo: The vdo.
+ * @packed: The entry to unpack.
+ * @format: The expected format of the entry.
+ * @entry: The unpacked entry.
+ *
+ * Return: true if the entry should be applied.
+ */
+static bool unpack_entry(struct vdo *vdo, char *packed, enum vdo_metadata_type format,
+ struct recovery_journal_entry *entry)
+{
+ if (format == VDO_METADATA_RECOVERY_JOURNAL_2) {
+ struct packed_recovery_journal_entry *packed_entry =
+ (struct packed_recovery_journal_entry *) packed;
+
+ *entry = vdo_unpack_recovery_journal_entry(packed_entry);
+ } else {
+ physical_block_number_t low32, high4;
+
+ struct packed_recovery_journal_entry_1 *packed_entry =
+ (struct packed_recovery_journal_entry_1 *) packed;
+
+ if (packed_entry->operation == VDO_JOURNAL_DATA_INCREMENT)
+ entry->operation = VDO_JOURNAL_DATA_REMAPPING;
+ else if (packed_entry->operation == VDO_JOURNAL_BLOCK_MAP_INCREMENT)
+ entry->operation = VDO_JOURNAL_BLOCK_MAP_REMAPPING;
+ else
+ return false;
+
+ low32 = __le32_to_cpu(packed_entry->pbn_low_word);
+ high4 = packed_entry->pbn_high_nibble;
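+		/*
+		 * The old format stores a 36-bit PBN as a 32-bit low word plus a
+		 * high nibble: e.g. a low word of 0x00000002 and a high nibble of
+		 * 0x1 yield PBN 0x100000002.
+		 */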
+ entry->slot = (struct block_map_slot) {
+ .pbn = ((high4 << 32) | low32),
+ .slot = (packed_entry->slot_low | (packed_entry->slot_high << 6)),
+ };
+ entry->mapping = vdo_unpack_block_map_entry(&packed_entry->block_map_entry);
+ entry->unmapping = (struct data_location) {
+ .pbn = VDO_ZERO_BLOCK,
+ .state = VDO_MAPPING_STATE_UNMAPPED,
+ };
+ }
+
+ return (validate_recovery_journal_entry(vdo, entry) == VDO_SUCCESS);
+}
+
+/**
+ * append_sector_entries() - Append an array of recovery journal entries from a journal block
+ * sector to the array of numbered mappings in the repair completion,
+ * numbering each entry in the order they are appended.
+ * @repair: The repair completion.
+ * @entries: The entries in the sector.
+ * @format: The format of the sector.
+ * @entry_count: The number of entries to append.
+ */
+static void append_sector_entries(struct repair_completion *repair, char *entries,
+ enum vdo_metadata_type format,
+ journal_entry_count_t entry_count)
+{
+ journal_entry_count_t i;
+ struct vdo *vdo = repair->completion.vdo;
+ off_t increment = ((format == VDO_METADATA_RECOVERY_JOURNAL_2)
+ ? sizeof(struct packed_recovery_journal_entry)
+ : sizeof(struct packed_recovery_journal_entry_1));
+
+ for (i = 0; i < entry_count; i++, entries += increment) {
+ struct recovery_journal_entry entry;
+
+ if (!unpack_entry(vdo, entries, format, &entry))
+ /* When recovering from read-only mode, ignore damaged entries. */
+ continue;
+
+ repair->entries[repair->block_map_entry_count] =
+ (struct numbered_block_mapping) {
+ .block_map_slot = entry.slot,
+ .block_map_entry = vdo_pack_block_map_entry(entry.mapping.pbn,
+ entry.mapping.state),
+ .number = repair->block_map_entry_count,
+ };
+ repair->block_map_entry_count++;
+ }
+}
+
+static journal_entry_count_t entries_per_sector(enum vdo_metadata_type format,
+ u8 sector_number)
+{
+ if (format == VDO_METADATA_RECOVERY_JOURNAL_2)
+ return RECOVERY_JOURNAL_ENTRIES_PER_SECTOR;
+
+ return ((sector_number == (VDO_SECTORS_PER_BLOCK - 1))
+ ? RECOVERY_JOURNAL_1_ENTRIES_IN_LAST_SECTOR
+ : RECOVERY_JOURNAL_1_ENTRIES_PER_SECTOR);
+}
+
+static void extract_entries_from_block(struct repair_completion *repair,
+ struct recovery_journal *journal,
+ sequence_number_t sequence,
+ enum vdo_metadata_type format,
+ journal_entry_count_t entries)
+{
+ sector_count_t i;
+ struct recovery_block_header header =
+ get_recovery_journal_block_header(journal, repair->journal_data,
+ sequence);
+
+ if (!is_exact_recovery_journal_block(journal, &header, sequence, format)) {
+ /* This block is invalid, so skip it. */
+ return;
+ }
+
+ entries = min(entries, header.entry_count);
+ for (i = 1; i < VDO_SECTORS_PER_BLOCK; i++) {
+ struct packed_journal_sector *sector =
+ get_sector(journal, repair->journal_data, sequence, i);
+ journal_entry_count_t sector_entries =
+ min(entries, entries_per_sector(format, i));
+
+ if (vdo_is_valid_recovery_journal_sector(&header, sector, i)) {
+ /* Only extract as many as the block header calls for. */
+ append_sector_entries(repair, (char *) sector->entries, format,
+ min_t(journal_entry_count_t,
+ sector->entry_count,
+ sector_entries));
+ }
+
+ /*
+ * Even if the sector wasn't full, count it as full when counting up to the
+ * entry count the block header claims.
+ */
+ entries -= sector_entries;
+ }
+}
+
+static int parse_journal_for_rebuild(struct repair_completion *repair)
+{
+ int result;
+ sequence_number_t i;
+ block_count_t count;
+ enum vdo_metadata_type format;
+ struct vdo *vdo = repair->completion.vdo;
+ struct recovery_journal *journal = vdo->recovery_journal;
+ journal_entry_count_t entries_per_block = journal->entries_per_block;
+
+ format = get_recovery_journal_block_header(journal, repair->journal_data,
+ repair->highest_tail).metadata_type;
+ if (format == VDO_METADATA_RECOVERY_JOURNAL)
+ entries_per_block = RECOVERY_JOURNAL_1_ENTRIES_PER_BLOCK;
+
+ /*
+ * Allocate an array of numbered_block_mapping structures large enough to transcribe every
+ * packed_recovery_journal_entry from every valid journal block.
+ */
+ count = ((repair->highest_tail - repair->block_map_head + 1) * entries_per_block);
+ result = vdo_allocate(count, struct numbered_block_mapping, __func__,
+ &repair->entries);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (i = repair->block_map_head; i <= repair->highest_tail; i++)
+ extract_entries_from_block(repair, journal, i, format, entries_per_block);
+
+ return VDO_SUCCESS;
+}
+
+static int validate_heads(struct repair_completion *repair)
+{
+ /* Both reap heads must be behind the tail. */
+ if ((repair->block_map_head <= repair->tail) &&
+ (repair->slab_journal_head <= repair->tail))
+ return VDO_SUCCESS;
+
+ return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
+ "Journal tail too early. block map head: %llu, slab journal head: %llu, tail: %llu",
+ (unsigned long long) repair->block_map_head,
+ (unsigned long long) repair->slab_journal_head,
+ (unsigned long long) repair->tail);
+}
+
+/**
+ * extract_new_mappings() - Find all valid new mappings to be applied to the block map.
+ * @repair: The repair completion.
+ *
+ * The mappings are extracted from the journal and stored in a sortable array so that all of the
+ * mappings to be applied to a given block map page can be done in a single page fetch.
+ */
+static int extract_new_mappings(struct repair_completion *repair)
+{
+ int result;
+ struct vdo *vdo = repair->completion.vdo;
+ struct recovery_point recovery_point = {
+ .sequence_number = repair->block_map_head,
+ .sector_count = 1,
+ .entry_count = 0,
+ };
+
+ /*
+ * Allocate an array of numbered_block_mapping structs just large enough to transcribe
+ * every packed_recovery_journal_entry from every valid journal block.
+ */
+ result = vdo_allocate(repair->entry_count, struct numbered_block_mapping,
+ __func__, &repair->entries);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ for (; before_recovery_point(&recovery_point, &repair->tail_recovery_point);
+ increment_recovery_point(&recovery_point)) {
+ struct recovery_journal_entry entry = get_entry(repair, &recovery_point);
+
+ result = validate_recovery_journal_entry(vdo, &entry);
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(vdo, result);
+ return result;
+ }
+
+ repair->entries[repair->block_map_entry_count] =
+ (struct numbered_block_mapping) {
+ .block_map_slot = entry.slot,
+ .block_map_entry = vdo_pack_block_map_entry(entry.mapping.pbn,
+ entry.mapping.state),
+ .number = repair->block_map_entry_count,
+ };
+ repair->block_map_entry_count++;
+ }
+
+ result = VDO_ASSERT((repair->block_map_entry_count <= repair->entry_count),
+ "approximate entry count is an upper bound");
+ if (result != VDO_SUCCESS)
+ vdo_enter_read_only_mode(vdo, result);
+
+ return result;
+}
+
+/**
+ * compute_usages() - Compute the logical blocks in use and block map data block counts from the
+ *                    tail of the journal.
+ * @repair: The repair completion.
+ */
+static noinline int compute_usages(struct repair_completion *repair)
+{
+ /*
+ * This function is declared noinline to avoid a spurious valgrind error regarding the
+ * following structure being uninitialized.
+ */
+ struct recovery_point recovery_point = {
+ .sequence_number = repair->tail,
+ .sector_count = 1,
+ .entry_count = 0,
+ };
+
+ struct vdo *vdo = repair->completion.vdo;
+ struct recovery_journal *journal = vdo->recovery_journal;
+ struct recovery_block_header header =
+ get_recovery_journal_block_header(journal, repair->journal_data,
+ repair->tail);
+
+ repair->logical_blocks_used = header.logical_blocks_used;
+ repair->block_map_data_blocks = header.block_map_data_blocks;
+
+ for (; before_recovery_point(&recovery_point, &repair->tail_recovery_point);
+ increment_recovery_point(&recovery_point)) {
+ struct recovery_journal_entry entry = get_entry(repair, &recovery_point);
+ int result;
+
+ result = validate_recovery_journal_entry(vdo, &entry);
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(vdo, result);
+ return result;
+ }
+
+ if (entry.operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) {
+ repair->block_map_data_blocks++;
+ continue;
+ }
+
+ if (vdo_is_mapped_location(&entry.mapping))
+ repair->logical_blocks_used++;
+
+ if (vdo_is_mapped_location(&entry.unmapping))
+ repair->logical_blocks_used--;
+ }
+
+ return VDO_SUCCESS;
+}
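+
+/*
+ * To illustrate the bookkeeping above with hypothetical entries: starting from the
+ * counts in the tail block's header, a VDO_JOURNAL_BLOCK_MAP_REMAPPING entry adds one
+ * block map data block; a data entry with a mapped mapping and an unmapped unmapping
+ * (a fresh write) nets logical_blocks_used + 1; one with both mapped (an overwrite)
+ * nets zero; and one with only the unmapping mapped (a discard) nets -1.
+ */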
+
+static int parse_journal_for_recovery(struct repair_completion *repair)
+{
+ int result;
+ sequence_number_t i, head;
+ bool found_entries = false;
+ struct recovery_journal *journal = repair->completion.vdo->recovery_journal;
+
+ head = min(repair->block_map_head, repair->slab_journal_head);
+ for (i = head; i <= repair->highest_tail; i++) {
+ struct recovery_block_header header;
+ journal_entry_count_t block_entries;
+ u8 j;
+
+ repair->tail = i;
+ repair->tail_recovery_point = (struct recovery_point) {
+ .sequence_number = i,
+ .sector_count = 0,
+ .entry_count = 0,
+ };
+
+ header = get_recovery_journal_block_header(journal, repair->journal_data, i);
+ if (header.metadata_type == VDO_METADATA_RECOVERY_JOURNAL) {
+ /* This is an old-format block, so we need to upgrade. */
+ vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
+ "Recovery journal is in the old format, a read-only rebuild is required.");
+ vdo_enter_read_only_mode(repair->completion.vdo,
+ VDO_UNSUPPORTED_VERSION);
+ return VDO_UNSUPPORTED_VERSION;
+ }
+
+ if (!is_exact_recovery_journal_block(journal, &header, i,
+ VDO_METADATA_RECOVERY_JOURNAL_2)) {
+ /* A bad block header was found so this must be the end of the journal. */
+ break;
+ }
+
+ block_entries = header.entry_count;
+
+ /* Examine each sector in turn to determine the last valid sector. */
+ for (j = 1; j < VDO_SECTORS_PER_BLOCK; j++) {
+ struct packed_journal_sector *sector =
+ get_sector(journal, repair->journal_data, i, j);
+ journal_entry_count_t sector_entries =
+ min_t(journal_entry_count_t, sector->entry_count,
+ block_entries);
+
+ /* A bad sector means that this block was torn. */
+ if (!vdo_is_valid_recovery_journal_sector(&header, sector, j))
+ break;
+
+ if (sector_entries > 0) {
+ found_entries = true;
+ repair->tail_recovery_point.sector_count++;
+ repair->tail_recovery_point.entry_count = sector_entries;
+ block_entries -= sector_entries;
+ repair->entry_count += sector_entries;
+ }
+
+ /* If this sector is short, the later sectors can't matter. */
+ if ((sector_entries < RECOVERY_JOURNAL_ENTRIES_PER_SECTOR) ||
+ (block_entries == 0))
+ break;
+ }
+
+ /* If this block was not filled, or if it tore, no later block can matter. */
+ if ((header.entry_count != journal->entries_per_block) || (block_entries > 0))
+ break;
+ }
+
+ if (!found_entries)
+ return validate_heads(repair);
+
+ /* Set the tail to the last valid tail block, if there is one. */
+ if (repair->tail_recovery_point.sector_count == 0)
+ repair->tail--;
+
+ result = validate_heads(repair);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_log_info("Highest-numbered recovery journal block has sequence number %llu, and the highest-numbered usable block is %llu",
+ (unsigned long long) repair->highest_tail,
+ (unsigned long long) repair->tail);
+
+ result = extract_new_mappings(repair);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return compute_usages(repair);
+}
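+
+/*
+ * An illustrative torn-write case (counts hypothetical): if a block's header claims 100
+ * entries but only its first two sectors validate, the tail recovery point stops at
+ * those sectors' entries, block_entries stays above zero, and the block is treated as
+ * torn, so no later block is parsed.
+ */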
+
+static int parse_journal(struct repair_completion *repair)
+{
+ if (!find_recovery_journal_head_and_tail(repair))
+ return VDO_SUCCESS;
+
+ return (vdo_state_requires_read_only_rebuild(repair->completion.vdo->load_state) ?
+ parse_journal_for_rebuild(repair) :
+ parse_journal_for_recovery(repair));
+}
+
+static void finish_journal_load(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = completion->parent;
+
+ if (++repair->vios_complete != repair->vio_count)
+ return;
+
+ vdo_log_info("Finished reading recovery journal");
+ uninitialize_vios(repair);
+ prepare_repair_completion(repair, recover_block_map, VDO_ZONE_TYPE_LOGICAL);
+ vdo_continue_completion(&repair->completion, parse_journal(repair));
+}
+
+static void handle_journal_load_error(struct vdo_completion *completion)
+{
+ struct repair_completion *repair = completion->parent;
+
+ /* Preserve the error */
+ vdo_set_completion_result(&repair->completion, completion->result);
+ vio_record_metadata_io_error(as_vio(completion));
+ completion->callback(completion);
+}
+
+static void read_journal_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct vdo *vdo = vio->completion.vdo;
+
+ continue_vio_after_io(vio, finish_journal_load, vdo->thread_config.admin_thread);
+}
+
+/**
+ * vdo_repair() - Load the recovery journal and then recover or rebuild a vdo.
+ * @parent: The completion to notify when the operation is complete.
+ */
+void vdo_repair(struct vdo_completion *parent)
+{
+ int result;
+ char *ptr;
+ struct repair_completion *repair;
+ struct vdo *vdo = parent->vdo;
+ struct recovery_journal *journal = vdo->recovery_journal;
+ physical_block_number_t pbn = journal->origin;
+ block_count_t remaining = journal->size;
+ block_count_t vio_count = DIV_ROUND_UP(remaining, MAX_BLOCKS_PER_VIO);
+ page_count_t page_count = min_t(page_count_t,
+ vdo->device_config->cache_size >> 1,
+ MAXIMUM_SIMULTANEOUS_VDO_BLOCK_MAP_RESTORATION_READS);
+
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ if (vdo->load_state == VDO_FORCE_REBUILD) {
+ vdo_log_warning("Rebuilding reference counts to clear read-only mode");
+ vdo->states.vdo.read_only_recoveries++;
+ } else if (vdo->load_state == VDO_REBUILD_FOR_UPGRADE) {
+ vdo_log_warning("Rebuilding reference counts for upgrade");
+ } else {
+ vdo_log_warning("Device was dirty, rebuilding reference counts");
+ }
+
+ result = vdo_allocate_extended(struct repair_completion, page_count,
+ struct vdo_page_completion, __func__,
+ &repair);
+ if (result != VDO_SUCCESS) {
+ vdo_fail_completion(parent, result);
+ return;
+ }
+
+ vdo_initialize_completion(&repair->completion, vdo, VDO_REPAIR_COMPLETION);
+ repair->completion.error_handler = abort_repair;
+ repair->completion.parent = parent;
+ prepare_repair_completion(repair, finish_repair, VDO_ZONE_TYPE_ADMIN);
+ repair->page_count = page_count;
+
+ result = vdo_allocate(remaining * VDO_BLOCK_SIZE, char, __func__,
+ &repair->journal_data);
+ if (abort_on_error(result, repair))
+ return;
+
+ result = vdo_allocate(vio_count, struct vio, __func__, &repair->vios);
+ if (abort_on_error(result, repair))
+ return;
+
+ ptr = repair->journal_data;
+ for (repair->vio_count = 0; repair->vio_count < vio_count; repair->vio_count++) {
+ block_count_t blocks = min_t(block_count_t, remaining,
+ MAX_BLOCKS_PER_VIO);
+
+ result = allocate_vio_components(vdo, VIO_TYPE_RECOVERY_JOURNAL,
+ VIO_PRIORITY_METADATA,
+ repair, blocks, ptr,
+ &repair->vios[repair->vio_count]);
+ if (abort_on_error(result, repair))
+ return;
+
+ ptr += (blocks * VDO_BLOCK_SIZE);
+ remaining -= blocks;
+ }
+
+ for (vio_count = 0; vio_count < repair->vio_count;
+ vio_count++, pbn += MAX_BLOCKS_PER_VIO) {
+ vdo_submit_metadata_vio(&repair->vios[vio_count], pbn, read_journal_endio,
+ handle_journal_load_error, REQ_OP_READ);
+ }
+}
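+
+/*
+ * A worked example of the vio partitioning above (sizes hypothetical): a 1000-block
+ * journal with a MAX_BLOCKS_PER_VIO of 256 is read with DIV_ROUND_UP(1000, 256) = 4
+ * vios; the first three cover 256 blocks each and the last covers the remaining 232,
+ * with each vio's buffer advanced by blocks * VDO_BLOCK_SIZE into journal_data and each
+ * submission's starting pbn advanced by MAX_BLOCKS_PER_VIO.
+ */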
diff --git a/drivers/md/dm-vdo/repair.h b/drivers/md/dm-vdo/repair.h
new file mode 100644
index 000000000000..ff255cf41486
--- /dev/null
+++ b/drivers/md/dm-vdo/repair.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_REPAIR_H
+#define VDO_REPAIR_H
+
+#include "types.h"
+
+void vdo_replay_into_slab_journals(struct block_allocator *allocator, void *context);
+void vdo_repair(struct vdo_completion *parent);
+
+#endif /* VDO_REPAIR_H */
diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c
new file mode 100644
index 000000000000..46e4721e5b4f
--- /dev/null
+++ b/drivers/md/dm-vdo/slab-depot.c
@@ -0,0 +1,5101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "slab-depot.h"
+
+#include <linux/atomic.h>
+#include <linux/bio.h>
+#include <linux/err.h>
+#include <linux/log2.h>
+#include <linux/min_heap.h>
+#include <linux/minmax.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "numeric.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "action-manager.h"
+#include "admin-state.h"
+#include "completion.h"
+#include "constants.h"
+#include "data-vio.h"
+#include "encodings.h"
+#include "io-submitter.h"
+#include "physical-zone.h"
+#include "priority-table.h"
+#include "recovery-journal.h"
+#include "repair.h"
+#include "status-codes.h"
+#include "types.h"
+#include "vdo.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+static const u64 BYTES_PER_WORD = sizeof(u64);
+static const bool NORMAL_OPERATION = true;
+
+/**
+ * get_lock() - Get the lock object for a slab journal block by sequence number.
+ * @journal: vdo_slab journal to retrieve from.
+ * @sequence_number: Sequence number of the block.
+ *
+ * Return: The lock object for the given sequence number.
+ */
+static inline struct journal_lock * __must_check get_lock(struct slab_journal *journal,
+ sequence_number_t sequence_number)
+{
+ return &journal->locks[sequence_number % journal->size];
+}
+
+static bool is_slab_open(struct vdo_slab *slab)
+{
+ return (!vdo_is_state_quiescing(&slab->state) &&
+ !vdo_is_state_quiescent(&slab->state));
+}
+
+/**
+ * must_make_entries_to_flush() - Check whether there are entry waiters which should delay a flush.
+ * @journal: The journal to check.
+ *
+ * Return: true if there are entry waiters and the slab is not currently rebuilding.
+ */
+static inline bool __must_check must_make_entries_to_flush(struct slab_journal *journal)
+{
+ return ((journal->slab->status != VDO_SLAB_REBUILDING) &&
+ vdo_waitq_has_waiters(&journal->entry_waiters));
+}
+
+/**
+ * is_reaping() - Check whether a reap is currently in progress.
+ * @journal: The journal which may be reaping.
+ *
+ * Return: true if the journal is reaping.
+ */
+static inline bool __must_check is_reaping(struct slab_journal *journal)
+{
+ return (journal->head != journal->unreapable);
+}
+
+/**
+ * initialize_tail_block() - Initialize tail block as a new block.
+ * @journal: The journal whose tail block is being initialized.
+ */
+static void initialize_tail_block(struct slab_journal *journal)
+{
+ struct slab_journal_block_header *header = &journal->tail_header;
+
+ header->sequence_number = journal->tail;
+ header->entry_count = 0;
+ header->has_block_map_increments = false;
+}
+
+/**
+ * initialize_journal_state() - Set all journal fields appropriately to start journaling.
+ * @journal: The journal to be reset, based on its tail sequence number.
+ */
+static void initialize_journal_state(struct slab_journal *journal)
+{
+ journal->unreapable = journal->head;
+ journal->reap_lock = get_lock(journal, journal->unreapable);
+ journal->next_commit = journal->tail;
+ journal->summarized = journal->last_summarized = journal->tail;
+ initialize_tail_block(journal);
+}
+
+/**
+ * block_is_full() - Check whether a journal block is full.
+ * @journal: The slab journal for the block.
+ *
+ * Return: true if the tail block is full.
+ */
+static bool __must_check block_is_full(struct slab_journal *journal)
+{
+ journal_entry_count_t count = journal->tail_header.entry_count;
+
+ return (journal->tail_header.has_block_map_increments ?
+ (journal->full_entries_per_block == count) :
+ (journal->entries_per_block == count));
+}
+
+static void add_entries(struct slab_journal *journal);
+static void update_tail_block_location(struct slab_journal *journal);
+static void release_journal_locks(struct vdo_waiter *waiter, void *context);
+
+/**
+ * is_slab_journal_blank() - Check whether a slab's journal is blank.
+ * @slab: The slab to check.
+ *
+ * A slab journal is blank if it has never had any entries recorded in it.
+ *
+ * Return: true if the slab's journal has never been modified.
+ */
+static bool is_slab_journal_blank(const struct vdo_slab *slab)
+{
+ return ((slab->journal.tail == 1) &&
+ (slab->journal.tail_header.entry_count == 0));
+}
+
+/**
+ * mark_slab_journal_dirty() - Put a slab journal on the dirty ring of its allocator in the correct
+ * order.
+ * @journal: The journal to be marked dirty.
+ * @lock: The recovery journal lock held by the slab journal.
+ */
+static void mark_slab_journal_dirty(struct slab_journal *journal, sequence_number_t lock)
+{
+ struct slab_journal *dirty_journal;
+ struct list_head *dirty_list = &journal->slab->allocator->dirty_slab_journals;
+
+ VDO_ASSERT_LOG_ONLY(journal->recovery_lock == 0, "slab journal was clean");
+
+ journal->recovery_lock = lock;
+ list_for_each_entry_reverse(dirty_journal, dirty_list, dirty_entry) {
+ if (dirty_journal->recovery_lock <= journal->recovery_lock)
+ break;
+ }
+
+ list_move_tail(&journal->dirty_entry, dirty_journal->dirty_entry.next);
+}
+
+static void mark_slab_journal_clean(struct slab_journal *journal)
+{
+ journal->recovery_lock = 0;
+ list_del_init(&journal->dirty_entry);
+}
+
+static void check_if_slab_drained(struct vdo_slab *slab)
+{
+ bool read_only;
+ struct slab_journal *journal = &slab->journal;
+ const struct admin_state_code *code;
+
+ if (!vdo_is_state_draining(&slab->state) ||
+ must_make_entries_to_flush(journal) ||
+ is_reaping(journal) ||
+ journal->waiting_to_commit ||
+ !list_empty(&journal->uncommitted_blocks) ||
+ journal->updating_slab_summary ||
+ (slab->active_count > 0))
+ return;
+
+ /* When not suspending or recovering, the slab must be clean. */
+ code = vdo_get_admin_state_code(&slab->state);
+ read_only = vdo_is_read_only(slab->allocator->depot->vdo);
+ if (!read_only &&
+ vdo_waitq_has_waiters(&slab->dirty_blocks) &&
+ (code != VDO_ADMIN_STATE_SUSPENDING) &&
+ (code != VDO_ADMIN_STATE_RECOVERING))
+ return;
+
+ vdo_finish_draining_with_result(&slab->state,
+ (read_only ? VDO_READ_ONLY : VDO_SUCCESS));
+}
+
+/* FULLNESS HINT COMPUTATION */
+
+/**
+ * compute_fullness_hint() - Translate a slab's free block count into a 'fullness hint' that can be
+ * stored in a slab_summary_entry's 7 bits that are dedicated to its free
+ * count.
+ * @depot: The depot whose summary is being updated.
+ * @free_blocks: The number of free blocks.
+ *
+ * Note: the number of free blocks must be strictly less than 2^23 blocks, even though
+ * theoretically slabs could contain precisely 2^23 blocks; there is an assumption that at least
+ * one block is used by metadata. This assumption is necessary; otherwise, the fullness hint might
+ * overflow. The fullness hint formula is roughly (fullness >> 16) & 0x7f, but (2^23 >> 16) & 0x7f
+ * is 0, which would make it impossible to distinguish completely full from completely empty.
+ *
+ * Return: A fullness hint, which can be stored in 7 bits.
+ */
+static u8 __must_check compute_fullness_hint(struct slab_depot *depot,
+ block_count_t free_blocks)
+{
+ block_count_t hint;
+
+ VDO_ASSERT_LOG_ONLY((free_blocks < (1 << 23)), "free blocks must be less than 2^23");
+
+ if (free_blocks == 0)
+ return 0;
+
+ hint = free_blocks >> depot->hint_shift;
+ return ((hint == 0) ? 1 : hint);
+}
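+
+/*
+ * A worked example (the exact hint_shift depends on the slab size): for a maximum-size
+ * slab of 2^23 blocks, hint_shift works out to 16, so 3,000,000 free blocks map to hint
+ * 3000000 >> 16 = 45, while 40,000 free blocks shift to 0 and are clamped to 1 so that
+ * a slab with some free space remains distinguishable from a completely full one
+ * (hint 0). Readers of the summary approximate the free count as hint << hint_shift.
+ */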
+
+/**
+ * check_summary_drain_complete() - Check whether an allocator's summary has finished draining.
+ * @allocator: The allocator to check.
+ */
+static void check_summary_drain_complete(struct block_allocator *allocator)
+{
+ if (!vdo_is_state_draining(&allocator->summary_state) ||
+ (allocator->summary_write_count > 0))
+ return;
+
+ vdo_finish_operation(&allocator->summary_state,
+ (vdo_is_read_only(allocator->depot->vdo) ?
+ VDO_READ_ONLY : VDO_SUCCESS));
+}
+
+/**
+ * notify_summary_waiters() - Wake all the waiters in a given queue.
+ * @allocator: The block allocator summary which owns the queue.
+ * @queue: The queue to notify.
+ */
+static void notify_summary_waiters(struct block_allocator *allocator,
+ struct vdo_wait_queue *queue)
+{
+ int result = (vdo_is_read_only(allocator->depot->vdo) ?
+ VDO_READ_ONLY : VDO_SUCCESS);
+
+ vdo_waitq_notify_all_waiters(queue, NULL, &result);
+}
+
+static void launch_write(struct slab_summary_block *summary_block);
+
+/**
+ * finish_updating_slab_summary_block() - Finish processing a block which attempted to write,
+ * whether or not the attempt succeeded.
+ * @block: The block.
+ */
+static void finish_updating_slab_summary_block(struct slab_summary_block *block)
+{
+ notify_summary_waiters(block->allocator, &block->current_update_waiters);
+ block->writing = false;
+ block->allocator->summary_write_count--;
+ if (vdo_waitq_has_waiters(&block->next_update_waiters))
+ launch_write(block);
+ else
+ check_summary_drain_complete(block->allocator);
+}
+
+/**
+ * finish_update() - This is the callback for a successful summary block write.
+ * @completion: The write vio.
+ */
+static void finish_update(struct vdo_completion *completion)
+{
+ struct slab_summary_block *block =
+ container_of(as_vio(completion), struct slab_summary_block, vio);
+
+ atomic64_inc(&block->allocator->depot->summary_statistics.blocks_written);
+ finish_updating_slab_summary_block(block);
+}
+
+/**
+ * handle_write_error() - Handle an error writing a slab summary block.
+ * @completion: The write VIO.
+ */
+static void handle_write_error(struct vdo_completion *completion)
+{
+ struct slab_summary_block *block =
+ container_of(as_vio(completion), struct slab_summary_block, vio);
+
+ vio_record_metadata_io_error(as_vio(completion));
+ vdo_enter_read_only_mode(completion->vdo, completion->result);
+ finish_updating_slab_summary_block(block);
+}
+
+static void write_slab_summary_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct slab_summary_block *block =
+ container_of(vio, struct slab_summary_block, vio);
+
+ continue_vio_after_io(vio, finish_update, block->allocator->thread_id);
+}
+
+/**
+ * launch_write() - Write a slab summary block unless it is currently out for writing.
+ * @block: The block that needs to be committed.
+ */
+static void launch_write(struct slab_summary_block *block)
+{
+ struct block_allocator *allocator = block->allocator;
+ struct slab_depot *depot = allocator->depot;
+ physical_block_number_t pbn;
+
+ if (block->writing)
+ return;
+
+ allocator->summary_write_count++;
+ vdo_waitq_transfer_all_waiters(&block->next_update_waiters,
+ &block->current_update_waiters);
+ block->writing = true;
+
+ if (vdo_is_read_only(depot->vdo)) {
+ finish_updating_slab_summary_block(block);
+ return;
+ }
+
+ memcpy(block->outgoing_entries, block->entries, VDO_BLOCK_SIZE);
+
+ /*
+ * Flush before writing to ensure that the slab journal tail blocks and reference updates
+ * covered by this summary update are stable. Otherwise, a subsequent recovery could
+ * encounter a slab summary update that refers to a slab journal tail block that has not
+ * actually been written. In such cases, the slab journal referenced will be treated as
+ * empty, causing any data within the slab which predates the existing recovery journal
+ * entries to be lost.
+ */
+ pbn = (depot->summary_origin +
+ (VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE * allocator->zone_number) +
+ block->index);
+ vdo_submit_metadata_vio(&block->vio, pbn, write_slab_summary_endio,
+ handle_write_error, REQ_OP_WRITE | REQ_PREFLUSH);
+}
+
+/**
+ * update_slab_summary_entry() - Update the entry for a slab.
+ * @slab: The slab whose entry is to be updated
+ * @waiter: The waiter that is updating the summary.
+ * @tail_block_offset: The offset of the slab journal's tail block.
+ * @load_ref_counts: Whether the reference counts must be loaded from disk on the vdo load.
+ * @is_clean: Whether the slab is clean.
+ * @free_blocks: The number of free blocks.
+ */
+static void update_slab_summary_entry(struct vdo_slab *slab, struct vdo_waiter *waiter,
+ tail_block_offset_t tail_block_offset,
+ bool load_ref_counts, bool is_clean,
+ block_count_t free_blocks)
+{
+ u8 index = slab->slab_number / VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK;
+ struct block_allocator *allocator = slab->allocator;
+ struct slab_summary_block *block = &allocator->summary_blocks[index];
+ int result;
+ struct slab_summary_entry *entry;
+
+ if (vdo_is_read_only(block->vio.completion.vdo)) {
+ result = VDO_READ_ONLY;
+ waiter->callback(waiter, &result);
+ return;
+ }
+
+ if (vdo_is_state_draining(&allocator->summary_state) ||
+ vdo_is_state_quiescent(&allocator->summary_state)) {
+ result = VDO_INVALID_ADMIN_STATE;
+ waiter->callback(waiter, &result);
+ return;
+ }
+
+ entry = &allocator->summary_entries[slab->slab_number];
+ *entry = (struct slab_summary_entry) {
+ .tail_block_offset = tail_block_offset,
+ .load_ref_counts = (entry->load_ref_counts || load_ref_counts),
+ .is_dirty = !is_clean,
+ .fullness_hint = compute_fullness_hint(allocator->depot, free_blocks),
+ };
+ vdo_waitq_enqueue_waiter(&block->next_update_waiters, waiter);
+ launch_write(block);
+}
+
+/**
+ * finish_reaping() - Actually advance the head of the journal now that any necessary flushes are
+ * complete.
+ * @journal: The journal to be reaped.
+ */
+static void finish_reaping(struct slab_journal *journal)
+{
+ journal->head = journal->unreapable;
+ add_entries(journal);
+ check_if_slab_drained(journal->slab);
+}
+
+static void reap_slab_journal(struct slab_journal *journal);
+
+/**
+ * complete_reaping() - Finish reaping now that we have flushed the lower layer and then try
+ * reaping again in case we deferred reaping due to an outstanding vio.
+ * @completion: The flush vio.
+ */
+static void complete_reaping(struct vdo_completion *completion)
+{
+ struct slab_journal *journal = completion->parent;
+
+ return_vio_to_pool(journal->slab->allocator->vio_pool,
+ vio_as_pooled_vio(as_vio(vdo_forget(completion))));
+ finish_reaping(journal);
+ reap_slab_journal(journal);
+}
+
+/**
+ * handle_flush_error() - Handle an error flushing the lower layer.
+ * @completion: The flush vio.
+ */
+static void handle_flush_error(struct vdo_completion *completion)
+{
+ vio_record_metadata_io_error(as_vio(completion));
+ vdo_enter_read_only_mode(completion->vdo, completion->result);
+ complete_reaping(completion);
+}
+
+static void flush_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct slab_journal *journal = vio->completion.parent;
+
+ continue_vio_after_io(vio, complete_reaping,
+ journal->slab->allocator->thread_id);
+}
+
+/**
+ * flush_for_reaping() - A waiter callback for getting a vio with which to flush the lower layer
+ * prior to reaping.
+ * @waiter: The journal as a flush waiter.
+ * @context: The newly acquired flush vio.
+ */
+static void flush_for_reaping(struct vdo_waiter *waiter, void *context)
+{
+ struct slab_journal *journal =
+ container_of(waiter, struct slab_journal, flush_waiter);
+ struct pooled_vio *pooled = context;
+ struct vio *vio = &pooled->vio;
+
+ vio->completion.parent = journal;
+ vdo_submit_flush_vio(vio, flush_endio, handle_flush_error);
+}
+
+/**
+ * reap_slab_journal() - Conduct a reap on a slab journal to reclaim unreferenced blocks.
+ * @journal: The slab journal.
+ */
+static void reap_slab_journal(struct slab_journal *journal)
+{
+ bool reaped = false;
+
+ if (is_reaping(journal)) {
+ /* We already have a reap in progress so wait for it to finish. */
+ return;
+ }
+
+ if ((journal->slab->status != VDO_SLAB_REBUILT) ||
+ !vdo_is_state_normal(&journal->slab->state) ||
+ vdo_is_read_only(journal->slab->allocator->depot->vdo)) {
+ /*
+ * We must not reap in the first two cases, and there's no point in read-only mode.
+ */
+ return;
+ }
+
+ /*
+ * Start reclaiming blocks only when the journal head has no references. Then stop when a
+ * locked block is reached, or when the reap catches up to the most recently written block
+ * (the one referenced by the slab summary, whose sequence number is just before the tail).
+ */
+ while ((journal->unreapable < journal->tail) && (journal->reap_lock->count == 0)) {
+ reaped = true;
+ journal->unreapable++;
+ journal->reap_lock++;
+ if (journal->reap_lock == &journal->locks[journal->size])
+ journal->reap_lock = &journal->locks[0];
+ }
+
+ if (!reaped)
+ return;
+
+ /*
+ * It is never safe to reap a slab journal block without first issuing a flush, regardless
+ * of whether a user flush has been received or not. In the absence of the flush, the
+ * reference block write which released the locks allowing the slab journal to reap may not
+ * be persisted. Although slab summary writes will eventually issue flushes, multiple slab
+ * journal block writes can be issued while previous slab summary updates have not yet been
+ * made. Even though those slab journal block writes will be ignored if the slab summary
+ * update is not persisted, they may still overwrite the to-be-reaped slab journal block,
+ * resulting in a loss of reference count updates.
+ */
+ journal->flush_waiter.callback = flush_for_reaping;
+ acquire_vio_from_pool(journal->slab->allocator->vio_pool,
+ &journal->flush_waiter);
+}
+
+/**
+ * adjust_slab_journal_block_reference() - Adjust the reference count for a slab journal block.
+ * @journal: The slab journal.
+ * @sequence_number: The journal sequence number of the referenced block.
+ * @adjustment: Amount to adjust the reference counter.
+ *
+ * Note that when the adjustment is negative, the slab journal will be reaped.
+ */
+static void adjust_slab_journal_block_reference(struct slab_journal *journal,
+ sequence_number_t sequence_number,
+ int adjustment)
+{
+ struct journal_lock *lock;
+
+ if (sequence_number == 0)
+ return;
+
+ if (journal->slab->status == VDO_SLAB_REPLAYING) {
+ /* Locks should not be used during offline replay. */
+ return;
+ }
+
+ VDO_ASSERT_LOG_ONLY((adjustment != 0), "adjustment must be non-zero");
+ lock = get_lock(journal, sequence_number);
+ if (adjustment < 0) {
+ VDO_ASSERT_LOG_ONLY((-adjustment <= lock->count),
+ "adjustment %d of lock count %u for slab journal block %llu must not underflow",
+ adjustment, lock->count,
+ (unsigned long long) sequence_number);
+ }
+
+ lock->count += adjustment;
+ if (lock->count == 0)
+ reap_slab_journal(journal);
+}
+
+/**
+ * release_journal_locks() - Callback invoked after a slab summary update completes.
+ * @waiter: The slab summary waiter that has just been notified.
+ * @context: The result code of the update.
+ *
+ * Registered in the constructor on behalf of update_tail_block_location().
+ *
+ * Implements waiter_callback_fn.
+ */
+static void release_journal_locks(struct vdo_waiter *waiter, void *context)
+{
+ sequence_number_t first, i;
+ struct slab_journal *journal =
+ container_of(waiter, struct slab_journal, slab_summary_waiter);
+ int result = *((int *) context);
+
+ if (result != VDO_SUCCESS) {
+ if (result != VDO_READ_ONLY) {
+ /*
+ * Don't bother logging what might be lots of errors if we are already in
+ * read-only mode.
+ */
+ vdo_log_error_strerror(result, "failed slab summary update %llu",
+ (unsigned long long) journal->summarized);
+ }
+
+ journal->updating_slab_summary = false;
+ vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
+ check_if_slab_drained(journal->slab);
+ return;
+ }
+
+ if (journal->partial_write_in_progress && (journal->summarized == journal->tail)) {
+ journal->partial_write_in_progress = false;
+ add_entries(journal);
+ }
+
+ first = journal->last_summarized;
+ journal->last_summarized = journal->summarized;
+ for (i = journal->summarized - 1; i >= first; i--) {
+ /*
+ * Release the lock the summarized block held on the recovery journal. (During
+ * replay, recovery_start will always be 0.)
+ */
+ if (journal->recovery_journal != NULL) {
+ zone_count_t zone_number = journal->slab->allocator->zone_number;
+ struct journal_lock *lock = get_lock(journal, i);
+
+ vdo_release_recovery_journal_block_reference(journal->recovery_journal,
+ lock->recovery_start,
+ VDO_ZONE_TYPE_PHYSICAL,
+ zone_number);
+ }
+
+ /*
+ * Release our own lock against reaping for blocks that are committed. (This
+ * function will not change locks during replay.)
+ */
+ adjust_slab_journal_block_reference(journal, i, -1);
+ }
+
+ journal->updating_slab_summary = false;
+
+ reap_slab_journal(journal);
+
+ /* Check if the slab summary needs to be updated again. */
+ update_tail_block_location(journal);
+}
+
+/**
+ * update_tail_block_location() - Update the tail block location in the slab summary, if necessary.
+ * @journal: The slab journal that is updating its tail block location.
+ */
+static void update_tail_block_location(struct slab_journal *journal)
+{
+ block_count_t free_block_count;
+ struct vdo_slab *slab = journal->slab;
+
+ if (journal->updating_slab_summary ||
+ vdo_is_read_only(journal->slab->allocator->depot->vdo) ||
+ (journal->last_summarized >= journal->next_commit)) {
+ check_if_slab_drained(slab);
+ return;
+ }
+
+ if (slab->status != VDO_SLAB_REBUILT) {
+ u8 hint = slab->allocator->summary_entries[slab->slab_number].fullness_hint;
+
+ free_block_count = ((block_count_t) hint) << slab->allocator->depot->hint_shift;
+ } else {
+ free_block_count = slab->free_blocks;
+ }
+
+ journal->summarized = journal->next_commit;
+ journal->updating_slab_summary = true;
+
+ /*
+ * Update slab summary as dirty.
+ * vdo_slab journal can only reap past sequence number 1 when all the ref counts for this
+ * slab have been written to the layer. Therefore, indicate that the ref counts must be
+ * loaded when the journal head has reaped past sequence number 1.
+ */
+ update_slab_summary_entry(slab, &journal->slab_summary_waiter,
+ journal->summarized % journal->size,
+ (journal->head > 1), false, free_block_count);
+}
+
+/**
+ * reopen_slab_journal() - Reopen a slab's journal by emptying it and then adding pending entries.
+ * @slab: The slab whose journal should be reopened.
+ */
+static void reopen_slab_journal(struct vdo_slab *slab)
+{
+ struct slab_journal *journal = &slab->journal;
+ sequence_number_t block;
+
+ VDO_ASSERT_LOG_ONLY(journal->tail_header.entry_count == 0,
+ "vdo_slab journal's active block empty before reopening");
+ journal->head = journal->tail;
+ initialize_journal_state(journal);
+
+ /* Ensure no locks are spuriously held on an empty journal. */
+ for (block = 1; block <= journal->size; block++) {
+ VDO_ASSERT_LOG_ONLY((get_lock(journal, block)->count == 0),
+ "Scrubbed journal's block %llu is not locked",
+ (unsigned long long) block);
+ }
+
+ add_entries(journal);
+}
+
+static sequence_number_t get_committing_sequence_number(const struct pooled_vio *vio)
+{
+ const struct packed_slab_journal_block *block =
+ (const struct packed_slab_journal_block *) vio->vio.data;
+
+ return __le64_to_cpu(block->header.sequence_number);
+}
+
+/**
+ * complete_write() - Handle post-commit processing.
+ * @completion: The write vio as a completion.
+ *
+ * This is the callback registered by write_slab_journal_block().
+ */
+static void complete_write(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct pooled_vio *pooled = vio_as_pooled_vio(as_vio(completion));
+ struct slab_journal *journal = completion->parent;
+ sequence_number_t committed = get_committing_sequence_number(pooled);
+
+ list_del_init(&pooled->list_entry);
+ return_vio_to_pool(journal->slab->allocator->vio_pool, vdo_forget(pooled));
+
+ if (result != VDO_SUCCESS) {
+ vio_record_metadata_io_error(as_vio(completion));
+ vdo_log_error_strerror(result, "cannot write slab journal block %llu",
+ (unsigned long long) committed);
+ vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
+ check_if_slab_drained(journal->slab);
+ return;
+ }
+
+ WRITE_ONCE(journal->events->blocks_written, journal->events->blocks_written + 1);
+
+ if (list_empty(&journal->uncommitted_blocks)) {
+ /* If no blocks are outstanding, then the commit point is at the tail. */
+ journal->next_commit = journal->tail;
+ } else {
+ /* The commit point is always the beginning of the oldest incomplete block. */
+ pooled = container_of(journal->uncommitted_blocks.next,
+ struct pooled_vio, list_entry);
+ journal->next_commit = get_committing_sequence_number(pooled);
+ }
+
+ update_tail_block_location(journal);
+}
+
+static void write_slab_journal_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct slab_journal *journal = vio->completion.parent;
+
+ continue_vio_after_io(vio, complete_write, journal->slab->allocator->thread_id);
+}
+
+/**
+ * write_slab_journal_block() - Write a slab journal block.
+ * @waiter: The vio pool waiter which was just notified.
+ * @context: The vio pool entry for the write.
+ *
+ * Callback from acquire_vio_from_pool() registered in commit_tail().
+ */
+static void write_slab_journal_block(struct vdo_waiter *waiter, void *context)
+{
+ struct pooled_vio *pooled = context;
+ struct vio *vio = &pooled->vio;
+ struct slab_journal *journal =
+ container_of(waiter, struct slab_journal, resource_waiter);
+ struct slab_journal_block_header *header = &journal->tail_header;
+ int unused_entries = journal->entries_per_block - header->entry_count;
+ physical_block_number_t block_number;
+ const struct admin_state_code *operation;
+
+ header->head = journal->head;
+ list_add_tail(&pooled->list_entry, &journal->uncommitted_blocks);
+ vdo_pack_slab_journal_block_header(header, &journal->block->header);
+
+ /* Copy the tail block into the vio. */
+ memcpy(pooled->vio.data, journal->block, VDO_BLOCK_SIZE);
+
+ VDO_ASSERT_LOG_ONLY(unused_entries >= 0, "vdo_slab journal block is not overfull");
+ if (unused_entries > 0) {
+ /*
+ * Release the per-entry locks for any unused entries in the block we are about to
+ * write.
+ */
+ adjust_slab_journal_block_reference(journal, header->sequence_number,
+ -unused_entries);
+ journal->partial_write_in_progress = !block_is_full(journal);
+ }
+
+ block_number = journal->slab->journal_origin +
+ (header->sequence_number % journal->size);
+ vio->completion.parent = journal;
+
+ /*
+ * This block won't be read in recovery until the slab summary is updated to refer to it.
+ * The slab summary update does a flush which is sufficient to protect us from corruption
+ * due to out of order slab journal, reference block, or block map writes.
+ */
+ vdo_submit_metadata_vio(vdo_forget(vio), block_number, write_slab_journal_endio,
+ complete_write, REQ_OP_WRITE);
+
+ /* Since the write is submitted, the tail block structure can be reused. */
+ journal->tail++;
+ initialize_tail_block(journal);
+ journal->waiting_to_commit = false;
+
+ operation = vdo_get_admin_state_code(&journal->slab->state);
+ if (operation == VDO_ADMIN_STATE_WAITING_FOR_RECOVERY) {
+ vdo_finish_operation(&journal->slab->state,
+ (vdo_is_read_only(journal->slab->allocator->depot->vdo) ?
+ VDO_READ_ONLY : VDO_SUCCESS));
+ return;
+ }
+
+ add_entries(journal);
+}
+
+/**
+ * commit_tail() - Commit the tail block of the slab journal.
+ * @journal: The journal whose tail block should be committed.
+ */
+static void commit_tail(struct slab_journal *journal)
+{
+ if ((journal->tail_header.entry_count == 0) && must_make_entries_to_flush(journal)) {
+ /*
+ * There are no entries at the moment, but there are some waiters, so defer
+ * initiating the flush until those entries are ready to write.
+ */
+ return;
+ }
+
+ if (vdo_is_read_only(journal->slab->allocator->depot->vdo) ||
+ journal->waiting_to_commit ||
+ (journal->tail_header.entry_count == 0)) {
+ /*
+ * There is nothing to do since the tail block is empty, or writing, or the journal
+ * is in read-only mode.
+ */
+ return;
+ }
+
+ /*
+ * Since we are about to commit the tail block, this journal no longer needs to be on the
+ * ring of journals which the recovery journal might ask to commit.
+ */
+ mark_slab_journal_clean(journal);
+
+ journal->waiting_to_commit = true;
+
+ journal->resource_waiter.callback = write_slab_journal_block;
+ acquire_vio_from_pool(journal->slab->allocator->vio_pool,
+ &journal->resource_waiter);
+}
+
+/**
+ * encode_slab_journal_entry() - Encode a slab journal entry.
+ * @tail_header: The unpacked header for the block.
+ * @payload: The journal block payload to hold the entry.
+ * @sbn: The slab block number of the entry to encode.
+ * @operation: The type of the entry.
+ * @increment: True if this is an increment.
+ *
+ * Exposed for unit tests.
+ */
+static void encode_slab_journal_entry(struct slab_journal_block_header *tail_header,
+ slab_journal_payload *payload,
+ slab_block_number sbn,
+ enum journal_operation operation,
+ bool increment)
+{
+ journal_entry_count_t entry_number = tail_header->entry_count++;
+
+ if (operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) {
+ if (!tail_header->has_block_map_increments) {
+ memset(payload->full_entries.entry_types, 0,
+ VDO_SLAB_JOURNAL_ENTRY_TYPES_SIZE);
+ tail_header->has_block_map_increments = true;
+ }
+
+ payload->full_entries.entry_types[entry_number / 8] |=
+ ((u8)1 << (entry_number % 8));
+ }
+
+ vdo_pack_slab_journal_entry(&payload->entries[entry_number], sbn, increment);
+}
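+
+/*
+ * A sketch of the bitmap math above: entry_types holds one bit per entry, so entry
+ * number 10 sets bit 2 of byte 1 (10 / 8 == 1, 10 % 8 == 2); a set bit marks that
+ * entry as a block map increment when the block is later decoded.
+ */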
+
+/**
+ * expand_journal_point() - Convert a recovery journal journal_point which refers to both an
+ * increment and a decrement to a single point which refers to one or the
+ * other.
+ * @recovery_point: The journal point to convert.
+ * @increment: Whether the current entry is an increment.
+ *
+ * Return: The expanded journal point.
+ *
+ * Each data_vio has only a single recovery journal point, but may need to make both increment
+ * and decrement entries in the same slab journal. In order to distinguish the two entries, the
+ * entry count of the expanded journal point is twice the actual recovery journal entry count
+ * for increments, and one more than that for decrements.
+ */
+static struct journal_point expand_journal_point(struct journal_point recovery_point,
+ bool increment)
+{
+ recovery_point.entry_count *= 2;
+ if (!increment)
+ recovery_point.entry_count++;
+
+ return recovery_point;
+}
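+
+/*
+ * For example: a data_vio whose recovery journal point has entry_count 5 expands to
+ * entry_count 10 for its increment entry and 11 for its decrement entry, so the two
+ * remain distinct and totally ordered even though they share one recovery point.
+ */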
+
+/**
+ * add_entry() - Actually add an entry to the slab journal, potentially firing off a write if a
+ * block becomes full.
+ * @journal: The slab journal to append to.
+ * @pbn: The pbn being adjusted.
+ * @operation: The type of entry to make.
+ * @increment: True if this is an increment.
+ * @recovery_point: The expanded recovery point.
+ *
+ * This function is synchronous.
+ */
+static void add_entry(struct slab_journal *journal, physical_block_number_t pbn,
+ enum journal_operation operation, bool increment,
+ struct journal_point recovery_point)
+{
+ struct packed_slab_journal_block *block = journal->block;
+ int result;
+
+ result = VDO_ASSERT(vdo_before_journal_point(&journal->tail_header.recovery_point,
+ &recovery_point),
+ "recovery journal point is monotonically increasing, recovery point: %llu.%u, block recovery point: %llu.%u",
+ (unsigned long long) recovery_point.sequence_number,
+ recovery_point.entry_count,
+ (unsigned long long) journal->tail_header.recovery_point.sequence_number,
+ journal->tail_header.recovery_point.entry_count);
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
+ return;
+ }
+
+ if (operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) {
+ result = VDO_ASSERT((journal->tail_header.entry_count <
+ journal->full_entries_per_block),
+ "block has room for full entries");
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo,
+ result);
+ return;
+ }
+ }
+
+ encode_slab_journal_entry(&journal->tail_header, &block->payload,
+ pbn - journal->slab->start, operation, increment);
+ journal->tail_header.recovery_point = recovery_point;
+ if (block_is_full(journal))
+ commit_tail(journal);
+}
+
+static inline block_count_t journal_length(const struct slab_journal *journal)
+{
+ return journal->tail - journal->head;
+}
+
+/**
+ * vdo_attempt_replay_into_slab() - Replay a recovery journal entry into a slab's journal.
+ * @slab: The slab to play into.
+ * @pbn: The PBN for the entry.
+ * @operation: The type of entry to add.
+ * @increment: True if this entry is an increment.
+ * @recovery_point: The recovery journal point corresponding to this entry.
+ * @parent: The completion to notify when there is space to add the entry if the entry could not be
+ * added immediately.
+ *
+ * Return: true if the entry was added immediately.
+ */
+bool vdo_attempt_replay_into_slab(struct vdo_slab *slab, physical_block_number_t pbn,
+ enum journal_operation operation, bool increment,
+ struct journal_point *recovery_point,
+ struct vdo_completion *parent)
+{
+ struct slab_journal *journal = &slab->journal;
+ struct slab_journal_block_header *header = &journal->tail_header;
+ struct journal_point expanded = expand_journal_point(*recovery_point, increment);
+
+ /* Only accept entries after the current recovery point. */
+ if (!vdo_before_journal_point(&journal->tail_header.recovery_point, &expanded))
+ return true;
+
+ if ((header->entry_count >= journal->full_entries_per_block) &&
+ (header->has_block_map_increments || (operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING))) {
+ /*
+ * The tail block does not have room for the entry we are attempting to add so
+ * commit the tail block now.
+ */
+ commit_tail(journal);
+ }
+
+ if (journal->waiting_to_commit) {
+ vdo_start_operation_with_waiter(&journal->slab->state,
+ VDO_ADMIN_STATE_WAITING_FOR_RECOVERY,
+ parent, NULL);
+ return false;
+ }
+
+ if (journal_length(journal) >= journal->size) {
+ /*
+ * We must have reaped the current head before the crash, since the blocking
+ * threshold keeps us from having more entries than fit in a slab journal; hence we
+ * can just advance the head (and unreapable block), as needed.
+ */
+ journal->head++;
+ journal->unreapable++;
+ }
+
+ if (journal->slab->status == VDO_SLAB_REBUILT)
+ journal->slab->status = VDO_SLAB_REPLAYING;
+
+ add_entry(journal, pbn, operation, increment, expanded);
+ return true;
+}
+
+/**
+ * requires_reaping() - Check whether the journal must be reaped before adding new entries.
+ * @journal: The journal to check.
+ *
+ * Return: true if the journal must be reaped.
+ */
+static bool requires_reaping(const struct slab_journal *journal)
+{
+ return (journal_length(journal) >= journal->blocking_threshold);
+}
+
+/** finish_summary_update() - A waiter callback that resets the writing state of a slab. */
+static void finish_summary_update(struct vdo_waiter *waiter, void *context)
+{
+ struct vdo_slab *slab = container_of(waiter, struct vdo_slab, summary_waiter);
+ int result = *((int *) context);
+
+ slab->active_count--;
+
+ if ((result != VDO_SUCCESS) && (result != VDO_READ_ONLY)) {
+ vdo_log_error_strerror(result, "failed to update slab summary");
+ vdo_enter_read_only_mode(slab->allocator->depot->vdo, result);
+ }
+
+ check_if_slab_drained(slab);
+}
+
+static void write_reference_block(struct vdo_waiter *waiter, void *context);
+
+/**
+ * launch_reference_block_write() - Launch the write of a dirty reference block by first acquiring
+ * a VIO for it from the pool.
+ * @waiter: The waiter of the block which is starting to write.
+ * @context: The parent slab of the block.
+ *
+ * This can be asynchronous since the writer will have to wait if all VIOs in the pool are
+ * currently in use.
+ */
+static void launch_reference_block_write(struct vdo_waiter *waiter, void *context)
+{
+ struct vdo_slab *slab = context;
+
+ if (vdo_is_read_only(slab->allocator->depot->vdo))
+ return;
+
+ slab->active_count++;
+ container_of(waiter, struct reference_block, waiter)->is_writing = true;
+ waiter->callback = write_reference_block;
+ acquire_vio_from_pool(slab->allocator->vio_pool, waiter);
+}
+
+static void save_dirty_reference_blocks(struct vdo_slab *slab)
+{
+ vdo_waitq_notify_all_waiters(&slab->dirty_blocks,
+ launch_reference_block_write, slab);
+ check_if_slab_drained(slab);
+}
+
+/**
+ * finish_reference_block_write() - After a reference block has been written, clean it, release
+ *                                  its locks, and return its VIO to the pool.
+ * @completion: The VIO that just finished writing.
+ */
+static void finish_reference_block_write(struct vdo_completion *completion)
+{
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = vio_as_pooled_vio(vio);
+ struct reference_block *block = completion->parent;
+ struct vdo_slab *slab = block->slab;
+ tail_block_offset_t offset;
+
+ slab->active_count--;
+
+ /* Release the slab journal lock. */
+ adjust_slab_journal_block_reference(&slab->journal,
+ block->slab_journal_lock_to_release, -1);
+ return_vio_to_pool(slab->allocator->vio_pool, pooled);
+
+ /*
+ * We can't clear the is_writing flag earlier as releasing the slab journal lock may cause
+ * us to be dirtied again, but we don't want to double enqueue.
+ */
+ block->is_writing = false;
+
+ if (vdo_is_read_only(completion->vdo)) {
+ check_if_slab_drained(slab);
+ return;
+ }
+
+ /* Re-queue the block if it was re-dirtied while it was writing. */
+ if (block->is_dirty) {
+ vdo_waitq_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter);
+ if (vdo_is_state_draining(&slab->state)) {
+ /* We must be saving, and this block will otherwise not be relaunched. */
+ save_dirty_reference_blocks(slab);
+ }
+
+ return;
+ }
+
+ /*
+ * Mark the slab as clean in the slab summary if there are no dirty or writing blocks
+ * and no summary update in progress.
+ */
+ if ((slab->active_count > 0) || vdo_waitq_has_waiters(&slab->dirty_blocks)) {
+ check_if_slab_drained(slab);
+ return;
+ }
+
+ offset = slab->allocator->summary_entries[slab->slab_number].tail_block_offset;
+ slab->active_count++;
+ slab->summary_waiter.callback = finish_summary_update;
+ update_slab_summary_entry(slab, &slab->summary_waiter, offset,
+ true, true, slab->free_blocks);
+}
+
+/**
+ * get_reference_counters_for_block() - Find the reference counters for a given block.
+ * @block: The reference_block in question.
+ *
+ * Return: A pointer to the reference counters for this block.
+ */
+static vdo_refcount_t * __must_check get_reference_counters_for_block(struct reference_block *block)
+{
+ size_t block_index = block - block->slab->reference_blocks;
+
+ return &block->slab->counters[block_index * COUNTS_PER_BLOCK];
+}
+
+/**
+ * pack_reference_block() - Copy data from a reference block to a buffer ready to be written out.
+ * @block: The block to copy.
+ * @buffer: The char buffer to fill with the packed block.
+ */
+static void pack_reference_block(struct reference_block *block, void *buffer)
+{
+ struct packed_reference_block *packed = buffer;
+ vdo_refcount_t *counters = get_reference_counters_for_block(block);
+ sector_count_t i;
+ struct packed_journal_point commit_point;
+
+ vdo_pack_journal_point(&block->slab->slab_journal_point, &commit_point);
+
+ for (i = 0; i < VDO_SECTORS_PER_BLOCK; i++) {
+ packed->sectors[i].commit_point = commit_point;
+ memcpy(packed->sectors[i].counts, counters + (i * COUNTS_PER_SECTOR),
+ (sizeof(vdo_refcount_t) * COUNTS_PER_SECTOR));
+ }
+}
+
+static void write_reference_block_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct reference_block *block = vio->completion.parent;
+ thread_id_t thread_id = block->slab->allocator->thread_id;
+
+ continue_vio_after_io(vio, finish_reference_block_write, thread_id);
+}
+
+/**
+ * handle_io_error() - Handle an I/O error reading or writing a reference count block.
+ * @completion: The VIO doing the I/O as a completion.
+ */
+static void handle_io_error(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct vio *vio = as_vio(completion);
+ struct vdo_slab *slab = ((struct reference_block *) completion->parent)->slab;
+
+ vio_record_metadata_io_error(vio);
+ return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+ slab->active_count--;
+ vdo_enter_read_only_mode(slab->allocator->depot->vdo, result);
+ check_if_slab_drained(slab);
+}
+
+/**
+ * write_reference_block() - After a dirty block waiter has gotten a VIO from the VIO pool, copy
+ * its counters and associated data into the VIO, and launch the write.
+ * @waiter: The waiter of the dirty block.
+ * @context: The VIO returned by the pool.
+ */
+static void write_reference_block(struct vdo_waiter *waiter, void *context)
+{
+ size_t block_offset;
+ physical_block_number_t pbn;
+ struct pooled_vio *pooled = context;
+ struct vdo_completion *completion = &pooled->vio.completion;
+ struct reference_block *block = container_of(waiter, struct reference_block,
+ waiter);
+
+ pack_reference_block(block, pooled->vio.data);
+ block_offset = (block - block->slab->reference_blocks);
+ pbn = (block->slab->ref_counts_origin + block_offset);
+ block->slab_journal_lock_to_release = block->slab_journal_lock;
+ completion->parent = block;
+
+ /*
+ * Mark the block as clean, since we won't be committing any updates that happen after this
+ * moment. As long as VIO order is preserved, two VIOs updating this block at once will not
+ * cause complications.
+ */
+ block->is_dirty = false;
+
+ /*
+ * Flush before writing to ensure that the recovery journal and slab journal entries which
+ * cover this reference update are stable. This prevents data corruption that can be caused
+ * by out of order writes.
+ */
+ WRITE_ONCE(block->slab->allocator->ref_counts_statistics.blocks_written,
+ block->slab->allocator->ref_counts_statistics.blocks_written + 1);
+
+ completion->callback_thread_id = ((struct block_allocator *) pooled->context)->thread_id;
+ vdo_submit_metadata_vio(&pooled->vio, pbn, write_reference_block_endio,
+ handle_io_error, REQ_OP_WRITE | REQ_PREFLUSH);
+}
+
+static void reclaim_journal_space(struct slab_journal *journal)
+{
+ block_count_t length = journal_length(journal);
+ struct vdo_slab *slab = journal->slab;
+ block_count_t write_count = vdo_waitq_num_waiters(&slab->dirty_blocks);
+ block_count_t written;
+
+ if ((length < journal->flushing_threshold) || (write_count == 0))
+ return;
+
+ /* The slab journal is over the first threshold, schedule some reference block writes. */
+ WRITE_ONCE(journal->events->flush_count, journal->events->flush_count + 1);
+ if (length < journal->flushing_deadline) {
+ /* Schedule more writes the closer to the deadline we get. */
+ write_count /= journal->flushing_deadline - length + 1;
+ write_count = max_t(block_count_t, write_count, 1);
+ }
+
+ for (written = 0; written < write_count; written++) {
+ vdo_waitq_notify_next_waiter(&slab->dirty_blocks,
+ launch_reference_block_write, slab);
+ }
+}
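+
+/*
+ * A worked example of the scaling above (thresholds hypothetical): with a
+ * flushing_threshold of 8, a flushing_deadline of 12, and 20 dirty reference blocks, a
+ * journal of length 9 issues 20 / (12 - 9 + 1) = 5 writes, length 11 issues 10, and
+ * length 12 or more issues all 20, ramping up write-out as the deadline nears.
+ */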
+
+/**
+ * reference_count_to_status() - Convert a reference count to a reference status.
+ * @count: The count to convert.
+ *
+ * Return: The appropriate reference status.
+ */
+static enum reference_status __must_check reference_count_to_status(vdo_refcount_t count)
+{
+ if (count == EMPTY_REFERENCE_COUNT)
+ return RS_FREE;
+ else if (count == 1)
+ return RS_SINGLE;
+ else if (count == PROVISIONAL_REFERENCE_COUNT)
+ return RS_PROVISIONAL;
+ else
+ return RS_SHARED;
+}
+
+/**
+ * dirty_block() - Mark a reference count block as dirty, potentially adding it to the dirty queue
+ * if it wasn't already dirty.
+ * @block: The reference block to mark as dirty.
+ */
+static void dirty_block(struct reference_block *block)
+{
+ if (block->is_dirty)
+ return;
+
+ block->is_dirty = true;
+ if (!block->is_writing)
+ vdo_waitq_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter);
+}
+
+/**
+ * get_reference_block() - Get the reference block that covers the given block index.
+ * @slab: The slab owning the block.
+ * @index: The slab block number of the block.
+ */
+static struct reference_block * __must_check get_reference_block(struct vdo_slab *slab,
+ slab_block_number index)
+{
+ return &slab->reference_blocks[index / COUNTS_PER_BLOCK];
+}
+
+/**
+ * slab_block_number_from_pbn() - Determine the index within the slab of a particular physical
+ * block number.
+ * @slab: The slab.
+ * @pbn: The physical block number.
+ * @slab_block_number_ptr: A pointer to the slab block number.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check slab_block_number_from_pbn(struct vdo_slab *slab,
+ physical_block_number_t pbn,
+ slab_block_number *slab_block_number_ptr)
+{
+ u64 slab_block_number;
+
+ if (pbn < slab->start)
+ return VDO_OUT_OF_RANGE;
+
+ slab_block_number = pbn - slab->start;
+ if (slab_block_number >= slab->allocator->depot->slab_config.data_blocks)
+ return VDO_OUT_OF_RANGE;
+
+ *slab_block_number_ptr = slab_block_number;
+ return VDO_SUCCESS;
+}
+
+/**
+ * get_reference_counter() - Get the reference counter that covers the given physical block number.
+ * @slab: The slab to query.
+ * @pbn: The physical block number.
+ * @counter_ptr: A pointer to the reference counter.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check get_reference_counter(struct vdo_slab *slab,
+ physical_block_number_t pbn,
+ vdo_refcount_t **counter_ptr)
+{
+ slab_block_number index;
+ int result = slab_block_number_from_pbn(slab, pbn, &index);
+
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *counter_ptr = &slab->counters[index];
+
+ return VDO_SUCCESS;
+}
+
+static unsigned int calculate_slab_priority(struct vdo_slab *slab)
+{
+ block_count_t free_blocks = slab->free_blocks;
+ unsigned int unopened_slab_priority = slab->allocator->unopened_slab_priority;
+ unsigned int priority;
+
+ /*
+ * Wholly full slabs must be the only ones with lowest priority, 0.
+ *
+ * Slabs that have never been opened (empty, newly initialized, and never been written to)
+ * have lower priority than previously opened slabs that have a significant number of free
+ * blocks. This ranking causes VDO to avoid writing physical blocks for the first time
+ * unless there are very few free blocks that have been previously written to.
+ *
+ * Since VDO doesn't discard blocks currently, reusing previously written blocks makes VDO
+ * a better client of any underlying storage that is thinly-provisioned (though discarding
+ * would be better).
+ *
+ * For all other slabs, the priority is derived from the logarithm of the number of free
+ * blocks. Slabs with the same order of magnitude of free blocks have the same priority.
+ * With 2^23 blocks, the priority will range from 1 to 25. The reserved
+ * unopened_slab_priority divides the range and is skipped by the logarithmic mapping.
+ */
+
+ if (free_blocks == 0)
+ return 0;
+
+ if (is_slab_journal_blank(slab))
+ return unopened_slab_priority;
+
+ priority = (1 + ilog2(free_blocks));
+ return ((priority < unopened_slab_priority) ? priority : priority + 1);
+}
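+
+/*
+ * For example (assuming an unopened_slab_priority of 7): a full slab gets priority 0, a
+ * blank slab gets exactly 7, a slab with 100 free blocks computes 1 + ilog2(100) = 7 and
+ * is bumped to 8 to skip the reserved value, and one with 40 free blocks gets
+ * 1 + ilog2(40) = 6.
+ */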
+
+/*
+ * Slabs are essentially prioritized by an approximation of the number of free blocks in the slab
+ * so slabs with lots of free blocks will be opened for allocation before slabs that have few free
+ * blocks.
+ */
+static void prioritize_slab(struct vdo_slab *slab)
+{
+ VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
+ "a slab must not already be on a ring when prioritizing");
+ slab->priority = calculate_slab_priority(slab);
+ vdo_priority_table_enqueue(slab->allocator->prioritized_slabs,
+ slab->priority, &slab->allocq_entry);
+}
+
+/**
+ * adjust_free_block_count() - Adjust the free block count and (if needed) reprioritize the slab.
+ * @incremented: true if the free block count went up.
+ */
+static void adjust_free_block_count(struct vdo_slab *slab, bool incremented)
+{
+ struct block_allocator *allocator = slab->allocator;
+
+ WRITE_ONCE(allocator->allocated_blocks,
+ allocator->allocated_blocks + (incremented ? -1 : 1));
+
+ /* The open slab doesn't need to be reprioritized until it is closed. */
+ if (slab == allocator->open_slab)
+ return;
+
+ /* Don't bother adjusting the priority table if unneeded. */
+ if (slab->priority == calculate_slab_priority(slab))
+ return;
+
+ /*
+ * Reprioritize the slab to reflect the new free block count by removing it from the table
+ * and re-enqueuing it with the new priority.
+ */
+ vdo_priority_table_remove(allocator->prioritized_slabs, &slab->allocq_entry);
+ prioritize_slab(slab);
+}
+
+/**
+ * increment_for_data() - Increment the reference count for a data block.
+ * @slab: The slab which owns the block.
+ * @block: The reference block which contains the block being updated.
+ * @block_number: The block to update.
+ * @old_status: The reference status of the data block before this increment.
+ * @lock: The pbn_lock associated with this increment (may be NULL).
+ * @counter_ptr: A pointer to the count for the data block (in, out).
+ * @adjust_block_count: Whether to update the allocator's free block count.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int increment_for_data(struct vdo_slab *slab, struct reference_block *block,
+ slab_block_number block_number,
+ enum reference_status old_status,
+ struct pbn_lock *lock, vdo_refcount_t *counter_ptr,
+ bool adjust_block_count)
+{
+ switch (old_status) {
+ case RS_FREE:
+ *counter_ptr = 1;
+ block->allocated_count++;
+ slab->free_blocks--;
+ if (adjust_block_count)
+ adjust_free_block_count(slab, false);
+
+ break;
+
+ case RS_PROVISIONAL:
+ *counter_ptr = 1;
+ break;
+
+ default:
+ /* Single or shared */
+ if (*counter_ptr >= MAXIMUM_REFERENCE_COUNT) {
+ return vdo_log_error_strerror(VDO_REF_COUNT_INVALID,
+ "Incrementing a block already having 254 references (slab %u, offset %u)",
+ slab->slab_number, block_number);
+ }
+ (*counter_ptr)++;
+ }
+
+ if (lock != NULL)
+ vdo_unassign_pbn_lock_provisional_reference(lock);
+ return VDO_SUCCESS;
+}
+
+/**
+ * decrement_for_data() - Decrement the reference count for a data block.
+ * @slab: The slab which owns the block.
+ * @block: The reference block which contains the block being updated.
+ * @block_number: The block to update.
+ * @old_status: The reference status of the data block before this decrement.
+ * @updater: The reference updater doing this operation in case we need to look up the pbn lock.
+ * @lock: The pbn_lock associated with the block being decremented (may be NULL).
+ * @counter_ptr: A pointer to the count for the data block (in, out).
+ * @adjust_block_count: Whether to update the allocator's free block count.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int decrement_for_data(struct vdo_slab *slab, struct reference_block *block,
+ slab_block_number block_number,
+ enum reference_status old_status,
+ struct reference_updater *updater,
+ vdo_refcount_t *counter_ptr, bool adjust_block_count)
+{
+ switch (old_status) {
+ case RS_FREE:
+ return vdo_log_error_strerror(VDO_REF_COUNT_INVALID,
+ "Decrementing free block at offset %u in slab %u",
+ block_number, slab->slab_number);
+
+ case RS_PROVISIONAL:
+ case RS_SINGLE:
+ if (updater->zpbn.zone != NULL) {
+ struct pbn_lock *lock = vdo_get_physical_zone_pbn_lock(updater->zpbn.zone,
+ updater->zpbn.pbn);
+
+ if (lock != NULL) {
+ /*
+ * There is a read lock on this block, so the block must not become
+ * unreferenced.
+ */
+ *counter_ptr = PROVISIONAL_REFERENCE_COUNT;
+ vdo_assign_pbn_lock_provisional_reference(lock);
+ break;
+ }
+ }
+
+ *counter_ptr = EMPTY_REFERENCE_COUNT;
+ block->allocated_count--;
+ slab->free_blocks++;
+ if (adjust_block_count)
+ adjust_free_block_count(slab, true);
+
+ break;
+
+ default:
+ /* Shared */
+ (*counter_ptr)--;
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * increment_for_block_map() - Increment the reference count for a block map page.
+ * @slab: The slab which owns the block.
+ * @block: The reference block which contains the block being updated.
+ * @block_number: The block to update.
+ * @old_status: The reference status of the block before this increment.
+ * @lock: The pbn_lock associated with this increment (may be NULL).
+ * @normal_operation: Whether we are in normal operation vs. recovery or rebuild.
+ * @counter_ptr: A pointer to the count for the block (in, out).
+ * @adjust_block_count: Whether to update the allocator's free block count.
+ *
+ * All block map increments should be from provisional to MAXIMUM_REFERENCE_COUNT. Since block map
+ * blocks never dedupe they should never be adjusted from any other state. The adjustment always
+ * results in MAXIMUM_REFERENCE_COUNT as this value is used to prevent dedupe against block map
+ * blocks.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int increment_for_block_map(struct vdo_slab *slab, struct reference_block *block,
+ slab_block_number block_number,
+ enum reference_status old_status,
+ struct pbn_lock *lock, bool normal_operation,
+ vdo_refcount_t *counter_ptr, bool adjust_block_count)
+{
+ switch (old_status) {
+ case RS_FREE:
+ if (normal_operation) {
+ return vdo_log_error_strerror(VDO_REF_COUNT_INVALID,
+ "Incrementing unallocated block map block (slab %u, offset %u)",
+ slab->slab_number, block_number);
+ }
+
+ *counter_ptr = MAXIMUM_REFERENCE_COUNT;
+ block->allocated_count++;
+ slab->free_blocks--;
+ if (adjust_block_count)
+ adjust_free_block_count(slab, false);
+
+ return VDO_SUCCESS;
+
+ case RS_PROVISIONAL:
+ if (!normal_operation)
+ return vdo_log_error_strerror(VDO_REF_COUNT_INVALID,
+ "Block map block had provisional reference during replay (slab %u, offset %u)",
+ slab->slab_number, block_number);
+
+ *counter_ptr = MAXIMUM_REFERENCE_COUNT;
+ if (lock != NULL)
+ vdo_unassign_pbn_lock_provisional_reference(lock);
+ return VDO_SUCCESS;
+
+ default:
+ return vdo_log_error_strerror(VDO_REF_COUNT_INVALID,
+ "Incrementing a block map block which is already referenced %u times (slab %u, offset %u)",
+ *counter_ptr, slab->slab_number,
+ block_number);
+ }
+}
+
+static bool __must_check is_valid_journal_point(const struct journal_point *point)
+{
+ return ((point != NULL) && (point->sequence_number > 0));
+}
+
+/**
+ * update_reference_count() - Update the reference count of a block.
+ * @slab: The slab which owns the block.
+ * @block: The reference block which contains the block being updated.
+ * @block_number: The block to update.
+ * @slab_journal_point: The slab journal point at which this update is journaled.
+ * @updater: The reference updater.
+ * @normal_operation: Whether we are in normal operation vs. recovery or rebuild.
+ * @adjust_block_count: Whether to update the slab's free block count.
+ * @provisional_decrement_ptr: A pointer which will be set to true if this update was a decrement
+ * of a provisional reference.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int update_reference_count(struct vdo_slab *slab, struct reference_block *block,
+ slab_block_number block_number,
+ const struct journal_point *slab_journal_point,
+ struct reference_updater *updater,
+ bool normal_operation, bool adjust_block_count,
+ bool *provisional_decrement_ptr)
+{
+ vdo_refcount_t *counter_ptr = &slab->counters[block_number];
+ enum reference_status old_status = reference_count_to_status(*counter_ptr);
+ int result;
+
+ if (!updater->increment) {
+ result = decrement_for_data(slab, block, block_number, old_status,
+ updater, counter_ptr, adjust_block_count);
+ if ((result == VDO_SUCCESS) && (old_status == RS_PROVISIONAL)) {
+ if (provisional_decrement_ptr != NULL)
+ *provisional_decrement_ptr = true;
+ return VDO_SUCCESS;
+ }
+ } else if (updater->operation == VDO_JOURNAL_DATA_REMAPPING) {
+ result = increment_for_data(slab, block, block_number, old_status,
+ updater->lock, counter_ptr, adjust_block_count);
+ } else {
+ result = increment_for_block_map(slab, block, block_number, old_status,
+ updater->lock, normal_operation,
+ counter_ptr, adjust_block_count);
+ }
+
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (is_valid_journal_point(slab_journal_point))
+ slab->slab_journal_point = *slab_journal_point;
+
+ return VDO_SUCCESS;
+}
+
+static int __must_check adjust_reference_count(struct vdo_slab *slab,
+ struct reference_updater *updater,
+ const struct journal_point *slab_journal_point)
+{
+ slab_block_number block_number;
+ int result;
+ struct reference_block *block;
+ bool provisional_decrement = false;
+
+ if (!is_slab_open(slab))
+ return VDO_INVALID_ADMIN_STATE;
+
+ result = slab_block_number_from_pbn(slab, updater->zpbn.pbn, &block_number);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ block = get_reference_block(slab, block_number);
+ result = update_reference_count(slab, block, block_number, slab_journal_point,
+ updater, NORMAL_OPERATION, true,
+ &provisional_decrement);
+ if ((result != VDO_SUCCESS) || provisional_decrement)
+ return result;
+
+ if (block->is_dirty && (block->slab_journal_lock > 0)) {
+ sequence_number_t entry_lock = slab_journal_point->sequence_number;
+ /*
+ * This block is already dirty and a slab journal entry has been made for it since
+ * the last time it was clean. We must release the per-entry slab journal lock for
+ * the entry associated with the update we are now doing.
+ */
+ result = VDO_ASSERT(is_valid_journal_point(slab_journal_point),
+ "Reference count adjustments need slab journal points.");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ adjust_slab_journal_block_reference(&slab->journal, entry_lock, -1);
+ return VDO_SUCCESS;
+ }
+
+ /*
+ * This may be the first time we are applying an update for which there is a slab journal
+ * entry to this block since the block was cleaned. Therefore, we convert the per-entry
+ * slab journal lock to an uncommitted reference block lock, if there is a per-entry lock.
+ */
+ if (is_valid_journal_point(slab_journal_point))
+ block->slab_journal_lock = slab_journal_point->sequence_number;
+ else
+ block->slab_journal_lock = 0;
+
+ dirty_block(block);
+ return VDO_SUCCESS;
+}
+
+/**
+ * add_entry_from_waiter() - Add an entry to the slab journal.
+ * @waiter: The vio which should make an entry now.
+ * @context: The slab journal to make an entry in.
+ *
+ * This callback is invoked by add_entries() once it has determined that we are ready to make
+ * another entry in the slab journal. Implements waiter_callback_fn.
+ */
+static void add_entry_from_waiter(struct vdo_waiter *waiter, void *context)
+{
+ int result;
+ struct reference_updater *updater =
+ container_of(waiter, struct reference_updater, waiter);
+ struct data_vio *data_vio = data_vio_from_reference_updater(updater);
+ struct slab_journal *journal = context;
+ struct slab_journal_block_header *header = &journal->tail_header;
+ struct journal_point slab_journal_point = {
+ .sequence_number = header->sequence_number,
+ .entry_count = header->entry_count,
+ };
+ sequence_number_t recovery_block = data_vio->recovery_journal_point.sequence_number;
+
+ if (header->entry_count == 0) {
+ /*
+ * This is the first entry in the current tail block, so get a lock on the recovery
+ * journal which we will hold until this tail block is committed.
+ */
+ get_lock(journal, header->sequence_number)->recovery_start = recovery_block;
+ if (journal->recovery_journal != NULL) {
+ zone_count_t zone_number = journal->slab->allocator->zone_number;
+
+ vdo_acquire_recovery_journal_block_reference(journal->recovery_journal,
+ recovery_block,
+ VDO_ZONE_TYPE_PHYSICAL,
+ zone_number);
+ }
+
+ mark_slab_journal_dirty(journal, recovery_block);
+ reclaim_journal_space(journal);
+ }
+
+ add_entry(journal, updater->zpbn.pbn, updater->operation, updater->increment,
+ expand_journal_point(data_vio->recovery_journal_point,
+ updater->increment));
+
+ if (journal->slab->status != VDO_SLAB_REBUILT) {
+ /*
+ * If the slab is unrecovered, scrubbing will take care of the count since the
+ * update is now recorded in the journal.
+ */
+ adjust_slab_journal_block_reference(journal,
+ slab_journal_point.sequence_number, -1);
+ result = VDO_SUCCESS;
+ } else {
+ /* Now that an entry has been made in the slab journal, update the counter. */
+ result = adjust_reference_count(journal->slab, updater,
+ &slab_journal_point);
+ }
+
+ if (updater->increment)
+ continue_data_vio_with_error(data_vio, result);
+ else
+ vdo_continue_completion(&data_vio->decrement_completion, result);
+}
+
+/**
+ * is_next_entry_a_block_map_increment() - Check whether the next entry to be made is a block map
+ * increment.
+ * @journal: The journal.
+ *
+ * Return: true if the first entry waiter's operation is a block map increment.
+ */
+static inline bool is_next_entry_a_block_map_increment(struct slab_journal *journal)
+{
+ struct vdo_waiter *waiter = vdo_waitq_get_first_waiter(&journal->entry_waiters);
+ struct reference_updater *updater =
+ container_of(waiter, struct reference_updater, waiter);
+
+ return (updater->operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING);
+}
+
+/**
+ * add_entries() - Add as many entries as possible from the queue of vios waiting to make entries.
+ * @journal: The journal to which entries may be added.
+ *
+ * By processing the queue in order, we ensure that slab journal entries are made in the same order
+ * as recovery journal entries for the same increment or decrement.
+ */
+static void add_entries(struct slab_journal *journal)
+{
+ if (journal->adding_entries) {
+ /* Protect against re-entrancy. */
+ return;
+ }
+
+ journal->adding_entries = true;
+ while (vdo_waitq_has_waiters(&journal->entry_waiters)) {
+ struct slab_journal_block_header *header = &journal->tail_header;
+
+ if (journal->partial_write_in_progress ||
+ (journal->slab->status == VDO_SLAB_REBUILDING)) {
+ /*
+ * Don't add entries while rebuilding or while a partial write is
+ * outstanding, as it could result in reference count corruption.
+ */
+ break;
+ }
+
+ if (journal->waiting_to_commit) {
+ /*
+ * If we are waiting for resources to write the tail block, and the tail
+ * block is full, we can't make another entry.
+ */
+ WRITE_ONCE(journal->events->tail_busy_count,
+ journal->events->tail_busy_count + 1);
+ break;
+ } else if (is_next_entry_a_block_map_increment(journal) &&
+ (header->entry_count >= journal->full_entries_per_block)) {
+ /*
+ * The tail block does not have room for a block map increment, so commit
+ * it now.
+ */
+ commit_tail(journal);
+ if (journal->waiting_to_commit) {
+ WRITE_ONCE(journal->events->tail_busy_count,
+ journal->events->tail_busy_count + 1);
+ break;
+ }
+ }
+
+ /* If the slab is over the blocking threshold, make the vio wait. */
+ if (requires_reaping(journal)) {
+ WRITE_ONCE(journal->events->blocked_count,
+ journal->events->blocked_count + 1);
+ save_dirty_reference_blocks(journal->slab);
+ break;
+ }
+
+ if (header->entry_count == 0) {
+ struct journal_lock *lock =
+ get_lock(journal, header->sequence_number);
+
+ /*
+			 * Check if the on-disk slab journal is full. Because of the blocking and
+ * scrubbing thresholds, this should never happen.
+ */
+ if (lock->count > 0) {
+ VDO_ASSERT_LOG_ONLY((journal->head + journal->size) == journal->tail,
+ "New block has locks, but journal is not full");
+
+ /*
+ * The blocking threshold must let the journal fill up if the new
+ * block has locks; if the blocking threshold is smaller than the
+ * journal size, the new block cannot possibly have locks already.
+ */
+ VDO_ASSERT_LOG_ONLY((journal->blocking_threshold >= journal->size),
+ "New block can have locks already iff blocking threshold is at the end of the journal");
+
+ WRITE_ONCE(journal->events->disk_full_count,
+ journal->events->disk_full_count + 1);
+ save_dirty_reference_blocks(journal->slab);
+ break;
+ }
+
+ /*
+ * Don't allow the new block to be reaped until all of the reference count
+ * blocks are written and the journal block has been fully committed as
+ * well.
+ */
+ lock->count = journal->entries_per_block + 1;
+
+ if (header->sequence_number == 1) {
+ struct vdo_slab *slab = journal->slab;
+ block_count_t i;
+
+ /*
+ * This is the first entry in this slab journal, ever. Dirty all of
+ * the reference count blocks. Each will acquire a lock on the tail
+ * block so that the journal won't be reaped until the reference
+ * counts are initialized. The lock acquisition must be done by the
+ * ref_counts since here we don't know how many reference blocks
+ * the ref_counts has.
+ */
+ for (i = 0; i < slab->reference_block_count; i++) {
+ slab->reference_blocks[i].slab_journal_lock = 1;
+ dirty_block(&slab->reference_blocks[i]);
+ }
+
+ adjust_slab_journal_block_reference(journal, 1,
+ slab->reference_block_count);
+ }
+ }
+
+ vdo_waitq_notify_next_waiter(&journal->entry_waiters,
+ add_entry_from_waiter, journal);
+ }
+
+ journal->adding_entries = false;
+
+ /* If there are no waiters, and we are flushing or saving, commit the tail block. */
+ if (vdo_is_state_draining(&journal->slab->state) &&
+ !vdo_is_state_suspending(&journal->slab->state) &&
+ !vdo_waitq_has_waiters(&journal->entry_waiters))
+ commit_tail(journal);
+}
+
+/**
+ * reset_search_cursor() - Reset the free block search back to the first reference counter in the
+ * first reference block of a slab.
+ */
+static void reset_search_cursor(struct vdo_slab *slab)
+{
+ struct search_cursor *cursor = &slab->search_cursor;
+
+ cursor->block = cursor->first_block;
+ cursor->index = 0;
+ /* Unit tests have slabs with only one reference block (and it's a runt). */
+ cursor->end_index = min_t(u32, COUNTS_PER_BLOCK, slab->block_count);
+}
+
+/**
+ * advance_search_cursor() - Advance the search cursor to the start of the next reference block in
+ * a slab.
+ *
+ * Wraps around to the first reference block if the current block is the last reference block.
+ *
+ * Return: true unless the cursor was at the last reference block.
+ */
+static bool advance_search_cursor(struct vdo_slab *slab)
+{
+ struct search_cursor *cursor = &slab->search_cursor;
+
+ /*
+ * If we just finished searching the last reference block, then wrap back around to the
+ * start of the array.
+ */
+ if (cursor->block == cursor->last_block) {
+ reset_search_cursor(slab);
+ return false;
+ }
+
+	/* We're not already at the end, so advance the cursor to the next block. */
+ cursor->block++;
+ cursor->index = cursor->end_index;
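+	/* The old end_index is the first counter index of the next block. */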
+
+ if (cursor->block == cursor->last_block) {
+ /* The last reference block will usually be a runt. */
+ cursor->end_index = slab->block_count;
+ } else {
+ cursor->end_index += COUNTS_PER_BLOCK;
+ }
+
+ return true;
+}
+
+/**
+ * vdo_adjust_reference_count_for_rebuild() - Adjust the reference count of a block during rebuild.
+ * @depot: The slab depot.
+ * @pbn: The physical block number of the block to adjust.
+ * @operation: The journal operation of the entry being replayed.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_adjust_reference_count_for_rebuild(struct slab_depot *depot,
+ physical_block_number_t pbn,
+ enum journal_operation operation)
+{
+ int result;
+ slab_block_number block_number;
+ struct reference_block *block;
+ struct vdo_slab *slab = vdo_get_slab(depot, pbn);
+ struct reference_updater updater = {
+ .operation = operation,
+ .increment = true,
+ };
+
+ result = slab_block_number_from_pbn(slab, pbn, &block_number);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ block = get_reference_block(slab, block_number);
+ result = update_reference_count(slab, block, block_number, NULL,
+ &updater, !NORMAL_OPERATION, false, NULL);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ dirty_block(block);
+ return VDO_SUCCESS;
+}
+
+/**
+ * replay_reference_count_change() - Replay the reference count adjustment from a slab journal
+ * entry into the reference count for a block.
+ * @slab: The slab.
+ * @entry_point: The slab journal point for the entry.
+ * @entry: The slab journal entry being replayed.
+ *
+ * The adjustment will be ignored if it was already recorded in the reference count.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int replay_reference_count_change(struct vdo_slab *slab,
+ const struct journal_point *entry_point,
+ struct slab_journal_entry entry)
+{
+ int result;
+ struct reference_block *block = get_reference_block(slab, entry.sbn);
+ sector_count_t sector = (entry.sbn % COUNTS_PER_BLOCK) / COUNTS_PER_SECTOR;
+ struct reference_updater updater = {
+ .operation = entry.operation,
+ .increment = entry.increment,
+ };
+
+ if (!vdo_before_journal_point(&block->commit_points[sector], entry_point)) {
+ /* This entry is already reflected in the existing counts, so do nothing. */
+ return VDO_SUCCESS;
+ }
+
+ /* This entry is not yet counted in the reference counts. */
+ result = update_reference_count(slab, block, entry.sbn, entry_point,
+ &updater, !NORMAL_OPERATION, false, NULL);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ dirty_block(block);
+ return VDO_SUCCESS;
+}
+
+/**
+ * find_zero_byte_in_word() - Find the array index of the first zero byte in a word-sized range of
+ * reference counters.
+ * @word_ptr: A pointer to the eight counter bytes to check.
+ * @start_index: The array index corresponding to word_ptr[0].
+ * @fail_index: The array index to return if no zero byte is found.
+ *
+ * The search does no bounds checking; the function relies on the array being sufficiently padded.
+ *
+ * Return: The array index of the first zero byte in the word, or the value passed as fail_index if
+ * no zero byte was found.
+ */
+static inline slab_block_number find_zero_byte_in_word(const u8 *word_ptr,
+ slab_block_number start_index,
+ slab_block_number fail_index)
+{
+ u64 word = get_unaligned_le64(word_ptr);
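+	/*
+	 * Example (illustrative): counter bytes {3, 1, 0, ...} with a start_index
+	 * of 40 return 42, the index of the first zero byte.
+	 */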
+
+ /* This looks like a loop, but GCC will unroll the eight iterations for us. */
+ unsigned int offset;
+
+ for (offset = 0; offset < BYTES_PER_WORD; offset++) {
+		/* Assumes little-endian byte order, which we have on x86. */
+ if ((word & 0xFF) == 0)
+ return (start_index + offset);
+ word >>= 8;
+ }
+
+ return fail_index;
+}
+
+/**
+ * find_free_block() - Find the first block with a reference count of zero in the specified
+ * range of reference counter indexes.
+ * @slab: The slab counters to scan.
+ * @index_ptr: A pointer to hold the array index of the free block.
+ *
+ * Exposed for unit testing.
+ *
+ * Return: true if a free block was found in the specified range.
+ */
+static bool find_free_block(const struct vdo_slab *slab, slab_block_number *index_ptr)
+{
+ slab_block_number zero_index;
+ slab_block_number next_index = slab->search_cursor.index;
+ slab_block_number end_index = slab->search_cursor.end_index;
+ u8 *next_counter = &slab->counters[next_index];
+ u8 *end_counter = &slab->counters[end_index];
+
+ /*
+ * Search every byte of the first unaligned word. (Array is padded so reading past end is
+ * safe.)
+ */
+ zero_index = find_zero_byte_in_word(next_counter, next_index, end_index);
+ if (zero_index < end_index) {
+ *index_ptr = zero_index;
+ return true;
+ }
+
+ /*
+ * On architectures where unaligned word access is expensive, this would be a good place to
+ * advance to an alignment boundary.
+ */
+ next_index += BYTES_PER_WORD;
+ next_counter += BYTES_PER_WORD;
+
+ /*
+	 * Now scan a full word at a time until we find a word containing a zero.
+ * (Array is padded so reading past end is safe.)
+ */
+ while (next_counter < end_counter) {
+ /*
+ * The following code is currently an exact copy of the code preceding the loop,
+ * but if you try to merge them by using a do loop, it runs slower because a jump
+ * instruction gets added at the start of the iteration.
+ */
+ zero_index = find_zero_byte_in_word(next_counter, next_index, end_index);
+ if (zero_index < end_index) {
+ *index_ptr = zero_index;
+ return true;
+ }
+
+ next_index += BYTES_PER_WORD;
+ next_counter += BYTES_PER_WORD;
+ }
+
+ return false;
+}
+
+/**
+ * search_current_reference_block() - Search the reference block currently saved in the search
+ * cursor for a reference count of zero, starting at the saved
+ * counter index.
+ * @slab: The slab to search.
+ * @free_index_ptr: A pointer to receive the array index of the zero reference count.
+ *
+ * Return: true if an unreferenced counter was found.
+ */
+static bool search_current_reference_block(const struct vdo_slab *slab,
+ slab_block_number *free_index_ptr)
+{
+ /* Don't bother searching if the current block is known to be full. */
+ return ((slab->search_cursor.block->allocated_count < COUNTS_PER_BLOCK) &&
+ find_free_block(slab, free_index_ptr));
+}
+
+/**
+ * search_reference_blocks() - Search each reference block for a reference count of zero.
+ * @slab: The slab to search.
+ * @free_index_ptr: A pointer to receive the array index of the zero reference count.
+ *
+ * Searches each reference block for a reference count of zero, starting at the reference block and
+ * counter index saved in the search cursor and searching up to the end of the last reference
+ * block. The search does not wrap.
+ *
+ * Return: true if an unreferenced counter was found.
+ */
+static bool search_reference_blocks(struct vdo_slab *slab,
+ slab_block_number *free_index_ptr)
+{
+ /* Start searching at the saved search position in the current block. */
+ if (search_current_reference_block(slab, free_index_ptr))
+ return true;
+
+ /* Search each reference block up to the end of the slab. */
+ while (advance_search_cursor(slab)) {
+ if (search_current_reference_block(slab, free_index_ptr))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * make_provisional_reference() - Do the bookkeeping for making a provisional reference.
+ * @slab: The slab which contains the block.
+ * @block_number: The block to provisionally reference.
+ */
+static void make_provisional_reference(struct vdo_slab *slab,
+ slab_block_number block_number)
+{
+ struct reference_block *block = get_reference_block(slab, block_number);
+
+ /*
+ * Make the initial transition from an unreferenced block to a
+ * provisionally allocated block.
+ */
+ slab->counters[block_number] = PROVISIONAL_REFERENCE_COUNT;
+
+ /* Account for the allocation. */
+ block->allocated_count++;
+ slab->free_blocks--;
+}
+
+/**
+ * dirty_all_reference_blocks() - Mark all reference count blocks in a slab as dirty.
+ */
+static void dirty_all_reference_blocks(struct vdo_slab *slab)
+{
+ block_count_t i;
+
+ for (i = 0; i < slab->reference_block_count; i++)
+ dirty_block(&slab->reference_blocks[i]);
+}
+
+/**
+ * clear_provisional_references() - Clear the provisional reference counts from a reference block.
+ * @block: The block to clear.
+ */
+static void clear_provisional_references(struct reference_block *block)
+{
+ vdo_refcount_t *counters = get_reference_counters_for_block(block);
+ block_count_t j;
+
+ for (j = 0; j < COUNTS_PER_BLOCK; j++) {
+ if (counters[j] == PROVISIONAL_REFERENCE_COUNT) {
+ counters[j] = EMPTY_REFERENCE_COUNT;
+ block->allocated_count--;
+ }
+ }
+}
+
+static inline bool journal_points_equal(struct journal_point first,
+ struct journal_point second)
+{
+ return ((first.sequence_number == second.sequence_number) &&
+ (first.entry_count == second.entry_count));
+}
+
+/**
+ * unpack_reference_block() - Unpack a packed reference block into the internal memory structure.
+ * @packed: The written reference block to be unpacked.
+ * @block: The internal reference block to be loaded.
+ */
+static void unpack_reference_block(struct packed_reference_block *packed,
+ struct reference_block *block)
+{
+ block_count_t index;
+ sector_count_t i;
+ struct vdo_slab *slab = block->slab;
+ vdo_refcount_t *counters = get_reference_counters_for_block(block);
+
+ for (i = 0; i < VDO_SECTORS_PER_BLOCK; i++) {
+ struct packed_reference_sector *sector = &packed->sectors[i];
+
+ vdo_unpack_journal_point(&sector->commit_point, &block->commit_points[i]);
+ memcpy(counters + (i * COUNTS_PER_SECTOR), sector->counts,
+ (sizeof(vdo_refcount_t) * COUNTS_PER_SECTOR));
+ /* The slab_journal_point must be the latest point found in any sector. */
+ if (vdo_before_journal_point(&slab->slab_journal_point,
+ &block->commit_points[i]))
+ slab->slab_journal_point = block->commit_points[i];
+
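+		/*
+		 * Each sector records its own commit point, so sectors of one block
+		 * which disagree indicate that the block's write was torn.
+		 */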
+ if ((i > 0) &&
+ !journal_points_equal(block->commit_points[0],
+ block->commit_points[i])) {
+ size_t block_index = block - block->slab->reference_blocks;
+
+ vdo_log_warning("Torn write detected in sector %u of reference block %zu of slab %u",
+ i, block_index, block->slab->slab_number);
+ }
+ }
+
+ block->allocated_count = 0;
+ for (index = 0; index < COUNTS_PER_BLOCK; index++) {
+ if (counters[index] != EMPTY_REFERENCE_COUNT)
+ block->allocated_count++;
+ }
+}
+
+/**
+ * finish_reference_block_load() - After a reference block has been read, unpack it.
+ * @completion: The VIO that just finished reading.
+ */
+static void finish_reference_block_load(struct vdo_completion *completion)
+{
+ struct vio *vio = as_vio(completion);
+ struct pooled_vio *pooled = vio_as_pooled_vio(vio);
+ struct reference_block *block = completion->parent;
+ struct vdo_slab *slab = block->slab;
+
+ unpack_reference_block((struct packed_reference_block *) vio->data, block);
+ return_vio_to_pool(slab->allocator->vio_pool, pooled);
+ slab->active_count--;
+ clear_provisional_references(block);
+
+ slab->free_blocks -= block->allocated_count;
+ check_if_slab_drained(slab);
+}
+
+static void load_reference_block_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct reference_block *block = vio->completion.parent;
+
+ continue_vio_after_io(vio, finish_reference_block_load,
+ block->slab->allocator->thread_id);
+}
+
+/**
+ * load_reference_block() - After a block waiter has gotten a VIO from the VIO pool, load the
+ * block.
+ * @waiter: The waiter of the block to load.
+ * @context: The VIO returned by the pool.
+ */
+static void load_reference_block(struct vdo_waiter *waiter, void *context)
+{
+ struct pooled_vio *pooled = context;
+ struct vio *vio = &pooled->vio;
+ struct reference_block *block =
+ container_of(waiter, struct reference_block, waiter);
+ size_t block_offset = (block - block->slab->reference_blocks);
+
+ vio->completion.parent = block;
+ vdo_submit_metadata_vio(vio, block->slab->ref_counts_origin + block_offset,
+ load_reference_block_endio, handle_io_error,
+ REQ_OP_READ);
+}
+
+/**
+ * load_reference_blocks() - Load a slab's reference blocks from the underlying storage into a
+ * pre-allocated reference counter array.
+ */
+static void load_reference_blocks(struct vdo_slab *slab)
+{
+ block_count_t i;
+
+ slab->free_blocks = slab->block_count;
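+	/*
+	 * Start from a fully free count; finish_reference_block_load() subtracts
+	 * each block's allocated count as the block is unpacked.
+	 */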
+ slab->active_count = slab->reference_block_count;
+ for (i = 0; i < slab->reference_block_count; i++) {
+ struct vdo_waiter *waiter = &slab->reference_blocks[i].waiter;
+
+ waiter->callback = load_reference_block;
+ acquire_vio_from_pool(slab->allocator->vio_pool, waiter);
+ }
+}
+
+/**
+ * drain_slab() - Drain all reference count I/O.
+ * @slab: The slab to drain.
+ *
+ * Depending upon the type of drain being performed (as recorded in the vdo_slab's admin state),
+ * reference blocks may be loaded from disk or dirty reference blocks may be written out.
+ */
+static void drain_slab(struct vdo_slab *slab)
+{
+ bool save;
+ bool load;
+ const struct admin_state_code *state = vdo_get_admin_state_code(&slab->state);
+
+ if (state == VDO_ADMIN_STATE_SUSPENDING)
+ return;
+
+ if ((state != VDO_ADMIN_STATE_REBUILDING) &&
+ (state != VDO_ADMIN_STATE_SAVE_FOR_SCRUBBING))
+ commit_tail(&slab->journal);
+
+ if ((state == VDO_ADMIN_STATE_RECOVERING) || (slab->counters == NULL))
+ return;
+
+ save = false;
+ load = slab->allocator->summary_entries[slab->slab_number].load_ref_counts;
+ if (state == VDO_ADMIN_STATE_SCRUBBING) {
+ if (load) {
+ load_reference_blocks(slab);
+ return;
+ }
+ } else if (state == VDO_ADMIN_STATE_SAVE_FOR_SCRUBBING) {
+ if (!load) {
+ /* These reference counts were never written, so mark them all dirty. */
+ dirty_all_reference_blocks(slab);
+ }
+ save = true;
+ } else if (state == VDO_ADMIN_STATE_REBUILDING) {
+ /*
+ * Write out the counters if the slab has written them before, or it has any
+ * non-zero reference counts, or there are any slab journal blocks.
+ */
+ block_count_t data_blocks = slab->allocator->depot->slab_config.data_blocks;
+
+ if (load || (slab->free_blocks != data_blocks) ||
+ !is_slab_journal_blank(slab)) {
+ dirty_all_reference_blocks(slab);
+ save = true;
+ }
+ } else if (state == VDO_ADMIN_STATE_SAVING) {
+ save = (slab->status == VDO_SLAB_REBUILT);
+ } else {
+ vdo_finish_draining_with_result(&slab->state, VDO_SUCCESS);
+ return;
+ }
+
+ if (save)
+ save_dirty_reference_blocks(slab);
+}
+
+static int allocate_slab_counters(struct vdo_slab *slab)
+{
+ int result;
+ size_t index, bytes;
+
+ result = VDO_ASSERT(slab->reference_blocks == NULL,
+ "vdo_slab %u doesn't allocate refcounts twice",
+ slab->slab_number);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(slab->reference_block_count, struct reference_block,
+ __func__, &slab->reference_blocks);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /*
+ * Allocate such that the runt slab has a full-length memory array, plus a little padding
+ * so we can word-search even at the very end.
+ */
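+	/*
+	 * Two words of padding is presumably enough margin: the first, unaligned
+	 * word searched may begin just before the end of the counters, and each
+	 * full-word read may run up to a word past the last counter.
+	 */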
+ bytes = (slab->reference_block_count * COUNTS_PER_BLOCK) + (2 * BYTES_PER_WORD);
+ result = vdo_allocate(bytes, vdo_refcount_t, "ref counts array",
+ &slab->counters);
+ if (result != VDO_SUCCESS) {
+ vdo_free(vdo_forget(slab->reference_blocks));
+ return result;
+ }
+
+ slab->search_cursor.first_block = slab->reference_blocks;
+ slab->search_cursor.last_block = &slab->reference_blocks[slab->reference_block_count - 1];
+ reset_search_cursor(slab);
+
+ for (index = 0; index < slab->reference_block_count; index++) {
+ slab->reference_blocks[index] = (struct reference_block) {
+ .slab = slab,
+ };
+ }
+
+ return VDO_SUCCESS;
+}
+
+static int allocate_counters_if_clean(struct vdo_slab *slab)
+{
+ if (vdo_is_state_clean_load(&slab->state))
+ return allocate_slab_counters(slab);
+
+ return VDO_SUCCESS;
+}
+
+static void finish_loading_journal(struct vdo_completion *completion)
+{
+ struct vio *vio = as_vio(completion);
+ struct slab_journal *journal = completion->parent;
+ struct vdo_slab *slab = journal->slab;
+ struct packed_slab_journal_block *block = (struct packed_slab_journal_block *) vio->data;
+ struct slab_journal_block_header header;
+
+ vdo_unpack_slab_journal_block_header(&block->header, &header);
+
+ /* FIXME: should it be an error if the following conditional fails? */
+ if ((header.metadata_type == VDO_METADATA_SLAB_JOURNAL) &&
+ (header.nonce == slab->allocator->nonce)) {
+ journal->tail = header.sequence_number + 1;
+
+ /*
+ * If the slab is clean, this implies the slab journal is empty, so advance the
+ * head appropriately.
+ */
+ journal->head = (slab->allocator->summary_entries[slab->slab_number].is_dirty ?
+ header.head : journal->tail);
+ journal->tail_header = header;
+ initialize_journal_state(journal);
+ }
+
+ return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+ vdo_finish_loading_with_result(&slab->state, allocate_counters_if_clean(slab));
+}
+
+static void read_slab_journal_tail_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct slab_journal *journal = vio->completion.parent;
+
+ continue_vio_after_io(vio, finish_loading_journal,
+ journal->slab->allocator->thread_id);
+}
+
+static void handle_load_error(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct slab_journal *journal = completion->parent;
+ struct vio *vio = as_vio(completion);
+
+ vio_record_metadata_io_error(vio);
+ return_vio_to_pool(journal->slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+ vdo_finish_loading_with_result(&journal->slab->state, result);
+}
+
+/**
+ * read_slab_journal_tail() - Read the slab journal tail block by using a vio acquired from the vio
+ * pool.
+ * @waiter: The vio pool waiter which has just been notified.
+ * @context: The vio pool entry given to the waiter.
+ *
+ * This is the success callback from acquire_vio_from_pool() when loading a slab journal.
+ */
+static void read_slab_journal_tail(struct vdo_waiter *waiter, void *context)
+{
+ struct slab_journal *journal =
+ container_of(waiter, struct slab_journal, resource_waiter);
+ struct vdo_slab *slab = journal->slab;
+ struct pooled_vio *pooled = context;
+ struct vio *vio = &pooled->vio;
+ tail_block_offset_t last_commit_point =
+ slab->allocator->summary_entries[slab->slab_number].tail_block_offset;
+
+ /*
+ * Slab summary keeps the commit point offset, so the tail block is the block before that.
+ * Calculation supports small journals in unit tests.
+ */
+ tail_block_offset_t tail_block = ((last_commit_point == 0) ?
+ (tail_block_offset_t)(journal->size - 1) :
+ (last_commit_point - 1));
+
+ vio->completion.parent = journal;
+ vio->completion.callback_thread_id = slab->allocator->thread_id;
+ vdo_submit_metadata_vio(vio, slab->journal_origin + tail_block,
+ read_slab_journal_tail_endio, handle_load_error,
+ REQ_OP_READ);
+}
+
+/**
+ * load_slab_journal() - Load a slab's journal by reading the journal's tail.
+ */
+static void load_slab_journal(struct vdo_slab *slab)
+{
+ struct slab_journal *journal = &slab->journal;
+ tail_block_offset_t last_commit_point;
+
+ last_commit_point = slab->allocator->summary_entries[slab->slab_number].tail_block_offset;
+ if ((last_commit_point == 0) &&
+ !slab->allocator->summary_entries[slab->slab_number].load_ref_counts) {
+ /*
+ * This slab claims that it has a tail block at (journal->size - 1), but a head of
+ * 1. This is impossible, due to the scrubbing threshold, on a real system, so
+ * don't bother reading the (bogus) data off disk.
+ */
+ VDO_ASSERT_LOG_ONLY(((journal->size < 16) ||
+ (journal->scrubbing_threshold < (journal->size - 1))),
+ "Scrubbing threshold protects against reads of unwritten slab journal blocks");
+ vdo_finish_loading_with_result(&slab->state,
+ allocate_counters_if_clean(slab));
+ return;
+ }
+
+ journal->resource_waiter.callback = read_slab_journal_tail;
+ acquire_vio_from_pool(slab->allocator->vio_pool, &journal->resource_waiter);
+}
+
+static void register_slab_for_scrubbing(struct vdo_slab *slab, bool high_priority)
+{
+ struct slab_scrubber *scrubber = &slab->allocator->scrubber;
+
+ VDO_ASSERT_LOG_ONLY((slab->status != VDO_SLAB_REBUILT),
+ "slab to be scrubbed is unrecovered");
+
+ if (slab->status != VDO_SLAB_REQUIRES_SCRUBBING)
+ return;
+
+ list_del_init(&slab->allocq_entry);
+ if (!slab->was_queued_for_scrubbing) {
+ WRITE_ONCE(scrubber->slab_count, scrubber->slab_count + 1);
+ slab->was_queued_for_scrubbing = true;
+ }
+
+ if (high_priority) {
+ slab->status = VDO_SLAB_REQUIRES_HIGH_PRIORITY_SCRUBBING;
+ list_add_tail(&slab->allocq_entry, &scrubber->high_priority_slabs);
+ return;
+ }
+
+ list_add_tail(&slab->allocq_entry, &scrubber->slabs);
+}
+
+/* Queue a slab for allocation or scrubbing. */
+static void queue_slab(struct vdo_slab *slab)
+{
+ struct block_allocator *allocator = slab->allocator;
+ block_count_t free_blocks;
+ int result;
+
+ VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
+ "a requeued slab must not already be on a ring");
+
+ if (vdo_is_read_only(allocator->depot->vdo))
+ return;
+
+ free_blocks = slab->free_blocks;
+ result = VDO_ASSERT((free_blocks <= allocator->depot->slab_config.data_blocks),
+ "rebuilt slab %u must have a valid free block count (has %llu, expected maximum %llu)",
+ slab->slab_number, (unsigned long long) free_blocks,
+ (unsigned long long) allocator->depot->slab_config.data_blocks);
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(allocator->depot->vdo, result);
+ return;
+ }
+
+ if (slab->status != VDO_SLAB_REBUILT) {
+ register_slab_for_scrubbing(slab, false);
+ return;
+ }
+
+ if (!vdo_is_state_resuming(&slab->state)) {
+ /*
+ * If the slab is resuming, we've already accounted for it here, so don't do it
+ * again.
+ * FIXME: under what situation would the slab be resuming here?
+ */
+ WRITE_ONCE(allocator->allocated_blocks,
+ allocator->allocated_blocks - free_blocks);
+ if (!is_slab_journal_blank(slab)) {
+ WRITE_ONCE(allocator->statistics.slabs_opened,
+ allocator->statistics.slabs_opened + 1);
+ }
+ }
+
+ if (allocator->depot->vdo->suspend_type == VDO_ADMIN_STATE_SAVING)
+ reopen_slab_journal(slab);
+
+ prioritize_slab(slab);
+}
+
+/**
+ * initiate_slab_action() - Initiate a slab action.
+ *
+ * Implements vdo_admin_initiator_fn.
+ */
+static void initiate_slab_action(struct admin_state *state)
+{
+ struct vdo_slab *slab = container_of(state, struct vdo_slab, state);
+
+ if (vdo_is_state_draining(state)) {
+ const struct admin_state_code *operation = vdo_get_admin_state_code(state);
+
+ if (operation == VDO_ADMIN_STATE_SCRUBBING)
+ slab->status = VDO_SLAB_REBUILDING;
+
+ drain_slab(slab);
+ check_if_slab_drained(slab);
+ return;
+ }
+
+ if (vdo_is_state_loading(state)) {
+ load_slab_journal(slab);
+ return;
+ }
+
+ if (vdo_is_state_resuming(state)) {
+ queue_slab(slab);
+ vdo_finish_resuming(state);
+ return;
+ }
+
+ vdo_finish_operation(state, VDO_INVALID_ADMIN_STATE);
+}
+
+/**
+ * get_next_slab() - Get the next slab to scrub.
+ * @scrubber: The slab scrubber.
+ *
+ * Return: The next slab to scrub or NULL if there are none.
+ */
+static struct vdo_slab *get_next_slab(struct slab_scrubber *scrubber)
+{
+ struct vdo_slab *slab;
+
+ slab = list_first_entry_or_null(&scrubber->high_priority_slabs,
+ struct vdo_slab, allocq_entry);
+ if (slab != NULL)
+ return slab;
+
+ return list_first_entry_or_null(&scrubber->slabs, struct vdo_slab,
+ allocq_entry);
+}
+
+/**
+ * has_slabs_to_scrub() - Check whether a scrubber has slabs to scrub.
+ * @scrubber: The scrubber to check.
+ *
+ * Return: true if the scrubber has slabs to scrub.
+ */
+static inline bool __must_check has_slabs_to_scrub(struct slab_scrubber *scrubber)
+{
+ return (get_next_slab(scrubber) != NULL);
+}
+
+/**
+ * uninitialize_scrubber_vio() - Clean up the slab_scrubber's vio.
+ * @scrubber: The scrubber.
+ */
+static void uninitialize_scrubber_vio(struct slab_scrubber *scrubber)
+{
+ vdo_free(vdo_forget(scrubber->vio.data));
+ free_vio_components(&scrubber->vio);
+}
+
+/**
+ * finish_scrubbing() - Stop scrubbing, either because there are no more slabs to scrub or because
+ * there's been an error.
+ * @scrubber: The scrubber.
+ * @result: The result of the scrubbing operation.
+ */
+static void finish_scrubbing(struct slab_scrubber *scrubber, int result)
+{
+ bool notify = vdo_waitq_has_waiters(&scrubber->waiters);
+ bool done = !has_slabs_to_scrub(scrubber);
+ struct block_allocator *allocator =
+ container_of(scrubber, struct block_allocator, scrubber);
+
+ if (done)
+ uninitialize_scrubber_vio(scrubber);
+
+ if (scrubber->high_priority_only) {
+ scrubber->high_priority_only = false;
+ vdo_fail_completion(vdo_forget(scrubber->vio.completion.parent), result);
+ } else if (done && (atomic_add_return(-1, &allocator->depot->zones_to_scrub) == 0)) {
+ /* All of our slabs were scrubbed, and we're the last allocator to finish. */
+ enum vdo_state prior_state =
+ atomic_cmpxchg(&allocator->depot->vdo->state, VDO_RECOVERING,
+ VDO_DIRTY);
+
+ /*
+ * To be safe, even if the CAS failed, ensure anything that follows is ordered with
+ * respect to whatever state change did happen.
+ */
+ smp_mb__after_atomic();
+
+ /*
+ * We must check the VDO state here and not the depot's read_only_notifier since
+ * the compare-swap-above could have failed due to a read-only entry which our own
+ * thread does not yet know about.
+ */
+ if (prior_state == VDO_DIRTY)
+ vdo_log_info("VDO commencing normal operation");
+ else if (prior_state == VDO_RECOVERING)
+ vdo_log_info("Exiting recovery mode");
+ }
+
+ /*
+ * Note that the scrubber has stopped, and inform anyone who might be waiting for that to
+ * happen.
+ */
+ if (!vdo_finish_draining(&scrubber->admin_state))
+ WRITE_ONCE(scrubber->admin_state.current_state,
+ VDO_ADMIN_STATE_SUSPENDED);
+
+ /*
+ * We can't notify waiters until after we've finished draining or they'll just requeue.
+ * Fortunately if there were waiters, we can't have been freed yet.
+ */
+ if (notify)
+ vdo_waitq_notify_all_waiters(&scrubber->waiters, NULL, NULL);
+}
+
+static void scrub_next_slab(struct slab_scrubber *scrubber);
+
+/**
+ * slab_scrubbed() - Notify the scrubber that a slab has been scrubbed.
+ * @completion: The slab rebuild completion.
+ *
+ * This callback is registered in apply_journal_entries().
+ */
+static void slab_scrubbed(struct vdo_completion *completion)
+{
+ struct slab_scrubber *scrubber =
+ container_of(as_vio(completion), struct slab_scrubber, vio);
+ struct vdo_slab *slab = scrubber->slab;
+
+ slab->status = VDO_SLAB_REBUILT;
+ queue_slab(slab);
+ reopen_slab_journal(slab);
+ WRITE_ONCE(scrubber->slab_count, scrubber->slab_count - 1);
+ scrub_next_slab(scrubber);
+}
+
+/**
+ * abort_scrubbing() - Abort scrubbing due to an error.
+ * @scrubber: The slab scrubber.
+ * @result: The error.
+ */
+static void abort_scrubbing(struct slab_scrubber *scrubber, int result)
+{
+ vdo_enter_read_only_mode(scrubber->vio.completion.vdo, result);
+ finish_scrubbing(scrubber, result);
+}
+
+/**
+ * handle_scrubber_error() - Handle errors while rebuilding a slab.
+ * @completion: The slab rebuild completion.
+ */
+static void handle_scrubber_error(struct vdo_completion *completion)
+{
+ struct vio *vio = as_vio(completion);
+
+ vio_record_metadata_io_error(vio);
+ abort_scrubbing(container_of(vio, struct slab_scrubber, vio),
+ completion->result);
+}
+
+/**
+ * apply_block_entries() - Apply all the entries in a block to the reference counts.
+ * @block: A block with entries to apply.
+ * @entry_count: The number of entries to apply.
+ * @block_number: The sequence number of the block.
+ * @slab: The slab to apply the entries to.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int apply_block_entries(struct packed_slab_journal_block *block,
+ journal_entry_count_t entry_count,
+ sequence_number_t block_number, struct vdo_slab *slab)
+{
+ struct journal_point entry_point = {
+ .sequence_number = block_number,
+ .entry_count = 0,
+ };
+ int result;
+ slab_block_number max_sbn = slab->end - slab->start;
+
+ while (entry_point.entry_count < entry_count) {
+ struct slab_journal_entry entry =
+ vdo_decode_slab_journal_entry(block, entry_point.entry_count);
+
+ if (entry.sbn > max_sbn) {
+ /* This entry is out of bounds. */
+ return vdo_log_error_strerror(VDO_CORRUPT_JOURNAL,
+ "vdo_slab journal entry (%llu, %u) had invalid offset %u in slab (size %u blocks)",
+ (unsigned long long) block_number,
+ entry_point.entry_count,
+ entry.sbn, max_sbn);
+ }
+
+ result = replay_reference_count_change(slab, &entry_point, entry);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error_strerror(result,
+ "vdo_slab journal entry (%llu, %u) (%s of offset %u) could not be applied in slab %u",
+ (unsigned long long) block_number,
+ entry_point.entry_count,
+ vdo_get_journal_operation_name(entry.operation),
+ entry.sbn, slab->slab_number);
+ return result;
+ }
+ entry_point.entry_count++;
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * apply_journal_entries() - Apply all valid entries from the just-read copy of the slab journal.
+ * @completion: The metadata read vio completion.
+ *
+ * This is a callback registered in start_scrubbing().
+ */
+static void apply_journal_entries(struct vdo_completion *completion)
+{
+ int result;
+ struct slab_scrubber *scrubber =
+ container_of(as_vio(completion), struct slab_scrubber, vio);
+ struct vdo_slab *slab = scrubber->slab;
+ struct slab_journal *journal = &slab->journal;
+
+ /* Find the boundaries of the useful part of the journal. */
+ sequence_number_t tail = journal->tail;
+ tail_block_offset_t end_index = (tail - 1) % journal->size;
+ char *end_data = scrubber->vio.data + (end_index * VDO_BLOCK_SIZE);
+ struct packed_slab_journal_block *end_block =
+ (struct packed_slab_journal_block *) end_data;
+
+ sequence_number_t head = __le64_to_cpu(end_block->header.head);
+ tail_block_offset_t head_index = head % journal->size;
+ block_count_t index = head_index;
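+	/*
+	 * The journal occupies a ring of journal->size blocks; the loop below walks
+	 * it from the head block to the tail block, wrapping at the end of the ring.
+	 */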
+
+ struct journal_point ref_counts_point = slab->slab_journal_point;
+ struct journal_point last_entry_applied = ref_counts_point;
+ sequence_number_t sequence;
+
+ for (sequence = head; sequence < tail; sequence++) {
+ char *block_data = scrubber->vio.data + (index * VDO_BLOCK_SIZE);
+ struct packed_slab_journal_block *block =
+ (struct packed_slab_journal_block *) block_data;
+ struct slab_journal_block_header header;
+
+ vdo_unpack_slab_journal_block_header(&block->header, &header);
+
+ if ((header.nonce != slab->allocator->nonce) ||
+ (header.metadata_type != VDO_METADATA_SLAB_JOURNAL) ||
+ (header.sequence_number != sequence) ||
+ (header.entry_count > journal->entries_per_block) ||
+ (header.has_block_map_increments &&
+ (header.entry_count > journal->full_entries_per_block))) {
+ /* The block is not what we expect it to be. */
+ vdo_log_error("vdo_slab journal block for slab %u was invalid",
+ slab->slab_number);
+ abort_scrubbing(scrubber, VDO_CORRUPT_JOURNAL);
+ return;
+ }
+
+ result = apply_block_entries(block, header.entry_count, sequence, slab);
+ if (result != VDO_SUCCESS) {
+ abort_scrubbing(scrubber, result);
+ return;
+ }
+
+ last_entry_applied.sequence_number = sequence;
+ last_entry_applied.entry_count = header.entry_count - 1;
+ index++;
+ if (index == journal->size)
+ index = 0;
+ }
+
+ /*
+ * At the end of rebuild, the reference counters should be accurate to the end of the
+ * journal we just applied.
+ */
+ result = VDO_ASSERT(!vdo_before_journal_point(&last_entry_applied,
+ &ref_counts_point),
+ "Refcounts are not more accurate than the slab journal");
+ if (result != VDO_SUCCESS) {
+ abort_scrubbing(scrubber, result);
+ return;
+ }
+
+ /* Save out the rebuilt reference blocks. */
+ vdo_prepare_completion(completion, slab_scrubbed, handle_scrubber_error,
+ slab->allocator->thread_id, completion->parent);
+ vdo_start_operation_with_waiter(&slab->state,
+ VDO_ADMIN_STATE_SAVE_FOR_SCRUBBING,
+ completion, initiate_slab_action);
+}
+
+static void read_slab_journal_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct slab_scrubber *scrubber = container_of(vio, struct slab_scrubber, vio);
+
+	continue_vio_after_io(vio, apply_journal_entries,
+ scrubber->slab->allocator->thread_id);
+}
+
+/**
+ * start_scrubbing() - Read the current slab's journal from disk now that it has been flushed.
+ * @completion: The scrubber's vio completion.
+ *
+ * This callback is registered in scrub_next_slab().
+ */
+static void start_scrubbing(struct vdo_completion *completion)
+{
+ struct slab_scrubber *scrubber =
+ container_of(as_vio(completion), struct slab_scrubber, vio);
+ struct vdo_slab *slab = scrubber->slab;
+
+ if (!slab->allocator->summary_entries[slab->slab_number].is_dirty) {
+ slab_scrubbed(completion);
+ return;
+ }
+
+ vdo_submit_metadata_vio(&scrubber->vio, slab->journal_origin,
+ read_slab_journal_endio, handle_scrubber_error,
+ REQ_OP_READ);
+}
+
+/**
+ * scrub_next_slab() - Scrub the next slab if there is one.
+ * @scrubber: The scrubber.
+ */
+static void scrub_next_slab(struct slab_scrubber *scrubber)
+{
+ struct vdo_completion *completion = &scrubber->vio.completion;
+ struct vdo_slab *slab;
+
+ /*
+	 * Note: this notify call is only safe because scrubbing can only be started when
+ * the VDO is quiescent.
+ */
+ vdo_waitq_notify_all_waiters(&scrubber->waiters, NULL, NULL);
+
+ if (vdo_is_read_only(completion->vdo)) {
+ finish_scrubbing(scrubber, VDO_READ_ONLY);
+ return;
+ }
+
+ slab = get_next_slab(scrubber);
+ if ((slab == NULL) ||
+ (scrubber->high_priority_only && list_empty(&scrubber->high_priority_slabs))) {
+ finish_scrubbing(scrubber, VDO_SUCCESS);
+ return;
+ }
+
+ if (vdo_finish_draining(&scrubber->admin_state))
+ return;
+
+ list_del_init(&slab->allocq_entry);
+ scrubber->slab = slab;
+ vdo_prepare_completion(completion, start_scrubbing, handle_scrubber_error,
+ slab->allocator->thread_id, completion->parent);
+ vdo_start_operation_with_waiter(&slab->state, VDO_ADMIN_STATE_SCRUBBING,
+ completion, initiate_slab_action);
+}
+
+/**
+ * scrub_slabs() - Scrub all of an allocator's slabs that are eligible for scrubbing.
+ * @allocator: The block_allocator to scrub.
+ * @parent: The completion to notify when scrubbing is done, implies high_priority, may be NULL.
+ */
+static void scrub_slabs(struct block_allocator *allocator, struct vdo_completion *parent)
+{
+ struct slab_scrubber *scrubber = &allocator->scrubber;
+
+ scrubber->vio.completion.parent = parent;
+ scrubber->high_priority_only = (parent != NULL);
+ if (!has_slabs_to_scrub(scrubber)) {
+ finish_scrubbing(scrubber, VDO_SUCCESS);
+ return;
+ }
+
+ if (scrubber->high_priority_only &&
+ vdo_is_priority_table_empty(allocator->prioritized_slabs) &&
+ list_empty(&scrubber->high_priority_slabs))
+ register_slab_for_scrubbing(get_next_slab(scrubber), true);
+
+ vdo_resume_if_quiescent(&scrubber->admin_state);
+ scrub_next_slab(scrubber);
+}
+
+static inline void assert_on_allocator_thread(thread_id_t thread_id,
+ const char *function_name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == thread_id),
+ "%s called on correct thread", function_name);
+}
+
+static void register_slab_with_allocator(struct block_allocator *allocator,
+ struct vdo_slab *slab)
+{
+ allocator->slab_count++;
+ allocator->last_slab = slab->slab_number;
+}
+
+/**
+ * get_depot_slab_iterator() - Return a slab_iterator over the slabs in a slab_depot.
+ * @depot: The depot over which to iterate.
+ * @start: The number of the slab to start iterating from.
+ * @end: The number of the last slab which may be returned.
+ * @stride: The difference in slab number between successive slabs.
+ *
+ * Iteration always occurs from higher to lower numbered slabs.
+ *
+ * Return: An initialized iterator structure.
+ */
+static struct slab_iterator get_depot_slab_iterator(struct slab_depot *depot,
+ slab_count_t start, slab_count_t end,
+ slab_count_t stride)
+{
+ struct vdo_slab **slabs = depot->slabs;
+
+ return (struct slab_iterator) {
+ .slabs = slabs,
+ .next = (((slabs == NULL) || (start < end)) ? NULL : slabs[start]),
+ .end = end,
+ .stride = stride,
+ };
+}
+
+static struct slab_iterator get_slab_iterator(const struct block_allocator *allocator)
+{
+ return get_depot_slab_iterator(allocator->depot, allocator->last_slab,
+ allocator->zone_number,
+ allocator->depot->zone_count);
+}
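+
+/*
+ * Example (illustrative, assuming slabs are assigned to zones round-robin):
+ * with a stride (zone count) of 4, the allocator for zone 1 would visit slabs
+ * last_slab, last_slab - 4, ... down to slab 1, exactly the slabs of its zone.
+ */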
+
+/**
+ * next_slab() - Get the next slab from a slab_iterator and advance the iterator.
+ * @iterator: The slab_iterator.
+ *
+ * Return: The next slab or NULL if the iterator is exhausted.
+ */
+static struct vdo_slab *next_slab(struct slab_iterator *iterator)
+{
+ struct vdo_slab *slab = iterator->next;
+
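+	/* Stop if stepping down by the stride would pass the end of the range. */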
+ if ((slab == NULL) || (slab->slab_number < iterator->end + iterator->stride))
+ iterator->next = NULL;
+ else
+ iterator->next = iterator->slabs[slab->slab_number - iterator->stride];
+
+ return slab;
+}
+
+/**
+ * abort_waiter() - Abort vios waiting to make journal entries when read-only.
+ *
+ * This callback is invoked on all vios waiting to make slab journal entries after the VDO has gone
+ * into read-only mode. Implements waiter_callback_fn.
+ */
+static void abort_waiter(struct vdo_waiter *waiter, void *context __always_unused)
+{
+ struct reference_updater *updater =
+ container_of(waiter, struct reference_updater, waiter);
+ struct data_vio *data_vio = data_vio_from_reference_updater(updater);
+
+ if (updater->increment) {
+ continue_data_vio_with_error(data_vio, VDO_READ_ONLY);
+ return;
+ }
+
+ vdo_continue_completion(&data_vio->decrement_completion, VDO_READ_ONLY);
+}
+
+/* Implements vdo_read_only_notification_fn. */
+static void notify_block_allocator_of_read_only_mode(void *listener,
+ struct vdo_completion *parent)
+{
+ struct block_allocator *allocator = listener;
+ struct slab_iterator iterator;
+
+ assert_on_allocator_thread(allocator->thread_id, __func__);
+ iterator = get_slab_iterator(allocator);
+ while (iterator.next != NULL) {
+ struct vdo_slab *slab = next_slab(&iterator);
+
+ vdo_waitq_notify_all_waiters(&slab->journal.entry_waiters,
+ abort_waiter, &slab->journal);
+ check_if_slab_drained(slab);
+ }
+
+ vdo_finish_completion(parent);
+}
+
+/**
+ * vdo_acquire_provisional_reference() - Acquire a provisional reference on behalf of a PBN lock if
+ * the block it locks is unreferenced.
+ * @slab: The slab which contains the block.
+ * @pbn: The physical block to reference.
+ * @lock: The lock.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_acquire_provisional_reference(struct vdo_slab *slab, physical_block_number_t pbn,
+ struct pbn_lock *lock)
+{
+ slab_block_number block_number;
+ int result;
+
+ if (vdo_pbn_lock_has_provisional_reference(lock))
+ return VDO_SUCCESS;
+
+ if (!is_slab_open(slab))
+ return VDO_INVALID_ADMIN_STATE;
+
+ result = slab_block_number_from_pbn(slab, pbn, &block_number);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (slab->counters[block_number] == EMPTY_REFERENCE_COUNT) {
+ make_provisional_reference(slab, block_number);
+ if (lock != NULL)
+ vdo_assign_pbn_lock_provisional_reference(lock);
+ }
+
+ if (vdo_pbn_lock_has_provisional_reference(lock))
+ adjust_free_block_count(slab, false);
+
+ return VDO_SUCCESS;
+}
+
+static int __must_check allocate_slab_block(struct vdo_slab *slab,
+ physical_block_number_t *block_number_ptr)
+{
+ slab_block_number free_index;
+
+ if (!is_slab_open(slab))
+ return VDO_INVALID_ADMIN_STATE;
+
+ if (!search_reference_blocks(slab, &free_index))
+ return VDO_NO_SPACE;
+
+ VDO_ASSERT_LOG_ONLY((slab->counters[free_index] == EMPTY_REFERENCE_COUNT),
+ "free block must have ref count of zero");
+ make_provisional_reference(slab, free_index);
+ adjust_free_block_count(slab, false);
+
+ /*
+ * Update the search hint so the next search will start at the array index just past the
+ * free block we just found.
+ */
+ slab->search_cursor.index = (free_index + 1);
+
+ *block_number_ptr = slab->start + free_index;
+ return VDO_SUCCESS;
+}
+
+/**
+ * open_slab() - Prepare a slab to be allocated from.
+ * @slab: The slab.
+ */
+static void open_slab(struct vdo_slab *slab)
+{
+ reset_search_cursor(slab);
+ if (is_slab_journal_blank(slab)) {
+ WRITE_ONCE(slab->allocator->statistics.slabs_opened,
+ slab->allocator->statistics.slabs_opened + 1);
+ dirty_all_reference_blocks(slab);
+ } else {
+ WRITE_ONCE(slab->allocator->statistics.slabs_reopened,
+ slab->allocator->statistics.slabs_reopened + 1);
+ }
+
+ slab->allocator->open_slab = slab;
+}
+
+/*
+ * The block allocated will have a provisional reference and the reference must be either confirmed
+ * with a subsequent increment or vacated with a subsequent decrement via
+ * vdo_release_block_reference().
+ */
+int vdo_allocate_block(struct block_allocator *allocator,
+ physical_block_number_t *block_number_ptr)
+{
+ int result;
+
+ if (allocator->open_slab != NULL) {
+ /* Try to allocate the next block in the currently open slab. */
+ result = allocate_slab_block(allocator->open_slab, block_number_ptr);
+		if (result != VDO_NO_SPACE)
+ return result;
+
+ /* Put the exhausted open slab back into the priority table. */
+ prioritize_slab(allocator->open_slab);
+ }
+
+ /* Remove the highest priority slab from the priority table and make it the open slab. */
+ open_slab(list_entry(vdo_priority_table_dequeue(allocator->prioritized_slabs),
+ struct vdo_slab, allocq_entry));
+
+ /*
+ * Try allocating again. If we're out of space immediately after opening a slab, then every
+ * slab must be fully allocated.
+ */
+ return allocate_slab_block(allocator->open_slab, block_number_ptr);
+}
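+
+/*
+ * An illustrative (non-compiled) sketch of the contract described above: every successful
+ * vdo_allocate_block() must be balanced either by a real reference increment or by
+ * vdo_release_block_reference(). The helper example_commit_mapping() is hypothetical and
+ * stands in for whatever would confirm the reference.
+ */
+#if 0 /* example only */
+static int example_allocate_block(struct block_allocator *allocator)
+{
+	physical_block_number_t pbn;
+	int result = vdo_allocate_block(allocator, &pbn);
+
+	if (result != VDO_SUCCESS)
+		return result;
+
+	if (!example_commit_mapping(pbn)) {
+		/* Vacate the unused provisional reference. */
+		return vdo_release_block_reference(allocator, pbn);
+	}
+
+	return VDO_SUCCESS;
+}
+#endif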
+
+/**
+ * vdo_enqueue_clean_slab_waiter() - Wait for a clean slab.
+ * @allocator: The block_allocator on which to wait.
+ * @waiter: The waiter.
+ *
+ * Return: VDO_SUCCESS if the waiter was queued, VDO_NO_SPACE if there are no slabs to scrub, or
+ * VDO_READ_ONLY if the VDO is in read-only mode.
+ */
+int vdo_enqueue_clean_slab_waiter(struct block_allocator *allocator,
+ struct vdo_waiter *waiter)
+{
+ if (vdo_is_read_only(allocator->depot->vdo))
+ return VDO_READ_ONLY;
+
+ if (vdo_is_state_quiescent(&allocator->scrubber.admin_state))
+ return VDO_NO_SPACE;
+
+ vdo_waitq_enqueue_waiter(&allocator->scrubber.waiters, waiter);
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_modify_reference_count() - Modify the reference count of a block by first making a slab
+ * journal entry and then updating the reference counter.
+ *
+ * @completion: The completion of the data_vio for which to add the entry.
+ * @updater: Which of the data_vio's reference updaters is being submitted.
+ */
+void vdo_modify_reference_count(struct vdo_completion *completion,
+ struct reference_updater *updater)
+{
+ struct vdo_slab *slab = vdo_get_slab(completion->vdo->depot, updater->zpbn.pbn);
+
+ if (!is_slab_open(slab)) {
+ vdo_continue_completion(completion, VDO_INVALID_ADMIN_STATE);
+ return;
+ }
+
+ if (vdo_is_read_only(completion->vdo)) {
+ vdo_continue_completion(completion, VDO_READ_ONLY);
+ return;
+ }
+
+ vdo_waitq_enqueue_waiter(&slab->journal.entry_waiters, &updater->waiter);
+ if ((slab->status != VDO_SLAB_REBUILT) && requires_reaping(&slab->journal))
+ register_slab_for_scrubbing(slab, true);
+
+ add_entries(&slab->journal);
+}
+
+/* Release an unused provisional reference. */
+int vdo_release_block_reference(struct block_allocator *allocator,
+ physical_block_number_t pbn)
+{
+ struct reference_updater updater;
+
+ if (pbn == VDO_ZERO_BLOCK)
+ return VDO_SUCCESS;
+
+ updater = (struct reference_updater) {
+ .operation = VDO_JOURNAL_DATA_REMAPPING,
+ .increment = false,
+ .zpbn = {
+ .pbn = pbn,
+ },
+ };
+
+ return adjust_reference_count(vdo_get_slab(allocator->depot, pbn),
+ &updater, NULL);
+}
+
+/*
+ * This is a min_heap callback function that orders slab_status structures using the 'is_clean'
+ * field as the primary key and the 'emptiness' field as the secondary key.
+ *
+ * Slabs need to be pushed onto the rings in the same order they are to be popped off. Popping
+ * should always get the most empty first, so pushing should be from most empty to least empty.
+ * Thus, the ordering is reversed from the usual sense since min_heap returns smaller elements
+ * before larger ones. A worked example follows the callback definitions below.
+ */
+static bool slab_status_is_less_than(const void *item1, const void *item2)
+{
+ const struct slab_status *info1 = item1;
+ const struct slab_status *info2 = item2;
+
+ if (info1->is_clean != info2->is_clean)
+ return info1->is_clean;
+ if (info1->emptiness != info2->emptiness)
+ return info1->emptiness > info2->emptiness;
+ return info1->slab_number < info2->slab_number;
+}
+
+static void swap_slab_statuses(void *item1, void *item2)
+{
+ struct slab_status *info1 = item1;
+ struct slab_status *info2 = item2;
+
+ swap(*info1, *info2);
+}
+
+static const struct min_heap_callbacks slab_status_min_heap = {
+ .elem_size = sizeof(struct slab_status),
+ .less = slab_status_is_less_than,
+ .swp = swap_slab_statuses,
+};
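+
+/*
+ * A worked example of the ordering (example values, not from this code): with the callbacks
+ * above, popping the heap yields clean slabs before dirty ones, and within each group the
+ * emptier slab first:
+ *
+ *   {clean, emptiness 90} before {clean, emptiness 10} before {dirty, emptiness 99}
+ */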
+
+/* Inform the slab actor that an action has finished on some slab; used by apply_to_slabs(). */
+static void slab_action_callback(struct vdo_completion *completion)
+{
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+ struct slab_actor *actor = &allocator->slab_actor;
+
+ if (--actor->slab_action_count == 0) {
+ actor->callback(completion);
+ return;
+ }
+
+ vdo_reset_completion(completion);
+}
+
+/* Preserve the error from part of an action and continue. */
+static void handle_operation_error(struct vdo_completion *completion)
+{
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+
+ if (allocator->state.waiter != NULL)
+ vdo_set_completion_result(allocator->state.waiter, completion->result);
+ completion->callback(completion);
+}
+
+/* Perform an action on each of an allocator's slabs in parallel. */
+static void apply_to_slabs(struct block_allocator *allocator, vdo_action_fn callback)
+{
+ struct slab_iterator iterator;
+
+ vdo_prepare_completion(&allocator->completion, slab_action_callback,
+ handle_operation_error, allocator->thread_id, NULL);
+ allocator->completion.requeue = false;
+
+ /*
+ * Since we are going to dequeue all of the slabs, the open slab will become invalid, so
+ * clear it.
+ */
+ allocator->open_slab = NULL;
+
+ /* Ensure that we don't finish before we're done starting. */
+ allocator->slab_actor = (struct slab_actor) {
+ .slab_action_count = 1,
+ .callback = callback,
+ };
+
+ iterator = get_slab_iterator(allocator);
+ while (iterator.next != NULL) {
+ const struct admin_state_code *operation =
+ vdo_get_admin_state_code(&allocator->state);
+ struct vdo_slab *slab = next_slab(&iterator);
+
+ list_del_init(&slab->allocq_entry);
+ allocator->slab_actor.slab_action_count++;
+ vdo_start_operation_with_waiter(&slab->state, operation,
+ &allocator->completion,
+ initiate_slab_action);
+ }
+
+ slab_action_callback(&allocator->completion);
+}
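+
+/*
+ * A minimal sketch (hypothetical names, not part of this code) of the completion-counting
+ * pattern used by apply_to_slabs(): the count starts at one so the final callback cannot fire
+ * while the loop is still launching work; the launcher's own finish call releases that guard.
+ */
+#if 0 /* example only */
+static void example_launch_all(struct slab_actor *actor, unsigned int jobs)
+{
+	unsigned int i;
+
+	actor->slab_action_count = 1;	/* guard against finishing early */
+	for (i = 0; i < jobs; i++) {
+		actor->slab_action_count++;
+		example_start_job(i);	/* each job ends in slab_action_callback() */
+	}
+
+	/* Drop the guard; if all jobs already finished, this fires the callback. */
+	example_finish();
+}
+#endif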
+
+static void finish_loading_allocator(struct vdo_completion *completion)
+{
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+ const struct admin_state_code *operation =
+ vdo_get_admin_state_code(&allocator->state);
+
+ if (allocator->eraser != NULL)
+ dm_kcopyd_client_destroy(vdo_forget(allocator->eraser));
+
+ if (operation == VDO_ADMIN_STATE_LOADING_FOR_RECOVERY) {
+ void *context =
+ vdo_get_current_action_context(allocator->depot->action_manager);
+
+ vdo_replay_into_slab_journals(allocator, context);
+ return;
+ }
+
+ vdo_finish_loading(&allocator->state);
+}
+
+static void erase_next_slab_journal(struct block_allocator *allocator);
+
+static void copy_callback(int read_err, unsigned long write_err, void *context)
+{
+ struct block_allocator *allocator = context;
+ int result = (((read_err == 0) && (write_err == 0)) ? VDO_SUCCESS : -EIO);
+
+ if (result != VDO_SUCCESS) {
+ vdo_fail_completion(&allocator->completion, result);
+ return;
+ }
+
+ erase_next_slab_journal(allocator);
+}
+
+/* erase_next_slab_journal() - Erase the next slab journal. */
+static void erase_next_slab_journal(struct block_allocator *allocator)
+{
+ struct vdo_slab *slab;
+ physical_block_number_t pbn;
+ struct dm_io_region regions[1];
+ struct slab_depot *depot = allocator->depot;
+ block_count_t blocks = depot->slab_config.slab_journal_blocks;
+
+ if (allocator->slabs_to_erase.next == NULL) {
+ vdo_finish_completion(&allocator->completion);
+ return;
+ }
+
+ slab = next_slab(&allocator->slabs_to_erase);
+ pbn = slab->journal_origin - depot->vdo->geometry.bio_offset;
+ regions[0] = (struct dm_io_region) {
+ .bdev = vdo_get_backing_device(depot->vdo),
+ .sector = pbn * VDO_SECTORS_PER_BLOCK,
+ .count = blocks * VDO_SECTORS_PER_BLOCK,
+ };
+ dm_kcopyd_zero(allocator->eraser, 1, regions, 0, copy_callback, allocator);
+}
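+
+/*
+ * Example of the region arithmetic above (illustrative numbers): with 4096-byte VDO blocks
+ * and 512-byte sectors, VDO_SECTORS_PER_BLOCK is 8, so a journal at PBN 1000 spanning 4
+ * blocks becomes .sector = 8000 with .count = 32.
+ */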
+
+/* Implements vdo_admin_initiator_fn. */
+static void initiate_load(struct admin_state *state)
+{
+ struct block_allocator *allocator =
+ container_of(state, struct block_allocator, state);
+ const struct admin_state_code *operation = vdo_get_admin_state_code(state);
+
+ if (operation == VDO_ADMIN_STATE_LOADING_FOR_REBUILD) {
+ /*
+ * Must requeue because the kcopyd client cannot be freed in the same stack frame
+ * as the kcopyd callback, lest it deadlock.
+ */
+ vdo_prepare_completion_for_requeue(&allocator->completion,
+ finish_loading_allocator,
+ handle_operation_error,
+ allocator->thread_id, NULL);
+ allocator->eraser = dm_kcopyd_client_create(NULL);
+ if (IS_ERR(allocator->eraser)) {
+ vdo_fail_completion(&allocator->completion,
+ PTR_ERR(allocator->eraser));
+ allocator->eraser = NULL;
+ return;
+ }
+ allocator->slabs_to_erase = get_slab_iterator(allocator);
+
+ erase_next_slab_journal(allocator);
+ return;
+ }
+
+ apply_to_slabs(allocator, finish_loading_allocator);
+}
+
+/**
+ * vdo_notify_slab_journals_are_recovered() - Inform a block allocator that its slab journals have
+ * been recovered from the recovery journal.
+ * @completion: The allocator completion.
+ */
+void vdo_notify_slab_journals_are_recovered(struct vdo_completion *completion)
+{
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+
+ vdo_finish_loading_with_result(&allocator->state, completion->result);
+}
+
+static int get_slab_statuses(struct block_allocator *allocator,
+ struct slab_status **statuses_ptr)
+{
+ int result;
+ struct slab_status *statuses;
+ struct slab_iterator iterator = get_slab_iterator(allocator);
+
+ result = vdo_allocate(allocator->slab_count, struct slab_status, __func__,
+ &statuses);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *statuses_ptr = statuses;
+
+ while (iterator.next != NULL) {
+ slab_count_t slab_number = next_slab(&iterator)->slab_number;
+
+ *statuses++ = (struct slab_status) {
+ .slab_number = slab_number,
+ .is_clean = !allocator->summary_entries[slab_number].is_dirty,
+ .emptiness = allocator->summary_entries[slab_number].fullness_hint,
+ };
+ }
+
+ return VDO_SUCCESS;
+}
+
+/* Prepare slabs for allocation or scrubbing. */
+static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator *allocator)
+{
+ struct slab_status current_slab_status;
+ struct min_heap heap;
+ int result;
+ struct slab_status *slab_statuses;
+ struct slab_depot *depot = allocator->depot;
+
+ WRITE_ONCE(allocator->allocated_blocks,
+ allocator->slab_count * depot->slab_config.data_blocks);
+ result = get_slab_statuses(allocator, &slab_statuses);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Sort the slabs by cleanliness, then by emptiness hint. */
+ heap = (struct min_heap) {
+ .data = slab_statuses,
+ .nr = allocator->slab_count,
+ .size = allocator->slab_count,
+ };
+ min_heapify_all(&heap, &slab_status_min_heap);
+
+ while (heap.nr > 0) {
+ bool high_priority;
+ struct vdo_slab *slab;
+ struct slab_journal *journal;
+
+ current_slab_status = slab_statuses[0];
+ min_heap_pop(&heap, &slab_status_min_heap);
+ slab = depot->slabs[current_slab_status.slab_number];
+
+ if ((depot->load_type == VDO_SLAB_DEPOT_REBUILD_LOAD) ||
+ (!allocator->summary_entries[slab->slab_number].load_ref_counts &&
+ current_slab_status.is_clean)) {
+ queue_slab(slab);
+ continue;
+ }
+
+ slab->status = VDO_SLAB_REQUIRES_SCRUBBING;
+ journal = &slab->journal;
+ high_priority = ((current_slab_status.is_clean &&
+ (depot->load_type == VDO_SLAB_DEPOT_NORMAL_LOAD)) ||
+ (journal_length(journal) >= journal->scrubbing_threshold));
+ register_slab_for_scrubbing(slab, high_priority);
+ }
+
+ vdo_free(slab_statuses);
+ return VDO_SUCCESS;
+}
+
+static const char *status_to_string(enum slab_rebuild_status status)
+{
+ switch (status) {
+ case VDO_SLAB_REBUILT:
+ return "REBUILT";
+ case VDO_SLAB_REQUIRES_SCRUBBING:
+ return "SCRUBBING";
+ case VDO_SLAB_REQUIRES_HIGH_PRIORITY_SCRUBBING:
+ return "PRIORITY_SCRUBBING";
+ case VDO_SLAB_REBUILDING:
+ return "REBUILDING";
+ case VDO_SLAB_REPLAYING:
+ return "REPLAYING";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+void vdo_dump_block_allocator(const struct block_allocator *allocator)
+{
+ unsigned int pause_counter = 0;
+ struct slab_iterator iterator = get_slab_iterator(allocator);
+ const struct slab_scrubber *scrubber = &allocator->scrubber;
+
+ vdo_log_info("block_allocator zone %u", allocator->zone_number);
+ while (iterator.next != NULL) {
+ struct vdo_slab *slab = next_slab(&iterator);
+ struct slab_journal *journal = &slab->journal;
+
+ if (slab->reference_blocks != NULL) {
+ /* Terse because there are a lot of slabs to dump and syslog is lossy. */
+ vdo_log_info("slab %u: P%u, %llu free", slab->slab_number,
+ slab->priority,
+ (unsigned long long) slab->free_blocks);
+ } else {
+ vdo_log_info("slab %u: status %s", slab->slab_number,
+ status_to_string(slab->status));
+ }
+
+ vdo_log_info(" slab journal: entry_waiters=%zu waiting_to_commit=%s updating_slab_summary=%s head=%llu unreapable=%llu tail=%llu next_commit=%llu summarized=%llu last_summarized=%llu recovery_lock=%llu dirty=%s",
+ vdo_waitq_num_waiters(&journal->entry_waiters),
+ vdo_bool_to_string(journal->waiting_to_commit),
+ vdo_bool_to_string(journal->updating_slab_summary),
+ (unsigned long long) journal->head,
+ (unsigned long long) journal->unreapable,
+ (unsigned long long) journal->tail,
+ (unsigned long long) journal->next_commit,
+ (unsigned long long) journal->summarized,
+ (unsigned long long) journal->last_summarized,
+ (unsigned long long) journal->recovery_lock,
+ vdo_bool_to_string(journal->recovery_lock != 0));
+ /*
+ * Given the frequency with which the locks are just a tiny bit off, it might be
+ * worth dumping all the locks, but that might be too much logging.
+ */
+
+ if (slab->counters != NULL) {
+ /* Terse because there are a lot of slabs to dump and syslog is lossy. */
+ vdo_log_info(" slab: free=%u/%u blocks=%u dirty=%zu active=%zu journal@(%llu,%u)",
+ slab->free_blocks, slab->block_count,
+ slab->reference_block_count,
+ vdo_waitq_num_waiters(&slab->dirty_blocks),
+ slab->active_count,
+ (unsigned long long) slab->slab_journal_point.sequence_number,
+ slab->slab_journal_point.entry_count);
+ } else {
+ vdo_log_info(" no counters");
+ }
+
+ /*
+ * Pause after each batch of 32 slabs dumped (an arbitrary number), giving the
+ * kernel log a chance to be flushed instead of being overrun.
+ */
+ if (pause_counter++ == 31) {
+ pause_counter = 0;
+ vdo_pause_for_logger();
+ }
+ }
+
+ vdo_log_info("slab_scrubber slab_count %u waiters %zu %s%s",
+ READ_ONCE(scrubber->slab_count),
+ vdo_waitq_num_waiters(&scrubber->waiters),
+ vdo_get_admin_state_code(&scrubber->admin_state)->name,
+ scrubber->high_priority_only ? ", high_priority_only " : "");
+}
+
+static void free_slab(struct vdo_slab *slab)
+{
+ if (slab == NULL)
+ return;
+
+ list_del(&slab->allocq_entry);
+ vdo_free(vdo_forget(slab->journal.block));
+ vdo_free(vdo_forget(slab->journal.locks));
+ vdo_free(vdo_forget(slab->counters));
+ vdo_free(vdo_forget(slab->reference_blocks));
+ vdo_free(slab);
+}
+
+static int initialize_slab_journal(struct vdo_slab *slab)
+{
+ struct slab_journal *journal = &slab->journal;
+ const struct slab_config *slab_config = &slab->allocator->depot->slab_config;
+ int result;
+
+ result = vdo_allocate(slab_config->slab_journal_blocks, struct journal_lock,
+ __func__, &journal->locks);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(VDO_BLOCK_SIZE, char, "struct packed_slab_journal_block",
+ (char **) &journal->block);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ journal->slab = slab;
+ journal->size = slab_config->slab_journal_blocks;
+ journal->flushing_threshold = slab_config->slab_journal_flushing_threshold;
+ journal->blocking_threshold = slab_config->slab_journal_blocking_threshold;
+ journal->scrubbing_threshold = slab_config->slab_journal_scrubbing_threshold;
+ journal->entries_per_block = VDO_SLAB_JOURNAL_ENTRIES_PER_BLOCK;
+ journal->full_entries_per_block = VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK;
+ journal->events = &slab->allocator->slab_journal_statistics;
+ journal->recovery_journal = slab->allocator->depot->vdo->recovery_journal;
+ journal->tail = 1;
+ journal->head = 1;
+
+ journal->flushing_deadline = journal->flushing_threshold;
+ /*
+ * Leave some gap between the flushing deadline and the blocking threshold, so that
+ * hopefully all flushing completes before any writes must block. A worked example
+ * follows this function.
+ */
+ if ((journal->blocking_threshold - journal->flushing_threshold) > 5)
+ journal->flushing_deadline = journal->blocking_threshold - 5;
+
+ journal->slab_summary_waiter.callback = release_journal_locks;
+
+ INIT_LIST_HEAD(&journal->dirty_entry);
+ INIT_LIST_HEAD(&journal->uncommitted_blocks);
+
+ journal->tail_header.nonce = slab->allocator->nonce;
+ journal->tail_header.metadata_type = VDO_METADATA_SLAB_JOURNAL;
+ initialize_journal_state(journal);
+ return VDO_SUCCESS;
+}
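+
+/*
+ * Worked example of the deadline above (illustrative thresholds): with a flushing threshold
+ * of 60 blocks and a blocking threshold of 100, the gap exceeds 5, so the flushing deadline
+ * becomes 95 and flushing is pushed hard just before writes would block.
+ */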
+
+/**
+ * make_slab() - Construct a new, empty slab.
+ * @slab_origin: The physical block number within the block allocator partition of the first block
+ * in the slab.
+ * @allocator: The block allocator to which the slab belongs.
+ * @slab_number: The slab number of the slab.
+ * @is_new: true if this slab is being allocated as part of a resize.
+ * @slab_ptr: A pointer to receive the new slab.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check make_slab(physical_block_number_t slab_origin,
+ struct block_allocator *allocator,
+ slab_count_t slab_number, bool is_new,
+ struct vdo_slab **slab_ptr)
+{
+ const struct slab_config *slab_config = &allocator->depot->slab_config;
+ struct vdo_slab *slab;
+ int result;
+
+ result = vdo_allocate(1, struct vdo_slab, __func__, &slab);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *slab = (struct vdo_slab) {
+ .allocator = allocator,
+ .start = slab_origin,
+ .end = slab_origin + slab_config->slab_blocks,
+ .slab_number = slab_number,
+ .ref_counts_origin = slab_origin + slab_config->data_blocks,
+ .journal_origin =
+ vdo_get_slab_journal_start_block(slab_config, slab_origin),
+ .block_count = slab_config->data_blocks,
+ .free_blocks = slab_config->data_blocks,
+ .reference_block_count =
+ vdo_get_saved_reference_count_size(slab_config->data_blocks),
+ };
+ INIT_LIST_HEAD(&slab->allocq_entry);
+
+ result = initialize_slab_journal(slab);
+ if (result != VDO_SUCCESS) {
+ free_slab(slab);
+ return result;
+ }
+
+ if (is_new) {
+ vdo_set_admin_state_code(&slab->state, VDO_ADMIN_STATE_NEW);
+ result = allocate_slab_counters(slab);
+ if (result != VDO_SUCCESS) {
+ free_slab(slab);
+ return result;
+ }
+ } else {
+ vdo_set_admin_state_code(&slab->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ }
+
+ *slab_ptr = slab;
+ return VDO_SUCCESS;
+}
+
+/**
+ * allocate_slabs() - Allocate a new slab pointer array.
+ * @depot: The depot.
+ * @slab_count: The number of slabs the depot should have in the new array.
+ *
+ * Any existing slab pointers will be copied into the new array, and slabs will be allocated as
+ * needed. The newly allocated slabs will not be distributed for use by the block allocators.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int allocate_slabs(struct slab_depot *depot, slab_count_t slab_count)
+{
+ block_count_t slab_size;
+ bool resizing = false;
+ physical_block_number_t slab_origin;
+ int result;
+
+ result = vdo_allocate(slab_count, struct vdo_slab *,
+ "slab pointer array", &depot->new_slabs);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ if (depot->slabs != NULL) {
+ memcpy(depot->new_slabs, depot->slabs,
+ depot->slab_count * sizeof(struct vdo_slab *));
+ resizing = true;
+ }
+
+ slab_size = depot->slab_config.slab_blocks;
+ slab_origin = depot->first_block + (depot->slab_count * slab_size);
+
+ for (depot->new_slab_count = depot->slab_count;
+ depot->new_slab_count < slab_count;
+ depot->new_slab_count++, slab_origin += slab_size) {
+ struct block_allocator *allocator =
+ &depot->allocators[depot->new_slab_count % depot->zone_count];
+ struct vdo_slab **slab_ptr = &depot->new_slabs[depot->new_slab_count];
+
+ result = make_slab(slab_origin, allocator, depot->new_slab_count,
+ resizing, slab_ptr);
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ return VDO_SUCCESS;
+}
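+
+/*
+ * Example of the round-robin assignment above (illustrative counts): with zone_count = 2,
+ * slabs 0, 2, 4, ... go to allocator 0 and slabs 1, 3, 5, ... to allocator 1. The
+ * summary-combining logic later in this file assumes this same distribution when merging
+ * per-zone summary data.
+ */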
+
+/**
+ * vdo_abandon_new_slabs() - Abandon any new slabs in this depot, freeing them as needed.
+ * @depot: The depot.
+ */
+void vdo_abandon_new_slabs(struct slab_depot *depot)
+{
+ slab_count_t i;
+
+ if (depot->new_slabs == NULL)
+ return;
+
+ for (i = depot->slab_count; i < depot->new_slab_count; i++)
+ free_slab(vdo_forget(depot->new_slabs[i]));
+ depot->new_slab_count = 0;
+ depot->new_size = 0;
+ vdo_free(vdo_forget(depot->new_slabs));
+}
+
+/**
+ * get_allocator_thread_id() - Get the ID of the thread on which a given allocator operates.
+ *
+ * Implements vdo_zone_thread_getter_fn.
+ */
+static thread_id_t get_allocator_thread_id(void *context, zone_count_t zone_number)
+{
+ return ((struct slab_depot *) context)->allocators[zone_number].thread_id;
+}
+
+/**
+ * release_recovery_journal_lock() - Request the slab journal to release the recovery journal lock
+ * it may hold on a specified recovery journal block.
+ * @journal: The slab journal.
+ * @recovery_lock: The sequence number of the recovery journal block whose locks should be
+ * released.
+ *
+ * Return: true if the journal does hold a lock on the specified block (which it will release).
+ */
+static bool __must_check release_recovery_journal_lock(struct slab_journal *journal,
+ sequence_number_t recovery_lock)
+{
+ if (recovery_lock > journal->recovery_lock) {
+ VDO_ASSERT_LOG_ONLY((recovery_lock < journal->recovery_lock),
+ "slab journal recovery lock is not older than the recovery journal head");
+ return false;
+ }
+
+ if ((recovery_lock < journal->recovery_lock) ||
+ vdo_is_read_only(journal->slab->allocator->depot->vdo))
+ return false;
+
+ /* All locks are held by the block which is in progress; write it. */
+ commit_tail(journal);
+ return true;
+}
+
+/*
+ * Request a commit of all dirty tail blocks which are locking the recovery journal block the depot
+ * is seeking to release.
+ *
+ * Implements vdo_zone_action_fn.
+ */
+static void release_tail_block_locks(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_journal *journal, *tmp;
+ struct slab_depot *depot = context;
+ struct list_head *list = &depot->allocators[zone_number].dirty_slab_journals;
+
+ list_for_each_entry_safe(journal, tmp, list, dirty_entry) {
+ if (!release_recovery_journal_lock(journal,
+ depot->active_release_request))
+ break;
+ }
+
+ vdo_finish_completion(parent);
+}
+
+/**
+ * prepare_for_tail_block_commit() - Prepare to commit oldest tail blocks.
+ *
+ * Implements vdo_action_preamble_fn.
+ */
+static void prepare_for_tail_block_commit(void *context, struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+
+ depot->active_release_request = depot->new_release_request;
+ vdo_finish_completion(parent);
+}
+
+/**
+ * schedule_tail_block_commit() - Schedule a tail block commit if necessary.
+ *
+ * This method should not be called directly. Rather, call vdo_schedule_default_action() on the
+ * depot's action manager.
+ *
+ * Implements vdo_action_scheduler_fn.
+ */
+static bool schedule_tail_block_commit(void *context)
+{
+ struct slab_depot *depot = context;
+
+ if (depot->new_release_request == depot->active_release_request)
+ return false;
+
+ return vdo_schedule_action(depot->action_manager,
+ prepare_for_tail_block_commit,
+ release_tail_block_locks,
+ NULL, NULL);
+}
+
+/**
+ * initialize_slab_scrubber() - Initialize an allocator's slab scrubber.
+ * @allocator: The allocator being initialized
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int initialize_slab_scrubber(struct block_allocator *allocator)
+{
+ struct slab_scrubber *scrubber = &allocator->scrubber;
+ block_count_t slab_journal_size =
+ allocator->depot->slab_config.slab_journal_blocks;
+ char *journal_data;
+ int result;
+
+ result = vdo_allocate(VDO_BLOCK_SIZE * slab_journal_size,
+ char, __func__, &journal_data);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = allocate_vio_components(allocator->completion.vdo,
+ VIO_TYPE_SLAB_JOURNAL,
+ VIO_PRIORITY_METADATA,
+ allocator, slab_journal_size,
+ journal_data, &scrubber->vio);
+ if (result != VDO_SUCCESS) {
+ vdo_free(journal_data);
+ return result;
+ }
+
+ INIT_LIST_HEAD(&scrubber->high_priority_slabs);
+ INIT_LIST_HEAD(&scrubber->slabs);
+ vdo_set_admin_state_code(&scrubber->admin_state, VDO_ADMIN_STATE_SUSPENDED);
+ return VDO_SUCCESS;
+}
+
+/**
+ * initialize_slab_summary_block() - Initialize a slab_summary_block.
+ * @allocator: The allocator which owns the block.
+ * @index: The index of this block in its zone's summary.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check initialize_slab_summary_block(struct block_allocator *allocator,
+ block_count_t index)
+{
+ struct slab_summary_block *block = &allocator->summary_blocks[index];
+ int result;
+
+ result = vdo_allocate(VDO_BLOCK_SIZE, char, __func__, &block->outgoing_entries);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = allocate_vio_components(allocator->depot->vdo, VIO_TYPE_SLAB_SUMMARY,
+ VIO_PRIORITY_METADATA, NULL, 1,
+ block->outgoing_entries, &block->vio);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ block->allocator = allocator;
+ block->entries = &allocator->summary_entries[VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK * index];
+ block->index = index;
+ return VDO_SUCCESS;
+}
+
+static int __must_check initialize_block_allocator(struct slab_depot *depot,
+ zone_count_t zone)
+{
+ int result;
+ block_count_t i;
+ struct block_allocator *allocator = &depot->allocators[zone];
+ struct vdo *vdo = depot->vdo;
+ block_count_t max_free_blocks = depot->slab_config.data_blocks;
+ unsigned int max_priority = (2 + ilog2(max_free_blocks));
+
+ *allocator = (struct block_allocator) {
+ .depot = depot,
+ .zone_number = zone,
+ .thread_id = vdo->thread_config.physical_threads[zone],
+ .nonce = vdo->states.vdo.nonce,
+ };
+
+ INIT_LIST_HEAD(&allocator->dirty_slab_journals);
+ vdo_set_admin_state_code(&allocator->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
+ result = vdo_register_read_only_listener(vdo, allocator,
+ notify_block_allocator_of_read_only_mode,
+ allocator->thread_id);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_initialize_completion(&allocator->completion, vdo, VDO_BLOCK_ALLOCATOR_COMPLETION);
+ result = make_vio_pool(vdo, BLOCK_ALLOCATOR_VIO_POOL_SIZE, allocator->thread_id,
+ VIO_TYPE_SLAB_JOURNAL, VIO_PRIORITY_METADATA,
+ allocator, &allocator->vio_pool);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = initialize_slab_scrubber(allocator);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_make_priority_table(max_priority, &allocator->prioritized_slabs);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE,
+ struct slab_summary_block, __func__,
+ &allocator->summary_blocks);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ vdo_set_admin_state_code(&allocator->summary_state,
+ VDO_ADMIN_STATE_NORMAL_OPERATION);
+ allocator->summary_entries = depot->summary_entries + (MAX_VDO_SLABS * zone);
+
+ /* Initialize each summary block. */
+ for (i = 0; i < VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE; i++) {
+ result = initialize_slab_summary_block(allocator, i);
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ /*
+ * Performing well atop thin provisioned storage requires either that VDO discards freed
+ * blocks, or that the block allocator try to use slabs that already have allocated blocks
+ * in preference to slabs that have never been opened. For reasons we have not been able to
+ * fully understand, some SSD machines have been very sensitive (50% reduction in
+ * test throughput) to very slight differences in the timing and locality of block
+ * allocation. Assigning a low priority to unopened slabs (max_priority/2, say) would be
+ * ideal in principle, but anything less than a very high threshold (max_priority - 1)
+ * hurts on these machines.
+ *
+ * This sets the free block threshold for preferring to open an unopened slab to the binary
+ * floor of 3/4ths the total number of data blocks in a slab, which will generally evaluate
+ * to about half the slab size. A worked example follows this function.
+ */
+ allocator->unopened_slab_priority = (1 + ilog2((max_free_blocks * 3) / 4));
+
+ return VDO_SUCCESS;
+}
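+
+/*
+ * Worked example of the priority math above (illustrative slab size): with
+ * max_free_blocks = 16384, max_priority = 2 + ilog2(16384) = 16, and
+ * unopened_slab_priority = 1 + ilog2(12288) = 14, i.e. just below the top priorities,
+ * matching the "very high threshold" described above.
+ */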
+
+static int allocate_components(struct slab_depot *depot,
+ struct partition *summary_partition)
+{
+ int result;
+ zone_count_t zone;
+ slab_count_t slab_count;
+ u8 hint;
+ u32 i;
+ const struct thread_config *thread_config = &depot->vdo->thread_config;
+
+ result = vdo_make_action_manager(depot->zone_count, get_allocator_thread_id,
+ thread_config->journal_thread, depot,
+ schedule_tail_block_commit,
+ depot->vdo, &depot->action_manager);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ depot->origin = depot->first_block;
+
+ /* block size must be a multiple of entry size */
+ BUILD_BUG_ON((VDO_BLOCK_SIZE % sizeof(struct slab_summary_entry)) != 0);
+
+ depot->summary_origin = summary_partition->offset;
+ depot->hint_shift = vdo_get_slab_summary_hint_shift(depot->slab_size_shift);
+ result = vdo_allocate(MAXIMUM_VDO_SLAB_SUMMARY_ENTRIES,
+ struct slab_summary_entry, __func__,
+ &depot->summary_entries);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Initialize all the entries. */
+ hint = compute_fullness_hint(depot, depot->slab_config.data_blocks);
+ for (i = 0; i < MAXIMUM_VDO_SLAB_SUMMARY_ENTRIES; i++) {
+ /*
+ * This default tail block offset must be reflected in
+ * read_slab_journal_tail().
+ */
+ depot->summary_entries[i] = (struct slab_summary_entry) {
+ .tail_block_offset = 0,
+ .fullness_hint = hint,
+ .load_ref_counts = false,
+ .is_dirty = false,
+ };
+ }
+
+ slab_count = vdo_compute_slab_count(depot->first_block, depot->last_block,
+ depot->slab_size_shift);
+ if (thread_config->physical_zone_count > slab_count) {
+ return vdo_log_error_strerror(VDO_BAD_CONFIGURATION,
+ "%u physical zones exceeds slab count %u",
+ thread_config->physical_zone_count,
+ slab_count);
+ }
+
+ /* Initialize the block allocators. */
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ result = initialize_block_allocator(depot, zone);
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ /* Allocate slabs. */
+ result = allocate_slabs(depot, slab_count);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ /* Use the new slabs. */
+ for (i = depot->slab_count; i < depot->new_slab_count; i++) {
+ struct vdo_slab *slab = depot->new_slabs[i];
+
+ register_slab_with_allocator(slab->allocator, slab);
+ WRITE_ONCE(depot->slab_count, depot->slab_count + 1);
+ }
+
+ depot->slabs = depot->new_slabs;
+ depot->new_slabs = NULL;
+ depot->new_slab_count = 0;
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * vdo_decode_slab_depot() - Make a slab depot and configure it with the state read from the super
+ * block.
+ * @state: The slab depot state from the super block.
+ * @vdo: The VDO which will own the depot.
+ * @summary_partition: The partition which holds the slab summary.
+ * @depot_ptr: A pointer to hold the depot.
+ *
+ * Return: A success or error code.
+ */
+int vdo_decode_slab_depot(struct slab_depot_state_2_0 state, struct vdo *vdo,
+ struct partition *summary_partition,
+ struct slab_depot **depot_ptr)
+{
+ unsigned int slab_size_shift;
+ struct slab_depot *depot;
+ int result;
+
+ /*
+ * Calculate the bit shift for efficiently mapping block numbers to slabs. Using a shift
+ * requires that the slab size be a power of two.
+ */
+ block_count_t slab_size = state.slab_config.slab_blocks;
+
+ if (!is_power_of_2(slab_size)) {
+ return vdo_log_error_strerror(UDS_INVALID_ARGUMENT,
+ "slab size must be a power of two");
+ }
+ slab_size_shift = ilog2(slab_size);
+
+ result = vdo_allocate_extended(struct slab_depot,
+ vdo->thread_config.physical_zone_count,
+ struct block_allocator, __func__, &depot);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ depot->vdo = vdo;
+ depot->old_zone_count = state.zone_count;
+ depot->zone_count = vdo->thread_config.physical_zone_count;
+ depot->slab_config = state.slab_config;
+ depot->first_block = state.first_block;
+ depot->last_block = state.last_block;
+ depot->slab_size_shift = slab_size_shift;
+
+ result = allocate_components(depot, summary_partition);
+ if (result != VDO_SUCCESS) {
+ vdo_free_slab_depot(depot);
+ return result;
+ }
+
+ *depot_ptr = depot;
+ return VDO_SUCCESS;
+}
+
+static void uninitialize_allocator_summary(struct block_allocator *allocator)
+{
+ block_count_t i;
+
+ if (allocator->summary_blocks == NULL)
+ return;
+
+ for (i = 0; i < VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE; i++) {
+ free_vio_components(&allocator->summary_blocks[i].vio);
+ vdo_free(vdo_forget(allocator->summary_blocks[i].outgoing_entries));
+ }
+
+ vdo_free(vdo_forget(allocator->summary_blocks));
+}
+
+/**
+ * vdo_free_slab_depot() - Destroy a slab depot.
+ * @depot: The depot to destroy.
+ */
+void vdo_free_slab_depot(struct slab_depot *depot)
+{
+ zone_count_t zone = 0;
+
+ if (depot == NULL)
+ return;
+
+ vdo_abandon_new_slabs(depot);
+
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ struct block_allocator *allocator = &depot->allocators[zone];
+
+ if (allocator->eraser != NULL)
+ dm_kcopyd_client_destroy(vdo_forget(allocator->eraser));
+
+ uninitialize_allocator_summary(allocator);
+ uninitialize_scrubber_vio(&allocator->scrubber);
+ free_vio_pool(vdo_forget(allocator->vio_pool));
+ vdo_free_priority_table(vdo_forget(allocator->prioritized_slabs));
+ }
+
+ if (depot->slabs != NULL) {
+ slab_count_t i;
+
+ for (i = 0; i < depot->slab_count; i++)
+ free_slab(vdo_forget(depot->slabs[i]));
+ }
+
+ vdo_free(vdo_forget(depot->slabs));
+ vdo_free(vdo_forget(depot->action_manager));
+ vdo_free(vdo_forget(depot->summary_entries));
+ vdo_free(depot);
+}
+
+/**
+ * vdo_record_slab_depot() - Record the state of a slab depot for encoding into the super block.
+ * @depot: The depot to encode.
+ *
+ * Return: The depot state.
+ */
+struct slab_depot_state_2_0 vdo_record_slab_depot(const struct slab_depot *depot)
+{
+ /*
+ * If this depot is currently using 0 zones, it must have been synchronously loaded by a
+ * tool and is now being saved. We did not load and combine the slab summary, so we record
+ * the old zone count rather than 0 so that the combining still happens on the next load.
+ */
+ struct slab_depot_state_2_0 state;
+ zone_count_t zones_to_record = depot->zone_count;
+
+ if (depot->zone_count == 0)
+ zones_to_record = depot->old_zone_count;
+
+ state = (struct slab_depot_state_2_0) {
+ .slab_config = depot->slab_config,
+ .first_block = depot->first_block,
+ .last_block = depot->last_block,
+ .zone_count = zones_to_record,
+ };
+
+ return state;
+}
+
+/**
+ * vdo_allocate_reference_counters() - Allocate the reference counters for all slabs in the depot.
+ *
+ * @depot: The slab depot.
+ *
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_allocate_reference_counters(struct slab_depot *depot)
+{
+ struct slab_iterator iterator =
+ get_depot_slab_iterator(depot, depot->slab_count - 1, 0, 1);
+
+ while (iterator.next != NULL) {
+ int result = allocate_slab_counters(next_slab(&iterator));
+
+ if (result != VDO_SUCCESS)
+ return result;
+ }
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * get_slab_number() - Get the number of the slab that contains a specified block.
+ * @depot: The slab depot.
+ * @pbn: The physical block number.
+ * @slab_number_ptr: A pointer to hold the slab number.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check get_slab_number(const struct slab_depot *depot,
+ physical_block_number_t pbn,
+ slab_count_t *slab_number_ptr)
+{
+ slab_count_t slab_number;
+
+ if (pbn < depot->first_block)
+ return VDO_OUT_OF_RANGE;
+
+ slab_number = (pbn - depot->first_block) >> depot->slab_size_shift;
+ if (slab_number >= depot->slab_count)
+ return VDO_OUT_OF_RANGE;
+
+ *slab_number_ptr = slab_number;
+ return VDO_SUCCESS;
+}
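+
+/*
+ * Example of the shift mapping above (illustrative values): with slabs of 2^15 blocks
+ * (slab_size_shift = 15) and first_block = 0, PBN 100000 falls in slab 100000 >> 15 = 3.
+ */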
+
+/**
+ * vdo_get_slab() - Get the slab object for the slab that contains a specified block.
+ * @depot: The slab depot.
+ * @pbn: The physical block number.
+ *
+ * Will put the VDO in read-only mode if the PBN is not a valid data block nor the zero block.
+ *
+ * Return: The slab containing the block, or NULL if the block number is the zero block or
+ * otherwise out of range.
+ */
+struct vdo_slab *vdo_get_slab(const struct slab_depot *depot,
+ physical_block_number_t pbn)
+{
+ slab_count_t slab_number;
+ int result;
+
+ if (pbn == VDO_ZERO_BLOCK)
+ return NULL;
+
+ result = get_slab_number(depot, pbn, &slab_number);
+ if (result != VDO_SUCCESS) {
+ vdo_enter_read_only_mode(depot->vdo, result);
+ return NULL;
+ }
+
+ return depot->slabs[slab_number];
+}
+
+/**
+ * vdo_get_increment_limit() - Determine how many new references a block can acquire.
+ * @depot: The slab depot.
+ * @pbn: The physical block number that is being queried.
+ *
+ * Context: This method must be called from the physical zone thread of the PBN.
+ *
+ * Return: The number of available references.
+ */
+u8 vdo_get_increment_limit(struct slab_depot *depot, physical_block_number_t pbn)
+{
+ struct vdo_slab *slab = vdo_get_slab(depot, pbn);
+ vdo_refcount_t *counter_ptr = NULL;
+ int result;
+
+ if ((slab == NULL) || (slab->status != VDO_SLAB_REBUILT))
+ return 0;
+
+ result = get_reference_counter(slab, pbn, &counter_ptr);
+ if (result != VDO_SUCCESS)
+ return 0;
+
+ if (*counter_ptr == PROVISIONAL_REFERENCE_COUNT)
+ return (MAXIMUM_REFERENCE_COUNT - 1);
+
+ return (MAXIMUM_REFERENCE_COUNT - *counter_ptr);
+}
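+
+/*
+ * Example (illustrative values, assuming a maximum reference count of 254): a block already
+ * holding 4 references can acquire 250 more, while a block with only a provisional reference
+ * can acquire 253, since the provisional reference itself accounts for one.
+ */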
+
+/**
+ * vdo_is_physical_data_block() - Determine whether the given PBN refers to a data block.
+ * @depot: The depot.
+ * @pbn: The physical block number to ask about.
+ *
+ * Return: True if the PBN corresponds to a data block.
+ */
+bool vdo_is_physical_data_block(const struct slab_depot *depot,
+ physical_block_number_t pbn)
+{
+ slab_count_t slab_number;
+ slab_block_number sbn;
+
+ return ((pbn == VDO_ZERO_BLOCK) ||
+ ((get_slab_number(depot, pbn, &slab_number) == VDO_SUCCESS) &&
+ (slab_block_number_from_pbn(depot->slabs[slab_number], pbn, &sbn) ==
+ VDO_SUCCESS)));
+}
+
+/**
+ * vdo_get_slab_depot_allocated_blocks() - Get the total number of data blocks allocated across all
+ * the slabs in the depot.
+ * @depot: The slab depot.
+ *
+ * This is the total number of blocks with a non-zero reference count.
+ *
+ * Context: This may be called from any thread.
+ *
+ * Return: The total number of blocks with a non-zero reference count.
+ */
+block_count_t vdo_get_slab_depot_allocated_blocks(const struct slab_depot *depot)
+{
+ block_count_t total = 0;
+ zone_count_t zone;
+
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ /* The allocators are responsible for thread safety. */
+ total += READ_ONCE(depot->allocators[zone].allocated_blocks);
+ }
+
+ return total;
+}
+
+/**
+ * vdo_get_slab_depot_data_blocks() - Get the total number of data blocks in all the slabs in the
+ * depot.
+ * @depot: The slab depot.
+ *
+ * Context: This may be called from any thread.
+ *
+ * Return: The total number of data blocks in all slabs.
+ */
+block_count_t vdo_get_slab_depot_data_blocks(const struct slab_depot *depot)
+{
+ return (READ_ONCE(depot->slab_count) * depot->slab_config.data_blocks);
+}
+
+/**
+ * finish_combining_zones() - Clean up after saving out the combined slab summary.
+ * @completion: The vio which was used to write the summary data.
+ */
+static void finish_combining_zones(struct vdo_completion *completion)
+{
+ int result = completion->result;
+ struct vdo_completion *parent = completion->parent;
+
+ free_vio(as_vio(vdo_forget(completion)));
+ vdo_fail_completion(parent, result);
+}
+
+static void handle_combining_error(struct vdo_completion *completion)
+{
+ vio_record_metadata_io_error(as_vio(completion));
+ finish_combining_zones(completion);
+}
+
+static void write_summary_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct vdo *vdo = vio->completion.vdo;
+
+ continue_vio_after_io(vio, finish_combining_zones,
+ vdo->thread_config.admin_thread);
+}
+
+/**
+ * combine_summaries() - Treating the current entries buffer as the on-disk value of all zones,
+ * update every zone to the correct values for every slab.
+ * @depot: The depot whose summary entries should be combined.
+ */
+static void combine_summaries(struct slab_depot *depot)
+{
+ /*
+ * Combine all the old summary data into the portion of the buffer corresponding to the
+ * first zone.
+ */
+ zone_count_t zone = 0;
+ struct slab_summary_entry *entries = depot->summary_entries;
+
+ if (depot->old_zone_count > 1) {
+ slab_count_t entry_number;
+
+ for (entry_number = 0; entry_number < MAX_VDO_SLABS; entry_number++) {
+ if (zone != 0) {
+ memcpy(entries + entry_number,
+ entries + (zone * MAX_VDO_SLABS) + entry_number,
+ sizeof(struct slab_summary_entry));
+ }
+
+ zone++;
+ if (zone == depot->old_zone_count)
+ zone = 0;
+ }
+ }
+
+ /* Copy the combined data to each zone's region of the buffer. */
+ for (zone = 1; zone < MAX_VDO_PHYSICAL_ZONES; zone++) {
+ memcpy(entries + (zone * MAX_VDO_SLABS), entries,
+ MAX_VDO_SLABS * sizeof(struct slab_summary_entry));
+ }
+}
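+
+/*
+ * Worked example of the combination above (illustrative zone count): with old_zone_count = 3,
+ * slab 0's entry is already in place in zone 0's region, slab 1's is copied from zone 1's
+ * region (offset MAX_VDO_SLABS + 1), slab 2's from zone 2's region, slab 3's from zone 0
+ * again, and so on. The combined result is then replicated into every zone's region.
+ */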
+
+/**
+ * finish_loading_summary() - Finish loading slab summary data.
+ * @completion: The vio which was used to read the summary data.
+ *
+ * Combines the slab summary data from all the previously written zones and copies the combined
+ * summary to each zone's region of the buffer. Then writes the combined summary back out to disk. This
+ * callback is registered in load_summary_endio().
+ */
+static void finish_loading_summary(struct vdo_completion *completion)
+{
+ struct slab_depot *depot = completion->vdo->depot;
+
+ /* Combine the summary from each zone so each zone is correct for all slabs. */
+ combine_summaries(depot);
+
+ /* Write the combined summary back out. */
+ vdo_submit_metadata_vio(as_vio(completion), depot->summary_origin,
+ write_summary_endio, handle_combining_error,
+ REQ_OP_WRITE);
+}
+
+static void load_summary_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct vdo *vdo = vio->completion.vdo;
+
+ continue_vio_after_io(vio, finish_loading_summary,
+ vdo->thread_config.admin_thread);
+}
+
+/**
+ * load_slab_summary() - The preamble of a load operation.
+ *
+ * Implements vdo_action_preamble_fn.
+ */
+static void load_slab_summary(void *context, struct vdo_completion *parent)
+{
+ int result;
+ struct vio *vio;
+ struct slab_depot *depot = context;
+ const struct admin_state_code *operation =
+ vdo_get_current_manager_operation(depot->action_manager);
+
+ result = create_multi_block_metadata_vio(depot->vdo, VIO_TYPE_SLAB_SUMMARY,
+ VIO_PRIORITY_METADATA, parent,
+ VDO_SLAB_SUMMARY_BLOCKS,
+ (char *) depot->summary_entries, &vio);
+ if (result != VDO_SUCCESS) {
+ vdo_fail_completion(parent, result);
+ return;
+ }
+
+ if ((operation == VDO_ADMIN_STATE_FORMATTING) ||
+ (operation == VDO_ADMIN_STATE_LOADING_FOR_REBUILD)) {
+ finish_loading_summary(&vio->completion);
+ return;
+ }
+
+ vdo_submit_metadata_vio(vio, depot->summary_origin, load_summary_endio,
+ handle_combining_error, REQ_OP_READ);
+}
+
+/* Implements vdo_zone_action_fn. */
+static void load_allocator(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+
+ vdo_start_loading(&depot->allocators[zone_number].state,
+ vdo_get_current_manager_operation(depot->action_manager),
+ parent, initiate_load);
+}
+
+/**
+ * vdo_load_slab_depot() - Asynchronously load any slab depot state that isn't included in the
+ * super_block component.
+ * @depot: The depot to load.
+ * @operation: The type of load to perform.
+ * @parent: The completion to notify when the load is complete.
+ * @context: Additional context for the load operation; may be NULL.
+ *
+ * This method may be called only before entering normal operation from the load thread.
+ */
+void vdo_load_slab_depot(struct slab_depot *depot,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent, void *context)
+{
+ if (!vdo_assert_load_operation(operation, parent))
+ return;
+
+ vdo_schedule_operation_with_context(depot->action_manager, operation,
+ load_slab_summary, load_allocator,
+ NULL, context, parent);
+}
+
+/* Implements vdo_zone_action_fn. */
+static void prepare_to_allocate(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+ struct block_allocator *allocator = &depot->allocators[zone_number];
+ int result;
+
+ result = vdo_prepare_slabs_for_allocation(allocator);
+ if (result != VDO_SUCCESS) {
+ vdo_fail_completion(parent, result);
+ return;
+ }
+
+ scrub_slabs(allocator, parent);
+}
+
+/**
+ * vdo_prepare_slab_depot_to_allocate() - Prepare the slab depot to come online and start
+ * allocating blocks.
+ * @depot: The depot to prepare.
+ * @load_type: The load type.
+ * @parent: The completion to notify when the operation is complete.
+ *
+ * This method may be called only before entering normal operation from the load thread. It must be
+ * called before allocation may proceed.
+ */
+void vdo_prepare_slab_depot_to_allocate(struct slab_depot *depot,
+ enum slab_depot_load_type load_type,
+ struct vdo_completion *parent)
+{
+ depot->load_type = load_type;
+ atomic_set(&depot->zones_to_scrub, depot->zone_count);
+ vdo_schedule_action(depot->action_manager, NULL,
+ prepare_to_allocate, NULL, parent);
+}
+
+/**
+ * vdo_update_slab_depot_size() - Update the slab depot to reflect its new size in memory.
+ * @depot: The depot to update.
+ *
+ * This size is saved to disk as part of the super block.
+ */
+void vdo_update_slab_depot_size(struct slab_depot *depot)
+{
+ depot->last_block = depot->new_last_block;
+}
+
+/**
+ * vdo_prepare_to_grow_slab_depot() - Allocate new memory needed for a resize of a slab depot to
+ * the given size.
+ * @depot: The depot to prepare to resize.
+ * @partition: The new depot partition.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_prepare_to_grow_slab_depot(struct slab_depot *depot,
+ const struct partition *partition)
+{
+ struct slab_depot_state_2_0 new_state;
+ int result;
+ slab_count_t new_slab_count;
+
+ if ((partition->count >> depot->slab_size_shift) <= depot->slab_count)
+ return VDO_INCREMENT_TOO_SMALL;
+
+ /* Generate the depot configuration for the new block count. */
+ VDO_ASSERT_LOG_ONLY(depot->first_block == partition->offset,
+ "New slab depot partition doesn't change origin");
+ result = vdo_configure_slab_depot(partition, depot->slab_config,
+ depot->zone_count, &new_state);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ new_slab_count = vdo_compute_slab_count(depot->first_block,
+ new_state.last_block,
+ depot->slab_size_shift);
+ if (new_slab_count <= depot->slab_count)
+ return vdo_log_error_strerror(VDO_INCREMENT_TOO_SMALL,
+ "Depot can only grow");
+ if (new_slab_count == depot->new_slab_count) {
+ /* Check it out, we've already got all the new slabs allocated! */
+ return VDO_SUCCESS;
+ }
+
+ vdo_abandon_new_slabs(depot);
+ result = allocate_slabs(depot, new_slab_count);
+ if (result != VDO_SUCCESS) {
+ vdo_abandon_new_slabs(depot);
+ return result;
+ }
+
+ depot->new_size = partition->count;
+ depot->old_last_block = depot->last_block;
+ depot->new_last_block = new_state.last_block;
+
+ return VDO_SUCCESS;
+}
+
+/**
+ * finish_registration() - Finish registering new slabs now that all of the allocators have
+ * received their new slabs.
+ *
+ * Implements vdo_action_conclusion_fn.
+ */
+static int finish_registration(void *context)
+{
+ struct slab_depot *depot = context;
+
+ WRITE_ONCE(depot->slab_count, depot->new_slab_count);
+ vdo_free(depot->slabs);
+ depot->slabs = depot->new_slabs;
+ depot->new_slabs = NULL;
+ depot->new_slab_count = 0;
+ return VDO_SUCCESS;
+}
+
+/* Implements vdo_zone_action_fn. */
+static void register_new_slabs(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+ struct block_allocator *allocator = &depot->allocators[zone_number];
+ slab_count_t i;
+
+ for (i = depot->slab_count; i < depot->new_slab_count; i++) {
+ struct vdo_slab *slab = depot->new_slabs[i];
+
+ if (slab->allocator == allocator)
+ register_slab_with_allocator(allocator, slab);
+ }
+
+ vdo_finish_completion(parent);
+}
+
+/**
+ * vdo_use_new_slabs() - Use the new slabs allocated for resize.
+ * @depot: The depot.
+ * @parent: The object to notify when complete.
+ */
+void vdo_use_new_slabs(struct slab_depot *depot, struct vdo_completion *parent)
+{
+ VDO_ASSERT_LOG_ONLY(depot->new_slabs != NULL, "Must have new slabs to use");
+ vdo_schedule_operation(depot->action_manager,
+ VDO_ADMIN_STATE_SUSPENDED_OPERATION,
+ NULL, register_new_slabs,
+ finish_registration, parent);
+}
+
+/**
+ * stop_scrubbing() - Tell the scrubber to stop scrubbing after it finishes the slab it is
+ * currently working on.
+ * @allocator: The allocator owning the scrubber to stop.
+ */
+static void stop_scrubbing(struct block_allocator *allocator)
+{
+ struct slab_scrubber *scrubber = &allocator->scrubber;
+
+ if (vdo_is_state_quiescent(&scrubber->admin_state)) {
+ vdo_finish_completion(&allocator->completion);
+ } else {
+ vdo_start_draining(&scrubber->admin_state,
+ VDO_ADMIN_STATE_SUSPENDING,
+ &allocator->completion, NULL);
+ }
+}
+
+/* Implements vdo_admin_initiator_fn. */
+static void initiate_summary_drain(struct admin_state *state)
+{
+ check_summary_drain_complete(container_of(state, struct block_allocator,
+ summary_state));
+}
+
+static void do_drain_step(struct vdo_completion *completion)
+{
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+
+ vdo_prepare_completion_for_requeue(&allocator->completion, do_drain_step,
+ handle_operation_error, allocator->thread_id,
+ NULL);
+ switch (++allocator->drain_step) {
+ case VDO_DRAIN_ALLOCATOR_STEP_SCRUBBER:
+ stop_scrubbing(allocator);
+ return;
+
+ case VDO_DRAIN_ALLOCATOR_STEP_SLABS:
+ apply_to_slabs(allocator, do_drain_step);
+ return;
+
+ case VDO_DRAIN_ALLOCATOR_STEP_SUMMARY:
+ vdo_start_draining(&allocator->summary_state,
+ vdo_get_admin_state_code(&allocator->state),
+ completion, initiate_summary_drain);
+ return;
+
+ case VDO_DRAIN_ALLOCATOR_STEP_FINISHED:
+ VDO_ASSERT_LOG_ONLY(!is_vio_pool_busy(allocator->vio_pool),
+ "vio pool not busy");
+ vdo_finish_draining_with_result(&allocator->state, completion->result);
+ return;
+
+ default:
+ vdo_finish_draining_with_result(&allocator->state, UDS_BAD_STATE);
+ }
+}
+
+/* Implements vdo_admin_initiator_fn. */
+static void initiate_drain(struct admin_state *state)
+{
+ struct block_allocator *allocator =
+ container_of(state, struct block_allocator, state);
+
+ allocator->drain_step = VDO_DRAIN_ALLOCATOR_START;
+ do_drain_step(&allocator->completion);
+}
+
+/*
+ * Drain all allocator I/O. Depending upon the type of drain, some or all dirty metadata may be
+ * written to disk. The type of drain will be determined from the state of the allocator's depot.
+ *
+ * Implements vdo_zone_action_fn.
+ */
+static void drain_allocator(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+
+ vdo_start_draining(&depot->allocators[zone_number].state,
+ vdo_get_current_manager_operation(depot->action_manager),
+ parent, initiate_drain);
+}
+
+/**
+ * vdo_drain_slab_depot() - Drain all slab depot I/O.
+ * @depot: The depot to drain.
+ * @operation: The drain operation (flush, rebuild, suspend, or save).
+ * @parent: The completion to finish when the drain is complete.
+ *
+ * If saving, or flushing, all dirty depot metadata will be written out. If saving or suspending,
+ * the depot will be left in a suspended state.
+ */
+void vdo_drain_slab_depot(struct slab_depot *depot,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent)
+{
+ vdo_schedule_operation(depot->action_manager, operation,
+ NULL, drain_allocator, NULL, parent);
+}
+
+/**
+ * resume_scrubbing() - Tell the scrubber to resume scrubbing if it has been stopped.
+ * @allocator: The allocator being resumed.
+ */
+static void resume_scrubbing(struct block_allocator *allocator)
+{
+ int result;
+ struct slab_scrubber *scrubber = &allocator->scrubber;
+
+ if (!has_slabs_to_scrub(scrubber)) {
+ vdo_finish_completion(&allocator->completion);
+ return;
+ }
+
+ result = vdo_resume_if_quiescent(&scrubber->admin_state);
+ if (result != VDO_SUCCESS) {
+ vdo_fail_completion(&allocator->completion, result);
+ return;
+ }
+
+ scrub_next_slab(scrubber);
+ vdo_finish_completion(&allocator->completion);
+}
+
+static void do_resume_step(struct vdo_completion *completion)
+{
+ struct block_allocator *allocator = vdo_as_block_allocator(completion);
+
+ vdo_prepare_completion_for_requeue(&allocator->completion, do_resume_step,
+ handle_operation_error,
+ allocator->thread_id, NULL);
+ switch (--allocator->drain_step) {
+ case VDO_DRAIN_ALLOCATOR_STEP_SUMMARY:
+ vdo_fail_completion(completion,
+ vdo_resume_if_quiescent(&allocator->summary_state));
+ return;
+
+ case VDO_DRAIN_ALLOCATOR_STEP_SLABS:
+ apply_to_slabs(allocator, do_resume_step);
+ return;
+
+ case VDO_DRAIN_ALLOCATOR_STEP_SCRUBBER:
+ resume_scrubbing(allocator);
+ return;
+
+ case VDO_DRAIN_ALLOCATOR_START:
+ vdo_finish_resuming_with_result(&allocator->state, completion->result);
+ return;
+
+ default:
+ vdo_finish_resuming_with_result(&allocator->state, UDS_BAD_STATE);
+ }
+}
+
+/* Implements vdo_admin_initiator_fn. */
+static void initiate_resume(struct admin_state *state)
+{
+ struct block_allocator *allocator =
+ container_of(state, struct block_allocator, state);
+
+ allocator->drain_step = VDO_DRAIN_ALLOCATOR_STEP_FINISHED;
+ do_resume_step(&allocator->completion);
+}
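+
+/*
+ * Note the symmetry with draining: do_drain_step() advances through the steps with ++
+ * starting from VDO_DRAIN_ALLOCATOR_START, while do_resume_step() walks the same enum
+ * backwards with -- starting from VDO_DRAIN_ALLOCATOR_STEP_FINISHED, undoing each drain
+ * step in reverse order.
+ */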
+
+/* Implements vdo_zone_action_fn. */
+static void resume_allocator(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+
+ vdo_start_resuming(&depot->allocators[zone_number].state,
+ vdo_get_current_manager_operation(depot->action_manager),
+ parent, initiate_resume);
+}
+
+/**
+ * vdo_resume_slab_depot() - Resume a suspended slab depot.
+ * @depot: The depot to resume.
+ * @parent: The completion to finish when the depot has resumed.
+ */
+void vdo_resume_slab_depot(struct slab_depot *depot, struct vdo_completion *parent)
+{
+ if (vdo_is_read_only(depot->vdo)) {
+ vdo_continue_completion(parent, VDO_READ_ONLY);
+ return;
+ }
+
+ vdo_schedule_operation(depot->action_manager, VDO_ADMIN_STATE_RESUMING,
+ NULL, resume_allocator, NULL, parent);
+}
+
+/**
+ * vdo_commit_oldest_slab_journal_tail_blocks() - Commit all dirty tail blocks which are locking a
+ * given recovery journal block.
+ * @depot: The depot.
+ * @recovery_block_number: The sequence number of the recovery journal block whose locks should be
+ * released.
+ *
+ * Context: This method must be called from the journal zone thread.
+ */
+void vdo_commit_oldest_slab_journal_tail_blocks(struct slab_depot *depot,
+ sequence_number_t recovery_block_number)
+{
+ if (depot == NULL)
+ return;
+
+ depot->new_release_request = recovery_block_number;
+ vdo_schedule_default_action(depot->action_manager);
+}
+
+/* Implements vdo_zone_action_fn. */
+static void scrub_all_unrecovered_slabs(void *context, zone_count_t zone_number,
+ struct vdo_completion *parent)
+{
+ struct slab_depot *depot = context;
+
+ scrub_slabs(&depot->allocators[zone_number], NULL);
+ vdo_launch_completion(parent);
+}
+
+/**
+ * vdo_scrub_all_unrecovered_slabs() - Scrub all unrecovered slabs.
+ * @depot: The depot to scrub.
+ * @parent: The object to notify when scrubbing has been launched for all zones.
+ */
+void vdo_scrub_all_unrecovered_slabs(struct slab_depot *depot,
+ struct vdo_completion *parent)
+{
+ vdo_schedule_action(depot->action_manager, NULL,
+ scrub_all_unrecovered_slabs,
+ NULL, parent);
+}
+
+/**
+ * get_block_allocator_statistics() - Get the total of the statistics from all the block allocators
+ * in the depot.
+ * @depot: The slab depot.
+ *
+ * Return: The statistics from all block allocators in the depot.
+ */
+static struct block_allocator_statistics __must_check
+get_block_allocator_statistics(const struct slab_depot *depot)
+{
+ struct block_allocator_statistics totals;
+ zone_count_t zone;
+
+ memset(&totals, 0, sizeof(totals));
+
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ const struct block_allocator *allocator = &depot->allocators[zone];
+ const struct block_allocator_statistics *stats = &allocator->statistics;
+
+ totals.slab_count += allocator->slab_count;
+ totals.slabs_opened += READ_ONCE(stats->slabs_opened);
+ totals.slabs_reopened += READ_ONCE(stats->slabs_reopened);
+ }
+
+ return totals;
+}
+
+/**
+ * get_ref_counts_statistics() - Get the cumulative ref_counts statistics for the depot.
+ * @depot: The slab depot.
+ *
+ * Return: The cumulative statistics for all ref_counts in the depot.
+ */
+static struct ref_counts_statistics __must_check
+get_ref_counts_statistics(const struct slab_depot *depot)
+{
+ struct ref_counts_statistics totals;
+ zone_count_t zone;
+
+ memset(&totals, 0, sizeof(totals));
+
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ totals.blocks_written +=
+ READ_ONCE(depot->allocators[zone].ref_counts_statistics.blocks_written);
+ }
+
+ return totals;
+}
+
+/**
+ * get_slab_journal_statistics() - Get the aggregated slab journal statistics for the depot.
+ * @depot: The slab depot.
+ *
+ * Return: The aggregated statistics for all slab journals in the depot.
+ */
+static struct slab_journal_statistics __must_check
+get_slab_journal_statistics(const struct slab_depot *depot)
+{
+ struct slab_journal_statistics totals;
+ zone_count_t zone;
+
+ memset(&totals, 0, sizeof(totals));
+
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ const struct slab_journal_statistics *stats =
+ &depot->allocators[zone].slab_journal_statistics;
+
+ totals.disk_full_count += READ_ONCE(stats->disk_full_count);
+ totals.flush_count += READ_ONCE(stats->flush_count);
+ totals.blocked_count += READ_ONCE(stats->blocked_count);
+ totals.blocks_written += READ_ONCE(stats->blocks_written);
+ totals.tail_busy_count += READ_ONCE(stats->tail_busy_count);
+ }
+
+ return totals;
+}
+
+/**
+ * vdo_get_slab_depot_statistics() - Get all the vdo_statistics fields that are properties of the
+ * slab depot.
+ * @depot: The slab depot.
+ * @stats: The vdo statistics structure to partially fill.
+ */
+void vdo_get_slab_depot_statistics(const struct slab_depot *depot,
+ struct vdo_statistics *stats)
+{
+ slab_count_t slab_count = READ_ONCE(depot->slab_count);
+ slab_count_t unrecovered = 0;
+ zone_count_t zone;
+
+ for (zone = 0; zone < depot->zone_count; zone++) {
+ /* The allocators are responsible for thread safety. */
+ unrecovered += READ_ONCE(depot->allocators[zone].scrubber.slab_count);
+ }
+
+	/* A loaded depot always has at least one slab, so slab_count is nonzero here. */
+	stats->recovery_percentage = (slab_count - unrecovered) * 100 / slab_count;
+ stats->allocator = get_block_allocator_statistics(depot);
+ stats->ref_counts = get_ref_counts_statistics(depot);
+ stats->slab_journal = get_slab_journal_statistics(depot);
+ stats->slab_summary = (struct slab_summary_statistics) {
+ .blocks_written = atomic64_read(&depot->summary_statistics.blocks_written),
+ };
+}
+
+/**
+ * vdo_dump_slab_depot() - Dump the slab depot, in a thread-unsafe fashion.
+ * @depot: The slab depot.
+ */
+void vdo_dump_slab_depot(const struct slab_depot *depot)
+{
+ vdo_log_info("vdo slab depot");
+	vdo_log_info("	zone_count=%u old_zone_count=%u slab_count=%u active_release_request=%llu new_release_request=%llu",
+ (unsigned int) depot->zone_count,
+ (unsigned int) depot->old_zone_count, READ_ONCE(depot->slab_count),
+ (unsigned long long) depot->active_release_request,
+ (unsigned long long) depot->new_release_request);
+}
diff --git a/drivers/md/dm-vdo/slab-depot.h b/drivers/md/dm-vdo/slab-depot.h
new file mode 100644
index 000000000000..f234853501ca
--- /dev/null
+++ b/drivers/md/dm-vdo/slab-depot.h
@@ -0,0 +1,601 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_SLAB_DEPOT_H
+#define VDO_SLAB_DEPOT_H
+
+#include <linux/atomic.h>
+#include <linux/dm-kcopyd.h>
+#include <linux/list.h>
+
+#include "numeric.h"
+
+#include "admin-state.h"
+#include "completion.h"
+#include "data-vio.h"
+#include "encodings.h"
+#include "physical-zone.h"
+#include "priority-table.h"
+#include "recovery-journal.h"
+#include "statistics.h"
+#include "types.h"
+#include "vio.h"
+#include "wait-queue.h"
+
+/*
+ * A slab_depot is responsible for managing all of the slabs and block allocators of a VDO. It has
+ * a single array of slabs, which eliminates the extra math otherwise needed to compute which
+ * physical zone a PBN is in (a lookup sketched after struct slab_depot below). It also has a
+ * block_allocator per zone.
+ *
+ * Each physical zone has a single dedicated queue and thread for performing all updates to the
+ * slabs assigned to that zone. The concurrency guarantees of this single-threaded model allow the
+ * code to omit more fine-grained locking for the various slab structures. Each physical zone
+ * maintains a separate copy of the slab summary to remove the need for explicit locking on that
+ * structure as well.
+ *
+ * Load operations must be performed on the admin thread. Normal operations, such as allocations
+ * and reference count updates, must be performed on the appropriate physical zone thread. Requests
+ * from the recovery journal to commit slab journal tail blocks must be scheduled from the recovery
+ * journal thread to run on the appropriate physical zone thread. Save operations must be launched
+ * from the same admin thread as the original load operation.
+ */
+
+enum {
+ /* The number of vios in the vio pool is proportional to the throughput of the VDO. */
+ BLOCK_ALLOCATOR_VIO_POOL_SIZE = 128,
+};
+
+/*
+ * Represents the possible status of a block.
+ */
+enum reference_status {
+ RS_FREE, /* this block is free */
+ RS_SINGLE, /* this block is singly-referenced */
+ RS_SHARED, /* this block is shared */
+ RS_PROVISIONAL /* this block is provisionally allocated */
+};
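+
+/*
+ * A hedged sketch of how a raw counter value maps onto these statuses. The
+ * sentinel values (0 for free, 255 for provisional) are assumptions inferred
+ * from the MAXIMUM_REFS comment below, not constants defined in this header,
+ * and the helper name is illustrative.
+ */
+static inline enum reference_status sketch_status_for_count(vdo_refcount_t count)
+{
+	if (count == 0)
+		return RS_FREE;
+	if (count == 1)
+		return RS_SINGLE;
+	if (count == 255)
+		return RS_PROVISIONAL;
+	return RS_SHARED;
+}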
+
+struct vdo_slab;
+
+struct journal_lock {
+ u16 count;
+ sequence_number_t recovery_start;
+};
+
+struct slab_journal {
+ /* A waiter object for getting a VIO pool entry */
+ struct vdo_waiter resource_waiter;
+ /* A waiter object for updating the slab summary */
+ struct vdo_waiter slab_summary_waiter;
+ /* A waiter object for getting a vio with which to flush */
+ struct vdo_waiter flush_waiter;
+ /* The queue of VIOs waiting to make an entry */
+ struct vdo_wait_queue entry_waiters;
+ /* The parent slab reference of this journal */
+ struct vdo_slab *slab;
+
+ /* Whether a tail block commit is pending */
+ bool waiting_to_commit;
+ /* Whether the journal is updating the slab summary */
+ bool updating_slab_summary;
+ /* Whether the journal is adding entries from the entry_waiters queue */
+ bool adding_entries;
+ /* Whether a partial write is in progress */
+ bool partial_write_in_progress;
+
+ /* The oldest block in the journal on disk */
+ sequence_number_t head;
+ /* The oldest block in the journal which may not be reaped */
+ sequence_number_t unreapable;
+ /* The end of the half-open interval of the active journal */
+ sequence_number_t tail;
+ /* The next journal block to be committed */
+ sequence_number_t next_commit;
+ /* The tail sequence number that is written in the slab summary */
+ sequence_number_t summarized;
+	/* The tail sequence number that was last summarized in the slab summary */
+ sequence_number_t last_summarized;
+
+ /* The sequence number of the recovery journal lock */
+ sequence_number_t recovery_lock;
+
+ /*
+ * The number of entries which fit in a single block. Can't use the constant because unit
+ * tests change this number.
+ */
+ journal_entry_count_t entries_per_block;
+ /*
+ * The number of full entries which fit in a single block. Can't use the constant because
+ * unit tests change this number.
+ */
+ journal_entry_count_t full_entries_per_block;
+
+ /* The recovery journal of the VDO (slab journal holds locks on it) */
+ struct recovery_journal *recovery_journal;
+
+ /* The statistics shared by all slab journals in our physical zone */
+ struct slab_journal_statistics *events;
+ /* A list of the VIO pool entries for outstanding journal block writes */
+ struct list_head uncommitted_blocks;
+
+ /*
+ * The current tail block header state. This will be packed into the block just before it
+ * is written.
+ */
+ struct slab_journal_block_header tail_header;
+ /* A pointer to a block-sized buffer holding the packed block data */
+ struct packed_slab_journal_block *block;
+
+ /* The number of blocks in the on-disk journal */
+ block_count_t size;
+ /* The number of blocks at which to start pushing reference blocks */
+ block_count_t flushing_threshold;
+ /* The number of blocks at which all reference blocks should be writing */
+ block_count_t flushing_deadline;
+ /* The number of blocks at which to wait for reference blocks to write */
+ block_count_t blocking_threshold;
+ /* The number of blocks at which to scrub the slab before coming online */
+ block_count_t scrubbing_threshold;
+
+	/* This list entry is used by the block_allocator to keep a queue of dirty journals */
+ struct list_head dirty_entry;
+
+ /* The lock for the oldest unreaped block of the journal */
+ struct journal_lock *reap_lock;
+	/* The locks for each on-disk block */
+ struct journal_lock *locks;
+};
+
+/*
+ * Reference_block structure
+ *
+ * Blocks are used as a proxy, permitting saves of partial refcounts.
+ */
+struct reference_block {
+ /* This block waits on the ref_counts to tell it to write */
+ struct vdo_waiter waiter;
+ /* The slab to which this reference_block belongs */
+ struct vdo_slab *slab;
+ /* The number of references in this block that represent allocations */
+ block_size_t allocated_count;
+ /* The slab journal block on which this block must hold a lock */
+ sequence_number_t slab_journal_lock;
+ /* The slab journal block which should be released when this block is committed */
+ sequence_number_t slab_journal_lock_to_release;
+ /* The point up to which each sector is accurate on disk */
+ struct journal_point commit_points[VDO_SECTORS_PER_BLOCK];
+ /* Whether this block has been modified since it was written to disk */
+ bool is_dirty;
+ /* Whether this block is currently writing */
+ bool is_writing;
+};
+
+/* The search_cursor represents the saved position of a free block search. */
+struct search_cursor {
+ /* The reference block containing the current search index */
+ struct reference_block *block;
+ /* The position at which to start searching for the next free counter */
+ slab_block_number index;
+ /* The position just past the last valid counter in the current block */
+ slab_block_number end_index;
+
+ /* A pointer to the first reference block in the slab */
+ struct reference_block *first_block;
+ /* A pointer to the last reference block in the slab */
+ struct reference_block *last_block;
+};
+
+enum slab_rebuild_status {
+ VDO_SLAB_REBUILT,
+ VDO_SLAB_REPLAYING,
+ VDO_SLAB_REQUIRES_SCRUBBING,
+ VDO_SLAB_REQUIRES_HIGH_PRIORITY_SCRUBBING,
+ VDO_SLAB_REBUILDING,
+};
+
+/*
+ * This is the type declaration for the vdo_slab type. A vdo_slab currently consists of a run of
+ * 2^23 data blocks, but that will soon change to dedicate a small number of those blocks for
+ * metadata storage for the reference counts and slab journal for the slab.
+ *
+ * A reference count is maintained for each physical block number. The vast majority of blocks have
+ * a very small reference count (usually 0 or 1). For references less than or equal to MAXIMUM_REFS
+ * (254) the reference count is stored in counters[pbn].
+ */
+struct vdo_slab {
+ /* A list entry to queue this slab in a block_allocator list */
+ struct list_head allocq_entry;
+
+ /* The struct block_allocator that owns this slab */
+ struct block_allocator *allocator;
+
+ /* The journal for this slab */
+ struct slab_journal journal;
+
+ /* The slab number of this slab */
+ slab_count_t slab_number;
+ /* The offset in the allocator partition of the first block in this slab */
+ physical_block_number_t start;
+ /* The offset of the first block past the end of this slab */
+ physical_block_number_t end;
+ /* The starting translated PBN of the slab journal */
+ physical_block_number_t journal_origin;
+ /* The starting translated PBN of the reference counts */
+ physical_block_number_t ref_counts_origin;
+
+ /* The administrative state of the slab */
+ struct admin_state state;
+ /* The status of the slab */
+ enum slab_rebuild_status status;
+ /* Whether the slab was ever queued for scrubbing */
+ bool was_queued_for_scrubbing;
+
+ /* The priority at which this slab has been queued for allocation */
+ u8 priority;
+
+ /* Fields beyond this point are the reference counts for the data blocks in this slab. */
+ /* The size of the counters array */
+ u32 block_count;
+ /* The number of free blocks */
+ u32 free_blocks;
+ /* The array of reference counts */
+ vdo_refcount_t *counters; /* use vdo_allocate() to align data ptr */
+
+ /* The saved block pointer and array indexes for the free block search */
+ struct search_cursor search_cursor;
+
+ /* A list of the dirty blocks waiting to be written out */
+ struct vdo_wait_queue dirty_blocks;
+ /* The number of blocks which are currently writing */
+ size_t active_count;
+
+ /* A waiter object for updating the slab summary */
+ struct vdo_waiter summary_waiter;
+
+ /* The latest slab journal for which there has been a reference count update */
+ struct journal_point slab_journal_point;
+
+ /* The number of reference count blocks */
+ u32 reference_block_count;
+ /* reference count block array */
+ struct reference_block *reference_blocks;
+};
+
+enum block_allocator_drain_step {
+ VDO_DRAIN_ALLOCATOR_START,
+ VDO_DRAIN_ALLOCATOR_STEP_SCRUBBER,
+ VDO_DRAIN_ALLOCATOR_STEP_SLABS,
+ VDO_DRAIN_ALLOCATOR_STEP_SUMMARY,
+ VDO_DRAIN_ALLOCATOR_STEP_FINISHED,
+};
+
+struct slab_scrubber {
+ /* The queue of slabs to scrub first */
+ struct list_head high_priority_slabs;
+ /* The queue of slabs to scrub once there are no high_priority_slabs */
+ struct list_head slabs;
+ /* The queue of VIOs waiting for a slab to be scrubbed */
+ struct vdo_wait_queue waiters;
+
+ /*
+ * The number of slabs that are unrecovered or being scrubbed. This field is modified by
+ * the physical zone thread, but is queried by other threads.
+ */
+ slab_count_t slab_count;
+
+ /* The administrative state of the scrubber */
+ struct admin_state admin_state;
+ /* Whether to only scrub high-priority slabs */
+ bool high_priority_only;
+ /* The slab currently being scrubbed */
+ struct vdo_slab *slab;
+ /* The vio for loading slab journal blocks */
+ struct vio vio;
+};
+
+/* A sub-structure for applying actions in parallel to all of an allocator's slabs. */
+struct slab_actor {
+ /* The number of slabs performing a slab action */
+ slab_count_t slab_action_count;
+ /* The method to call when a slab action has been completed by all slabs */
+ vdo_action_fn callback;
+};
+
+/* A slab_iterator is a structure for iterating over a set of slabs. */
+struct slab_iterator {
+ struct vdo_slab **slabs;
+ struct vdo_slab *next;
+ slab_count_t end;
+ slab_count_t stride;
+};
+
+/*
+ * The slab_summary provides hints during load and recovery about the state of the slabs in order
+ * to avoid the need to read the slab journals in their entirety before a VDO can come online.
+ *
+ * The information in the summary for each slab includes the rough number of free blocks (which is
+ * used to prioritize scrubbing), the cleanliness of a slab (so that clean slabs containing free
+ * space will be used on restart), and the location of the tail block of the slab's journal.
+ *
+ * The slab_summary has its own partition at the end of the volume which is sized to allow for a
+ * complete copy of the summary for each of up to 16 physical zones.
+ *
+ * During resize, the slab_summary moves its backing partition and is saved once moved; the
+ * slab_summary is not permitted to overwrite the previous recovery journal space.
+ *
+ * The slab_summary does not have its own version information, but relies on the VDO volume version
+ * number.
+ */
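+
+/*
+ * A minimal sketch of the quantization behind the "rough number of free
+ * blocks": counts are shifted down into a 7-bit fullness hint so one byte of
+ * summary state can cover a slab of any configured size. The shift parameter
+ * corresponds to the depot's hint_shift field below; the helper name is
+ * illustrative only.
+ */
+static inline u8 sketch_fullness_hint(block_count_t free_blocks, unsigned int hint_shift)
+{
+	return (free_blocks >> hint_shift) & 0x7f;
+}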
+
+/*
+ * A slab status is a very small structure for use in determining the ordering of slabs in the
+ * scrubbing process.
+ */
+struct slab_status {
+ slab_count_t slab_number;
+ bool is_clean;
+ u8 emptiness;
+};
+
+struct slab_summary_block {
+ /* The block_allocator to which this block belongs */
+ struct block_allocator *allocator;
+ /* The index of this block in its zone's summary */
+ block_count_t index;
+ /* Whether this block has a write outstanding */
+ bool writing;
+ /* Ring of updates waiting on the outstanding write */
+ struct vdo_wait_queue current_update_waiters;
+ /* Ring of updates waiting on the next write */
+ struct vdo_wait_queue next_update_waiters;
+ /* The active slab_summary_entry array for this block */
+ struct slab_summary_entry *entries;
+ /* The vio used to write this block */
+ struct vio vio;
+ /* The packed entries, one block long, backing the vio */
+ char *outgoing_entries;
+};
+
+/*
+ * The statistics for all the slab summary zones owned by this slab summary. These fields are all
+ * mutated only by their physical zone threads, but are read by other threads when gathering
+ * statistics for the entire depot.
+ */
+struct atomic_slab_summary_statistics {
+ /* Number of blocks written */
+ atomic64_t blocks_written;
+};
+
+struct block_allocator {
+ struct vdo_completion completion;
+ /* The slab depot for this allocator */
+ struct slab_depot *depot;
+ /* The nonce of the VDO */
+ nonce_t nonce;
+ /* The physical zone number of this allocator */
+ zone_count_t zone_number;
+ /* The thread ID for this allocator's physical zone */
+ thread_id_t thread_id;
+ /* The number of slabs in this allocator */
+ slab_count_t slab_count;
+ /* The number of the last slab owned by this allocator */
+ slab_count_t last_slab;
+ /* The reduced priority level used to preserve unopened slabs */
+ unsigned int unopened_slab_priority;
+ /* The state of this allocator */
+ struct admin_state state;
+ /* The actor for applying an action to all slabs */
+ struct slab_actor slab_actor;
+
+ /* The slab from which blocks are currently being allocated */
+ struct vdo_slab *open_slab;
+ /* A priority queue containing all slabs available for allocation */
+ struct priority_table *prioritized_slabs;
+ /* The slab scrubber */
+ struct slab_scrubber scrubber;
+ /* What phase of the close operation the allocator is to perform */
+ enum block_allocator_drain_step drain_step;
+
+ /*
+ * These statistics are all mutated only by the physical zone thread, but are read by other
+ * threads when gathering statistics for the entire depot.
+ */
+ /*
+ * The count of allocated blocks in this zone. Not in block_allocator_statistics for
+ * historical reasons.
+ */
+ u64 allocated_blocks;
+ /* Statistics for this block allocator */
+ struct block_allocator_statistics statistics;
+ /* Cumulative statistics for the slab journals in this zone */
+ struct slab_journal_statistics slab_journal_statistics;
+ /* Cumulative statistics for the reference counters in this zone */
+ struct ref_counts_statistics ref_counts_statistics;
+
+ /*
+ * This is the head of a queue of slab journals which have entries in their tail blocks
+ * which have not yet started to commit. When the recovery journal is under space pressure,
+ * slab journals which have uncommitted entries holding a lock on the recovery journal head
+ * are forced to commit their blocks early. This list is kept in order, with the tail
+ * containing the slab journal holding the most recent recovery journal lock.
+ */
+ struct list_head dirty_slab_journals;
+
+ /* The vio pool for reading and writing block allocator metadata */
+ struct vio_pool *vio_pool;
+ /* The dm_kcopyd client for erasing slab journals */
+ struct dm_kcopyd_client *eraser;
+ /* Iterator over the slabs to be erased */
+ struct slab_iterator slabs_to_erase;
+
+ /* The portion of the slab summary managed by this allocator */
+ /* The state of the slab summary */
+ struct admin_state summary_state;
+ /* The number of outstanding summary writes */
+ block_count_t summary_write_count;
+ /* The array (owned by the blocks) of all entries */
+ struct slab_summary_entry *summary_entries;
+ /* The array of slab_summary_blocks */
+ struct slab_summary_block *summary_blocks;
+};
+
+enum slab_depot_load_type {
+ VDO_SLAB_DEPOT_NORMAL_LOAD,
+ VDO_SLAB_DEPOT_RECOVERY_LOAD,
+ VDO_SLAB_DEPOT_REBUILD_LOAD
+};
+
+struct slab_depot {
+ zone_count_t zone_count;
+ zone_count_t old_zone_count;
+ struct vdo *vdo;
+ struct slab_config slab_config;
+ struct action_manager *action_manager;
+
+ physical_block_number_t first_block;
+ physical_block_number_t last_block;
+ physical_block_number_t origin;
+
+ /* slab_size == (1 << slab_size_shift) */
+ unsigned int slab_size_shift;
+
+ /* Determines how slabs should be queued during load */
+ enum slab_depot_load_type load_type;
+
+ /* The state for notifying slab journals to release recovery journal */
+ sequence_number_t active_release_request;
+ sequence_number_t new_release_request;
+
+ /* State variables for scrubbing complete handling */
+ atomic_t zones_to_scrub;
+
+ /* Array of pointers to individually allocated slabs */
+ struct vdo_slab **slabs;
+ /* The number of slabs currently allocated and stored in 'slabs' */
+ slab_count_t slab_count;
+
+ /* Array of pointers to a larger set of slabs (used during resize) */
+ struct vdo_slab **new_slabs;
+ /* The number of slabs currently allocated and stored in 'new_slabs' */
+ slab_count_t new_slab_count;
+ /* The size that 'new_slabs' was allocated for */
+ block_count_t new_size;
+
+ /* The last block before resize, for rollback */
+ physical_block_number_t old_last_block;
+ /* The last block after resize, for resize */
+ physical_block_number_t new_last_block;
+
+ /* The statistics for the slab summary */
+ struct atomic_slab_summary_statistics summary_statistics;
+ /* The start of the slab summary partition */
+ physical_block_number_t summary_origin;
+ /* The number of bits to shift to get a 7-bit fullness hint */
+ unsigned int hint_shift;
+ /* The slab summary entries for all of the zones the partition can hold */
+ struct slab_summary_entry *summary_entries;
+
+ /* The block allocators for this depot */
+ struct block_allocator allocators[];
+};
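+
+/*
+ * A minimal sketch (for illustration only; this helper is not part of the
+ * interface) of the single-array lookup described at the top of this file:
+ * the slab covering a PBN falls out of one shift, and its physical zone
+ * comes from the slab's allocator back-pointer rather than per-zone math.
+ * The declared accessor for real callers is vdo_get_slab() below.
+ */
+static inline zone_count_t sketch_zone_for_pbn(const struct slab_depot *depot,
+					       physical_block_number_t pbn)
+{
+	slab_count_t slab_number = (pbn - depot->first_block) >> depot->slab_size_shift;
+
+	return depot->slabs[slab_number]->allocator->zone_number;
+}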
+
+struct reference_updater;
+
+bool __must_check vdo_attempt_replay_into_slab(struct vdo_slab *slab,
+ physical_block_number_t pbn,
+ enum journal_operation operation,
+ bool increment,
+ struct journal_point *recovery_point,
+ struct vdo_completion *parent);
+
+int __must_check vdo_adjust_reference_count_for_rebuild(struct slab_depot *depot,
+ physical_block_number_t pbn,
+ enum journal_operation operation);
+
+static inline struct block_allocator *vdo_as_block_allocator(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_BLOCK_ALLOCATOR_COMPLETION);
+ return container_of(completion, struct block_allocator, completion);
+}
+
+int __must_check vdo_acquire_provisional_reference(struct vdo_slab *slab,
+ physical_block_number_t pbn,
+ struct pbn_lock *lock);
+
+int __must_check vdo_allocate_block(struct block_allocator *allocator,
+ physical_block_number_t *block_number_ptr);
+
+int vdo_enqueue_clean_slab_waiter(struct block_allocator *allocator,
+ struct vdo_waiter *waiter);
+
+void vdo_modify_reference_count(struct vdo_completion *completion,
+ struct reference_updater *updater);
+
+int __must_check vdo_release_block_reference(struct block_allocator *allocator,
+ physical_block_number_t pbn);
+
+void vdo_notify_slab_journals_are_recovered(struct vdo_completion *completion);
+
+void vdo_dump_block_allocator(const struct block_allocator *allocator);
+
+int __must_check vdo_decode_slab_depot(struct slab_depot_state_2_0 state,
+ struct vdo *vdo,
+ struct partition *summary_partition,
+ struct slab_depot **depot_ptr);
+
+void vdo_free_slab_depot(struct slab_depot *depot);
+
+struct slab_depot_state_2_0 __must_check vdo_record_slab_depot(const struct slab_depot *depot);
+
+int __must_check vdo_allocate_reference_counters(struct slab_depot *depot);
+
+struct vdo_slab * __must_check vdo_get_slab(const struct slab_depot *depot,
+ physical_block_number_t pbn);
+
+u8 __must_check vdo_get_increment_limit(struct slab_depot *depot,
+ physical_block_number_t pbn);
+
+bool __must_check vdo_is_physical_data_block(const struct slab_depot *depot,
+ physical_block_number_t pbn);
+
+block_count_t __must_check vdo_get_slab_depot_allocated_blocks(const struct slab_depot *depot);
+
+block_count_t __must_check vdo_get_slab_depot_data_blocks(const struct slab_depot *depot);
+
+void vdo_get_slab_depot_statistics(const struct slab_depot *depot,
+ struct vdo_statistics *stats);
+
+void vdo_load_slab_depot(struct slab_depot *depot,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent, void *context);
+
+void vdo_prepare_slab_depot_to_allocate(struct slab_depot *depot,
+ enum slab_depot_load_type load_type,
+ struct vdo_completion *parent);
+
+void vdo_update_slab_depot_size(struct slab_depot *depot);
+
+int __must_check vdo_prepare_to_grow_slab_depot(struct slab_depot *depot,
+ const struct partition *partition);
+
+void vdo_use_new_slabs(struct slab_depot *depot, struct vdo_completion *parent);
+
+void vdo_abandon_new_slabs(struct slab_depot *depot);
+
+void vdo_drain_slab_depot(struct slab_depot *depot,
+ const struct admin_state_code *operation,
+ struct vdo_completion *parent);
+
+void vdo_resume_slab_depot(struct slab_depot *depot, struct vdo_completion *parent);
+
+void vdo_commit_oldest_slab_journal_tail_blocks(struct slab_depot *depot,
+ sequence_number_t recovery_block_number);
+
+void vdo_scrub_all_unrecovered_slabs(struct slab_depot *depot,
+ struct vdo_completion *parent);
+
+void vdo_dump_slab_depot(const struct slab_depot *depot);
+
+#endif /* VDO_SLAB_DEPOT_H */
diff --git a/drivers/md/dm-vdo/statistics.h b/drivers/md/dm-vdo/statistics.h
new file mode 100644
index 000000000000..c88a75dffba3
--- /dev/null
+++ b/drivers/md/dm-vdo/statistics.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef STATISTICS_H
+#define STATISTICS_H
+
+#include "types.h"
+
+enum {
+ STATISTICS_VERSION = 36,
+};
+
+struct block_allocator_statistics {
+ /* The total number of slabs from which blocks may be allocated */
+ u64 slab_count;
+ /* The total number of slabs from which blocks have ever been allocated */
+ u64 slabs_opened;
+ /* The number of times since loading that a slab has been re-opened */
+ u64 slabs_reopened;
+};
+
+/**
+ * Counters for tracking the number of items written (blocks, requests, etc.)
+ * at steps in the write pipeline. The three monotonic counters allow the
+ * number of buffered, in-memory items and the number of in-flight,
+ * unacknowledged writes to be derived, while still tracking totals for
+ * reporting purposes.
+ */
+struct commit_statistics {
+ /* The total number of items on which processing has started */
+ u64 started;
+ /* The total number of items for which a write operation has been issued */
+ u64 written;
+ /* The total number of items for which a write operation has completed */
+ u64 committed;
+};
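+
+/*
+ * A hedged sketch of the derivations described above (helper names are
+ * illustrative, not part of this interface): the transient queue depths
+ * fall out of the three monotonic totals by subtraction.
+ */
+static inline u64 commit_items_buffered(const struct commit_statistics *stats)
+{
+	return stats->started - stats->written;
+}
+
+static inline u64 commit_items_in_flight(const struct commit_statistics *stats)
+{
+	return stats->written - stats->committed;
+}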
+
+/** Counters for events in the recovery journal */
+struct recovery_journal_statistics {
+ /* Number of times the on-disk journal was full */
+ u64 disk_full;
+ /* Number of times the recovery journal requested slab journal commits. */
+ u64 slab_journal_commits_requested;
+ /* Write/Commit totals for individual journal entries */
+ struct commit_statistics entries;
+ /* Write/Commit totals for journal blocks */
+ struct commit_statistics blocks;
+};
+
+/** The statistics for the compressed block packer. */
+struct packer_statistics {
+ /* Number of compressed data items written since startup */
+ u64 compressed_fragments_written;
+ /* Number of blocks containing compressed items written since startup */
+ u64 compressed_blocks_written;
+ /* Number of VIOs that are pending in the packer */
+ u64 compressed_fragments_in_packer;
+};
+
+/** The statistics for the slab journals. */
+struct slab_journal_statistics {
+ /* Number of times the on-disk journal was full */
+ u64 disk_full_count;
+ /* Number of times an entry was added over the flush threshold */
+ u64 flush_count;
+ /* Number of times an entry was added over the block threshold */
+ u64 blocked_count;
+ /* Number of times a tail block was written */
+ u64 blocks_written;
+ /* Number of times we had to wait for the tail to write */
+ u64 tail_busy_count;
+};
+
+/** The statistics for the slab summary. */
+struct slab_summary_statistics {
+ /* Number of blocks written */
+ u64 blocks_written;
+};
+
+/** The statistics for the reference counts. */
+struct ref_counts_statistics {
+ /* Number of reference blocks written */
+ u64 blocks_written;
+};
+
+/** The statistics for the block map. */
+struct block_map_statistics {
+ /* number of dirty (resident) pages */
+ u32 dirty_pages;
+ /* number of clean (resident) pages */
+ u32 clean_pages;
+ /* number of free pages */
+ u32 free_pages;
+ /* number of pages in failed state */
+ u32 failed_pages;
+ /* number of pages incoming */
+ u32 incoming_pages;
+ /* number of pages outgoing */
+ u32 outgoing_pages;
+ /* how many times free page not avail */
+ u32 cache_pressure;
+ /* number of get_vdo_page() calls for read */
+ u64 read_count;
+ /* number of get_vdo_page() calls for write */
+ u64 write_count;
+ /* number of times pages failed to read */
+ u64 failed_reads;
+ /* number of times pages failed to write */
+ u64 failed_writes;
+ /* number of gets that are reclaimed */
+ u64 reclaimed;
+ /* number of gets for outgoing pages */
+ u64 read_outgoing;
+ /* number of gets that were already there */
+ u64 found_in_cache;
+ /* number of gets requiring discard */
+ u64 discard_required;
+ /* number of gets enqueued for their page */
+ u64 wait_for_page;
+ /* number of gets that have to fetch */
+ u64 fetch_required;
+ /* number of page fetches */
+ u64 pages_loaded;
+ /* number of page saves */
+ u64 pages_saved;
+ /* the number of flushes issued */
+ u64 flush_count;
+};
+
+/** The dedupe statistics from hash locks */
+struct hash_lock_statistics {
+ /* Number of times the UDS advice proved correct */
+ u64 dedupe_advice_valid;
+ /* Number of times the UDS advice proved incorrect */
+ u64 dedupe_advice_stale;
+ /* Number of writes with the same data as another in-flight write */
+ u64 concurrent_data_matches;
+ /* Number of writes whose hash collided with an in-flight write */
+ u64 concurrent_hash_collisions;
+ /* Current number of dedupe queries that are in flight */
+ u32 curr_dedupe_queries;
+};
+
+/** Counts of error conditions in VDO. */
+struct error_statistics {
+ /* number of times VDO got an invalid dedupe advice PBN from UDS */
+ u64 invalid_advice_pbn_count;
+ /* number of times a VIO completed with a VDO_NO_SPACE error */
+ u64 no_space_error_count;
+ /* number of times a VIO completed with a VDO_READ_ONLY error */
+ u64 read_only_error_count;
+};
+
+struct bio_stats {
+ /* Number of REQ_OP_READ bios */
+ u64 read;
+ /* Number of REQ_OP_WRITE bios with data */
+ u64 write;
+ /* Number of bios tagged with REQ_PREFLUSH and containing no data */
+ u64 empty_flush;
+ /* Number of REQ_OP_DISCARD bios */
+ u64 discard;
+ /* Number of bios tagged with REQ_PREFLUSH */
+ u64 flush;
+ /* Number of bios tagged with REQ_FUA */
+ u64 fua;
+};
+
+struct memory_usage {
+ /* Tracked bytes currently allocated. */
+ u64 bytes_used;
+ /* Maximum tracked bytes allocated. */
+ u64 peak_bytes_used;
+};
+
+/** UDS index statistics */
+struct index_statistics {
+ /* Number of records stored in the index */
+ u64 entries_indexed;
+ /* Number of post calls that found an existing entry */
+ u64 posts_found;
+ /* Number of post calls that added a new entry */
+ u64 posts_not_found;
+ /* Number of query calls that found an existing entry */
+ u64 queries_found;
+ /* Number of query calls that added a new entry */
+ u64 queries_not_found;
+ /* Number of update calls that found an existing entry */
+ u64 updates_found;
+ /* Number of update calls that added a new entry */
+ u64 updates_not_found;
+ /* Number of entries discarded */
+ u64 entries_discarded;
+};
+
+/** The statistics of the vdo service. */
+struct vdo_statistics {
+ u32 version;
+ /* Number of blocks used for data */
+ u64 data_blocks_used;
+ /* Number of blocks used for VDO metadata */
+ u64 overhead_blocks_used;
+ /* Number of logical blocks that are currently mapped to physical blocks */
+ u64 logical_blocks_used;
+ /* number of physical blocks */
+ block_count_t physical_blocks;
+ /* number of logical blocks */
+ block_count_t logical_blocks;
+ /* Size of the block map page cache, in bytes */
+ u64 block_map_cache_size;
+ /* The physical block size */
+ u64 block_size;
+ /* Number of times the VDO has successfully recovered */
+ u64 complete_recoveries;
+ /* Number of times the VDO has recovered from read-only mode */
+ u64 read_only_recoveries;
+ /* String describing the operating mode of the VDO */
+ char mode[15];
+ /* Whether the VDO is in recovery mode */
+ bool in_recovery_mode;
+ /* What percentage of recovery mode work has been completed */
+ u8 recovery_percentage;
+ /* The statistics for the compressed block packer */
+ struct packer_statistics packer;
+ /* Counters for events in the block allocator */
+ struct block_allocator_statistics allocator;
+ /* Counters for events in the recovery journal */
+ struct recovery_journal_statistics journal;
+ /* The statistics for the slab journals */
+ struct slab_journal_statistics slab_journal;
+ /* The statistics for the slab summary */
+ struct slab_summary_statistics slab_summary;
+ /* The statistics for the reference counts */
+ struct ref_counts_statistics ref_counts;
+ /* The statistics for the block map */
+ struct block_map_statistics block_map;
+ /* The dedupe statistics from hash locks */
+ struct hash_lock_statistics hash_lock;
+ /* Counts of error conditions */
+ struct error_statistics errors;
+ /* The VDO instance */
+ u32 instance;
+ /* Current number of active VIOs */
+ u32 current_vios_in_progress;
+ /* Maximum number of active VIOs */
+ u32 max_vios;
+ /* Number of times the UDS index was too slow in responding */
+ u64 dedupe_advice_timeouts;
+ /* Number of flush requests submitted to the storage device */
+ u64 flush_out;
+ /* Logical block size */
+ u64 logical_block_size;
+ /* Bios submitted into VDO from above */
+ struct bio_stats bios_in;
+ struct bio_stats bios_in_partial;
+ /* Bios submitted onward for user data */
+ struct bio_stats bios_out;
+ /* Bios submitted onward for metadata */
+ struct bio_stats bios_meta;
+ struct bio_stats bios_journal;
+ struct bio_stats bios_page_cache;
+ struct bio_stats bios_out_completed;
+ struct bio_stats bios_meta_completed;
+ struct bio_stats bios_journal_completed;
+ struct bio_stats bios_page_cache_completed;
+ struct bio_stats bios_acknowledged;
+ struct bio_stats bios_acknowledged_partial;
+ /* Current number of bios in progress */
+ struct bio_stats bios_in_progress;
+ /* Memory usage stats. */
+ struct memory_usage memory_usage;
+ /* The statistics for the UDS index */
+ struct index_statistics index;
+};
+
+#endif /* not STATISTICS_H */
diff --git a/drivers/md/dm-vdo/status-codes.c b/drivers/md/dm-vdo/status-codes.c
new file mode 100644
index 000000000000..d3493450b169
--- /dev/null
+++ b/drivers/md/dm-vdo/status-codes.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "status-codes.h"
+
+#include "errors.h"
+#include "logger.h"
+#include "permassert.h"
+#include "thread-utils.h"
+
+const struct error_info vdo_status_list[] = {
+ { "VDO_NOT_IMPLEMENTED", "Not implemented" },
+ { "VDO_OUT_OF_RANGE", "Out of range" },
+ { "VDO_REF_COUNT_INVALID", "Reference count would become invalid" },
+ { "VDO_NO_SPACE", "Out of space" },
+ { "VDO_BAD_CONFIGURATION", "Bad configuration option" },
+ { "VDO_COMPONENT_BUSY", "Prior operation still in progress" },
+ { "VDO_BAD_PAGE", "Corrupt or incorrect page" },
+ { "VDO_UNSUPPORTED_VERSION", "Unsupported component version" },
+ { "VDO_INCORRECT_COMPONENT", "Component id mismatch in decoder" },
+ { "VDO_PARAMETER_MISMATCH", "Parameters have conflicting values" },
+ { "VDO_UNKNOWN_PARTITION", "No partition exists with a given id" },
+ { "VDO_PARTITION_EXISTS", "A partition already exists with a given id" },
+ { "VDO_INCREMENT_TOO_SMALL", "Physical block growth of too few blocks" },
+ { "VDO_CHECKSUM_MISMATCH", "Incorrect checksum" },
+ { "VDO_LOCK_ERROR", "A lock is held incorrectly" },
+ { "VDO_READ_ONLY", "The device is in read-only mode" },
+ { "VDO_SHUTTING_DOWN", "The device is shutting down" },
+ { "VDO_CORRUPT_JOURNAL", "Recovery journal entries corrupted" },
+ { "VDO_TOO_MANY_SLABS", "Exceeds maximum number of slabs supported" },
+ { "VDO_INVALID_FRAGMENT", "Compressed block fragment is invalid" },
+ { "VDO_RETRY_AFTER_REBUILD", "Retry operation after rebuilding finishes" },
+ { "VDO_BAD_MAPPING", "Invalid page mapping" },
+ { "VDO_BIO_CREATION_FAILED", "Bio creation failed" },
+ { "VDO_BAD_MAGIC", "Bad magic number" },
+ { "VDO_BAD_NONCE", "Bad nonce" },
+ { "VDO_JOURNAL_OVERFLOW", "Journal sequence number overflow" },
+ { "VDO_INVALID_ADMIN_STATE", "Invalid operation for current state" },
+};
+
+/**
+ * vdo_register_status_codes() - Register the VDO status codes.
+ * Return: A success or error code.
+ */
+int vdo_register_status_codes(void)
+{
+ int result;
+
+ BUILD_BUG_ON((VDO_STATUS_CODE_LAST - VDO_STATUS_CODE_BASE) !=
+ ARRAY_SIZE(vdo_status_list));
+
+ result = uds_register_error_block("VDO Status", VDO_STATUS_CODE_BASE,
+ VDO_STATUS_CODE_BLOCK_END, vdo_status_list,
+ sizeof(vdo_status_list));
+ return (result == UDS_SUCCESS) ? VDO_SUCCESS : result;
+}
+
+/**
+ * vdo_status_to_errno() - Given an error code, return a value we can return to the OS.
+ * @error: The error code to convert.
+ *
+ * The input error code may be a system-generated value (such as -EIO), an errno macro used in our
+ * code (such as EIO), or a UDS or VDO status code; the result must be something the rest of the OS
+ * can consume (negative errno values such as -EIO, in the case of the kernel).
+ *
+ * Return: A system error code value.
+ */
+int vdo_status_to_errno(int error)
+{
+ char error_name[VDO_MAX_ERROR_NAME_SIZE];
+ char error_message[VDO_MAX_ERROR_MESSAGE_SIZE];
+
+	/* 0 is success; negative values are already system error codes. */
+	if (likely(error <= 0))
+		return error;
+	/* Positive values below 1024 are assumed to be bare errno values. */
+	if (error < 1024)
+		return -error;
+
+ /* VDO or UDS error */
+ switch (error) {
+ case VDO_NO_SPACE:
+ return -ENOSPC;
+ case VDO_READ_ONLY:
+ return -EIO;
+ default:
+ vdo_log_info("%s: mapping internal status code %d (%s: %s) to EIO",
+ __func__, error,
+ uds_string_error_name(error, error_name, sizeof(error_name)),
+ uds_string_error(error, error_message, sizeof(error_message)));
+ return -EIO;
+ }
+}
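+
+/*
+ * A hedged usage sketch, not part of this file's interface: a bio
+ * completion path could translate an internal status for the block layer,
+ * assuming the usual helpers from <linux/bio.h> and <linux/blkdev.h> were
+ * available here:
+ *
+ *	bio->bi_status = errno_to_blk_status(vdo_status_to_errno(result));
+ *	bio_endio(bio);
+ */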
diff --git a/drivers/md/dm-vdo/status-codes.h b/drivers/md/dm-vdo/status-codes.h
new file mode 100644
index 000000000000..72da04159f88
--- /dev/null
+++ b/drivers/md/dm-vdo/status-codes.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_STATUS_CODES_H
+#define VDO_STATUS_CODES_H
+
+#include "errors.h"
+
+enum {
+ UDS_ERRORS_BLOCK_SIZE = UDS_ERROR_CODE_BLOCK_END - UDS_ERROR_CODE_BASE,
+ VDO_ERRORS_BLOCK_START = UDS_ERROR_CODE_BLOCK_END,
+ VDO_ERRORS_BLOCK_END = VDO_ERRORS_BLOCK_START + UDS_ERRORS_BLOCK_SIZE,
+};
+
+/* VDO-specific status codes. */
+enum vdo_status_codes {
+ /* base of all VDO errors */
+ VDO_STATUS_CODE_BASE = VDO_ERRORS_BLOCK_START,
+ /* we haven't written this yet */
+ VDO_NOT_IMPLEMENTED = VDO_STATUS_CODE_BASE,
+ /* input out of range */
+ VDO_OUT_OF_RANGE,
+ /* an invalid reference count would result */
+ VDO_REF_COUNT_INVALID,
+ /* a free block could not be allocated */
+ VDO_NO_SPACE,
+ /* improper or missing configuration option */
+ VDO_BAD_CONFIGURATION,
+ /* prior operation still in progress */
+ VDO_COMPONENT_BUSY,
+ /* page contents incorrect or corrupt data */
+ VDO_BAD_PAGE,
+ /* unsupported version of some component */
+ VDO_UNSUPPORTED_VERSION,
+ /* component id mismatch in decoder */
+ VDO_INCORRECT_COMPONENT,
+ /* parameters have conflicting values */
+ VDO_PARAMETER_MISMATCH,
+ /* no partition exists with a given id */
+ VDO_UNKNOWN_PARTITION,
+ /* a partition already exists with a given id */
+ VDO_PARTITION_EXISTS,
+ /* physical block growth of too few blocks */
+ VDO_INCREMENT_TOO_SMALL,
+ /* incorrect checksum */
+ VDO_CHECKSUM_MISMATCH,
+ /* a lock is held incorrectly */
+ VDO_LOCK_ERROR,
+ /* the VDO is in read-only mode */
+ VDO_READ_ONLY,
+ /* the VDO is shutting down */
+ VDO_SHUTTING_DOWN,
+ /* the recovery journal has corrupt entries */
+ VDO_CORRUPT_JOURNAL,
+ /* exceeds maximum number of slabs supported */
+ VDO_TOO_MANY_SLABS,
+ /* a compressed block fragment is invalid */
+ VDO_INVALID_FRAGMENT,
+ /* action is unsupported while rebuilding */
+ VDO_RETRY_AFTER_REBUILD,
+ /* a block map entry is invalid */
+ VDO_BAD_MAPPING,
+ /* bio_add_page failed */
+ VDO_BIO_CREATION_FAILED,
+ /* bad magic number */
+ VDO_BAD_MAGIC,
+ /* bad nonce */
+ VDO_BAD_NONCE,
+ /* sequence number overflow */
+ VDO_JOURNAL_OVERFLOW,
+ /* the VDO is not in a state to perform an admin operation */
+ VDO_INVALID_ADMIN_STATE,
+ /* one more than last error code */
+ VDO_STATUS_CODE_LAST,
+ VDO_STATUS_CODE_BLOCK_END = VDO_ERRORS_BLOCK_END
+};
+
+extern const struct error_info vdo_status_list[];
+
+int vdo_register_status_codes(void);
+
+int vdo_status_to_errno(int error);
+
+#endif /* VDO_STATUS_CODES_H */
diff --git a/drivers/md/dm-vdo/string-utils.c b/drivers/md/dm-vdo/string-utils.c
new file mode 100644
index 000000000000..71e44b4683ea
--- /dev/null
+++ b/drivers/md/dm-vdo/string-utils.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "string-utils.h"
+
+char *vdo_append_to_buffer(char *buffer, char *buf_end, const char *fmt, ...)
+{
+ va_list args;
+ size_t n;
+
+ va_start(args, fmt);
+ n = vsnprintf(buffer, buf_end - buffer, fmt, args);
+ if (n >= (size_t) (buf_end - buffer))
+ buffer = buf_end;
+ else
+ buffer += n;
+ va_end(args);
+
+ return buffer;
+}
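+
+/*
+ * A hedged usage sketch (names hypothetical): successive calls advance the
+ * cursor and saturate at buf_end, so no overflow checks are needed between
+ * appends.
+ */
+static inline char *sketch_format_zone(char *buf, size_t size, unsigned int zone, u64 free)
+{
+	char *pos = buf;
+	char *end = buf + size;
+
+	pos = vdo_append_to_buffer(pos, end, "zone=%u", zone);
+	return vdo_append_to_buffer(pos, end, " free=%llu",
+				    (unsigned long long) free);
+}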
diff --git a/drivers/md/dm-vdo/string-utils.h b/drivers/md/dm-vdo/string-utils.h
new file mode 100644
index 000000000000..96eecd38b1c2
--- /dev/null
+++ b/drivers/md/dm-vdo/string-utils.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_STRING_UTILS_H
+#define VDO_STRING_UTILS_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+/* Utilities related to string manipulation */
+
+static inline const char *vdo_bool_to_string(bool value)
+{
+ return value ? "true" : "false";
+}
+
+/* Append a formatted string to the end of a buffer. */
+char *vdo_append_to_buffer(char *buffer, char *buf_end, const char *fmt, ...)
+ __printf(3, 4);
+
+#endif /* VDO_STRING_UTILS_H */
diff --git a/drivers/md/dm-vdo/thread-device.c b/drivers/md/dm-vdo/thread-device.c
new file mode 100644
index 000000000000..df13ca914db8
--- /dev/null
+++ b/drivers/md/dm-vdo/thread-device.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "thread-device.h"
+
+/* A registry of threads associated with device id numbers. */
+static struct thread_registry device_id_thread_registry;
+
+/* Any registered thread must be unregistered. */
+void vdo_register_thread_device_id(struct registered_thread *new_thread,
+ unsigned int *id_ptr)
+{
+ vdo_register_thread(&device_id_thread_registry, new_thread, id_ptr);
+}
+
+void vdo_unregister_thread_device_id(void)
+{
+ vdo_unregister_thread(&device_id_thread_registry);
+}
+
+int vdo_get_thread_device_id(void)
+{
+ const unsigned int *pointer;
+
+ pointer = vdo_lookup_thread(&device_id_thread_registry);
+ return (pointer != NULL) ? *pointer : -1;
+}
+
+void vdo_initialize_thread_device_registry(void)
+{
+ vdo_initialize_thread_registry(&device_id_thread_registry);
+}
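+
+/*
+ * A hedged usage sketch (caller and work function are hypothetical): a
+ * device registers its instance number around a unit of work so that any
+ * log message emitted below it can be attributed to that device.
+ */
+static inline void sketch_run_attributed(unsigned int *instance,
+					 void (*work)(void *context), void *context)
+{
+	struct registered_thread thread;
+
+	vdo_register_thread_device_id(&thread, instance);
+	work(context);
+	vdo_unregister_thread_device_id();
+}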
diff --git a/drivers/md/dm-vdo/thread-device.h b/drivers/md/dm-vdo/thread-device.h
new file mode 100644
index 000000000000..494d9c9ef3f6
--- /dev/null
+++ b/drivers/md/dm-vdo/thread-device.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_THREAD_DEVICE_H
+#define VDO_THREAD_DEVICE_H
+
+#include "thread-registry.h"
+
+void vdo_register_thread_device_id(struct registered_thread *new_thread,
+ unsigned int *id_ptr);
+
+void vdo_unregister_thread_device_id(void);
+
+int vdo_get_thread_device_id(void);
+
+void vdo_initialize_thread_device_registry(void);
+
+#endif /* VDO_THREAD_DEVICE_H */
diff --git a/drivers/md/dm-vdo/thread-registry.c b/drivers/md/dm-vdo/thread-registry.c
new file mode 100644
index 000000000000..d4a077d58c60
--- /dev/null
+++ b/drivers/md/dm-vdo/thread-registry.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "thread-registry.h"
+
+#include <asm/current.h>
+#include <linux/rculist.h>
+
+#include "permassert.h"
+
+/*
+ * We need to be careful when using other facilities that may use thread registry functions in
+ * their normal operation. For example, we do not want to invoke the logger while holding a lock.
+ */
+
+void vdo_initialize_thread_registry(struct thread_registry *registry)
+{
+ INIT_LIST_HEAD(&registry->links);
+ spin_lock_init(&registry->lock);
+}
+
+/* Register the current thread and associate it with a data pointer. */
+void vdo_register_thread(struct thread_registry *registry,
+ struct registered_thread *new_thread, const void *pointer)
+{
+ struct registered_thread *thread;
+ bool found_it = false;
+
+ INIT_LIST_HEAD(&new_thread->links);
+ new_thread->pointer = pointer;
+ new_thread->task = current;
+
+ spin_lock(&registry->lock);
+ list_for_each_entry(thread, &registry->links, links) {
+ if (thread->task == current) {
+ /* There should be no existing entry. */
+ list_del_rcu(&thread->links);
+ found_it = true;
+ break;
+ }
+ }
+ list_add_tail_rcu(&new_thread->links, &registry->links);
+ spin_unlock(&registry->lock);
+
+ VDO_ASSERT_LOG_ONLY(!found_it, "new thread not already in registry");
+ if (found_it) {
+ /* Ensure no RCU iterators see it before re-initializing. */
+ synchronize_rcu();
+ INIT_LIST_HEAD(&thread->links);
+ }
+}
+
+void vdo_unregister_thread(struct thread_registry *registry)
+{
+ struct registered_thread *thread;
+ bool found_it = false;
+
+ spin_lock(&registry->lock);
+ list_for_each_entry(thread, &registry->links, links) {
+ if (thread->task == current) {
+ list_del_rcu(&thread->links);
+ found_it = true;
+ break;
+ }
+ }
+ spin_unlock(&registry->lock);
+
+ VDO_ASSERT_LOG_ONLY(found_it, "thread found in registry");
+ if (found_it) {
+ /* Ensure no RCU iterators see it before re-initializing. */
+ synchronize_rcu();
+ INIT_LIST_HEAD(&thread->links);
+ }
+}
+
+const void *vdo_lookup_thread(struct thread_registry *registry)
+{
+ struct registered_thread *thread;
+ const void *result = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(thread, &registry->links, links) {
+ if (thread->task == current) {
+ result = thread->pointer;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return result;
+}
diff --git a/drivers/md/dm-vdo/thread-registry.h b/drivers/md/dm-vdo/thread-registry.h
new file mode 100644
index 000000000000..cc6d78312b9e
--- /dev/null
+++ b/drivers/md/dm-vdo/thread-registry.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_THREAD_REGISTRY_H
+#define VDO_THREAD_REGISTRY_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+struct thread_registry {
+ struct list_head links;
+ spinlock_t lock;
+};
+
+struct registered_thread {
+ struct list_head links;
+ const void *pointer;
+ struct task_struct *task;
+};
+
+void vdo_initialize_thread_registry(struct thread_registry *registry);
+
+void vdo_register_thread(struct thread_registry *registry,
+ struct registered_thread *new_thread, const void *pointer);
+
+void vdo_unregister_thread(struct thread_registry *registry);
+
+const void *vdo_lookup_thread(struct thread_registry *registry);
+
+#endif /* VDO_THREAD_REGISTRY_H */
diff --git a/drivers/md/dm-vdo/thread-utils.c b/drivers/md/dm-vdo/thread-utils.c
new file mode 100644
index 000000000000..ec08478dd013
--- /dev/null
+++ b/drivers/md/dm-vdo/thread-utils.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "thread-utils.h"
+
+#include <asm/current.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "errors.h"
+#include "logger.h"
+#include "memory-alloc.h"
+
+static struct hlist_head thread_list;
+static struct mutex thread_mutex;
+
+struct thread {
+ void (*thread_function)(void *thread_data);
+ void *thread_data;
+ struct hlist_node thread_links;
+ struct task_struct *thread_task;
+ struct completion thread_done;
+};
+
+void vdo_initialize_threads_mutex(void)
+{
+ mutex_init(&thread_mutex);
+}
+
+static int thread_starter(void *arg)
+{
+ struct registered_thread allocating_thread;
+ struct thread *thread = arg;
+
+ thread->thread_task = current;
+ mutex_lock(&thread_mutex);
+ hlist_add_head(&thread->thread_links, &thread_list);
+ mutex_unlock(&thread_mutex);
+ vdo_register_allocating_thread(&allocating_thread, NULL);
+ thread->thread_function(thread->thread_data);
+ vdo_unregister_allocating_thread();
+ complete(&thread->thread_done);
+ return 0;
+}
+
+int vdo_create_thread(void (*thread_function)(void *), void *thread_data,
+ const char *name, struct thread **new_thread)
+{
+ char *name_colon = strchr(name, ':');
+ char *my_name_colon = strchr(current->comm, ':');
+ struct task_struct *task;
+ struct thread *thread;
+ int result;
+
+ result = vdo_allocate(1, struct thread, __func__, &thread);
+ if (result != VDO_SUCCESS) {
+ vdo_log_warning("Error allocating memory for %s", name);
+ return result;
+ }
+
+ thread->thread_function = thread_function;
+ thread->thread_data = thread_data;
+ init_completion(&thread->thread_done);
+ /*
+ * Start the thread, with an appropriate thread name.
+ *
+ * If the name supplied contains a colon character, use that name. This causes uds module
+ * threads to have names like "uds:callbackW" and the main test runner thread to be named
+ * "zub:runtest".
+ *
+ * Otherwise if the current thread has a name containing a colon character, prefix the name
+ * supplied with the name of the current thread up to (and including) the colon character.
+ * Thus when the "kvdo0:dedupeQ" thread opens an index session, all the threads associated
+ * with that index will have names like "kvdo0:foo".
+ *
+ * Otherwise just use the name supplied. This should be a rare occurrence.
+ */
+ if ((name_colon == NULL) && (my_name_colon != NULL)) {
+ task = kthread_run(thread_starter, thread, "%.*s:%s",
+ (int) (my_name_colon - current->comm), current->comm,
+ name);
+ } else {
+ task = kthread_run(thread_starter, thread, "%s", name);
+ }
+
+ if (IS_ERR(task)) {
+ vdo_free(thread);
+ return PTR_ERR(task);
+ }
+
+ *new_thread = thread;
+ return VDO_SUCCESS;
+}
+
+void vdo_join_threads(struct thread *thread)
+{
+ while (wait_for_completion_interruptible(&thread->thread_done))
+ fsleep(1000);
+
+ mutex_lock(&thread_mutex);
+ hlist_del(&thread->thread_links);
+ mutex_unlock(&thread_mutex);
+ vdo_free(thread);
+}
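+
+/*
+ * A hedged usage sketch (worker and names hypothetical): create a named
+ * thread, then reap it with vdo_join_threads() once its function returns.
+ */
+static inline void sketch_worker(void *data)
+{
+	vdo_log_info("worker handling %p", data);
+}
+
+static inline int sketch_run_worker(void *data)
+{
+	struct thread *thread;
+	int result = vdo_create_thread(sketch_worker, data, "kvdo:sketchQ", &thread);
+
+	if (result != VDO_SUCCESS)
+		return result;
+
+	vdo_join_threads(thread);
+	return VDO_SUCCESS;
+}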
diff --git a/drivers/md/dm-vdo/thread-utils.h b/drivers/md/dm-vdo/thread-utils.h
new file mode 100644
index 000000000000..687ab43e2cee
--- /dev/null
+++ b/drivers/md/dm-vdo/thread-utils.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef THREAD_UTILS_H
+#define THREAD_UTILS_H
+
+#include <linux/atomic.h>
+
+/* Thread and synchronization utilities */
+
+struct thread;
+
+void vdo_initialize_threads_mutex(void);
+int __must_check vdo_create_thread(void (*thread_function)(void *), void *thread_data,
+ const char *name, struct thread **new_thread);
+void vdo_join_threads(struct thread *thread);
+
+#endif /* THREAD_UTILS_H */
diff --git a/drivers/md/dm-vdo/time-utils.h b/drivers/md/dm-vdo/time-utils.h
new file mode 100644
index 000000000000..5f1e850fd826
--- /dev/null
+++ b/drivers/md/dm-vdo/time-utils.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef UDS_TIME_UTILS_H
+#define UDS_TIME_UTILS_H
+
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+static inline s64 ktime_to_seconds(ktime_t reltime)
+{
+ return reltime / NSEC_PER_SEC;
+}
+
+static inline ktime_t current_time_ns(clockid_t clock)
+{
+ return clock == CLOCK_MONOTONIC ? ktime_get_ns() : ktime_get_real_ns();
+}
+
+static inline ktime_t current_time_us(void)
+{
+ return current_time_ns(CLOCK_REALTIME) / NSEC_PER_USEC;
+}
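+
+/*
+ * A minimal sketch (helper name illustrative): timing an operation with the
+ * monotonic clock helper above.
+ */
+static inline s64 sketch_elapsed_ns(void (*op)(void))
+{
+	ktime_t start = current_time_ns(CLOCK_MONOTONIC);
+
+	op();
+	return current_time_ns(CLOCK_MONOTONIC) - start;
+}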
+
+#endif /* UDS_TIME_UTILS_H */
diff --git a/drivers/md/dm-vdo/types.h b/drivers/md/dm-vdo/types.h
new file mode 100644
index 000000000000..dbe892b10f26
--- /dev/null
+++ b/drivers/md/dm-vdo/types.h
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_TYPES_H
+#define VDO_TYPES_H
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/device-mapper.h>
+#include <linux/list.h>
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+#include "funnel-queue.h"
+
+/* A size type in blocks. */
+typedef u64 block_count_t;
+
+/* The size of a block. */
+typedef u16 block_size_t;
+
+/* A counter for data_vios */
+typedef u16 data_vio_count_t;
+
+/* A height within a tree. */
+typedef u8 height_t;
+
+/* The logical block number as used by the consumer. */
+typedef u64 logical_block_number_t;
+
+/* The type of the nonce used to identify instances of VDO. */
+typedef u64 nonce_t;
+
+/* A size in pages. */
+typedef u32 page_count_t;
+
+/* A page number. */
+typedef u32 page_number_t;
+
+/*
+ * The physical (well, less logical) block number at which the block is found on the underlying
+ * device.
+ */
+typedef u64 physical_block_number_t;
+
+/* A count of tree roots. */
+typedef u8 root_count_t;
+
+/* A number of sectors. */
+typedef u8 sector_count_t;
+
+/* A sequence number. */
+typedef u64 sequence_number_t;
+
+/* The offset of a block within a slab. */
+typedef u32 slab_block_number;
+
+/* A size type in slabs. */
+typedef u16 slab_count_t;
+
+/* A slot in a bin or block map page. */
+typedef u16 slot_number_t;
+
+/* typedef thread_count_t - A thread counter. */
+typedef u8 thread_count_t;
+
+/* typedef thread_id_t - A thread ID, vdo threads are numbered sequentially from 0. */
+typedef u8 thread_id_t;
+
+/* A zone counter */
+typedef u8 zone_count_t;
+
+/* The following enums are persisted on storage, so the values must be preserved. */
+
+/* The current operating mode of the VDO. */
+enum vdo_state {
+ VDO_DIRTY = 0,
+ VDO_NEW = 1,
+ VDO_CLEAN = 2,
+ VDO_READ_ONLY_MODE = 3,
+ VDO_FORCE_REBUILD = 4,
+ VDO_RECOVERING = 5,
+ VDO_REPLAYING = 6, /* VDO_REPLAYING is never set anymore, but retained for upgrade */
+ VDO_REBUILD_FOR_UPGRADE = 7,
+
+ /* Keep VDO_STATE_COUNT at the bottom. */
+ VDO_STATE_COUNT
+};
+
+/**
+ * vdo_state_requires_read_only_rebuild() - Check whether a vdo_state indicates
+ * that a read-only rebuild is required.
+ * @state: The vdo_state to check.
+ *
+ * Return: true if the state indicates a rebuild is required
+ */
+static inline bool __must_check vdo_state_requires_read_only_rebuild(enum vdo_state state)
+{
+ return ((state == VDO_FORCE_REBUILD) || (state == VDO_REBUILD_FOR_UPGRADE));
+}
+
+/**
+ * vdo_state_requires_recovery() - Check whether a vdo state indicates that recovery is needed.
+ * @state: The state to check.
+ *
+ * Return: true if the state indicates a recovery is required
+ */
+static inline bool __must_check vdo_state_requires_recovery(enum vdo_state state)
+{
+ return ((state == VDO_DIRTY) || (state == VDO_REPLAYING) || (state == VDO_RECOVERING));
+}
+
+/*
+ * The current operation on a physical block (from the point of view of the recovery journal, slab
+ * journals, and reference counts).
+ */
+enum journal_operation {
+ VDO_JOURNAL_DATA_REMAPPING = 0,
+ VDO_JOURNAL_BLOCK_MAP_REMAPPING = 1,
+} __packed;
+
+/* Partition IDs encoded in the volume layout in the super block. */
+enum partition_id {
+ VDO_BLOCK_MAP_PARTITION = 0,
+ VDO_SLAB_DEPOT_PARTITION = 1,
+ VDO_RECOVERY_JOURNAL_PARTITION = 2,
+ VDO_SLAB_SUMMARY_PARTITION = 3,
+} __packed;
+
+/* Metadata types for the vdo. */
+enum vdo_metadata_type {
+ VDO_METADATA_RECOVERY_JOURNAL = 1,
+ VDO_METADATA_SLAB_JOURNAL = 2,
+ VDO_METADATA_RECOVERY_JOURNAL_2 = 3,
+} __packed;
+
+/* A position in the block map where a block map entry is stored. */
+struct block_map_slot {
+ physical_block_number_t pbn;
+ slot_number_t slot;
+};
+
+/*
+ * Four bits of each five-byte block map entry contain a mapping state value used to distinguish
+ * unmapped or discarded logical blocks (which are treated as mapped to the zero block) from entries
+ * that have been mapped to a physical block, including the zero block.
+ *
+ * FIXME: these should maybe be defines.
+ */
+enum block_mapping_state {
+ VDO_MAPPING_STATE_UNMAPPED = 0, /* Must be zero to be the default value */
+ VDO_MAPPING_STATE_UNCOMPRESSED = 1, /* A normal (uncompressed) block */
+ VDO_MAPPING_STATE_COMPRESSED_BASE = 2, /* Compressed in slot 0 */
+ VDO_MAPPING_STATE_COMPRESSED_MAX = 15, /* Compressed in slot 13 */
+};
+
+enum {
+ VDO_MAX_COMPRESSION_SLOTS =
+ (VDO_MAPPING_STATE_COMPRESSED_MAX - VDO_MAPPING_STATE_COMPRESSED_BASE + 1),
+};
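+
+/*
+ * A hedged sketch of the slot/state arithmetic implied above (helper names
+ * are illustrative, not part of this header):
+ */
+static inline enum block_mapping_state sketch_state_for_slot(unsigned int slot)
+{
+	/* Compression slot 0 maps to state 2, slot 13 to state 15. */
+	return slot + VDO_MAPPING_STATE_COMPRESSED_BASE;
+}
+
+static inline bool sketch_is_state_compressed(enum block_mapping_state state)
+{
+	return state > VDO_MAPPING_STATE_UNCOMPRESSED;
+}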
+
+
+struct data_location {
+ physical_block_number_t pbn;
+ enum block_mapping_state state;
+};
+
+/* The configuration of a single slab derived from the configured block size and slab size. */
+struct slab_config {
+ /* total number of blocks in the slab */
+ block_count_t slab_blocks;
+ /* number of blocks available for data */
+ block_count_t data_blocks;
+ /* number of blocks for reference counts */
+ block_count_t reference_count_blocks;
+ /* number of blocks for the slab journal */
+ block_count_t slab_journal_blocks;
+ /*
+ * Number of blocks after which the slab journal starts pushing out a reference_block for
+ * each new entry it receives.
+ */
+ block_count_t slab_journal_flushing_threshold;
+ /*
+ * Number of blocks after which the slab journal pushes out all reference_blocks and makes
+ * all vios wait.
+ */
+ block_count_t slab_journal_blocking_threshold;
+ /* Number of blocks after which the slab must be scrubbed before coming online. */
+ block_count_t slab_journal_scrubbing_threshold;
+} __packed;
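+
+/*
+ * Editorial sketch of the relationship between these counts, under the
+ * assumption (implied by the field comments) that the data, reference count,
+ * and slab journal blocks together account for the whole slab; the helper
+ * name is hypothetical.
+ */
+static inline bool example_slab_config_is_consistent(const struct slab_config *config)
+{
+	return ((config->data_blocks + config->reference_count_blocks +
+		 config->slab_journal_blocks) == config->slab_blocks);
+}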
+
+/*
+ * This structure is memcmp'd for equality. Keep it packed and don't add any fields that are not
+ * properly set in both extant and parsed configs.
+ */
+struct thread_count_config {
+ unsigned int bio_ack_threads;
+ unsigned int bio_threads;
+ unsigned int bio_rotation_interval;
+ unsigned int cpu_threads;
+ unsigned int logical_zones;
+ unsigned int physical_zones;
+ unsigned int hash_zones;
+} __packed;
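+
+/*
+ * Editorial sketch of the bytewise comparison the comment above refers to
+ * (hypothetical helper; requires linux/string.h for memcmp()):
+ */
+static inline bool example_thread_counts_match(const struct thread_count_config *a,
+					       const struct thread_count_config *b)
+{
+	return memcmp(a, b, sizeof(*a)) == 0;
+}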
+
+struct device_config {
+ struct dm_target *owning_target;
+ struct dm_dev *owned_device;
+ struct vdo *vdo;
+ /* All configs referencing a layer are kept on a list in the layer */
+ struct list_head config_list;
+ char *original_string;
+ unsigned int version;
+ char *parent_device_name;
+ block_count_t physical_blocks;
+ /*
+ * This is the number of logical blocks from VDO's internal point of view. It is the number
+ * of 4K blocks regardless of the value of the logical_block_size parameter below.
+ */
+ block_count_t logical_blocks;
+ unsigned int logical_block_size;
+ unsigned int cache_size;
+ unsigned int block_map_maximum_age;
+ bool deduplication;
+ bool compression;
+ struct thread_count_config thread_counts;
+ block_count_t max_discard_blocks;
+};
+
+enum vdo_completion_type {
+ /* Keep VDO_UNSET_COMPLETION_TYPE at the top. */
+ VDO_UNSET_COMPLETION_TYPE,
+ VDO_ACTION_COMPLETION,
+ VDO_ADMIN_COMPLETION,
+ VDO_BLOCK_ALLOCATOR_COMPLETION,
+ VDO_DATA_VIO_POOL_COMPLETION,
+ VDO_DECREMENT_COMPLETION,
+ VDO_FLUSH_COMPLETION,
+ VDO_FLUSH_NOTIFICATION_COMPLETION,
+ VDO_GENERATION_FLUSHED_COMPLETION,
+ VDO_HASH_ZONE_COMPLETION,
+ VDO_HASH_ZONES_COMPLETION,
+ VDO_LOCK_COUNTER_COMPLETION,
+ VDO_PAGE_COMPLETION,
+ VDO_READ_ONLY_MODE_COMPLETION,
+ VDO_REPAIR_COMPLETION,
+ VDO_SYNC_COMPLETION,
+ VIO_COMPLETION,
+} __packed;
+
+struct vdo_completion;
+
+/**
+ * typedef vdo_action_fn - An asynchronous VDO operation.
+ * @completion: The completion of the operation.
+ */
+typedef void (*vdo_action_fn)(struct vdo_completion *completion);
+
+enum vdo_completion_priority {
+ BIO_ACK_Q_ACK_PRIORITY = 0,
+ BIO_ACK_Q_MAX_PRIORITY = 0,
+ BIO_Q_COMPRESSED_DATA_PRIORITY = 0,
+ BIO_Q_DATA_PRIORITY = 0,
+ BIO_Q_FLUSH_PRIORITY = 2,
+ BIO_Q_HIGH_PRIORITY = 2,
+ BIO_Q_METADATA_PRIORITY = 1,
+ BIO_Q_VERIFY_PRIORITY = 1,
+ BIO_Q_MAX_PRIORITY = 2,
+ CPU_Q_COMPLETE_VIO_PRIORITY = 0,
+ CPU_Q_COMPLETE_READ_PRIORITY = 0,
+ CPU_Q_COMPRESS_BLOCK_PRIORITY = 0,
+ CPU_Q_EVENT_REPORTER_PRIORITY = 0,
+ CPU_Q_HASH_BLOCK_PRIORITY = 0,
+ CPU_Q_MAX_PRIORITY = 0,
+ UDS_Q_PRIORITY = 0,
+ UDS_Q_MAX_PRIORITY = 0,
+ VDO_DEFAULT_Q_COMPLETION_PRIORITY = 1,
+ VDO_DEFAULT_Q_FLUSH_PRIORITY = 2,
+ VDO_DEFAULT_Q_MAP_BIO_PRIORITY = 0,
+ VDO_DEFAULT_Q_SYNC_PRIORITY = 2,
+ VDO_DEFAULT_Q_VIO_CALLBACK_PRIORITY = 1,
+ VDO_DEFAULT_Q_MAX_PRIORITY = 2,
+ /* The maximum allowable priority */
+ VDO_WORK_Q_MAX_PRIORITY = 2,
+ /* A value which must be out of range for a valid priority */
+ VDO_WORK_Q_DEFAULT_PRIORITY = VDO_WORK_Q_MAX_PRIORITY + 1,
+};
+
+struct vdo_completion {
+ /* The type of completion this is */
+ enum vdo_completion_type type;
+
+ /*
+	 * Set to true once the processing of the operation is complete. This flag should not
+ * be used by waiters external to the VDO base as it is used to gate calling the callback.
+ */
+ bool complete;
+
+ /*
+ * If true, queue this completion on the next callback invocation, even if it is already
+ * running on the correct thread.
+ */
+ bool requeue;
+
+ /* The ID of the thread which should run the next callback */
+ thread_id_t callback_thread_id;
+
+ /* The result of the operation */
+ int result;
+
+ /* The VDO on which this completion operates */
+ struct vdo *vdo;
+
+ /* The callback which will be called once the operation is complete */
+ vdo_action_fn callback;
+
+ /* Callback which, if set, will be called if an error result is set */
+ vdo_action_fn error_handler;
+
+ /* The parent object, if any, that spawned this completion */
+ void *parent;
+
+ /* Entry link for lock-free work queue */
+ struct funnel_queue_entry work_queue_entry_link;
+ enum vdo_completion_priority priority;
+ struct vdo_work_queue *my_queue;
+};
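+
+/*
+ * Editorial sketch of a completion's typical lifecycle, pieced together from
+ * the functions used later in this patch (see perform_synchronous_action()
+ * in vdo.c):
+ *
+ *	vdo_initialize_completion(&completion, vdo, VDO_SYNC_COMPLETION);
+ *	completion.parent = parent;
+ *	vdo_launch_completion_callback(&completion, action, thread_id);
+ *
+ * The action runs on the requested thread; once the operation finishes, the
+ * callback (or, on an error result, the error_handler) is invoked.
+ */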
+
+struct block_allocator;
+struct data_vio;
+struct vdo;
+struct vdo_config;
+
+/* vio types for statistics and instrumentation. */
+enum vio_type {
+ VIO_TYPE_UNINITIALIZED = 0,
+ VIO_TYPE_DATA,
+ VIO_TYPE_BLOCK_ALLOCATOR,
+ VIO_TYPE_BLOCK_MAP,
+ VIO_TYPE_BLOCK_MAP_INTERIOR,
+ VIO_TYPE_GEOMETRY,
+ VIO_TYPE_PARTITION_COPY,
+ VIO_TYPE_RECOVERY_JOURNAL,
+ VIO_TYPE_SLAB_JOURNAL,
+ VIO_TYPE_SLAB_SUMMARY,
+ VIO_TYPE_SUPER_BLOCK,
+} __packed;
+
+/* Priority levels for asynchronous I/O operations performed on a vio. */
+enum vio_priority {
+ VIO_PRIORITY_LOW = 0,
+ VIO_PRIORITY_DATA = VIO_PRIORITY_LOW,
+ VIO_PRIORITY_COMPRESSED_DATA = VIO_PRIORITY_DATA,
+ VIO_PRIORITY_METADATA,
+ VIO_PRIORITY_HIGH,
+} __packed;
+
+/*
+ * A wrapper for a bio. All I/O to the storage below a vdo is conducted via vios.
+ */
+struct vio {
+ /* The completion for this vio */
+ struct vdo_completion completion;
+
+ /* The bio zone in which I/O should be processed */
+ zone_count_t bio_zone;
+
+ /* The queueing priority of the vio operation */
+ enum vio_priority priority;
+
+ /* The vio type is used for statistics and instrumentation. */
+ enum vio_type type;
+
+ /* The size of this vio in blocks */
+ unsigned int block_count;
+
+ /* The data being read or written. */
+ char *data;
+
+ /* The VDO-owned bio to use for all IO for this vio */
+ struct bio *bio;
+
+ /*
+ * A list of enqueued bios with consecutive block numbers, stored by vdo_submit_bio() under
+ * the first-enqueued vio. The other vios are found via their bio entries in this list, and
+ * are not added to the work queue as separate completions.
+ */
+ struct bio_list bios_merged;
+};
+
+#endif /* VDO_TYPES_H */
diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c
new file mode 100644
index 000000000000..fff847767755
--- /dev/null
+++ b/drivers/md/dm-vdo/vdo.c
@@ -0,0 +1,1730 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+/*
+ * This file contains the main entry points for normal operations on a vdo as well as functions for
+ * constructing and destroying vdo instances (in memory).
+ */
+
+/**
+ * DOC:
+ *
+ * A read_only_notifier has a single completion which is used to perform read-only notifications,
+ * however, vdo_enter_read_only_mode() may be called from any thread. A pair of fields, protected
+ * by a spinlock, are used to control the read-only mode entry process. The first field holds the
+ * read-only error. The second is the state field, which may hold any of the four special values
+ * enumerated here.
+ *
+ * When vdo_enter_read_only_mode() is called from some vdo thread, if the read_only_error field
+ * already contains an error (i.e. its value is not VDO_SUCCESS), then some other error has already
+ * initiated the read-only process, and nothing more is done. Otherwise, the new error is stored in
+ * the read_only_error field, and the state field is consulted. If the state is MAY_NOTIFY, it is
+ * set to NOTIFYING, and the notification process begins. If the state is MAY_NOT_NOTIFY, then
+ * notifications are currently disallowed, generally due to the vdo being suspended. In this case,
+ * nothing more will be done until the vdo is resumed, at which point the notification will be
+ * performed. In any other case, the vdo is already read-only, and there is nothing more to do.
+ */
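+
+/*
+ * Editorial summary of the state transitions described above:
+ *
+ *	MAY_NOT_NOTIFY -> MAY_NOTIFY	via vdo_allow_read_only_mode_entry()
+ *	MAY_NOTIFY -> NOTIFYING		via vdo_enter_read_only_mode()
+ *	MAY_NOTIFY -> MAY_NOT_NOTIFY	via vdo_wait_until_not_entering_read_only_mode()
+ *	NOTIFYING -> NOTIFIED		once every thread and listener has been notified
+ */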
+
+#include "vdo.h"
+
+#include <linux/completion.h>
+#include <linux/device-mapper.h>
+#include <linux/kernel.h>
+#include <linux/lz4.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+#include "string-utils.h"
+
+#include "block-map.h"
+#include "completion.h"
+#include "data-vio.h"
+#include "dedupe.h"
+#include "encodings.h"
+#include "funnel-workqueue.h"
+#include "io-submitter.h"
+#include "logical-zone.h"
+#include "packer.h"
+#include "physical-zone.h"
+#include "recovery-journal.h"
+#include "slab-depot.h"
+#include "statistics.h"
+#include "status-codes.h"
+#include "vio.h"
+
+#define PARANOID_THREAD_CONSISTENCY_CHECKS 0
+
+struct sync_completion {
+ struct vdo_completion vdo_completion;
+ struct completion completion;
+};
+
+/* A linked list is adequate for the small number of entries we expect. */
+struct device_registry {
+ struct list_head links;
+ /* TODO: Convert to rcu per kernel recommendation. */
+ rwlock_t lock;
+};
+
+static struct device_registry registry;
+
+/**
+ * vdo_initialize_device_registry_once() - Initialize the necessary structures for the device
+ * registry.
+ */
+void vdo_initialize_device_registry_once(void)
+{
+ INIT_LIST_HEAD(&registry.links);
+ rwlock_init(&registry.lock);
+}
+
+/** vdo_is_equal() - Implements vdo_filter_fn. */
+static bool vdo_is_equal(struct vdo *vdo, const void *context)
+{
+ return (vdo == context);
+}
+
+/**
+ * filter_vdos_locked() - Find a vdo in the registry if it exists there.
+ * @filter: The filter function to apply to devices.
+ * @context: A bit of context to provide the filter.
+ *
+ * Context: Must be called holding the lock.
+ *
+ * Return: the vdo object found, if any.
+ */
+static struct vdo * __must_check filter_vdos_locked(vdo_filter_fn filter,
+ const void *context)
+{
+ struct vdo *vdo;
+
+ list_for_each_entry(vdo, &registry.links, registration) {
+ if (filter(vdo, context))
+ return vdo;
+ }
+
+ return NULL;
+}
+
+/**
+ * vdo_find_matching() - Find and return the first (if any) vdo matching a given filter function.
+ * @filter: The filter function to apply to vdos.
+ * @context: A bit of context to provide the filter.
+ *
+ * Return: The first matching vdo, or NULL if no vdo matches.
+ */
+struct vdo *vdo_find_matching(vdo_filter_fn filter, const void *context)
+{
+ struct vdo *vdo;
+
+ read_lock(&registry.lock);
+ vdo = filter_vdos_locked(filter, context);
+ read_unlock(&registry.lock);
+
+ return vdo;
+}
+
+static void start_vdo_request_queue(void *ptr)
+{
+ struct vdo_thread *thread = vdo_get_work_queue_owner(vdo_get_current_work_queue());
+
+ vdo_register_allocating_thread(&thread->allocating_thread,
+ &thread->vdo->allocations_allowed);
+}
+
+static void finish_vdo_request_queue(void *ptr)
+{
+ vdo_unregister_allocating_thread();
+}
+
+#ifdef MODULE
+#define MODULE_NAME THIS_MODULE->name
+#else
+#define MODULE_NAME "dm-vdo"
+#endif /* MODULE */
+
+static const struct vdo_work_queue_type default_queue_type = {
+ .start = start_vdo_request_queue,
+ .finish = finish_vdo_request_queue,
+ .max_priority = VDO_DEFAULT_Q_MAX_PRIORITY,
+ .default_priority = VDO_DEFAULT_Q_COMPLETION_PRIORITY,
+};
+
+static const struct vdo_work_queue_type bio_ack_q_type = {
+ .start = NULL,
+ .finish = NULL,
+ .max_priority = BIO_ACK_Q_MAX_PRIORITY,
+ .default_priority = BIO_ACK_Q_ACK_PRIORITY,
+};
+
+static const struct vdo_work_queue_type cpu_q_type = {
+ .start = NULL,
+ .finish = NULL,
+ .max_priority = CPU_Q_MAX_PRIORITY,
+ .default_priority = CPU_Q_MAX_PRIORITY,
+};
+
+static void uninitialize_thread_config(struct thread_config *config)
+{
+ vdo_free(vdo_forget(config->logical_threads));
+ vdo_free(vdo_forget(config->physical_threads));
+ vdo_free(vdo_forget(config->hash_zone_threads));
+ vdo_free(vdo_forget(config->bio_threads));
+ memset(config, 0, sizeof(struct thread_config));
+}
+
+static void assign_thread_ids(struct thread_config *config,
+ thread_id_t thread_ids[], zone_count_t count)
+{
+ zone_count_t zone;
+
+ for (zone = 0; zone < count; zone++)
+ thread_ids[zone] = config->thread_count++;
+}
+
+/**
+ * initialize_thread_config() - Initialize the thread mapping.
+ * @counts: The thread counts from the device configuration.
+ * @config: The thread_config to initialize.
+ *
+ * If the logical, physical, and hash zone counts are all 0, a single thread will be shared by all
+ * three plus the packer and recovery journal. Otherwise, there must be at least one of each type,
+ * and each will have its own thread, as will the packer and recovery journal.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int __must_check initialize_thread_config(struct thread_count_config counts,
+ struct thread_config *config)
+{
+ int result;
+ bool single = ((counts.logical_zones + counts.physical_zones + counts.hash_zones) == 0);
+
+ config->bio_thread_count = counts.bio_threads;
+ if (single) {
+ config->logical_zone_count = 1;
+ config->physical_zone_count = 1;
+ config->hash_zone_count = 1;
+ } else {
+ config->logical_zone_count = counts.logical_zones;
+ config->physical_zone_count = counts.physical_zones;
+ config->hash_zone_count = counts.hash_zones;
+ }
+
+ result = vdo_allocate(config->logical_zone_count, thread_id_t,
+ "logical thread array", &config->logical_threads);
+ if (result != VDO_SUCCESS) {
+ uninitialize_thread_config(config);
+ return result;
+ }
+
+ result = vdo_allocate(config->physical_zone_count, thread_id_t,
+ "physical thread array", &config->physical_threads);
+ if (result != VDO_SUCCESS) {
+ uninitialize_thread_config(config);
+ return result;
+ }
+
+ result = vdo_allocate(config->hash_zone_count, thread_id_t,
+ "hash thread array", &config->hash_zone_threads);
+ if (result != VDO_SUCCESS) {
+ uninitialize_thread_config(config);
+ return result;
+ }
+
+ result = vdo_allocate(config->bio_thread_count, thread_id_t,
+ "bio thread array", &config->bio_threads);
+ if (result != VDO_SUCCESS) {
+ uninitialize_thread_config(config);
+ return result;
+ }
+
+ if (single) {
+ config->logical_threads[0] = config->thread_count;
+ config->physical_threads[0] = config->thread_count;
+ config->hash_zone_threads[0] = config->thread_count++;
+ } else {
+ config->admin_thread = config->thread_count;
+ config->journal_thread = config->thread_count++;
+ config->packer_thread = config->thread_count++;
+ assign_thread_ids(config, config->logical_threads, counts.logical_zones);
+ assign_thread_ids(config, config->physical_threads, counts.physical_zones);
+ assign_thread_ids(config, config->hash_zone_threads, counts.hash_zones);
+ }
+
+ config->dedupe_thread = config->thread_count++;
+ config->bio_ack_thread =
+ ((counts.bio_ack_threads > 0) ? config->thread_count++ : VDO_INVALID_THREAD_ID);
+ config->cpu_thread = config->thread_count++;
+ assign_thread_ids(config, config->bio_threads, counts.bio_threads);
+ return VDO_SUCCESS;
+}
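+
+/*
+ * Worked example (editorial, following the assignment order above): with 2
+ * logical zones, 1 physical zone, 1 hash zone, 1 bio ack thread, and 2 bio
+ * threads, the ids come out as admin = journal = 0, packer = 1,
+ * logical = 2-3, physical = 4, hash = 5, dedupe = 6, bio ack = 7, cpu = 8,
+ * and bio = 9-10, for a total thread_count of 11.
+ */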
+
+/**
+ * read_geometry_block() - Synchronously read the geometry block from a vdo's underlying block
+ * device.
+ * @vdo: The vdo whose geometry is to be read.
+ *
+ * Return: VDO_SUCCESS or an error code.
+ */
+static int __must_check read_geometry_block(struct vdo *vdo)
+{
+ struct vio *vio;
+ char *block;
+ int result;
+
+ result = vdo_allocate(VDO_BLOCK_SIZE, u8, __func__, &block);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = create_metadata_vio(vdo, VIO_TYPE_GEOMETRY, VIO_PRIORITY_HIGH, NULL,
+ block, &vio);
+ if (result != VDO_SUCCESS) {
+ vdo_free(block);
+ return result;
+ }
+
+ /*
+ * This is only safe because, having not already loaded the geometry, the vdo's geometry's
+ * bio_offset field is 0, so the fact that vio_reset_bio() will subtract that offset from
+ * the supplied pbn is not a problem.
+ */
+ result = vio_reset_bio(vio, block, NULL, REQ_OP_READ,
+ VDO_GEOMETRY_BLOCK_LOCATION);
+ if (result != VDO_SUCCESS) {
+ free_vio(vdo_forget(vio));
+ vdo_free(block);
+ return result;
+ }
+
+ bio_set_dev(vio->bio, vdo_get_backing_device(vdo));
+ submit_bio_wait(vio->bio);
+ result = blk_status_to_errno(vio->bio->bi_status);
+ free_vio(vdo_forget(vio));
+ if (result != 0) {
+ vdo_log_error_strerror(result, "synchronous read failed");
+ vdo_free(block);
+ return -EIO;
+ }
+
+ result = vdo_parse_geometry_block((u8 *) block, &vdo->geometry);
+ vdo_free(block);
+ return result;
+}
+
+static bool get_zone_thread_name(const thread_id_t thread_ids[], zone_count_t count,
+ thread_id_t id, const char *prefix,
+ char *buffer, size_t buffer_length)
+{
+ if (id >= thread_ids[0]) {
+ thread_id_t index = id - thread_ids[0];
+
+ if (index < count) {
+ snprintf(buffer, buffer_length, "%s%d", prefix, index);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * get_thread_name() - Format the name of the worker thread that services a given work queue.
+ * @thread_config: The thread configuration.
+ * @thread_id: The thread id.
+ * @buffer: Where to put the formatted name.
+ * @buffer_length: Size of the output buffer.
+ *
+ * The physical layer may add a prefix identifying the product; the output from this function
+ * should just identify the thread.
+ */
+static void get_thread_name(const struct thread_config *thread_config,
+ thread_id_t thread_id, char *buffer, size_t buffer_length)
+{
+ if (thread_id == thread_config->journal_thread) {
+ if (thread_config->packer_thread == thread_id) {
+ /*
+ * This is the "single thread" config where one thread is used for the
+ * journal, packer, logical, physical, and hash zones. In that case, it is
+ * known as the "request queue."
+ */
+ snprintf(buffer, buffer_length, "reqQ");
+ return;
+ }
+
+ snprintf(buffer, buffer_length, "journalQ");
+ return;
+ } else if (thread_id == thread_config->admin_thread) {
+ /* Theoretically this could be different from the journal thread. */
+ snprintf(buffer, buffer_length, "adminQ");
+ return;
+ } else if (thread_id == thread_config->packer_thread) {
+ snprintf(buffer, buffer_length, "packerQ");
+ return;
+ } else if (thread_id == thread_config->dedupe_thread) {
+ snprintf(buffer, buffer_length, "dedupeQ");
+ return;
+ } else if (thread_id == thread_config->bio_ack_thread) {
+ snprintf(buffer, buffer_length, "ackQ");
+ return;
+ } else if (thread_id == thread_config->cpu_thread) {
+ snprintf(buffer, buffer_length, "cpuQ");
+ return;
+ }
+
+ if (get_zone_thread_name(thread_config->logical_threads,
+ thread_config->logical_zone_count,
+ thread_id, "logQ", buffer, buffer_length))
+ return;
+
+ if (get_zone_thread_name(thread_config->physical_threads,
+ thread_config->physical_zone_count,
+ thread_id, "physQ", buffer, buffer_length))
+ return;
+
+ if (get_zone_thread_name(thread_config->hash_zone_threads,
+ thread_config->hash_zone_count,
+ thread_id, "hashQ", buffer, buffer_length))
+ return;
+
+ if (get_zone_thread_name(thread_config->bio_threads,
+ thread_config->bio_thread_count,
+ thread_id, "bioQ", buffer, buffer_length))
+ return;
+
+ /* Some sort of misconfiguration? */
+ snprintf(buffer, buffer_length, "reqQ%d", thread_id);
+}
+
+/**
+ * vdo_make_thread() - Construct a single vdo work_queue and its associated thread (or threads for
+ * round-robin queues).
+ * @vdo: The vdo which owns the thread.
+ * @thread_id: The id of the thread to create (as determined by the thread_config).
+ * @type: The description of the work queue for this thread.
+ * @queue_count: The number of actual threads/queues contained in the "thread".
+ * @contexts: An array of queue_count contexts, one for each individual queue; may be NULL.
+ *
+ * Each "thread" constructed by this method is represented by a unique thread id in the thread
+ * config, and completions can be enqueued to the queue and run on the threads comprising this
+ * entity.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_make_thread(struct vdo *vdo, thread_id_t thread_id,
+ const struct vdo_work_queue_type *type,
+ unsigned int queue_count, void *contexts[])
+{
+ struct vdo_thread *thread = &vdo->threads[thread_id];
+ char queue_name[MAX_VDO_WORK_QUEUE_NAME_LEN];
+
+ if (type == NULL)
+ type = &default_queue_type;
+
+ if (thread->queue != NULL) {
+ return VDO_ASSERT(vdo_work_queue_type_is(thread->queue, type),
+ "already constructed vdo thread %u is of the correct type",
+ thread_id);
+ }
+
+ thread->vdo = vdo;
+ thread->thread_id = thread_id;
+ get_thread_name(&vdo->thread_config, thread_id, queue_name, sizeof(queue_name));
+ return vdo_make_work_queue(vdo->thread_name_prefix, queue_name, thread,
+ type, queue_count, contexts, &thread->queue);
+}
+
+/**
+ * register_vdo() - Register a VDO; it must not already be registered.
+ * @vdo: The vdo to register.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+static int register_vdo(struct vdo *vdo)
+{
+ int result;
+
+ write_lock(&registry.lock);
+ result = VDO_ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL,
+ "VDO not already registered");
+ if (result == VDO_SUCCESS) {
+ INIT_LIST_HEAD(&vdo->registration);
+ list_add_tail(&vdo->registration, &registry.links);
+ }
+ write_unlock(&registry.lock);
+
+ return result;
+}
+
+/**
+ * initialize_vdo() - Do the portion of initializing a vdo which will clean up after itself on
+ * error.
+ * @vdo: The vdo being initialized
+ * @config: The configuration of the vdo
+ * @instance: The instance number of the vdo
+ * @reason: The buffer to hold the failure reason on error
+ */
+static int initialize_vdo(struct vdo *vdo, struct device_config *config,
+ unsigned int instance, char **reason)
+{
+ int result;
+ zone_count_t i;
+
+ vdo->device_config = config;
+ vdo->starting_sector_offset = config->owning_target->begin;
+ vdo->instance = instance;
+ vdo->allocations_allowed = true;
+ vdo_set_admin_state_code(&vdo->admin.state, VDO_ADMIN_STATE_NEW);
+ INIT_LIST_HEAD(&vdo->device_config_list);
+ vdo_initialize_completion(&vdo->admin.completion, vdo, VDO_ADMIN_COMPLETION);
+ init_completion(&vdo->admin.callback_sync);
+ mutex_init(&vdo->stats_mutex);
+ result = read_geometry_block(vdo);
+ if (result != VDO_SUCCESS) {
+ *reason = "Could not load geometry block";
+ return result;
+ }
+
+ result = initialize_thread_config(config->thread_counts, &vdo->thread_config);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot create thread configuration";
+ return result;
+ }
+
+ vdo_log_info("zones: %d logical, %d physical, %d hash; total threads: %d",
+ config->thread_counts.logical_zones,
+ config->thread_counts.physical_zones,
+ config->thread_counts.hash_zones, vdo->thread_config.thread_count);
+
+ /* Compression context storage */
+ result = vdo_allocate(config->thread_counts.cpu_threads, char *, "LZ4 context",
+ &vdo->compression_context);
+ if (result != VDO_SUCCESS) {
+ *reason = "cannot allocate LZ4 context";
+ return result;
+ }
+
+ for (i = 0; i < config->thread_counts.cpu_threads; i++) {
+ result = vdo_allocate(LZ4_MEM_COMPRESS, char, "LZ4 context",
+ &vdo->compression_context[i]);
+ if (result != VDO_SUCCESS) {
+ *reason = "cannot allocate LZ4 context";
+ return result;
+ }
+ }
+
+ result = register_vdo(vdo);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot add VDO to device registry";
+ return result;
+ }
+
+ vdo_set_admin_state_code(&vdo->admin.state, VDO_ADMIN_STATE_INITIALIZED);
+ return result;
+}
+
+/**
+ * vdo_make() - Allocate and initialize a vdo.
+ * @instance: Device instantiation counter.
+ * @config: The device configuration.
+ * @reason: The reason for any failure during this call.
+ * @vdo_ptr: A pointer to hold the created vdo.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_make(unsigned int instance, struct device_config *config, char **reason,
+ struct vdo **vdo_ptr)
+{
+ int result;
+ struct vdo *vdo;
+
+ /* Initialize with a generic failure reason to prevent returning garbage. */
+ *reason = "Unspecified error";
+
+ result = vdo_allocate(1, struct vdo, __func__, &vdo);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot allocate VDO";
+ return result;
+ }
+
+ result = initialize_vdo(vdo, config, instance, reason);
+ if (result != VDO_SUCCESS) {
+ vdo_destroy(vdo);
+ return result;
+ }
+
+ /* From here on, the caller will clean up if there is an error. */
+ *vdo_ptr = vdo;
+
+ snprintf(vdo->thread_name_prefix, sizeof(vdo->thread_name_prefix),
+ "%s%u", MODULE_NAME, instance);
+ BUG_ON(vdo->thread_name_prefix[0] == '\0');
+ result = vdo_allocate(vdo->thread_config.thread_count,
+ struct vdo_thread, __func__, &vdo->threads);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot allocate thread structures";
+ return result;
+ }
+
+ result = vdo_make_thread(vdo, vdo->thread_config.admin_thread,
+ &default_queue_type, 1, NULL);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot make admin thread";
+ return result;
+ }
+
+ result = vdo_make_flusher(vdo);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot make flusher zones";
+ return result;
+ }
+
+ result = vdo_make_packer(vdo, DEFAULT_PACKER_BINS, &vdo->packer);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot make packer zones";
+ return result;
+ }
+
+ BUG_ON(vdo->device_config->logical_block_size <= 0);
+ BUG_ON(vdo->device_config->owned_device == NULL);
+ result = make_data_vio_pool(vdo, MAXIMUM_VDO_USER_VIOS,
+ MAXIMUM_VDO_USER_VIOS * 3 / 4,
+ &vdo->data_vio_pool);
+ if (result != VDO_SUCCESS) {
+ *reason = "Cannot allocate data_vio pool";
+ return result;
+ }
+
+ result = vdo_make_io_submitter(config->thread_counts.bio_threads,
+ config->thread_counts.bio_rotation_interval,
+ get_data_vio_pool_request_limit(vdo->data_vio_pool),
+ vdo, &vdo->io_submitter);
+ if (result != VDO_SUCCESS) {
+ *reason = "bio submission initialization failed";
+ return result;
+ }
+
+ if (vdo_uses_bio_ack_queue(vdo)) {
+ result = vdo_make_thread(vdo, vdo->thread_config.bio_ack_thread,
+ &bio_ack_q_type,
+ config->thread_counts.bio_ack_threads, NULL);
+ if (result != VDO_SUCCESS) {
+ *reason = "bio ack queue initialization failed";
+ return result;
+ }
+ }
+
+ result = vdo_make_thread(vdo, vdo->thread_config.cpu_thread, &cpu_q_type,
+ config->thread_counts.cpu_threads,
+ (void **) vdo->compression_context);
+ if (result != VDO_SUCCESS) {
+ *reason = "CPU queue initialization failed";
+ return result;
+ }
+
+ return VDO_SUCCESS;
+}
+
+static void finish_vdo(struct vdo *vdo)
+{
+ int i;
+
+ if (vdo->threads == NULL)
+ return;
+
+ vdo_cleanup_io_submitter(vdo->io_submitter);
+ vdo_finish_dedupe_index(vdo->hash_zones);
+
+ for (i = 0; i < vdo->thread_config.thread_count; i++)
+ vdo_finish_work_queue(vdo->threads[i].queue);
+}
+
+/**
+ * free_listeners() - Free the list of read-only listeners associated with a thread.
+ * @thread: The thread holding the list to free.
+ */
+static void free_listeners(struct vdo_thread *thread)
+{
+ struct read_only_listener *listener, *next;
+
+ for (listener = vdo_forget(thread->listeners); listener != NULL; listener = next) {
+ next = vdo_forget(listener->next);
+ vdo_free(listener);
+ }
+}
+
+static void uninitialize_super_block(struct vdo_super_block *super_block)
+{
+ free_vio_components(&super_block->vio);
+ vdo_free(super_block->buffer);
+}
+
+/**
+ * unregister_vdo() - Remove a vdo from the device registry.
+ * @vdo: The vdo to remove.
+ */
+static void unregister_vdo(struct vdo *vdo)
+{
+ write_lock(&registry.lock);
+ if (filter_vdos_locked(vdo_is_equal, vdo) == vdo)
+ list_del_init(&vdo->registration);
+
+ write_unlock(&registry.lock);
+}
+
+/**
+ * vdo_destroy() - Destroy a vdo instance.
+ * @vdo: The vdo to destroy (may be NULL).
+ */
+void vdo_destroy(struct vdo *vdo)
+{
+ unsigned int i;
+
+ if (vdo == NULL)
+ return;
+
+ /* A running VDO should never be destroyed without suspending first. */
+ BUG_ON(vdo_get_admin_state(vdo)->normal);
+
+ vdo->allocations_allowed = true;
+
+ finish_vdo(vdo);
+ unregister_vdo(vdo);
+ free_data_vio_pool(vdo->data_vio_pool);
+ vdo_free_io_submitter(vdo_forget(vdo->io_submitter));
+ vdo_free_flusher(vdo_forget(vdo->flusher));
+ vdo_free_packer(vdo_forget(vdo->packer));
+ vdo_free_recovery_journal(vdo_forget(vdo->recovery_journal));
+ vdo_free_slab_depot(vdo_forget(vdo->depot));
+ vdo_uninitialize_layout(&vdo->layout);
+ vdo_uninitialize_layout(&vdo->next_layout);
+ if (vdo->partition_copier)
+ dm_kcopyd_client_destroy(vdo_forget(vdo->partition_copier));
+ uninitialize_super_block(&vdo->super_block);
+ vdo_free_block_map(vdo_forget(vdo->block_map));
+ vdo_free_hash_zones(vdo_forget(vdo->hash_zones));
+ vdo_free_physical_zones(vdo_forget(vdo->physical_zones));
+ vdo_free_logical_zones(vdo_forget(vdo->logical_zones));
+
+ if (vdo->threads != NULL) {
+ for (i = 0; i < vdo->thread_config.thread_count; i++) {
+ free_listeners(&vdo->threads[i]);
+ vdo_free_work_queue(vdo_forget(vdo->threads[i].queue));
+ }
+ vdo_free(vdo_forget(vdo->threads));
+ }
+
+ uninitialize_thread_config(&vdo->thread_config);
+
+ if (vdo->compression_context != NULL) {
+ for (i = 0; i < vdo->device_config->thread_counts.cpu_threads; i++)
+ vdo_free(vdo_forget(vdo->compression_context[i]));
+
+ vdo_free(vdo_forget(vdo->compression_context));
+ }
+ vdo_free(vdo);
+}
+
+static int initialize_super_block(struct vdo *vdo, struct vdo_super_block *super_block)
+{
+ int result;
+
+ result = vdo_allocate(VDO_BLOCK_SIZE, char, "encoded super block",
+ (char **) &vdo->super_block.buffer);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ return allocate_vio_components(vdo, VIO_TYPE_SUPER_BLOCK,
+ VIO_PRIORITY_METADATA, NULL, 1,
+ (char *) super_block->buffer,
+ &vdo->super_block.vio);
+}
+
+/**
+ * finish_reading_super_block() - Continue after loading the super block.
+ * @completion: The super block vio.
+ *
+ * This callback is registered in vdo_load_super_block().
+ */
+static void finish_reading_super_block(struct vdo_completion *completion)
+{
+ struct vdo_super_block *super_block =
+ container_of(as_vio(completion), struct vdo_super_block, vio);
+
+ vdo_continue_completion(vdo_forget(completion->parent),
+ vdo_decode_super_block(super_block->buffer));
+}
+
+/**
+ * handle_super_block_read_error() - Handle an error reading the super block.
+ * @completion: The super block vio.
+ *
+ * This error handler is registered in vdo_load_super_block().
+ */
+static void handle_super_block_read_error(struct vdo_completion *completion)
+{
+ vio_record_metadata_io_error(as_vio(completion));
+ finish_reading_super_block(completion);
+}
+
+static void read_super_block_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct vdo_completion *parent = vio->completion.parent;
+
+ continue_vio_after_io(vio, finish_reading_super_block,
+ parent->callback_thread_id);
+}
+
+/**
+ * vdo_load_super_block() - Allocate a super block and read its contents from storage.
+ * @vdo: The vdo containing the super block on disk.
+ * @parent: The completion to notify after loading the super block.
+ */
+void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent)
+{
+ int result;
+
+ result = initialize_super_block(vdo, &vdo->super_block);
+ if (result != VDO_SUCCESS) {
+ vdo_continue_completion(parent, result);
+ return;
+ }
+
+ vdo->super_block.vio.completion.parent = parent;
+ vdo_submit_metadata_vio(&vdo->super_block.vio,
+ vdo_get_data_region_start(vdo->geometry),
+ read_super_block_endio,
+ handle_super_block_read_error,
+ REQ_OP_READ);
+}
+
+/**
+ * vdo_get_backing_device() - Get the block device object underlying a vdo.
+ * @vdo: The vdo.
+ *
+ * Return: The vdo's current block device.
+ */
+struct block_device *vdo_get_backing_device(const struct vdo *vdo)
+{
+ return vdo->device_config->owned_device->bdev;
+}
+
+/**
+ * vdo_get_device_name() - Get the device name associated with the vdo target.
+ * @target: The target device interface.
+ *
+ * Return: The block device name.
+ */
+const char *vdo_get_device_name(const struct dm_target *target)
+{
+ return dm_device_name(dm_table_get_md(target->table));
+}
+
+/**
+ * vdo_synchronous_flush() - Issue a flush request and wait for it to complete.
+ * @vdo: The vdo.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_synchronous_flush(struct vdo *vdo)
+{
+ int result;
+ struct bio bio;
+
+ bio_init(&bio, vdo_get_backing_device(vdo), NULL, 0,
+ REQ_OP_WRITE | REQ_PREFLUSH);
+ submit_bio_wait(&bio);
+ result = blk_status_to_errno(bio.bi_status);
+
+ atomic64_inc(&vdo->stats.flush_out);
+ if (result != 0) {
+ vdo_log_error_strerror(result, "synchronous flush failed");
+ result = -EIO;
+ }
+
+ bio_uninit(&bio);
+ return result;
+}
+
+/**
+ * vdo_get_state() - Get the current state of the vdo.
+ * @vdo: The vdo.
+ *
+ * Context: This method may be called from any thread.
+ *
+ * Return: The current state of the vdo.
+ */
+enum vdo_state vdo_get_state(const struct vdo *vdo)
+{
+ enum vdo_state state = atomic_read(&vdo->state);
+
+ /* pairs with barriers where state field is changed */
+ smp_rmb();
+ return state;
+}
+
+/**
+ * vdo_set_state() - Set the current state of the vdo.
+ * @vdo: The vdo whose state is to be set.
+ * @state: The new state of the vdo.
+ *
+ * Context: This method may be called from any thread.
+ */
+void vdo_set_state(struct vdo *vdo, enum vdo_state state)
+{
+ /* pairs with barrier in vdo_get_state */
+ smp_wmb();
+ atomic_set(&vdo->state, state);
+}
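+
+/*
+ * Editorial note on the barrier pairing: if a writer does
+ *
+ *	vdo->some_field = value;	(some_field is hypothetical)
+ *	vdo_set_state(vdo, VDO_CLEAN);
+ *
+ * then any reader that subsequently observes VDO_CLEAN through
+ * vdo_get_state() is also guaranteed to observe the store to some_field.
+ */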
+
+/**
+ * vdo_get_admin_state() - Get the admin state of the vdo.
+ * @vdo: The vdo.
+ *
+ * Return: The code for the vdo's current admin state.
+ */
+const struct admin_state_code *vdo_get_admin_state(const struct vdo *vdo)
+{
+ return vdo_get_admin_state_code(&vdo->admin.state);
+}
+
+/**
+ * record_vdo() - Record the state of the VDO for encoding in the super block.
+ */
+static void record_vdo(struct vdo *vdo)
+{
+ /* This is for backwards compatibility. */
+ vdo->states.unused = vdo->geometry.unused;
+ vdo->states.vdo.state = vdo_get_state(vdo);
+ vdo->states.block_map = vdo_record_block_map(vdo->block_map);
+ vdo->states.recovery_journal = vdo_record_recovery_journal(vdo->recovery_journal);
+ vdo->states.slab_depot = vdo_record_slab_depot(vdo->depot);
+ vdo->states.layout = vdo->layout;
+}
+
+/**
+ * continue_super_block_parent() - Continue the parent of a super block save operation.
+ * @completion: The super block vio.
+ *
+ * This callback is registered in vdo_save_components().
+ */
+static void continue_super_block_parent(struct vdo_completion *completion)
+{
+ vdo_continue_completion(vdo_forget(completion->parent), completion->result);
+}
+
+/**
+ * handle_save_error() - Log a super block save error.
+ * @completion: The super block vio.
+ *
+ * This error handler is registered in vdo_save_components().
+ */
+static void handle_save_error(struct vdo_completion *completion)
+{
+ struct vdo_super_block *super_block =
+ container_of(as_vio(completion), struct vdo_super_block, vio);
+
+ vio_record_metadata_io_error(&super_block->vio);
+ vdo_log_error_strerror(completion->result, "super block save failed");
+ /*
+ * Mark the super block as unwritable so that we won't attempt to write it again. This
+ * avoids the case where a growth attempt fails writing the super block with the new size,
+ * but the subsequent attempt to write out the read-only state succeeds. In this case,
+ * writes which happened just before the suspend would not be visible if the VDO is
+ * restarted without rebuilding, but, after a read-only rebuild, the effects of those
+ * writes would reappear.
+ */
+ super_block->unwritable = true;
+ completion->callback(completion);
+}
+
+static void super_block_write_endio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct vdo_completion *parent = vio->completion.parent;
+
+ continue_vio_after_io(vio, continue_super_block_parent,
+ parent->callback_thread_id);
+}
+
+/**
+ * vdo_save_components() - Encode the vdo and save the super block asynchronously.
+ * @vdo: The vdo whose state is being saved.
+ * @parent: The completion to notify when the save is complete.
+ */
+void vdo_save_components(struct vdo *vdo, struct vdo_completion *parent)
+{
+ struct vdo_super_block *super_block = &vdo->super_block;
+
+ if (super_block->unwritable) {
+ vdo_continue_completion(parent, VDO_READ_ONLY);
+ return;
+ }
+
+ if (super_block->vio.completion.parent != NULL) {
+ vdo_continue_completion(parent, VDO_COMPONENT_BUSY);
+ return;
+ }
+
+ record_vdo(vdo);
+
+ vdo_encode_super_block(super_block->buffer, &vdo->states);
+ super_block->vio.completion.parent = parent;
+ super_block->vio.completion.callback_thread_id = parent->callback_thread_id;
+ vdo_submit_metadata_vio(&super_block->vio,
+ vdo_get_data_region_start(vdo->geometry),
+ super_block_write_endio, handle_save_error,
+ REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
+}
+
+/**
+ * vdo_register_read_only_listener() - Register a listener to be notified when the VDO goes
+ * read-only.
+ * @vdo: The vdo to register with.
+ * @listener: The object to notify.
+ * @notification: The function to call to send the notification.
+ * @thread_id: The id of the thread on which to send the notification.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_register_read_only_listener(struct vdo *vdo, void *listener,
+ vdo_read_only_notification_fn notification,
+ thread_id_t thread_id)
+{
+ struct vdo_thread *thread = &vdo->threads[thread_id];
+ struct read_only_listener *read_only_listener;
+ int result;
+
+ result = VDO_ASSERT(thread_id != vdo->thread_config.dedupe_thread,
+ "read only listener not registered on dedupe thread");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = vdo_allocate(1, struct read_only_listener, __func__,
+ &read_only_listener);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *read_only_listener = (struct read_only_listener) {
+ .listener = listener,
+ .notify = notification,
+ .next = thread->listeners,
+ };
+
+ thread->listeners = read_only_listener;
+ return VDO_SUCCESS;
+}
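+
+/*
+ * Editorial sketch of a caller of this API (the component and function names
+ * are hypothetical):
+ *
+ *	static void notify_my_component(void *listener, struct vdo_completion *parent)
+ *	{
+ *		struct my_component *component = listener;
+ *
+ *		quiesce(component);
+ *		vdo_finish_completion(parent);
+ *	}
+ *
+ *	result = vdo_register_read_only_listener(vdo, component,
+ *						 notify_my_component, thread_id);
+ *
+ * The notification function runs on the registered thread and must
+ * eventually finish or continue the parent completion so the notifier can
+ * move on to the next listener.
+ */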
+
+/**
+ * notify_vdo_of_read_only_mode() - Notify a vdo that it is going read-only.
+ * @listener: The vdo.
+ * @parent: The completion to notify in order to acknowledge the notification.
+ *
+ * This will save the read-only state to the super block.
+ *
+ * Implements vdo_read_only_notification_fn.
+ */
+static void notify_vdo_of_read_only_mode(void *listener, struct vdo_completion *parent)
+{
+ struct vdo *vdo = listener;
+
+ if (vdo_in_read_only_mode(vdo))
+ vdo_finish_completion(parent);
+
+ vdo_set_state(vdo, VDO_READ_ONLY_MODE);
+ vdo_save_components(vdo, parent);
+}
+
+/**
+ * vdo_enable_read_only_entry() - Enable a vdo to enter read-only mode on errors.
+ * @vdo: The vdo to enable.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int vdo_enable_read_only_entry(struct vdo *vdo)
+{
+ thread_id_t id;
+ bool is_read_only = vdo_in_read_only_mode(vdo);
+ struct read_only_notifier *notifier = &vdo->read_only_notifier;
+
+ if (is_read_only) {
+ notifier->read_only_error = VDO_READ_ONLY;
+ notifier->state = NOTIFIED;
+ } else {
+ notifier->state = MAY_NOT_NOTIFY;
+ }
+
+ spin_lock_init(&notifier->lock);
+ vdo_initialize_completion(&notifier->completion, vdo,
+ VDO_READ_ONLY_MODE_COMPLETION);
+
+ for (id = 0; id < vdo->thread_config.thread_count; id++)
+ vdo->threads[id].is_read_only = is_read_only;
+
+ return vdo_register_read_only_listener(vdo, vdo, notify_vdo_of_read_only_mode,
+ vdo->thread_config.admin_thread);
+}
+
+/**
+ * vdo_wait_until_not_entering_read_only_mode() - Wait until no read-only notifications are in
+ * progress and prevent any subsequent
+ * notifications.
+ * @parent: The completion to notify when no threads are entering read-only mode.
+ *
+ * Notifications may be re-enabled by calling vdo_allow_read_only_mode_entry().
+ */
+void vdo_wait_until_not_entering_read_only_mode(struct vdo_completion *parent)
+{
+ struct vdo *vdo = parent->vdo;
+ struct read_only_notifier *notifier = &vdo->read_only_notifier;
+
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ if (notifier->waiter != NULL) {
+ vdo_continue_completion(parent, VDO_COMPONENT_BUSY);
+ return;
+ }
+
+ spin_lock(&notifier->lock);
+ if (notifier->state == NOTIFYING)
+ notifier->waiter = parent;
+ else if (notifier->state == MAY_NOTIFY)
+ notifier->state = MAY_NOT_NOTIFY;
+ spin_unlock(&notifier->lock);
+
+ if (notifier->waiter == NULL) {
+ /*
+		 * No notification was in progress, and further notifications
+		 * are now disallowed.
+ */
+ vdo_launch_completion(parent);
+ return;
+ }
+}
+
+/**
+ * as_notifier() - Convert a generic vdo_completion to a read_only_notifier.
+ * @completion: The completion to convert.
+ *
+ * Return: The completion as a read_only_notifier.
+ */
+static inline struct read_only_notifier *as_notifier(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_READ_ONLY_MODE_COMPLETION);
+ return container_of(completion, struct read_only_notifier, completion);
+}
+
+/**
+ * finish_entering_read_only_mode() - Complete the process of entering read only mode.
+ * @completion: The read-only mode completion.
+ */
+static void finish_entering_read_only_mode(struct vdo_completion *completion)
+{
+ struct read_only_notifier *notifier = as_notifier(completion);
+
+ vdo_assert_on_admin_thread(completion->vdo, __func__);
+
+ spin_lock(&notifier->lock);
+ notifier->state = NOTIFIED;
+ spin_unlock(&notifier->lock);
+
+ if (notifier->waiter != NULL)
+ vdo_continue_completion(vdo_forget(notifier->waiter),
+ completion->result);
+}
+
+/**
+ * make_thread_read_only() - Inform each thread that the VDO is in read-only mode.
+ * @completion: The read-only mode completion.
+ */
+static void make_thread_read_only(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ thread_id_t thread_id = completion->callback_thread_id;
+ struct read_only_notifier *notifier = as_notifier(completion);
+ struct read_only_listener *listener = completion->parent;
+
+ if (listener == NULL) {
+ /* This is the first call on this thread */
+ struct vdo_thread *thread = &vdo->threads[thread_id];
+
+ thread->is_read_only = true;
+ listener = thread->listeners;
+ if (thread_id == 0)
+ vdo_log_error_strerror(READ_ONCE(notifier->read_only_error),
+ "Unrecoverable error, entering read-only mode");
+ } else {
+ /* We've just finished notifying a listener */
+ listener = listener->next;
+ }
+
+ if (listener != NULL) {
+ /* We have a listener to notify */
+ vdo_prepare_completion(completion, make_thread_read_only,
+ make_thread_read_only, thread_id,
+ listener);
+ listener->notify(listener->listener, completion);
+ return;
+ }
+
+ /* We're done with this thread */
+ if (++thread_id == vdo->thread_config.dedupe_thread) {
+ /*
+ * We don't want to notify the dedupe thread since it may be
+ * blocked rebuilding the index.
+ */
+ thread_id++;
+ }
+
+ if (thread_id >= vdo->thread_config.thread_count) {
+ /* There are no more threads */
+ vdo_prepare_completion(completion, finish_entering_read_only_mode,
+ finish_entering_read_only_mode,
+ vdo->thread_config.admin_thread, NULL);
+ } else {
+ vdo_prepare_completion(completion, make_thread_read_only,
+ make_thread_read_only, thread_id, NULL);
+ }
+
+ vdo_launch_completion(completion);
+}
+
+/**
+ * vdo_allow_read_only_mode_entry() - Allow the notifier to put the VDO into read-only mode,
+ * reversing the effects of
+ * vdo_wait_until_not_entering_read_only_mode().
+ * @parent: The object to notify once the operation is complete.
+ *
+ * If some thread tried to put the vdo into read-only mode while notifications were disallowed, the
+ * deferred notification will be performed when this method is called. In that case, the parent will
+ * not be notified until the vdo has actually entered read-only mode and attempted to save the super
+ * block.
+ *
+ * Context: This method may only be called from the admin thread.
+ */
+void vdo_allow_read_only_mode_entry(struct vdo_completion *parent)
+{
+ struct vdo *vdo = parent->vdo;
+ struct read_only_notifier *notifier = &vdo->read_only_notifier;
+
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ if (notifier->waiter != NULL) {
+ vdo_continue_completion(parent, VDO_COMPONENT_BUSY);
+ return;
+ }
+
+ spin_lock(&notifier->lock);
+ if (notifier->state == MAY_NOT_NOTIFY) {
+ if (notifier->read_only_error == VDO_SUCCESS) {
+ notifier->state = MAY_NOTIFY;
+ } else {
+ notifier->state = NOTIFYING;
+ notifier->waiter = parent;
+ }
+ }
+ spin_unlock(&notifier->lock);
+
+ if (notifier->waiter == NULL) {
+ /* We're done */
+ vdo_launch_completion(parent);
+ return;
+ }
+
+ /* Do the pending notification. */
+ make_thread_read_only(&notifier->completion);
+}
+
+/**
+ * vdo_enter_read_only_mode() - Put a VDO into read-only mode and save the read-only state in the
+ * super block.
+ * @vdo: The vdo.
+ * @error_code: The error which caused the VDO to enter read-only mode.
+ *
+ * This method is a no-op if the VDO is already read-only.
+ */
+void vdo_enter_read_only_mode(struct vdo *vdo, int error_code)
+{
+ bool notify = false;
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+ struct read_only_notifier *notifier = &vdo->read_only_notifier;
+ struct vdo_thread *thread;
+
+ if (thread_id != VDO_INVALID_THREAD_ID) {
+ thread = &vdo->threads[thread_id];
+ if (thread->is_read_only) {
+ /* This thread has already gone read-only. */
+ return;
+ }
+
+ /* Record for this thread that the VDO is read-only. */
+ thread->is_read_only = true;
+ }
+
+ spin_lock(&notifier->lock);
+ if (notifier->read_only_error == VDO_SUCCESS) {
+ WRITE_ONCE(notifier->read_only_error, error_code);
+ if (notifier->state == MAY_NOTIFY) {
+ notifier->state = NOTIFYING;
+ notify = true;
+ }
+ }
+ spin_unlock(&notifier->lock);
+
+ if (!notify) {
+ /* The notifier is already aware of a read-only error */
+ return;
+ }
+
+ /* Initiate a notification starting on the lowest numbered thread. */
+ vdo_launch_completion_callback(&notifier->completion, make_thread_read_only, 0);
+}
+
+/**
+ * vdo_is_read_only() - Check whether the VDO is read-only.
+ * @vdo: The vdo.
+ *
+ * Return: true if the vdo is read-only.
+ *
+ * Unlike examining the vdo's state field, which is only safe to check from the admin thread, this
+ * method may be called from any thread.
+ */
+bool vdo_is_read_only(struct vdo *vdo)
+{
+ return vdo->threads[vdo_get_callback_thread_id()].is_read_only;
+}
+
+/**
+ * vdo_in_read_only_mode() - Check whether a vdo is in read-only mode.
+ * @vdo: The vdo to query.
+ *
+ * Return: true if the vdo is in read-only mode.
+ */
+bool vdo_in_read_only_mode(const struct vdo *vdo)
+{
+ return (vdo_get_state(vdo) == VDO_READ_ONLY_MODE);
+}
+
+/**
+ * vdo_in_recovery_mode() - Check whether the vdo is in recovery mode.
+ * @vdo: The vdo to query.
+ *
+ * Return: true if the vdo is in recovery mode.
+ */
+bool vdo_in_recovery_mode(const struct vdo *vdo)
+{
+ return (vdo_get_state(vdo) == VDO_RECOVERING);
+}
+
+/**
+ * vdo_enter_recovery_mode() - Put the vdo into recovery mode.
+ * @vdo: The vdo.
+ */
+void vdo_enter_recovery_mode(struct vdo *vdo)
+{
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ if (vdo_in_read_only_mode(vdo))
+ return;
+
+ vdo_log_info("Entering recovery mode");
+ vdo_set_state(vdo, VDO_RECOVERING);
+}
+
+/**
+ * complete_synchronous_action() - Signal the waiting thread that a synchronous action is complete.
+ * @completion: The sync completion.
+ */
+static void complete_synchronous_action(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VDO_SYNC_COMPLETION);
+ complete(&(container_of(completion, struct sync_completion,
+ vdo_completion)->completion));
+}
+
+/**
+ * perform_synchronous_action() - Launch an action on a VDO thread and wait for it to complete.
+ * @vdo: The vdo.
+ * @action: The callback to launch.
+ * @thread_id: The thread on which to run the action.
+ * @parent: The parent of the sync completion (may be NULL).
+ *
+ * Return: The result of the action.
+ */
+static int perform_synchronous_action(struct vdo *vdo, vdo_action_fn action,
+ thread_id_t thread_id, void *parent)
+{
+ struct sync_completion sync;
+
+ vdo_initialize_completion(&sync.vdo_completion, vdo, VDO_SYNC_COMPLETION);
+ init_completion(&sync.completion);
+ sync.vdo_completion.parent = parent;
+ vdo_launch_completion_callback(&sync.vdo_completion, action, thread_id);
+ wait_for_completion(&sync.completion);
+ return sync.vdo_completion.result;
+}
+
+/**
+ * set_compression_callback() - Callback to turn compression on or off.
+ * @completion: The completion.
+ */
+static void set_compression_callback(struct vdo_completion *completion)
+{
+ struct vdo *vdo = completion->vdo;
+ bool *enable = completion->parent;
+ bool was_enabled = vdo_get_compressing(vdo);
+
+ if (*enable != was_enabled) {
+ WRITE_ONCE(vdo->compressing, *enable);
+ if (was_enabled) {
+ /* Signal the packer to flush since compression has been disabled. */
+ vdo_flush_packer(vdo->packer);
+ }
+ }
+
+ vdo_log_info("compression is %s", (*enable ? "enabled" : "disabled"));
+ *enable = was_enabled;
+ complete_synchronous_action(completion);
+}
+
+/**
+ * vdo_set_compressing() - Turn compression on or off.
+ * @vdo: The vdo.
+ * @enable: Whether to enable or disable compression.
+ *
+ * Return: Whether compression was previously on or off.
+ */
+bool vdo_set_compressing(struct vdo *vdo, bool enable)
+{
+ perform_synchronous_action(vdo, set_compression_callback,
+ vdo->thread_config.packer_thread,
+ &enable);
+ return enable;
+}
+
+/**
+ * vdo_get_compressing() - Get whether compression is enabled in a vdo.
+ * @vdo: The vdo.
+ *
+ * Return: State of compression.
+ */
+bool vdo_get_compressing(struct vdo *vdo)
+{
+ return READ_ONCE(vdo->compressing);
+}
+
+static size_t get_block_map_cache_size(const struct vdo *vdo)
+{
+ return ((size_t) vdo->device_config->cache_size) * VDO_BLOCK_SIZE;
+}
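+
+/*
+ * Worked example (editorial): cache_size is a count of 4 KiB blocks, so a
+ * cache_size of 262144 yields 262144 * 4096 bytes = 1 GiB of cache.
+ */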
+
+static struct error_statistics __must_check get_vdo_error_statistics(const struct vdo *vdo)
+{
+ /*
+ * The error counts can be incremented from arbitrary threads and so must be incremented
+ * atomically, but they are just statistics with no semantics that could rely on memory
+ * order, so unfenced reads are sufficient.
+ */
+ const struct atomic_statistics *atoms = &vdo->stats;
+
+ return (struct error_statistics) {
+ .invalid_advice_pbn_count = atomic64_read(&atoms->invalid_advice_pbn_count),
+ .no_space_error_count = atomic64_read(&atoms->no_space_error_count),
+ .read_only_error_count = atomic64_read(&atoms->read_only_error_count),
+ };
+}
+
+static void copy_bio_stat(struct bio_stats *b, const struct atomic_bio_stats *a)
+{
+ b->read = atomic64_read(&a->read);
+ b->write = atomic64_read(&a->write);
+ b->discard = atomic64_read(&a->discard);
+ b->flush = atomic64_read(&a->flush);
+ b->empty_flush = atomic64_read(&a->empty_flush);
+ b->fua = atomic64_read(&a->fua);
+}
+
+static struct bio_stats subtract_bio_stats(struct bio_stats minuend,
+ struct bio_stats subtrahend)
+{
+ return (struct bio_stats) {
+ .read = minuend.read - subtrahend.read,
+ .write = minuend.write - subtrahend.write,
+ .discard = minuend.discard - subtrahend.discard,
+ .flush = minuend.flush - subtrahend.flush,
+ .empty_flush = minuend.empty_flush - subtrahend.empty_flush,
+ .fua = minuend.fua - subtrahend.fua,
+ };
+}
+
+/**
+ * vdo_get_physical_blocks_allocated() - Get the number of physical blocks in use by user data.
+ * @vdo: The vdo.
+ *
+ * Return: The number of blocks allocated for user data.
+ */
+static block_count_t __must_check vdo_get_physical_blocks_allocated(const struct vdo *vdo)
+{
+ return (vdo_get_slab_depot_allocated_blocks(vdo->depot) -
+ vdo_get_journal_block_map_data_blocks_used(vdo->recovery_journal));
+}
+
+/**
+ * vdo_get_physical_blocks_overhead() - Get the number of physical blocks used by vdo metadata.
+ * @vdo: The vdo.
+ *
+ * Return: The number of overhead blocks.
+ */
+static block_count_t __must_check vdo_get_physical_blocks_overhead(const struct vdo *vdo)
+{
+ /*
+ * config.physical_blocks is mutated during resize and is in a packed structure,
+	 * but resize runs on the admin thread.
+ * TODO: Verify that this is always safe.
+ */
+ return (vdo->states.vdo.config.physical_blocks -
+ vdo_get_slab_depot_data_blocks(vdo->depot) +
+ vdo_get_journal_block_map_data_blocks_used(vdo->recovery_journal));
+}
+
+static const char *vdo_describe_state(enum vdo_state state)
+{
+ /* These strings should all fit in the 15 chars of VDOStatistics.mode. */
+ switch (state) {
+ case VDO_RECOVERING:
+ return "recovering";
+
+ case VDO_READ_ONLY_MODE:
+ return "read-only";
+
+ default:
+ return "normal";
+ }
+}
+
+/**
+ * get_vdo_statistics() - Populate a vdo_statistics structure on the admin thread.
+ * @vdo: The vdo.
+ * @stats: The statistics structure to populate.
+ */
+static void get_vdo_statistics(const struct vdo *vdo, struct vdo_statistics *stats)
+{
+ struct recovery_journal *journal = vdo->recovery_journal;
+ enum vdo_state state = vdo_get_state(vdo);
+
+ vdo_assert_on_admin_thread(vdo, __func__);
+
+ /* start with a clean slate */
+ memset(stats, 0, sizeof(struct vdo_statistics));
+
+ /*
+ * These are immutable properties of the vdo object, so it is safe to query them from any
+ * thread.
+ */
+ stats->version = STATISTICS_VERSION;
+ stats->logical_blocks = vdo->states.vdo.config.logical_blocks;
+ /*
+ * config.physical_blocks is mutated during resize and is in a packed structure, but resize
+ * runs on the admin thread.
+ * TODO: verify that this is always safe
+ */
+ stats->physical_blocks = vdo->states.vdo.config.physical_blocks;
+ stats->block_size = VDO_BLOCK_SIZE;
+ stats->complete_recoveries = vdo->states.vdo.complete_recoveries;
+ stats->read_only_recoveries = vdo->states.vdo.read_only_recoveries;
+ stats->block_map_cache_size = get_block_map_cache_size(vdo);
+
+ /* The callees are responsible for thread-safety. */
+ stats->data_blocks_used = vdo_get_physical_blocks_allocated(vdo);
+ stats->overhead_blocks_used = vdo_get_physical_blocks_overhead(vdo);
+ stats->logical_blocks_used = vdo_get_recovery_journal_logical_blocks_used(journal);
+ vdo_get_slab_depot_statistics(vdo->depot, stats);
+ stats->journal = vdo_get_recovery_journal_statistics(journal);
+ stats->packer = vdo_get_packer_statistics(vdo->packer);
+ stats->block_map = vdo_get_block_map_statistics(vdo->block_map);
+ vdo_get_dedupe_statistics(vdo->hash_zones, stats);
+ stats->errors = get_vdo_error_statistics(vdo);
+ stats->in_recovery_mode = (state == VDO_RECOVERING);
+ snprintf(stats->mode, sizeof(stats->mode), "%s", vdo_describe_state(state));
+
+ stats->instance = vdo->instance;
+ stats->current_vios_in_progress = get_data_vio_pool_active_requests(vdo->data_vio_pool);
+ stats->max_vios = get_data_vio_pool_maximum_requests(vdo->data_vio_pool);
+
+ stats->flush_out = atomic64_read(&vdo->stats.flush_out);
+ stats->logical_block_size = vdo->device_config->logical_block_size;
+ copy_bio_stat(&stats->bios_in, &vdo->stats.bios_in);
+ copy_bio_stat(&stats->bios_in_partial, &vdo->stats.bios_in_partial);
+ copy_bio_stat(&stats->bios_out, &vdo->stats.bios_out);
+ copy_bio_stat(&stats->bios_meta, &vdo->stats.bios_meta);
+ copy_bio_stat(&stats->bios_journal, &vdo->stats.bios_journal);
+ copy_bio_stat(&stats->bios_page_cache, &vdo->stats.bios_page_cache);
+ copy_bio_stat(&stats->bios_out_completed, &vdo->stats.bios_out_completed);
+ copy_bio_stat(&stats->bios_meta_completed, &vdo->stats.bios_meta_completed);
+ copy_bio_stat(&stats->bios_journal_completed,
+ &vdo->stats.bios_journal_completed);
+ copy_bio_stat(&stats->bios_page_cache_completed,
+ &vdo->stats.bios_page_cache_completed);
+ copy_bio_stat(&stats->bios_acknowledged, &vdo->stats.bios_acknowledged);
+ copy_bio_stat(&stats->bios_acknowledged_partial, &vdo->stats.bios_acknowledged_partial);
+ stats->bios_in_progress =
+ subtract_bio_stats(stats->bios_in, stats->bios_acknowledged);
+ vdo_get_memory_stats(&stats->memory_usage.bytes_used,
+ &stats->memory_usage.peak_bytes_used);
+}
+
+/**
+ * vdo_fetch_statistics_callback() - Action to populate a vdo_statistics
+ * structure on the admin thread.
+ * @completion: The completion.
+ *
+ * This callback is registered in vdo_fetch_statistics().
+ */
+static void vdo_fetch_statistics_callback(struct vdo_completion *completion)
+{
+ get_vdo_statistics(completion->vdo, completion->parent);
+ complete_synchronous_action(completion);
+}
+
+/**
+ * vdo_fetch_statistics() - Fetch statistics on the correct thread.
+ * @vdo: The vdo.
+ * @stats: The vdo statistics are returned here.
+ */
+void vdo_fetch_statistics(struct vdo *vdo, struct vdo_statistics *stats)
+{
+ perform_synchronous_action(vdo, vdo_fetch_statistics_callback,
+ vdo->thread_config.admin_thread, stats);
+}
+
+/**
+ * vdo_get_callback_thread_id() - Get the id of the callback thread on which a completion is
+ * currently running.
+ *
+ * Return: The current thread ID, or VDO_INVALID_THREAD_ID if the caller is not on a vdo thread.
+ */
+thread_id_t vdo_get_callback_thread_id(void)
+{
+ struct vdo_work_queue *queue = vdo_get_current_work_queue();
+ struct vdo_thread *thread;
+ thread_id_t thread_id;
+
+ if (queue == NULL)
+ return VDO_INVALID_THREAD_ID;
+
+ thread = vdo_get_work_queue_owner(queue);
+ thread_id = thread->thread_id;
+
+ if (PARANOID_THREAD_CONSISTENCY_CHECKS) {
+ BUG_ON(thread_id >= thread->vdo->thread_config.thread_count);
+ BUG_ON(thread != &thread->vdo->threads[thread_id]);
+ }
+
+ return thread_id;
+}
+
+/**
+ * vdo_dump_status() - Dump status information about a vdo to the log for debugging.
+ * @vdo: The vdo to dump.
+ */
+void vdo_dump_status(const struct vdo *vdo)
+{
+ zone_count_t zone;
+
+ vdo_dump_flusher(vdo->flusher);
+ vdo_dump_recovery_journal_statistics(vdo->recovery_journal);
+ vdo_dump_packer(vdo->packer);
+ vdo_dump_slab_depot(vdo->depot);
+
+ for (zone = 0; zone < vdo->thread_config.logical_zone_count; zone++)
+ vdo_dump_logical_zone(&vdo->logical_zones->zones[zone]);
+
+ for (zone = 0; zone < vdo->thread_config.physical_zone_count; zone++)
+ vdo_dump_physical_zone(&vdo->physical_zones->zones[zone]);
+
+ vdo_dump_hash_zones(vdo->hash_zones);
+}
+
+/**
+ * vdo_assert_on_admin_thread() - Assert that we are running on the admin thread.
+ * @vdo: The vdo.
+ * @name: The name of the function which should be running on the admin thread (for logging).
+ */
+void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread),
+ "%s called on admin thread", name);
+}
+
+/**
+ * vdo_assert_on_logical_zone_thread() - Assert that this function was called on the specified
+ * logical zone thread.
+ * @vdo: The vdo.
+ * @logical_zone: The number of the logical zone.
+ * @name: The name of the calling function.
+ */
+void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone,
+ const char *name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
+ vdo->thread_config.logical_threads[logical_zone]),
+ "%s called on logical thread", name);
+}
+
+/**
+ * vdo_assert_on_physical_zone_thread() - Assert that this function was called on the specified
+ * physical zone thread.
+ * @vdo: The vdo.
+ * @physical_zone: The number of the physical zone.
+ * @name: The name of the calling function.
+ */
+void vdo_assert_on_physical_zone_thread(const struct vdo *vdo,
+ zone_count_t physical_zone, const char *name)
+{
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
+ vdo->thread_config.physical_threads[physical_zone]),
+ "%s called on physical thread", name);
+}
+
+/**
+ * vdo_get_physical_zone() - Get the physical zone responsible for a given physical block number.
+ * @vdo: The vdo containing the physical zones.
+ * @pbn: The PBN of the data block.
+ * @zone_ptr: A pointer to return the physical zone.
+ *
+ * Gets the physical zone responsible for a given physical block number of a data block in this vdo
+ * instance, or of the zero block (for which a NULL zone is returned). For any other block number
+ * that is not in the range of valid data block numbers in any slab, an error will be returned.
+ * This function is safe to call on invalid block numbers; it will not put the vdo into read-only
+ * mode.
+ *
+ * Return: VDO_SUCCESS, VDO_OUT_OF_RANGE if the block number is invalid, or an error code for any
+ * other failure.
+ */
+int vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn,
+ struct physical_zone **zone_ptr)
+{
+ struct vdo_slab *slab;
+ int result;
+
+ if (pbn == VDO_ZERO_BLOCK) {
+ *zone_ptr = NULL;
+ return VDO_SUCCESS;
+ }
+
+ /*
+ * Used because it does a more restrictive bounds check than vdo_get_slab(), and done first
+ * because it won't trigger read-only mode on an invalid PBN.
+ */
+ if (!vdo_is_physical_data_block(vdo->depot, pbn))
+ return VDO_OUT_OF_RANGE;
+
+ /* With the PBN already checked, we should always succeed in finding a slab. */
+ slab = vdo_get_slab(vdo->depot, pbn);
+ result = VDO_ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs");
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *zone_ptr = &vdo->physical_zones->zones[slab->allocator->zone_number];
+ return VDO_SUCCESS;
+}
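+
+/*
+ * For example (a sketch), a caller mapping a data block's PBN to its zone must
+ * allow for the zero block, for which a NULL zone is returned:
+ *
+ *    struct physical_zone *zone;
+ *    int result = vdo_get_physical_zone(vdo, pbn, &zone);
+ *
+ *    if ((result == VDO_SUCCESS) && (zone != NULL))
+ *        ...operate on the zone...
+ */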
diff --git a/drivers/md/dm-vdo/vdo.h b/drivers/md/dm-vdo/vdo.h
new file mode 100644
index 000000000000..483ae873e002
--- /dev/null
+++ b/drivers/md/dm-vdo/vdo.h
@@ -0,0 +1,362 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_H
+#define VDO_H
+
+#include <linux/atomic.h>
+#include <linux/blk_types.h>
+#include <linux/completion.h>
+#include <linux/dm-kcopyd.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "admin-state.h"
+#include "encodings.h"
+#include "funnel-workqueue.h"
+#include "packer.h"
+#include "physical-zone.h"
+#include "statistics.h"
+#include "thread-registry.h"
+#include "types.h"
+
+enum notifier_state {
+ /* Notifications are allowed but not in progress */
+ MAY_NOTIFY,
+ /* A notification is in progress */
+ NOTIFYING,
+ /* Notifications are not allowed */
+ MAY_NOT_NOTIFY,
+ /* A notification has completed */
+ NOTIFIED,
+};
+
+/**
+ * typedef vdo_read_only_notification_fn - A function to notify a listener that the VDO has gone
+ * read-only.
+ * @listener: The object to notify.
+ * @parent: The completion to notify in order to acknowledge the notification.
+ */
+typedef void (*vdo_read_only_notification_fn)(void *listener, struct vdo_completion *parent);
+
+/*
+ * An object to be notified when the VDO enters read-only mode
+ */
+struct read_only_listener {
+ /* The listener */
+ void *listener;
+ /* The method to call to notify the listener */
+ vdo_read_only_notification_fn notify;
+ /* A pointer to the next listener */
+ struct read_only_listener *next;
+};
+
+struct vdo_thread {
+ struct vdo *vdo;
+ thread_id_t thread_id;
+ struct vdo_work_queue *queue;
+ /*
+ * Each thread maintains its own notion of whether the VDO is read-only so that the
+ * read-only state can be checked from any base thread without worrying about
+ * synchronization or thread safety. This does mean that knowledge of the VDO going
+ * read-only does not occur simultaneously across the VDO's threads, but that does not seem
+ * to cause any problems.
+ */
+ bool is_read_only;
+ /*
+ * A list of objects waiting to be notified on this thread that the VDO has entered
+ * read-only mode.
+ */
+ struct read_only_listener *listeners;
+ struct registered_thread allocating_thread;
+};
+
+/* Keep struct bio statistics atomically */
+struct atomic_bio_stats {
+	atomic64_t read; /* Number of read bios */
+	atomic64_t write; /* Number of write bios */
+	atomic64_t discard; /* Number of REQ_OP_DISCARD bios */
+	atomic64_t flush; /* Number of REQ_PREFLUSH bios */
+ atomic64_t empty_flush; /* Number of REQ_PREFLUSH bios without data */
+ atomic64_t fua; /* Number of REQ_FUA bios */
+};
+
+/* Counters are atomic since updates can arrive concurrently from arbitrary threads. */
+struct atomic_statistics {
+ atomic64_t bios_submitted;
+ atomic64_t bios_completed;
+ atomic64_t flush_out;
+ atomic64_t invalid_advice_pbn_count;
+ atomic64_t no_space_error_count;
+ atomic64_t read_only_error_count;
+ struct atomic_bio_stats bios_in;
+ struct atomic_bio_stats bios_in_partial;
+ struct atomic_bio_stats bios_out;
+ struct atomic_bio_stats bios_out_completed;
+ struct atomic_bio_stats bios_acknowledged;
+ struct atomic_bio_stats bios_acknowledged_partial;
+ struct atomic_bio_stats bios_meta;
+ struct atomic_bio_stats bios_meta_completed;
+ struct atomic_bio_stats bios_journal;
+ struct atomic_bio_stats bios_journal_completed;
+ struct atomic_bio_stats bios_page_cache;
+ struct atomic_bio_stats bios_page_cache_completed;
+};
+
+struct read_only_notifier {
+ /* The completion for entering read-only mode */
+ struct vdo_completion completion;
+ /* A completion waiting for notifications to be drained or enabled */
+ struct vdo_completion *waiter;
+ /* Lock to protect the next two fields */
+ spinlock_t lock;
+ /* The code of the error which put the VDO into read-only mode */
+ int read_only_error;
+ /* The current state of the notifier (values described above) */
+ enum notifier_state state;
+};
+
+/*
+ * The thread ID returned when the current thread is not a vdo thread, or cannot be determined
+ * (usually because the caller is in interrupt context).
+ */
+#define VDO_INVALID_THREAD_ID ((thread_id_t) -1)
+
+struct thread_config {
+ zone_count_t logical_zone_count;
+ zone_count_t physical_zone_count;
+ zone_count_t hash_zone_count;
+ thread_count_t bio_thread_count;
+ thread_count_t thread_count;
+ thread_id_t admin_thread;
+ thread_id_t journal_thread;
+ thread_id_t packer_thread;
+ thread_id_t dedupe_thread;
+ thread_id_t bio_ack_thread;
+ thread_id_t cpu_thread;
+ thread_id_t *logical_threads;
+ thread_id_t *physical_threads;
+ thread_id_t *hash_zone_threads;
+ thread_id_t *bio_threads;
+};
+
+struct thread_count_config;
+
+struct vdo_super_block {
+ /* The vio for reading and writing the super block to disk */
+ struct vio vio;
+ /* A buffer to hold the super block */
+ u8 *buffer;
+ /* Whether this super block may not be written */
+ bool unwritable;
+};
+
+struct data_vio_pool;
+
+struct vdo_administrator {
+ struct vdo_completion completion;
+ struct admin_state state;
+ atomic_t busy;
+ u32 phase;
+ struct completion callback_sync;
+};
+
+struct vdo {
+ char thread_name_prefix[MAX_VDO_WORK_QUEUE_NAME_LEN];
+ struct vdo_thread *threads;
+ vdo_action_fn action;
+ struct vdo_completion *completion;
+ struct vio_tracer *vio_tracer;
+
+ /* The atomic version of the state of this vdo */
+ atomic_t state;
+ /* The full state of all components */
+ struct vdo_component_states states;
+ /*
+ * A counter value to attach to thread names and log messages to identify the individual
+ * device.
+ */
+ unsigned int instance;
+ /* The read-only notifier */
+ struct read_only_notifier read_only_notifier;
+ /* The load-time configuration of this vdo */
+ struct device_config *device_config;
+ /* The thread mapping */
+ struct thread_config thread_config;
+
+ /* The super block */
+ struct vdo_super_block super_block;
+
+ /* The partitioning of the underlying storage */
+ struct layout layout;
+ struct layout next_layout;
+ struct dm_kcopyd_client *partition_copier;
+
+ /* The block map */
+ struct block_map *block_map;
+
+ /* The journal for block map recovery */
+ struct recovery_journal *recovery_journal;
+
+ /* The slab depot */
+ struct slab_depot *depot;
+
+ /* The compressed-block packer */
+ struct packer *packer;
+ /* Whether incoming data should be compressed */
+ bool compressing;
+
+ /* The handler for flush requests */
+ struct flusher *flusher;
+
+ /* The state the vdo was in when loaded (primarily for unit tests) */
+ enum vdo_state load_state;
+
+ /* The logical zones of this vdo */
+ struct logical_zones *logical_zones;
+
+ /* The physical zones of this vdo */
+ struct physical_zones *physical_zones;
+
+ /* The hash lock zones of this vdo */
+ struct hash_zones *hash_zones;
+
+ /* Bio submission manager used for sending bios to the storage device. */
+ struct io_submitter *io_submitter;
+
+ /* The pool of data_vios for servicing incoming bios */
+ struct data_vio_pool *data_vio_pool;
+
+ /* The manager for administrative operations */
+ struct vdo_administrator admin;
+
+ /* Flags controlling administrative operations */
+ const struct admin_state_code *suspend_type;
+ bool allocations_allowed;
+ bool dump_on_shutdown;
+ atomic_t processing_message;
+
+ /*
+ * Statistics
+ * Atomic stats counters
+ */
+ struct atomic_statistics stats;
+ /* Used to gather statistics without allocating memory */
+ struct vdo_statistics stats_buffer;
+ /* Protects the stats_buffer */
+ struct mutex stats_mutex;
+
+ /* A list of all device_configs referencing this vdo */
+ struct list_head device_config_list;
+
+ /* This VDO's list entry for the device registry */
+ struct list_head registration;
+
+ /* Underlying block device info. */
+ u64 starting_sector_offset;
+ struct volume_geometry geometry;
+
+ /* N blobs of context data for LZ4 code, one per CPU thread. */
+ char **compression_context;
+};
+
+/**
+ * vdo_uses_bio_ack_queue() - Indicate whether the vdo is configured to use a separate work queue
+ * for acknowledging received and processed bios.
+ * @vdo: The vdo.
+ *
+ * Note that this directly controls the handling of write operations, but the compile-time flag
+ * VDO_USE_BIO_ACK_QUEUE_FOR_READ is also checked for read operations.
+ *
+ * Return: Whether a bio-acknowledgement work queue is in use.
+ */
+static inline bool vdo_uses_bio_ack_queue(struct vdo *vdo)
+{
+ return vdo->device_config->thread_counts.bio_ack_threads > 0;
+}
+
+/**
+ * typedef vdo_filter_fn - Method type for vdo matching methods.
+ *
+ * A filter function returns false if the vdo doesn't match.
+ */
+typedef bool (*vdo_filter_fn)(struct vdo *vdo, const void *context);
+
+void vdo_initialize_device_registry_once(void);
+struct vdo * __must_check vdo_find_matching(vdo_filter_fn filter, const void *context);
+
+int __must_check vdo_make_thread(struct vdo *vdo, thread_id_t thread_id,
+ const struct vdo_work_queue_type *type,
+ unsigned int queue_count, void *contexts[]);
+
+static inline int __must_check vdo_make_default_thread(struct vdo *vdo,
+ thread_id_t thread_id)
+{
+ return vdo_make_thread(vdo, thread_id, NULL, 1, NULL);
+}
+
+int __must_check vdo_make(unsigned int instance, struct device_config *config,
+ char **reason, struct vdo **vdo_ptr);
+
+void vdo_destroy(struct vdo *vdo);
+
+void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent);
+
+struct block_device * __must_check vdo_get_backing_device(const struct vdo *vdo);
+
+const char * __must_check vdo_get_device_name(const struct dm_target *target);
+
+int __must_check vdo_synchronous_flush(struct vdo *vdo);
+
+const struct admin_state_code * __must_check vdo_get_admin_state(const struct vdo *vdo);
+
+bool vdo_set_compressing(struct vdo *vdo, bool enable);
+
+bool vdo_get_compressing(struct vdo *vdo);
+
+void vdo_fetch_statistics(struct vdo *vdo, struct vdo_statistics *stats);
+
+thread_id_t vdo_get_callback_thread_id(void);
+
+enum vdo_state __must_check vdo_get_state(const struct vdo *vdo);
+
+void vdo_set_state(struct vdo *vdo, enum vdo_state state);
+
+void vdo_save_components(struct vdo *vdo, struct vdo_completion *parent);
+
+int vdo_register_read_only_listener(struct vdo *vdo, void *listener,
+ vdo_read_only_notification_fn notification,
+ thread_id_t thread_id);
+
+int vdo_enable_read_only_entry(struct vdo *vdo);
+
+void vdo_wait_until_not_entering_read_only_mode(struct vdo_completion *parent);
+
+void vdo_allow_read_only_mode_entry(struct vdo_completion *parent);
+
+void vdo_enter_read_only_mode(struct vdo *vdo, int error_code);
+
+bool __must_check vdo_is_read_only(struct vdo *vdo);
+
+bool __must_check vdo_in_read_only_mode(const struct vdo *vdo);
+
+bool __must_check vdo_in_recovery_mode(const struct vdo *vdo);
+
+void vdo_enter_recovery_mode(struct vdo *vdo);
+
+void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name);
+
+void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone,
+ const char *name);
+
+void vdo_assert_on_physical_zone_thread(const struct vdo *vdo, zone_count_t physical_zone,
+ const char *name);
+
+int __must_check vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn,
+ struct physical_zone **zone_ptr);
+
+void vdo_dump_status(const struct vdo *vdo);
+
+#endif /* VDO_H */
diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c
new file mode 100644
index 000000000000..b291578f726f
--- /dev/null
+++ b/drivers/md/dm-vdo/vio.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "vio.h"
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/kernel.h>
+#include <linux/ratelimit.h>
+
+#include "logger.h"
+#include "memory-alloc.h"
+#include "permassert.h"
+
+#include "constants.h"
+#include "io-submitter.h"
+#include "vdo.h"
+
+/* A vio_pool is a collection of preallocated vios. */
+struct vio_pool {
+ /* The number of objects managed by the pool */
+ size_t size;
+ /* The list of objects which are available */
+ struct list_head available;
+ /* The queue of requestors waiting for objects from the pool */
+ struct vdo_wait_queue waiting;
+ /* The number of objects currently in use */
+ size_t busy_count;
+ /* The list of objects which are in use */
+ struct list_head busy;
+ /* The ID of the thread on which this pool may be used */
+ thread_id_t thread_id;
+ /* The buffer backing the pool's vios */
+ char *buffer;
+ /* The pool entries */
+ struct pooled_vio vios[];
+};
+
+physical_block_number_t pbn_from_vio_bio(struct bio *bio)
+{
+ struct vio *vio = bio->bi_private;
+ struct vdo *vdo = vio->completion.vdo;
+ physical_block_number_t pbn = bio->bi_iter.bi_sector / VDO_SECTORS_PER_BLOCK;
+
+ return ((pbn == VDO_GEOMETRY_BLOCK_LOCATION) ? pbn : pbn + vdo->geometry.bio_offset);
+}
+
+static int create_multi_block_bio(block_count_t size, struct bio **bio_ptr)
+{
+ struct bio *bio = NULL;
+ int result;
+
+ result = vdo_allocate_extended(struct bio, size + 1, struct bio_vec,
+ "bio", &bio);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ *bio_ptr = bio;
+ return VDO_SUCCESS;
+}
+
+int vdo_create_bio(struct bio **bio_ptr)
+{
+ return create_multi_block_bio(1, bio_ptr);
+}
+
+void vdo_free_bio(struct bio *bio)
+{
+ if (bio == NULL)
+ return;
+
+ bio_uninit(bio);
+ vdo_free(vdo_forget(bio));
+}
+
+int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
+ enum vio_priority priority, void *parent,
+ unsigned int block_count, char *data, struct vio *vio)
+{
+ struct bio *bio;
+ int result;
+
+ result = VDO_ASSERT(block_count <= MAX_BLOCKS_PER_VIO,
+ "block count %u does not exceed maximum %u", block_count,
+ MAX_BLOCKS_PER_VIO);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = VDO_ASSERT(((vio_type != VIO_TYPE_UNINITIALIZED) && (vio_type != VIO_TYPE_DATA)),
+ "%d is a metadata type", vio_type);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ result = create_multi_block_bio(block_count, &bio);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ initialize_vio(vio, bio, block_count, vio_type, priority, vdo);
+ vio->completion.parent = parent;
+ vio->data = data;
+ return VDO_SUCCESS;
+}
+
+/**
+ * create_multi_block_metadata_vio() - Create a vio.
+ * @vdo: The vdo on which the vio will operate.
+ * @vio_type: The type of vio to create.
+ * @priority: The relative priority to assign to the vio.
+ * @parent: The parent of the vio.
+ * @block_count: The size of the vio in blocks.
+ * @data: The buffer.
+ * @vio_ptr: A pointer to hold the new vio.
+ *
+ * Return: VDO_SUCCESS or an error.
+ */
+int create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
+ enum vio_priority priority, void *parent,
+ unsigned int block_count, char *data,
+ struct vio **vio_ptr)
+{
+ struct vio *vio;
+ int result;
+
+ BUILD_BUG_ON(sizeof(struct vio) > 256);
+
+ /*
+ * Metadata vios should use direct allocation and not use the buffer pool, which is
+ * reserved for submissions from the linux block layer.
+ */
+ result = vdo_allocate(1, struct vio, __func__, &vio);
+ if (result != VDO_SUCCESS) {
+ vdo_log_error("metadata vio allocation failure %d", result);
+ return result;
+ }
+
+ result = allocate_vio_components(vdo, vio_type, priority, parent, block_count,
+ data, vio);
+ if (result != VDO_SUCCESS) {
+ vdo_free(vio);
+ return result;
+ }
+
+ *vio_ptr = vio;
+ return VDO_SUCCESS;
+}
+
+/**
+ * free_vio_components() - Free the components of a vio embedded in a larger structure.
+ * @vio: The vio to destroy
+ */
+void free_vio_components(struct vio *vio)
+{
+ if (vio == NULL)
+ return;
+
+ BUG_ON(is_data_vio(vio));
+ vdo_free_bio(vdo_forget(vio->bio));
+}
+
+/**
+ * free_vio() - Destroy a vio.
+ * @vio: The vio to destroy.
+ */
+void free_vio(struct vio *vio)
+{
+ free_vio_components(vio);
+ vdo_free(vio);
+}
+
+/* Set bio properties for a VDO read or write. */
+void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callback,
+ blk_opf_t bi_opf, physical_block_number_t pbn)
+{
+ struct vdo *vdo = vio->completion.vdo;
+ struct device_config *config = vdo->device_config;
+
+ pbn -= vdo->geometry.bio_offset;
+ vio->bio_zone = ((pbn / config->thread_counts.bio_rotation_interval) %
+ config->thread_counts.bio_threads);
+
+ bio->bi_private = vio;
+ bio->bi_end_io = callback;
+ bio->bi_opf = bi_opf;
+ bio->bi_iter.bi_sector = pbn * VDO_SECTORS_PER_BLOCK;
+}
+
+/*
+ * Prepares the bio to perform IO with the specified buffer. May only be used on a VDO-allocated
+ * bio, as it assumes the bio wraps a 4k buffer that is 4k aligned, but there does not have to be a
+ * vio associated with the bio.
+ */
+int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
+ blk_opf_t bi_opf, physical_block_number_t pbn)
+{
+ int bvec_count, offset, len, i;
+ struct bio *bio = vio->bio;
+
+ bio_reset(bio, bio->bi_bdev, bi_opf);
+ vdo_set_bio_properties(bio, vio, callback, bi_opf, pbn);
+ if (data == NULL)
+ return VDO_SUCCESS;
+
+ bio->bi_io_vec = bio->bi_inline_vecs;
+ bio->bi_max_vecs = vio->block_count + 1;
+ len = VDO_BLOCK_SIZE * vio->block_count;
+ offset = offset_in_page(data);
+ bvec_count = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+
+ /*
+ * If we knew that data was always on one page, or contiguous pages, we wouldn't need the
+	 * loop. But if we're using vmalloc, the data may be spread across pages that cannot
+	 * be merged by bio_add_page...
+ */
+ for (i = 0; (i < bvec_count) && (len > 0); i++) {
+ struct page *page;
+ int bytes_added;
+ int bytes = PAGE_SIZE - offset;
+
+ if (bytes > len)
+ bytes = len;
+
+ page = is_vmalloc_addr(data) ? vmalloc_to_page(data) : virt_to_page(data);
+ bytes_added = bio_add_page(bio, page, bytes, offset);
+
+ if (bytes_added != bytes) {
+ return vdo_log_error_strerror(VDO_BIO_CREATION_FAILED,
+ "Could only add %i bytes to bio",
+ bytes_added);
+ }
+
+ data += bytes;
+ len -= bytes;
+ offset = 0;
+ }
+
+ return VDO_SUCCESS;
+}
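+
+/*
+ * For example (a sketch; my_read_endio is a hypothetical completion handler),
+ * re-issuing a single-block read through an already-allocated vio:
+ *
+ *    result = vio_reset_bio(vio, vio->data, my_read_endio, REQ_OP_READ, pbn);
+ */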
+
+/**
+ * update_vio_error_stats() - Update per-vio error stats and log the error.
+ * @vio: The vio which got an error.
+ * @format: The format of the message to log (a printf style format).
+ */
+void update_vio_error_stats(struct vio *vio, const char *format, ...)
+{
+ static DEFINE_RATELIMIT_STATE(error_limiter, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ va_list args;
+ int priority;
+ struct vdo *vdo = vio->completion.vdo;
+
+ switch (vio->completion.result) {
+ case VDO_READ_ONLY:
+ atomic64_inc(&vdo->stats.read_only_error_count);
+ return;
+
+ case VDO_NO_SPACE:
+ atomic64_inc(&vdo->stats.no_space_error_count);
+ priority = VDO_LOG_DEBUG;
+ break;
+
+ default:
+ priority = VDO_LOG_ERR;
+ }
+
+ if (!__ratelimit(&error_limiter))
+ return;
+
+ va_start(args, format);
+ vdo_vlog_strerror(priority, vio->completion.result, VDO_LOGGING_MODULE_NAME,
+ format, args);
+ va_end(args);
+}
+
+void vio_record_metadata_io_error(struct vio *vio)
+{
+ const char *description;
+ physical_block_number_t pbn = pbn_from_vio_bio(vio->bio);
+
+ if (bio_op(vio->bio) == REQ_OP_READ) {
+ description = "read";
+ } else if ((vio->bio->bi_opf & REQ_PREFLUSH) == REQ_PREFLUSH) {
+ description = (((vio->bio->bi_opf & REQ_FUA) == REQ_FUA) ?
+ "write+preflush+fua" :
+ "write+preflush");
+ } else if ((vio->bio->bi_opf & REQ_FUA) == REQ_FUA) {
+ description = "write+fua";
+ } else {
+ description = "write";
+ }
+
+ update_vio_error_stats(vio,
+ "Completing %s vio of type %u for physical block %llu with error",
+ description, vio->type, (unsigned long long) pbn);
+}
+
+/**
+ * make_vio_pool() - Create a new vio pool.
+ * @vdo: The vdo.
+ * @pool_size: The number of vios in the pool.
+ * @thread_id: The ID of the thread using this pool.
+ * @vio_type: The type of vios in the pool.
+ * @priority: The priority with which vios from the pool should be enqueued.
+ * @context: The context that each entry will have.
+ * @pool_ptr: The resulting pool.
+ *
+ * Return: A success or error code.
+ */
+int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
+ enum vio_type vio_type, enum vio_priority priority, void *context,
+ struct vio_pool **pool_ptr)
+{
+ struct vio_pool *pool;
+ char *ptr;
+ int result;
+
+ result = vdo_allocate_extended(struct vio_pool, pool_size, struct pooled_vio,
+ __func__, &pool);
+ if (result != VDO_SUCCESS)
+ return result;
+
+ pool->thread_id = thread_id;
+ INIT_LIST_HEAD(&pool->available);
+ INIT_LIST_HEAD(&pool->busy);
+
+ result = vdo_allocate(pool_size * VDO_BLOCK_SIZE, char,
+ "VIO pool buffer", &pool->buffer);
+ if (result != VDO_SUCCESS) {
+ free_vio_pool(pool);
+ return result;
+ }
+
+ ptr = pool->buffer;
+ for (pool->size = 0; pool->size < pool_size; pool->size++, ptr += VDO_BLOCK_SIZE) {
+ struct pooled_vio *pooled = &pool->vios[pool->size];
+
+ result = allocate_vio_components(vdo, vio_type, priority, NULL, 1, ptr,
+ &pooled->vio);
+ if (result != VDO_SUCCESS) {
+ free_vio_pool(pool);
+ return result;
+ }
+
+ pooled->context = context;
+ list_add_tail(&pooled->pool_entry, &pool->available);
+ }
+
+ *pool_ptr = pool;
+ return VDO_SUCCESS;
+}
+
+/**
+ * free_vio_pool() - Destroy a vio pool.
+ * @pool: The pool to free.
+ */
+void free_vio_pool(struct vio_pool *pool)
+{
+ struct pooled_vio *pooled, *tmp;
+
+ if (pool == NULL)
+ return;
+
+ /* Remove all available vios from the object pool. */
+ VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting),
+ "VIO pool must not have any waiters when being freed");
+ VDO_ASSERT_LOG_ONLY((pool->busy_count == 0),
+ "VIO pool must not have %zu busy entries when being freed",
+ pool->busy_count);
+ VDO_ASSERT_LOG_ONLY(list_empty(&pool->busy),
+ "VIO pool must not have busy entries when being freed");
+
+ list_for_each_entry_safe(pooled, tmp, &pool->available, pool_entry) {
+ list_del(&pooled->pool_entry);
+ free_vio_components(&pooled->vio);
+ pool->size--;
+ }
+
+ VDO_ASSERT_LOG_ONLY(pool->size == 0,
+ "VIO pool must not have missing entries when being freed");
+
+ vdo_free(vdo_forget(pool->buffer));
+ vdo_free(pool);
+}
+
+/**
+ * is_vio_pool_busy() - Check whether a vio pool has outstanding entries.
+ * @pool: The pool to check.
+ *
+ * Return: true if the pool is busy.
+ */
+bool is_vio_pool_busy(struct vio_pool *pool)
+{
+ return (pool->busy_count != 0);
+}
+
+/**
+ * acquire_vio_from_pool() - Acquire a vio and buffer from the pool (asynchronous).
+ * @pool: The vio pool.
+ * @waiter: Object that is requesting a vio.
+ */
+void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter)
+{
+ struct pooled_vio *pooled;
+
+ VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
+ "acquire from active vio_pool called from correct thread");
+
+ if (list_empty(&pool->available)) {
+ vdo_waitq_enqueue_waiter(&pool->waiting, waiter);
+ return;
+ }
+
+ pooled = list_first_entry(&pool->available, struct pooled_vio, pool_entry);
+ pool->busy_count++;
+ list_move_tail(&pooled->pool_entry, &pool->busy);
+ (*waiter->callback)(waiter, pooled);
+}
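+
+/*
+ * For example (a sketch; the callback name is hypothetical), a requestor
+ * embeds a vdo_waiter and receives the pooled vio as the callback context:
+ *
+ *    static void on_vio_available(struct vdo_waiter *waiter, void *context)
+ *    {
+ *        struct pooled_vio *pooled = context;
+ *
+ *        ...use pooled->vio, then call return_vio_to_pool() when done...
+ *    }
+ *
+ *    waiter->callback = on_vio_available;
+ *    acquire_vio_from_pool(pool, waiter);
+ */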
+
+/**
+ * return_vio_to_pool() - Return a vio to the pool.
+ * @pool: The vio pool.
+ * @vio: The pooled vio to return.
+ */
+void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio)
+{
+ VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
+ "vio pool entry returned on same thread as it was acquired");
+
+ vio->vio.completion.error_handler = NULL;
+ vio->vio.completion.parent = NULL;
+ if (vdo_waitq_has_waiters(&pool->waiting)) {
+ vdo_waitq_notify_next_waiter(&pool->waiting, NULL, vio);
+ return;
+ }
+
+ list_move_tail(&vio->pool_entry, &pool->available);
+ --pool->busy_count;
+}
+
+/*
+ * Various counting functions for statistics.
+ * These are used for bios coming into VDO, as well as bios generated by VDO.
+ */
+void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio)
+{
+ if (((bio->bi_opf & REQ_PREFLUSH) != 0) && (bio->bi_iter.bi_size == 0)) {
+ atomic64_inc(&bio_stats->empty_flush);
+ atomic64_inc(&bio_stats->flush);
+ return;
+ }
+
+ switch (bio_op(bio)) {
+ case REQ_OP_WRITE:
+ atomic64_inc(&bio_stats->write);
+ break;
+ case REQ_OP_READ:
+ atomic64_inc(&bio_stats->read);
+ break;
+ case REQ_OP_DISCARD:
+ atomic64_inc(&bio_stats->discard);
+ break;
+ /*
+	 * All other operations are filtered out in dmvdo.c, or are not created by VDO, so
+	 * they should not reach this point.
+ */
+ default:
+ VDO_ASSERT_LOG_ONLY(0, "Bio operation %d not a write, read, discard, or empty flush",
+ bio_op(bio));
+ }
+
+ if ((bio->bi_opf & REQ_PREFLUSH) != 0)
+ atomic64_inc(&bio_stats->flush);
+ if (bio->bi_opf & REQ_FUA)
+ atomic64_inc(&bio_stats->fua);
+}
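+
+/*
+ * For example, a data-bearing REQ_OP_WRITE bio with both REQ_PREFLUSH and
+ * REQ_FUA set increments the write, flush, and fua counters, while a
+ * zero-length REQ_PREFLUSH bio increments only empty_flush and flush.
+ */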
+
+static void count_all_bios_completed(struct vio *vio, struct bio *bio)
+{
+ struct atomic_statistics *stats = &vio->completion.vdo->stats;
+
+ if (is_data_vio(vio)) {
+ vdo_count_bios(&stats->bios_out_completed, bio);
+ return;
+ }
+
+ vdo_count_bios(&stats->bios_meta_completed, bio);
+ if (vio->type == VIO_TYPE_RECOVERY_JOURNAL)
+ vdo_count_bios(&stats->bios_journal_completed, bio);
+ else if (vio->type == VIO_TYPE_BLOCK_MAP)
+ vdo_count_bios(&stats->bios_page_cache_completed, bio);
+}
+
+void vdo_count_completed_bios(struct bio *bio)
+{
+ struct vio *vio = (struct vio *) bio->bi_private;
+
+ atomic64_inc(&vio->completion.vdo->stats.bios_completed);
+ count_all_bios_completed(vio, bio);
+}
diff --git a/drivers/md/dm-vdo/vio.h b/drivers/md/dm-vdo/vio.h
new file mode 100644
index 000000000000..3490e9f59b04
--- /dev/null
+++ b/drivers/md/dm-vdo/vio.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VIO_H
+#define VIO_H
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "completion.h"
+#include "constants.h"
+#include "types.h"
+#include "vdo.h"
+
+enum {
+ MAX_BLOCKS_PER_VIO = (BIO_MAX_VECS << PAGE_SHIFT) / VDO_BLOCK_SIZE,
+};
+
+struct pooled_vio {
+ /* The underlying vio */
+ struct vio vio;
+ /* The list entry for chaining pooled vios together */
+ struct list_head list_entry;
+ /* The context set by the pool */
+ void *context;
+ /* The list entry used by the pool */
+ struct list_head pool_entry;
+};
+
+/**
+ * as_vio() - Convert a generic vdo_completion to a vio.
+ * @completion: The completion to convert.
+ *
+ * Return: The completion as a vio.
+ */
+static inline struct vio *as_vio(struct vdo_completion *completion)
+{
+ vdo_assert_completion_type(completion, VIO_COMPLETION);
+ return container_of(completion, struct vio, completion);
+}
+
+/**
+ * get_vio_bio_zone_thread_id() - Get the thread id of the bio zone in which a vio should submit
+ * its I/O.
+ * @vio: The vio.
+ *
+ * Return: The id of the bio zone thread the vio should use.
+ */
+static inline thread_id_t __must_check get_vio_bio_zone_thread_id(struct vio *vio)
+{
+ return vio->completion.vdo->thread_config.bio_threads[vio->bio_zone];
+}
+
+physical_block_number_t __must_check pbn_from_vio_bio(struct bio *bio);
+
+/**
+ * assert_vio_in_bio_zone() - Check that a vio is running on the correct thread for its bio zone.
+ * @vio: The vio to check.
+ */
+static inline void assert_vio_in_bio_zone(struct vio *vio)
+{
+ thread_id_t expected = get_vio_bio_zone_thread_id(vio);
+ thread_id_t thread_id = vdo_get_callback_thread_id();
+
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "vio I/O for physical block %llu on thread %u, should be on bio zone thread %u",
+ (unsigned long long) pbn_from_vio_bio(vio->bio), thread_id,
+ expected);
+}
+
+int vdo_create_bio(struct bio **bio_ptr);
+void vdo_free_bio(struct bio *bio);
+int allocate_vio_components(struct vdo *vdo, enum vio_type vio_type,
+ enum vio_priority priority, void *parent,
+ unsigned int block_count, char *data, struct vio *vio);
+int __must_check create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
+ enum vio_priority priority,
+ void *parent, unsigned int block_count,
+ char *data, struct vio **vio_ptr);
+
+static inline int __must_check create_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
+ enum vio_priority priority,
+ void *parent, char *data,
+ struct vio **vio_ptr)
+{
+ return create_multi_block_metadata_vio(vdo, vio_type, priority, parent, 1, data,
+ vio_ptr);
+}
+
+void free_vio_components(struct vio *vio);
+void free_vio(struct vio *vio);
+
+/**
+ * initialize_vio() - Initialize a vio.
+ * @vio: The vio to initialize.
+ * @bio: The bio this vio should use for its I/O.
+ * @block_count: The size of this vio in vdo blocks.
+ * @vio_type: The vio type.
+ * @priority: The relative priority of the vio.
+ * @vdo: The vdo for this vio.
+ */
+static inline void initialize_vio(struct vio *vio, struct bio *bio,
+ unsigned int block_count, enum vio_type vio_type,
+ enum vio_priority priority, struct vdo *vdo)
+{
+ /* data_vio's may not span multiple blocks */
+ BUG_ON((vio_type == VIO_TYPE_DATA) && (block_count != 1));
+
+ vio->bio = bio;
+ vio->block_count = block_count;
+ vio->type = vio_type;
+ vio->priority = priority;
+ vdo_initialize_completion(&vio->completion, vdo, VIO_COMPLETION);
+}
+
+void vdo_set_bio_properties(struct bio *bio, struct vio *vio, bio_end_io_t callback,
+ blk_opf_t bi_opf, physical_block_number_t pbn);
+
+int vio_reset_bio(struct vio *vio, char *data, bio_end_io_t callback,
+ blk_opf_t bi_opf, physical_block_number_t pbn);
+
+void update_vio_error_stats(struct vio *vio, const char *format, ...)
+ __printf(2, 3);
+
+/**
+ * is_data_vio() - Check whether a vio is servicing an external data request.
+ * @vio: The vio to check.
+ */
+static inline bool is_data_vio(struct vio *vio)
+{
+ return (vio->type == VIO_TYPE_DATA);
+}
+
+/**
+ * get_metadata_priority() - Convert a vio's priority to a work item priority.
+ * @vio: The vio.
+ *
+ * Return: The priority with which to submit the vio's bio.
+ */
+static inline enum vdo_completion_priority get_metadata_priority(struct vio *vio)
+{
+ return ((vio->priority == VIO_PRIORITY_HIGH) ?
+ BIO_Q_HIGH_PRIORITY :
+ BIO_Q_METADATA_PRIORITY);
+}
+
+/**
+ * continue_vio() - Enqueue a vio to run its next callback.
+ * @vio: The vio to continue.
+ * @result: The result of the current operation, which is recorded in the vio's
+ *          completion if it is an error.
+ */
+static inline void continue_vio(struct vio *vio, int result)
+{
+ if (unlikely(result != VDO_SUCCESS))
+ vdo_set_completion_result(&vio->completion, result);
+
+ vdo_enqueue_completion(&vio->completion, VDO_WORK_Q_DEFAULT_PRIORITY);
+}
+
+void vdo_count_bios(struct atomic_bio_stats *bio_stats, struct bio *bio);
+void vdo_count_completed_bios(struct bio *bio);
+
+/**
+ * continue_vio_after_io() - Continue a vio now that its I/O has returned.
+ * @vio: The vio to continue.
+ * @callback: The callback to run next.
+ * @thread: The ID of the thread on which to run the callback.
+ */
+static inline void continue_vio_after_io(struct vio *vio, vdo_action_fn callback,
+ thread_id_t thread)
+{
+ vdo_count_completed_bios(vio->bio);
+ vdo_set_completion_callback(&vio->completion, callback, thread);
+ continue_vio(vio, blk_status_to_errno(vio->bio->bi_status));
+}
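+
+/*
+ * For example (a sketch; all names other than the vio interfaces are
+ * hypothetical), a bio completion handler typically recovers its vio and
+ * continues it on the appropriate thread:
+ *
+ *    static void my_endio(struct bio *bio)
+ *    {
+ *        struct vio *vio = bio->bi_private;
+ *
+ *        continue_vio_after_io(vio, my_next_callback, my_thread_id);
+ *    }
+ */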
+
+void vio_record_metadata_io_error(struct vio *vio);
+
+/* A vio_pool is a collection of preallocated vios used to write arbitrary metadata blocks. */
+
+static inline struct pooled_vio *vio_as_pooled_vio(struct vio *vio)
+{
+ return container_of(vio, struct pooled_vio, vio);
+}
+
+struct vio_pool;
+
+int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
+ enum vio_type vio_type, enum vio_priority priority,
+ void *context, struct vio_pool **pool_ptr);
+void free_vio_pool(struct vio_pool *pool);
+bool __must_check is_vio_pool_busy(struct vio_pool *pool);
+void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter);
+void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio);
+
+#endif /* VIO_H */
diff --git a/drivers/md/dm-vdo/wait-queue.c b/drivers/md/dm-vdo/wait-queue.c
new file mode 100644
index 000000000000..6e1e739277ef
--- /dev/null
+++ b/drivers/md/dm-vdo/wait-queue.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#include "wait-queue.h"
+
+#include <linux/device-mapper.h>
+
+#include "permassert.h"
+
+#include "status-codes.h"
+
+/**
+ * vdo_waitq_enqueue_waiter() - Add a waiter to the tail end of a waitq.
+ * @waitq: The vdo_wait_queue to which to add the waiter.
+ * @waiter: The waiter to add to the waitq.
+ *
+ * The waiter must not already be waiting in a waitq.
+ */
+void vdo_waitq_enqueue_waiter(struct vdo_wait_queue *waitq, struct vdo_waiter *waiter)
+{
+ BUG_ON(waiter->next_waiter != NULL);
+
+ if (waitq->last_waiter == NULL) {
+ /*
+ * The waitq is empty, so form the initial circular list by self-linking the
+ * initial waiter.
+ */
+ waiter->next_waiter = waiter;
+ } else {
+ /* Splice the new waiter in at the end of the waitq. */
+ waiter->next_waiter = waitq->last_waiter->next_waiter;
+ waitq->last_waiter->next_waiter = waiter;
+ }
+
+ /* In both cases, the waiter we added to the ring becomes the last waiter. */
+ waitq->last_waiter = waiter;
+ waitq->length += 1;
+}
+
+/**
+ * vdo_waitq_transfer_all_waiters() - Transfer all waiters from one waitq to
+ * a second waitq, emptying the first waitq.
+ * @from_waitq: The waitq containing the waiters to move.
+ * @to_waitq: The waitq that will receive the waiters from the first waitq.
+ */
+void vdo_waitq_transfer_all_waiters(struct vdo_wait_queue *from_waitq,
+ struct vdo_wait_queue *to_waitq)
+{
+ /* If the source waitq is empty, there's nothing to do. */
+ if (!vdo_waitq_has_waiters(from_waitq))
+ return;
+
+ if (vdo_waitq_has_waiters(to_waitq)) {
+ /*
+ * Both are non-empty. Splice the two circular lists together
+ * by swapping the next (head) pointers in the list tails.
+ */
+ struct vdo_waiter *from_head = from_waitq->last_waiter->next_waiter;
+ struct vdo_waiter *to_head = to_waitq->last_waiter->next_waiter;
+
+ to_waitq->last_waiter->next_waiter = from_head;
+ from_waitq->last_waiter->next_waiter = to_head;
+ }
+
+ to_waitq->last_waiter = from_waitq->last_waiter;
+ to_waitq->length += from_waitq->length;
+ vdo_waitq_init(from_waitq);
+}
+
+/**
+ * vdo_waitq_notify_all_waiters() - Notify all the entries waiting in a waitq.
+ * @waitq: The vdo_wait_queue containing the waiters to notify.
+ * @callback: The function to call to notify each waiter, or NULL to invoke the callback field
+ * registered in each waiter.
+ * @context: The context to pass to the callback function.
+ *
+ * Notifies all the entries waiting in a waitq to continue execution by invoking a callback
+ * function on each of them in turn. The waitq is copied and emptied before invoking any callbacks,
+ * and only the waiters that were in the waitq at the start of the call will be notified.
+ */
+void vdo_waitq_notify_all_waiters(struct vdo_wait_queue *waitq,
+ vdo_waiter_callback_fn callback, void *context)
+{
+ /*
+ * Copy and empty the waitq first, avoiding the possibility of an infinite
+ * loop if entries are returned to the waitq by the callback function.
+ */
+ struct vdo_wait_queue waiters;
+
+ vdo_waitq_init(&waiters);
+ vdo_waitq_transfer_all_waiters(waitq, &waiters);
+
+ /* Drain the copied waitq, invoking the callback on every entry. */
+ while (vdo_waitq_has_waiters(&waiters))
+ vdo_waitq_notify_next_waiter(&waiters, callback, context);
+}
+
+/**
+ * vdo_waitq_get_first_waiter() - Return the waiter that is at the head end of a waitq.
+ * @waitq: The vdo_wait_queue from which to get the first waiter.
+ *
+ * Return: The first (oldest) waiter in the waitq, or NULL if the waitq is empty.
+ */
+struct vdo_waiter *vdo_waitq_get_first_waiter(const struct vdo_wait_queue *waitq)
+{
+ struct vdo_waiter *last_waiter = waitq->last_waiter;
+
+ if (last_waiter == NULL) {
+ /* There are no waiters, so we're done. */
+ return NULL;
+ }
+
+ /* The waitq is circular, so the last entry links to the head of the waitq. */
+ return last_waiter->next_waiter;
+}
+
+/**
+ * vdo_waitq_dequeue_matching_waiters() - Remove all waiters that match based on the specified
+ * matching method and append them to a vdo_wait_queue.
+ * @waitq: The vdo_wait_queue to process.
+ * @waiter_match: The method to determine matching.
+ * @match_context: Contextual info for the match method.
+ * @matched_waitq: A vdo_wait_queue to store matches.
+ */
+void vdo_waitq_dequeue_matching_waiters(struct vdo_wait_queue *waitq,
+ vdo_waiter_match_fn waiter_match,
+ void *match_context,
+ struct vdo_wait_queue *matched_waitq)
+{
+ struct vdo_wait_queue iteration_waitq;
+
+ vdo_waitq_init(&iteration_waitq);
+ vdo_waitq_transfer_all_waiters(waitq, &iteration_waitq);
+
+ while (vdo_waitq_has_waiters(&iteration_waitq)) {
+ struct vdo_waiter *waiter = vdo_waitq_dequeue_waiter(&iteration_waitq);
+
+ vdo_waitq_enqueue_waiter((waiter_match(waiter, match_context) ?
+ matched_waitq : waitq), waiter);
+ }
+}
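+
+/*
+ * For example (a sketch; struct example and its fields are hypothetical), a
+ * match method selecting waiters that belong to a particular parent:
+ *
+ *    static bool match_parent(struct vdo_waiter *waiter, void *context)
+ *    {
+ *        return (container_of(waiter, struct example, waiter)->parent == context);
+ *    }
+ */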
+
+/**
+ * vdo_waitq_dequeue_waiter() - Remove the first (oldest) waiter from a waitq.
+ * @waitq: The vdo_wait_queue from which to remove the first entry.
+ *
+ * The caller will be responsible for waking the waiter by continuing its
+ * execution appropriately.
+ *
+ * Return: The first (oldest) waiter in the waitq, or NULL if the waitq is empty.
+ */
+struct vdo_waiter *vdo_waitq_dequeue_waiter(struct vdo_wait_queue *waitq)
+{
+ struct vdo_waiter *first_waiter = vdo_waitq_get_first_waiter(waitq);
+ struct vdo_waiter *last_waiter = waitq->last_waiter;
+
+ if (first_waiter == NULL)
+ return NULL;
+
+ if (first_waiter == last_waiter) {
+ /* The waitq has a single entry, so empty it by nulling the tail. */
+ waitq->last_waiter = NULL;
+ } else {
+ /*
+ * The waitq has multiple waiters, so splice the first waiter out
+ * of the circular waitq.
+ */
+ last_waiter->next_waiter = first_waiter->next_waiter;
+ }
+
+ /* The waiter is no longer in a waitq. */
+ first_waiter->next_waiter = NULL;
+ waitq->length -= 1;
+
+ return first_waiter;
+}
+
+/**
+ * vdo_waitq_notify_next_waiter() - Notify the next entry waiting in a waitq.
+ * @waitq: The vdo_wait_queue containing the waiter to notify.
+ * @callback: The function to call to notify the waiter, or NULL to invoke the callback field
+ * registered in the waiter.
+ * @context: The context to pass to the callback function.
+ *
+ * Notifies the next entry waiting in a waitq to continue execution by invoking a callback function
+ * on it after removing it from the waitq.
+ *
+ * Return: true if there was a waiter in the waitq.
+ */
+bool vdo_waitq_notify_next_waiter(struct vdo_wait_queue *waitq,
+ vdo_waiter_callback_fn callback, void *context)
+{
+ struct vdo_waiter *waiter = vdo_waitq_dequeue_waiter(waitq);
+
+ if (waiter == NULL)
+ return false;
+
+ if (callback == NULL)
+ callback = waiter->callback;
+ callback(waiter, context);
+
+ return true;
+}
diff --git a/drivers/md/dm-vdo/wait-queue.h b/drivers/md/dm-vdo/wait-queue.h
new file mode 100644
index 000000000000..7e8ee6afe7c7
--- /dev/null
+++ b/drivers/md/dm-vdo/wait-queue.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2023 Red Hat
+ */
+
+#ifndef VDO_WAIT_QUEUE_H
+#define VDO_WAIT_QUEUE_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/**
+ * A vdo_wait_queue is a circular singly linked list of entries waiting to be notified
+ * of a change in a condition. Keeping a circular list allows the vdo_wait_queue
+ * structure to simply be a pointer to the tail (newest) entry, supporting
+ * constant-time enqueue and dequeue operations. A null pointer is an empty waitq.
+ *
+ * An empty waitq:
+ * waitq0.last_waiter -> NULL
+ *
+ * A singleton waitq:
+ * waitq1.last_waiter -> entry1 -> entry1 -> [...]
+ *
+ * A three-element waitq:
+ * waitq2.last_waiter -> entry3 -> entry1 -> entry2 -> entry3 -> [...]
+ *
+ * linux/wait.h's wait_queue_head is _not_ used because vdo_wait_queue's
+ * interface is much less complex (doesn't need locking, priorities or timers).
+ * Made possible by vdo's thread-based resource allocation and locking; and
+ * the polling nature of vdo_wait_queue consumers.
+ *
+ * FIXME: this could be made to use linux/list.h's list_head, but its extra
+ * barriers really aren't needed, nor is a doubly linked list. vdo_wait_queue
+ * could make use of __list_del_clearprev(), but that would compromise the
+ * ability to make full use of linux's list interface.
+ */
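+
+/*
+ * Typical lifecycle (a sketch; the callback is caller-supplied):
+ *
+ *    struct vdo_wait_queue waitq;
+ *    struct vdo_waiter waiter = { .callback = resume_example };
+ *
+ *    vdo_waitq_init(&waitq);
+ *    vdo_waitq_enqueue_waiter(&waitq, &waiter);
+ *    ...
+ *    vdo_waitq_notify_next_waiter(&waitq, NULL, context);
+ */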
+
+struct vdo_waiter;
+
+struct vdo_wait_queue {
+ /* The tail of the queue, the last (most recently added) entry */
+ struct vdo_waiter *last_waiter;
+ /* The number of waiters currently in the queue */
+ size_t length;
+};
+
+/**
+ * vdo_waiter_callback_fn - Callback type that will be called to resume processing
+ * of a waiter after it has been removed from its wait queue.
+ */
+typedef void (*vdo_waiter_callback_fn)(struct vdo_waiter *waiter, void *context);
+
+/**
+ * vdo_waiter_match_fn - Method type for waiter matching methods.
+ *
+ * Returns false if the waiter does not match.
+ */
+typedef bool (*vdo_waiter_match_fn)(struct vdo_waiter *waiter, void *context);
+
+/* The structure for entries in a vdo_wait_queue. */
+struct vdo_waiter {
+ /*
+ * The next waiter in the waitq. If this entry is the last waiter, then this
+ * is actually a pointer back to the head of the waitq.
+ */
+ struct vdo_waiter *next_waiter;
+
+ /* Optional waiter-specific callback to invoke when dequeuing this waiter. */
+ vdo_waiter_callback_fn callback;
+};
+
+/**
+ * vdo_waiter_is_waiting() - Check whether a waiter is waiting.
+ * @waiter: The waiter to check.
+ *
+ * Return: true if the waiter is on some vdo_wait_queue.
+ */
+static inline bool vdo_waiter_is_waiting(struct vdo_waiter *waiter)
+{
+ return (waiter->next_waiter != NULL);
+}
+
+/**
+ * vdo_waitq_init() - Initialize a vdo_wait_queue.
+ * @waitq: The vdo_wait_queue to initialize.
+ */
+static inline void vdo_waitq_init(struct vdo_wait_queue *waitq)
+{
+ *waitq = (struct vdo_wait_queue) {
+ .last_waiter = NULL,
+ .length = 0,
+ };
+}
+
+/**
+ * vdo_waitq_has_waiters() - Check whether a vdo_wait_queue has any entries waiting.
+ * @waitq: The vdo_wait_queue to query.
+ *
+ * Return: true if there are any waiters in the waitq.
+ */
+static inline bool __must_check vdo_waitq_has_waiters(const struct vdo_wait_queue *waitq)
+{
+ return (waitq->last_waiter != NULL);
+}
+
+void vdo_waitq_enqueue_waiter(struct vdo_wait_queue *waitq,
+ struct vdo_waiter *waiter);
+
+struct vdo_waiter *vdo_waitq_dequeue_waiter(struct vdo_wait_queue *waitq);
+
+void vdo_waitq_notify_all_waiters(struct vdo_wait_queue *waitq,
+ vdo_waiter_callback_fn callback, void *context);
+
+bool vdo_waitq_notify_next_waiter(struct vdo_wait_queue *waitq,
+ vdo_waiter_callback_fn callback, void *context);
+
+void vdo_waitq_transfer_all_waiters(struct vdo_wait_queue *from_waitq,
+ struct vdo_wait_queue *to_waitq);
+
+struct vdo_waiter *vdo_waitq_get_first_waiter(const struct vdo_wait_queue *waitq);
+
+void vdo_waitq_dequeue_matching_waiters(struct vdo_wait_queue *waitq,
+ vdo_waiter_match_fn waiter_match,
+ void *match_context,
+ struct vdo_wait_queue *matched_waitq);
+
+/**
+ * vdo_waitq_num_waiters() - Return the number of waiters in a vdo_wait_queue.
+ * @waitq: The vdo_wait_queue to query.
+ *
+ * Return: The number of waiters in the waitq.
+ */
+static inline size_t __must_check vdo_waitq_num_waiters(const struct vdo_wait_queue *waitq)
+{
+ return waitq->length;
+}
+
+#endif /* VDO_WAIT_QUEUE_H */
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index b475200d8586..e46aee6f932e 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -60,7 +60,8 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
* to the data block. Caller is responsible for releasing buf.
*/
static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
- unsigned int *offset, struct dm_buffer **buf)
+ unsigned int *offset, struct dm_buffer **buf,
+ unsigned short ioprio)
{
u64 position, block, rem;
u8 *res;
@@ -69,7 +70,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
block = div64_u64_rem(position, v->fec->io_size, &rem);
*offset = (unsigned int)rem;
- res = dm_bufio_read(v->fec->bufio, block, buf);
+ res = dm_bufio_read_with_ioprio(v->fec->bufio, block, buf, ioprio);
if (IS_ERR(res)) {
DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
v->data_dev->name, (unsigned long long)rsb,
@@ -121,16 +122,17 @@ static inline unsigned int fec_buffer_rs_index(unsigned int i, unsigned int j)
* Decode all RS blocks from buffers and copy corrected bytes into fio->output
* starting from block_offset.
*/
-static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
- u64 rsb, int byte_index, unsigned int block_offset,
- int neras)
+static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_io *io,
+ struct dm_verity_fec_io *fio, u64 rsb, int byte_index,
+ unsigned int block_offset, int neras)
{
int r, corrected = 0, res;
struct dm_buffer *buf;
unsigned int n, i, offset;
u8 *par, *block;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
- par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+ par = fec_read_parity(v, rsb, block_offset, &offset, &buf, bio_prio(bio));
if (IS_ERR(par))
return PTR_ERR(par);
@@ -158,7 +160,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
if (offset >= v->fec->io_size) {
dm_bufio_release(buf);
- par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
+ par = fec_read_parity(v, rsb, block_offset, &offset, &buf, bio_prio(bio));
if (IS_ERR(par))
return PTR_ERR(par);
}
@@ -210,6 +212,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
u8 *bbuf, *rs_block;
u8 want_digest[HASH_MAX_DIGESTSIZE];
unsigned int n, k;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
if (neras)
*neras = 0;
@@ -248,7 +251,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
bufio = v->bufio;
}
- bbuf = dm_bufio_read(bufio, block, &buf);
+ bbuf = dm_bufio_read_with_ioprio(bufio, block, &buf, bio_prio(bio));
if (IS_ERR(bbuf)) {
DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld",
v->data_dev->name,
@@ -377,7 +380,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
if (unlikely(r < 0))
return r;
- r = fec_decode_bufs(v, fio, rsb, r, pos, neras);
+ r = fec_decode_bufs(v, io, fio, rsb, r, pos, neras);
if (r < 0)
return r;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 82662f5769c4..bb5da66da4c1 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -46,11 +46,12 @@ static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE
module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, 0644);
-static DEFINE_STATIC_KEY_FALSE(use_tasklet_enabled);
+static DEFINE_STATIC_KEY_FALSE(use_bh_wq_enabled);
struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
+ unsigned short ioprio;
sector_t block;
unsigned int n_blocks;
};
@@ -294,10 +295,11 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
int r;
sector_t hash_block;
unsigned int offset;
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
verity_hash_at_level(v, block, level, &hash_block, &offset);
- if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+ if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
data = dm_bufio_get(v->bufio, hash_block, &buf);
if (data == NULL) {
/*
@@ -307,8 +309,10 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
*/
return -EAGAIN;
}
- } else
- data = dm_bufio_read(v->bufio, hash_block, &buf);
+ } else {
+ data = dm_bufio_read_with_ioprio(v->bufio, hash_block,
+ &buf, bio_prio(bio));
+ }
if (IS_ERR(data))
return PTR_ERR(data);
@@ -323,15 +327,14 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
r = verity_hash(v, verity_io_hash_req(v, io),
data, 1 << v->hash_dev_block_bits,
- verity_io_real_digest(v, io), !io->in_tasklet);
+ verity_io_real_digest(v, io), !io->in_bh);
if (unlikely(r < 0))
goto release_ret_r;
if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
v->digest_size) == 0))
aux->hash_verified = 1;
- else if (static_branch_unlikely(&use_tasklet_enabled) &&
- io->in_tasklet) {
+ else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
/*
* Error handling code (FEC included) cannot be run in a
* tasklet since it may sleep, so fallback to work-queue.
@@ -482,6 +485,63 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
return 0;
}
+static int verity_recheck_copy(struct dm_verity *v, struct dm_verity_io *io,
+ u8 *data, size_t len)
+{
+ memcpy(data, io->recheck_buffer, len);
+ io->recheck_buffer += len;
+
+ return 0;
+}
+
+static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
+ struct bvec_iter start, sector_t cur_block)
+{
+ struct page *page;
+ void *buffer;
+ int r;
+ struct dm_io_request io_req;
+ struct dm_io_region io_loc;
+
+ page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
+ buffer = page_to_virt(page);
+
+ io_req.bi_opf = REQ_OP_READ;
+ io_req.mem.type = DM_IO_KMEM;
+ io_req.mem.ptr.addr = buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = v->io;
+ io_loc.bdev = v->data_dev->bdev;
+ io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
+ io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
+ if (unlikely(r))
+ goto free_ret;
+
+ r = verity_hash(v, verity_io_hash_req(v, io), buffer,
+ 1 << v->data_dev_block_bits,
+ verity_io_real_digest(v, io), true);
+ if (unlikely(r))
+ goto free_ret;
+
+ if (memcmp(verity_io_real_digest(v, io),
+ verity_io_want_digest(v, io), v->digest_size)) {
+ r = -EIO;
+ goto free_ret;
+ }
+
+ io->recheck_buffer = buffer;
+ r = verity_for_bv_block(v, io, &start, verity_recheck_copy);
+ if (unlikely(r))
+ goto free_ret;
+
+ r = 0;
+free_ret:
+ mempool_free(page, &v->recheck_pool);
+
+ return r;
+}
+
static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
u8 *data, size_t len)
{
@@ -508,16 +568,14 @@ static int verity_verify_io(struct dm_verity_io *io)
{
bool is_zero;
struct dm_verity *v = io->v;
-#if defined(CONFIG_DM_VERITY_FEC)
struct bvec_iter start;
-#endif
struct bvec_iter iter_copy;
struct bvec_iter *iter;
struct crypto_wait wait;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
unsigned int b;
- if (static_branch_unlikely(&use_tasklet_enabled) && io->in_tasklet) {
+ if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
/*
* Copy the iterator in case we need to restart
* verification in a work-queue.
@@ -557,14 +615,11 @@ static int verity_verify_io(struct dm_verity_io *io)
continue;
}
- r = verity_hash_init(v, req, &wait, !io->in_tasklet);
+ r = verity_hash_init(v, req, &wait, !io->in_bh);
if (unlikely(r < 0))
return r;
-#if defined(CONFIG_DM_VERITY_FEC)
- if (verity_fec_is_enabled(v))
- start = *iter;
-#endif
+ start = *iter;
r = verity_for_io_block(v, io, iter, &wait);
if (unlikely(r < 0))
return r;
@@ -579,13 +634,16 @@ static int verity_verify_io(struct dm_verity_io *io)
if (v->validated_blocks)
set_bit(cur_block, v->validated_blocks);
continue;
- } else if (static_branch_unlikely(&use_tasklet_enabled) &&
- io->in_tasklet) {
+ } else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
/*
* Error handling code (FEC included) cannot be run in a
* tasklet since it may sleep, so fallback to work-queue.
*/
return -EAGAIN;
+ } else if (verity_recheck(v, io, start, cur_block) == 0) {
+ if (v->validated_blocks)
+ set_bit(cur_block, v->validated_blocks);
+ continue;
#if defined(CONFIG_DM_VERITY_FEC)
} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
cur_block, NULL, &start) == 0) {
@@ -630,7 +688,7 @@ static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
bio->bi_end_io = io->orig_bi_end_io;
bio->bi_status = status;
- if (!static_branch_unlikely(&use_tasklet_enabled) || !io->in_tasklet)
+ if (!static_branch_unlikely(&use_bh_wq_enabled) || !io->in_bh)
verity_fec_finish_io(io);
bio_endio(bio);
@@ -640,11 +698,28 @@ static void verity_work(struct work_struct *w)
{
struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
- io->in_tasklet = false;
+ io->in_bh = false;
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
+static void verity_bh_work(struct work_struct *w)
+{
+ struct dm_verity_io *io = container_of(w, struct dm_verity_io, bh_work);
+ int err;
+
+ io->in_bh = true;
+ err = verity_verify_io(io);
+ if (err == -EAGAIN || err == -ENOMEM) {
+		/* fall back to retrying via the work-queue */
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+ return;
+ }
+
+ verity_finish_io(io, errno_to_blk_status(err));
+}
+
static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
@@ -657,8 +732,13 @@ static void verity_end_io(struct bio *bio)
return;
}
- INIT_WORK(&io->work, verity_work);
- queue_work(io->v->verify_wq, &io->work);
+ if (static_branch_unlikely(&use_bh_wq_enabled) && io->v->use_bh_wq) {
+ INIT_WORK(&io->bh_work, verity_bh_work);
+ queue_work(system_bh_wq, &io->bh_work);
+ } else {
+ INIT_WORK(&io->work, verity_work);
+ queue_work(io->v->verify_wq, &io->work);
+ }
}
/*
@@ -696,14 +776,16 @@ static void verity_prefetch_io(struct work_struct *work)
hash_block_end = v->hash_blocks - 1;
}
no_prefetch_cluster:
- dm_bufio_prefetch(v->bufio, hash_block_start,
- hash_block_end - hash_block_start + 1);
+ dm_bufio_prefetch_with_ioprio(v->bufio, hash_block_start,
+ hash_block_end - hash_block_start + 1,
+ pw->ioprio);
}
kfree(pw);
}
-static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
+static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io,
+ unsigned short ioprio)
{
sector_t block = io->block;
unsigned int n_blocks = io->n_blocks;
@@ -731,6 +813,7 @@ static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
pw->v = v;
pw->block = block;
pw->n_blocks = n_blocks;
+ pw->ioprio = ioprio;
queue_work(v->verify_wq, &pw->work);
}
@@ -773,7 +856,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
verity_fec_init_io(io);
- verity_submit_prefetch(v, io);
+ verity_submit_prefetch(v, io, bio_prio(bio));
submit_bio_noacct(bio);
@@ -822,7 +905,7 @@ static void verity_status(struct dm_target *ti, status_type_t type,
args++;
if (v->validated_blocks)
args++;
- if (v->use_tasklet)
+ if (v->use_bh_wq)
args++;
if (v->signature_key_desc)
args += DM_VERITY_ROOT_HASH_VERIFICATION_OPTS;
@@ -849,7 +932,7 @@ static void verity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
if (v->validated_blocks)
DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
- if (v->use_tasklet)
+ if (v->use_bh_wq)
DMEMIT(" " DM_VERITY_OPT_TASKLET_VERIFY);
sz = verity_fec_status_table(v, sz, result, maxlen);
if (v->signature_key_desc)
@@ -941,6 +1024,10 @@ static void verity_dtr(struct dm_target *ti)
if (v->verify_wq)
destroy_workqueue(v->verify_wq);
+ mempool_exit(&v->recheck_pool);
+ if (v->io)
+ dm_io_client_destroy(v->io);
+
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
@@ -964,8 +1051,8 @@ static void verity_dtr(struct dm_target *ti)
kfree(v->signature_key_desc);
- if (v->use_tasklet)
- static_branch_dec(&use_tasklet_enabled);
+ if (v->use_bh_wq)
+ static_branch_dec(&use_bh_wq_enabled);
kfree(v);
@@ -1099,8 +1186,8 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
continue;
} else if (!strcasecmp(arg_name, DM_VERITY_OPT_TASKLET_VERIFY)) {
- v->use_tasklet = true;
- static_branch_inc(&use_tasklet_enabled);
+ v->use_bh_wq = true;
+ static_branch_inc(&use_bh_wq_enabled);
continue;
} else if (verity_is_fec_opt_arg(arg_name)) {
@@ -1271,7 +1358,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
v->tfm = crypto_alloc_ahash(v->alg_name, 0,
- v->use_tasklet ? CRYPTO_ALG_ASYNC : 0);
+ v->use_bh_wq ? CRYPTO_ALG_ASYNC : 0);
if (IS_ERR(v->tfm)) {
ti->error = "Cannot initialize hash function";
r = PTR_ERR(v->tfm);
@@ -1379,10 +1466,24 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
v->hash_blocks = hash_position;
+ r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
+ if (unlikely(r)) {
+ ti->error = "Cannot allocate mempool";
+ goto bad;
+ }
+
+ v->io = dm_io_client_create();
+ if (IS_ERR(v->io)) {
+ r = PTR_ERR(v->io);
+ v->io = NULL;
+ ti->error = "Cannot allocate dm io";
+ goto bad;
+ }
+
v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
dm_bufio_alloc_callback, NULL,
- v->use_tasklet ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
+ v->use_bh_wq ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
if (IS_ERR(v->bufio)) {
ti->error = "Cannot initialize dm-bufio";
r = PTR_ERR(v->bufio);
@@ -1401,7 +1502,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
* reducing wait times when reading from a dm-verity device.
*
* Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
- * allows verify_wq to preempt softirq since verification in tasklet
+	 * allows verify_wq to preempt softirq since verification in the BH workqueue
	 * will fall back to using it for error handling (or if the bufio cache
* doesn't have required hashes).
*/
@@ -1485,8 +1586,8 @@ int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned i
static struct target_type verity_target = {
.name = "verity",
- .features = DM_TARGET_IMMUTABLE,
- .version = {1, 9, 0},
+ .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index f3f607008419..20b1bcf03474 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -11,6 +11,7 @@
#ifndef DM_VERITY_H
#define DM_VERITY_H
+#include <linux/dm-io.h>
#include <linux/dm-bufio.h>
#include <linux/device-mapper.h>
#include <linux/interrupt.h>
@@ -53,7 +54,7 @@ struct dm_verity {
unsigned char levels; /* the number of tree levels */
unsigned char version;
bool hash_failed:1; /* set if hash of any block failed */
- bool use_tasklet:1; /* try to verify in tasklet before work-queue */
+ bool use_bh_wq:1; /* try to verify in BH wq before normal work-queue */
unsigned int digest_size; /* digest size for the current hash algorithm */
unsigned int ahash_reqsize;/* the size of temporary space for crypto */
enum verity_mode mode; /* mode for handling verification errors */
@@ -68,6 +69,9 @@ struct dm_verity {
unsigned long *validated_blocks; /* bitset blocks validated */
char *signature_key_desc; /* signature keyring reference */
+
+ struct dm_io_client *io;
+ mempool_t recheck_pool;
};
struct dm_verity_io {
@@ -76,13 +80,16 @@ struct dm_verity_io {
/* original value of bio->bi_end_io */
bio_end_io_t *orig_bi_end_io;
+ struct bvec_iter iter;
+
sector_t block;
unsigned int n_blocks;
- bool in_tasklet;
-
- struct bvec_iter iter;
+ bool in_bh;
struct work_struct work;
+ struct work_struct bh_work;
+
+ char *recheck_buffer;
/*
* Three variably-size fields follow this struct:
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index b463c28c39ad..7ce8847b3404 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -531,7 +531,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
req.notify.context = &endio;
/* writing via async dm-io (implied by notify.fn above) won't return an error */
- (void) dm_io(&req, 1, &region, NULL);
+ (void) dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
i = j;
}
@@ -568,7 +568,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
req.notify.fn = NULL;
req.notify.context = NULL;
- r = dm_io(&req, 1, &region, NULL);
+ r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
if (unlikely(r))
writecache_error(wc, r, "error writing superblock");
}
@@ -596,7 +596,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
req.client = wc->dm_io;
req.notify.fn = NULL;
- r = dm_io(&req, 1, &region, NULL);
+ r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
if (unlikely(r))
writecache_error(wc, r, "error flushing metadata: %d", r);
}
@@ -990,7 +990,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors
req.client = wc->dm_io;
req.notify.fn = NULL;
- return dm_io(&req, 1, &region, NULL);
+ return dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
}
static void writecache_resume(struct dm_target *ti)
@@ -2776,5 +2776,5 @@ static struct target_type writecache_target = {
module_dm(writecache);
MODULE_DESCRIPTION(DM_NAME " writecache target");
-MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
+MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
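
A change that recurs throughout this series is dm_io() gaining an I/O-priority parameter: callers with no preference pass IOPRIO_DEFAULT, while dm-verity forwards bio_prio(bio) so hash prefetches inherit the priority of the bio that triggered them. A hedged userspace sketch of the plumbing pattern — submit_io() and the IOPRIO_DEFAULT value below are illustrative stand-ins, not the kernel API:

#include <stdio.h>

#define IOPRIO_DEFAULT 0	/* illustrative stand-in for the kernel constant */

/* Stand-in for dm_io(): priority now travels with every request. */
static int submit_io(const char *what, unsigned short ioprio)
{
	printf("%s: submitted with ioprio %u\n", what, ioprio);
	return 0;
}

int main(void)
{
	unsigned short bio_prio = 4;	/* pretend priority of an incoming bio */

	/* metadata writers have no preference: pass the default */
	submit_io("superblock write", IOPRIO_DEFAULT);

	/* read-path prefetch inherits the submitter's priority */
	submit_io("hash prefetch", bio_prio);
	return 0;
}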
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index fdfe30f7b697..8156881a31de 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1655,10 +1655,13 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
struct dmz_dev *dev = zone->dev;
+ unsigned int noio_flag;
+ noio_flag = memalloc_noio_save();
ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
dmz_start_sect(zmd, zone),
- zmd->zone_nr_sectors, GFP_NOIO);
+ zmd->zone_nr_sectors);
+ memalloc_noio_restore(noio_flag);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
zone->id, ret);
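
blkdev_zone_mgmt() no longer takes a gfp_t, so dm-zoned brackets the call with memalloc_noio_save()/memalloc_noio_restore(), marking the whole task so any allocation made in between implicitly avoids I/O. The save/modify/restore shape nests safely because each scope restores its caller's value; a small userspace analogue with a thread-local flag (the two helpers here are local stand-ins for the kernel ones):

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool noio_flag;	/* stand-in for PF_MEMALLOC_NOIO */

static bool memalloc_noio_save(void)
{
	bool old = noio_flag;

	noio_flag = true;
	return old;
}

static void memalloc_noio_restore(bool old)
{
	noio_flag = old;
}

static void reset_zone(void)
{
	/* any allocation "inside" here would implicitly avoid I/O */
	printf("resetting zone, noio=%d\n", noio_flag);
}

int main(void)
{
	bool old = memalloc_noio_save();

	reset_zone();
	memalloc_noio_restore(old);	/* restores the caller's state, so scopes nest */
	printf("after restore, noio=%d\n", noio_flag);
	return 0;
}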
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8dcabf84d866..06263b0a7b58 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -726,7 +726,8 @@ static struct table_device *open_table_device(struct mapped_device *md,
dev_t dev, blk_mode_t mode)
{
struct table_device *td;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
+ struct block_device *bdev;
u64 part_off;
int r;
@@ -735,34 +736,36 @@ static struct table_device *open_table_device(struct mapped_device *md,
return ERR_PTR(-ENOMEM);
refcount_set(&td->count, 1);
- bdev_handle = bdev_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
- if (IS_ERR(bdev_handle)) {
- r = PTR_ERR(bdev_handle);
+ bdev_file = bdev_file_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
+ if (IS_ERR(bdev_file)) {
+ r = PTR_ERR(bdev_file);
goto out_free_td;
}
+ bdev = file_bdev(bdev_file);
+
/*
* We can be called before the dm disk is added. In that case we can't
* register the holder relation here. It will be done once add_disk was
* called.
*/
if (md->disk->slave_dir) {
- r = bd_link_disk_holder(bdev_handle->bdev, md->disk);
+ r = bd_link_disk_holder(bdev, md->disk);
if (r)
goto out_blkdev_put;
}
td->dm_dev.mode = mode;
- td->dm_dev.bdev = bdev_handle->bdev;
- td->dm_dev.bdev_handle = bdev_handle;
- td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, &part_off,
+ td->dm_dev.bdev = bdev;
+ td->dm_dev.bdev_file = bdev_file;
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off,
NULL, NULL);
format_dev_t(td->dm_dev.name, dev);
list_add(&td->list, &md->table_devices);
return td;
out_blkdev_put:
- bdev_release(bdev_handle);
+ fput(bdev_file);
out_free_td:
kfree(td);
return ERR_PTR(r);
@@ -775,7 +778,7 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
- bdev_release(td->dm_dev.bdev_handle);
+ fput(td->dm_dev.bdev_file);
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);
@@ -2098,8 +2101,8 @@ static struct mapped_device *alloc_dev(int minor)
* established. If request-based table is loaded: blk-mq will
* override accordingly.
*/
- md->disk = blk_alloc_disk(md->numa_node_id);
- if (!md->disk)
+ md->disk = blk_alloc_disk(NULL, md->numa_node_id);
+ if (IS_ERR(md->disk))
goto bad;
md->queue = md->disk->queue;
@@ -2945,6 +2948,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend
static void __dm_internal_resume(struct mapped_device *md)
{
+ int r;
+ struct dm_table *map;
+
BUG_ON(!md->internal_suspend_count);
if (--md->internal_suspend_count)
@@ -2953,12 +2959,23 @@ static void __dm_internal_resume(struct mapped_device *md)
if (dm_suspended_md(md))
goto done; /* resume from nested suspend */
- /*
- * NOTE: existing callers don't need to call dm_table_resume_targets
- * (which may fail -- so best to avoid it for now by passing NULL map)
- */
- (void) __dm_resume(md, NULL);
-
+ map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
+ r = __dm_resume(md, map);
+ if (r) {
+ /*
+ * If a preresume method of some target failed, we are in a
+ * tricky situation. We can't return an error to the caller. We
+ * can't fake success because then the "resume" and
+ * "postsuspend" methods would not be paired correctly, and it
+ * would break various targets, for example it would cause list
+ * corruption in the "origin" target.
+ *
+ * So, we fake normal suspend here, to make sure that the
+ * "resume" and "postsuspend" methods will be paired correctly.
+ */
+ DMERR("Preresume method failed: %d", r);
+ set_bit(DMF_SUSPENDED, &md->flags);
+ }
done:
clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
smp_mb__after_atomic();
@@ -3512,5 +3529,5 @@ module_param(swap_bios, int, 0644);
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
MODULE_DESCRIPTION(DM_NAME " driver");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 9672f75c3050..059afc24c08b 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -234,7 +234,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
sector_t doff;
bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
- if (pg_index == store->file_pages - 1) {
+	/* we compare lengths (page counts), not page offsets. */
+ if ((pg_index - store->sb_index) == store->file_pages - 1) {
unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
if (last_page_size == 0)
@@ -438,8 +439,8 @@ static void filemap_write_page(struct bitmap *bitmap, unsigned long pg_index,
struct page *page = store->filemap[pg_index];
if (mddev_is_clustered(bitmap->mddev)) {
- pg_index += bitmap->cluster_slot *
- DIV_ROUND_UP(store->bytes, PAGE_SIZE);
+		/* go to the starting point of this node's bitmap area */
+ pg_index += store->sb_index;
}
if (store->file)
@@ -952,6 +953,7 @@ static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
unsigned long index = file_page_index(store, chunk);
unsigned long node_offset = 0;
+ index += store->sb_index;
if (mddev_is_clustered(bitmap->mddev))
node_offset = bitmap->cluster_slot * store->file_pages;
@@ -982,6 +984,7 @@ static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
unsigned long index = file_page_index(store, chunk);
unsigned long node_offset = 0;
+ index += store->sb_index;
if (mddev_is_clustered(bitmap->mddev))
node_offset = bitmap->cluster_slot * store->file_pages;
@@ -1043,9 +1046,8 @@ void md_bitmap_unplug(struct bitmap *bitmap)
if (dirty || need_write) {
if (!writing) {
md_bitmap_wait_writes(bitmap);
- if (bitmap->mddev->queue)
- blk_add_trace_msg(bitmap->mddev->queue,
- "md bitmap_unplug");
+ mddev_add_trace_msg(bitmap->mddev,
+ "md bitmap_unplug");
}
clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
filemap_write_page(bitmap, i, false);
@@ -1316,9 +1318,7 @@ void md_bitmap_daemon_work(struct mddev *mddev)
}
bitmap->allclean = 1;
- if (bitmap->mddev->queue)
- blk_add_trace_msg(bitmap->mddev->queue,
- "md bitmap_daemon_work");
+ mddev_add_trace_msg(bitmap->mddev, "md bitmap_daemon_work");
/* Any file-page which is PENDING now needs to be written.
* So set NEEDWRITE now, then after we make any last-minute changes
diff --git a/drivers/md/md-linear.h b/drivers/md/md-linear.h
deleted file mode 100644
index 5587eeedb882..000000000000
--- a/drivers/md/md-linear.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINEAR_H
-#define _LINEAR_H
-
-struct dev_info {
- struct md_rdev *rdev;
- sector_t end_sector;
-};
-
-struct linear_conf
-{
- struct rcu_head rcu;
- sector_t array_sectors;
- int raid_disks; /* a copy of mddev->raid_disks */
- struct dev_info disks[] __counted_by(raid_disks);
-};
-#endif
diff --git a/drivers/md/md-multipath.h b/drivers/md/md-multipath.h
deleted file mode 100644
index b3099e5fc4d7..000000000000
--- a/drivers/md/md-multipath.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MULTIPATH_H
-#define _MULTIPATH_H
-
-struct multipath_info {
- struct md_rdev *rdev;
-};
-
-struct mpconf {
- struct mddev *mddev;
- struct multipath_info *multipaths;
- int raid_disks;
- spinlock_t device_lock;
- struct list_head retry_list;
-
- mempool_t pool;
-};
-
-/*
- * this is our 'private' 'collective' MULTIPATH buffer head.
- * it contains information about what kind of IO operations were started
- * for this MULTIPATH operation, and about their status:
- */
-
-struct multipath_bh {
- struct mddev *mddev;
- struct bio *master_bio;
- struct bio bio;
- int path;
- struct list_head retry_list;
-};
-#endif
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2266358d8074..e575e74aabf5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -65,7 +65,6 @@
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>
-#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"
@@ -99,18 +98,6 @@ static void mddev_detach(struct mddev *mddev);
static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
static void md_wakeup_thread_directly(struct md_thread __rcu *thread);
-enum md_ro_state {
- MD_RDWR,
- MD_RDONLY,
- MD_AUTO_READ,
- MD_MAX_STATE
-};
-
-static bool md_is_rdwr(struct mddev *mddev)
-{
- return (mddev->ro == MD_RDWR);
-}
-
/*
* Default number of read corrections we'll attempt on an rdev
* before ejecting it from the array. We divide the read error
@@ -378,7 +365,7 @@ static bool is_suspended(struct mddev *mddev, struct bio *bio)
return true;
}
-void md_handle_request(struct mddev *mddev, struct bio *bio)
+bool md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
if (is_suspended(mddev, bio)) {
@@ -386,7 +373,7 @@ check_suspended:
/* Bail out if REQ_NOWAIT is set for the bio */
if (bio->bi_opf & REQ_NOWAIT) {
bio_wouldblock_error(bio);
- return;
+ return true;
}
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
@@ -402,10 +389,13 @@ check_suspended:
if (!mddev->pers->make_request(mddev, bio)) {
percpu_ref_put(&mddev->active_io);
+ if (!mddev->gendisk && mddev->pers->prepare_suspend)
+ return false;
goto check_suspended;
}
percpu_ref_put(&mddev->active_io);
+ return true;
}
EXPORT_SYMBOL(md_handle_request);
@@ -529,6 +519,24 @@ void mddev_resume(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_resume);
+/* sync the bdev before setting the device read-only or stopping the array */
+static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_num)
+{
+ mutex_lock(&mddev->open_mutex);
+ if (mddev->pers && atomic_read(&mddev->openers) > opener_num) {
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
+ if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
+ mutex_unlock(&mddev->open_mutex);
+ return -EBUSY;
+ }
+ mutex_unlock(&mddev->open_mutex);
+
+ sync_blockdev(mddev->gendisk->part0);
+ return 0;
+}
+
/*
* Generic flush handling for md
*/
@@ -579,8 +587,12 @@ static void submit_flushes(struct work_struct *ws)
rcu_read_lock();
}
rcu_read_unlock();
- if (atomic_dec_and_test(&mddev->flush_pending))
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+		/* Pairs with the percpu_ref_get() in md_flush_request() */
+ percpu_ref_put(&mddev->active_io);
+
queue_work(md_wq, &mddev->flush_work);
+ }
}
static void md_submit_flush_data(struct work_struct *ws)
@@ -2402,7 +2414,7 @@ int md_integrity_register(struct mddev *mddev)
if (list_empty(&mddev->disks))
return 0; /* nothing to do */
- if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
+ if (mddev_is_dm(mddev) || blk_get_integrity(mddev->gendisk))
return 0; /* shouldn't register, or already is */
rdev_for_each(rdev, mddev) {
/* skip spares and non-functional disks */
@@ -2455,7 +2467,7 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
struct blk_integrity *bi_mddev;
- if (!mddev->gendisk)
+ if (mddev_is_dm(mddev))
return 0;
bi_mddev = blk_get_integrity(mddev->gendisk);
@@ -2562,6 +2574,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
fail:
pr_warn("md: failed to register dev-%s for %s\n",
b, mdname(mddev));
+ mddev_destroy_serial_pool(mddev, rdev);
return err;
}
@@ -2578,7 +2591,7 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
- bdev_release(rdev->bdev_handle);
+ fput(rdev->bdev_file);
rdev->bdev = NULL;
kobject_put(&rdev->kobj);
}
@@ -2591,7 +2604,7 @@ static void md_kick_rdev_from_array(struct md_rdev *rdev)
list_del_rcu(&rdev->same_set);
pr_debug("md: unbind<%pg>\n", rdev->bdev);
mddev_destroy_serial_pool(rdev->mddev, rdev);
- rdev->mddev = NULL;
+ WRITE_ONCE(rdev->mddev, NULL);
sysfs_remove_link(&rdev->kobj, "block");
sysfs_put(rdev->sysfs_state);
sysfs_put(rdev->sysfs_unack_badblocks);
@@ -2847,8 +2860,7 @@ repeat:
pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
mdname(mddev), mddev->in_sync);
- if (mddev->queue)
- blk_add_trace_msg(mddev->queue, "md md_update_sb");
+ mddev_add_trace_msg(mddev, "md md_update_sb");
rewrite:
md_bitmap_update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
@@ -2929,7 +2941,6 @@ static int add_bound_rdev(struct md_rdev *rdev)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_new_event();
- md_wakeup_thread(mddev->thread);
return 0;
}
@@ -3044,10 +3055,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
if (err == 0) {
md_kick_rdev_from_array(rdev);
- if (mddev->pers) {
+ if (mddev->pers)
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
- md_wakeup_thread(mddev->thread);
- }
md_new_event();
}
}
@@ -3077,7 +3086,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
- md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
@@ -3115,7 +3123,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
!test_bit(Replacement, &rdev->flags))
set_bit(WantReplacement, &rdev->flags);
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
- md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "-want_replacement")) {
/* Clearing 'want_replacement' is always allowed.
@@ -3245,7 +3252,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->raid_disk >= 0)
return -EBUSY;
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
- md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
/* Activating a spare .. or possibly reactivating
* if we ever get bitmaps working here.
@@ -3339,8 +3345,7 @@ static ssize_t new_offset_store(struct md_rdev *rdev,
if (kstrtoull(buf, 10, &new_offset) < 0)
return -EINVAL;
- if (mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
if (new_offset == rdev->data_offset)
/* reset is always permitted */
@@ -3671,7 +3676,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
struct kernfs_node *kn = NULL;
bool suspend = false;
ssize_t rv;
- struct mddev *mddev = rdev->mddev;
+ struct mddev *mddev = READ_ONCE(rdev->mddev);
if (!entry->store)
return -EIO;
@@ -3773,16 +3778,16 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
if (err)
goto out_clear_rdev;
- rdev->bdev_handle = bdev_open_by_dev(newdev,
+ rdev->bdev_file = bdev_file_open_by_dev(newdev,
BLK_OPEN_READ | BLK_OPEN_WRITE,
super_format == -2 ? &claim_rdev : rdev, NULL);
- if (IS_ERR(rdev->bdev_handle)) {
+ if (IS_ERR(rdev->bdev_file)) {
pr_warn("md: could not open device unknown-block(%u,%u).\n",
MAJOR(newdev), MINOR(newdev));
- err = PTR_ERR(rdev->bdev_handle);
+ err = PTR_ERR(rdev->bdev_file);
goto out_clear_rdev;
}
- rdev->bdev = rdev->bdev_handle->bdev;
+ rdev->bdev = file_bdev(rdev->bdev_file);
kobject_init(&rdev->kobj, &rdev_ktype);
@@ -3813,7 +3818,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
return rdev;
out_blkdev_put:
- bdev_release(rdev->bdev_handle);
+ fput(rdev->bdev_file);
out_clear_rdev:
md_rdev_clear(rdev);
out_free_rdev:
@@ -4013,8 +4018,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
*/
rv = -EBUSY;
- if (mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
mddev->reshape_position != MaxSector ||
mddev->sysfs_active)
goto out_unlock;
@@ -4164,7 +4168,6 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
mddev->in_sync = 1;
del_timer_sync(&mddev->safemode_timer);
}
- blk_set_stacking_limits(&mddev->queue->limits);
pers->run(mddev);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
if (!mddev->thread)
@@ -4471,8 +4474,8 @@ array_state_show(struct mddev *mddev, char *page)
return sprintf(page, "%s\n", array_states[st]);
}
-static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
-static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
+static int do_md_stop(struct mddev *mddev, int ro);
+static int md_set_readonly(struct mddev *mddev);
static int restart_array(struct mddev *mddev);
static ssize_t
@@ -4489,6 +4492,17 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
case broken: /* cannot be set */
case bad_word:
return -EINVAL;
+ case clear:
+ case readonly:
+ case inactive:
+ case read_auto:
+ if (!mddev->pers || !md_is_rdwr(mddev))
+ break;
+		/* a sysfs write does not open the mddev, so openers should be 0 */
+ err = mddev_set_closing_and_sync_blockdev(mddev, 0);
+ if (err)
+ return err;
+ break;
default:
break;
}
@@ -4522,14 +4536,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
case inactive:
/* stop an active array, return 0 otherwise */
if (mddev->pers)
- err = do_md_stop(mddev, 2, NULL);
+ err = do_md_stop(mddev, 2);
break;
case clear:
- err = do_md_stop(mddev, 0, NULL);
+ err = do_md_stop(mddev, 0);
break;
case readonly:
if (mddev->pers)
- err = md_set_readonly(mddev, NULL);
+ err = md_set_readonly(mddev);
else {
mddev->ro = MD_RDONLY;
set_disk_ro(mddev->gendisk, 1);
@@ -4539,7 +4553,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
case read_auto:
if (mddev->pers) {
if (md_is_rdwr(mddev))
- err = md_set_readonly(mddev, NULL);
+ err = md_set_readonly(mddev);
else if (mddev->ro == MD_RDONLY)
err = restart_array(mddev);
if (err == 0) {
@@ -4588,6 +4602,11 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
mddev_unlock(mddev);
+
+ if (st == readonly || st == read_auto || st == inactive ||
+ (err && st == clear))
+ clear_bit(MD_CLOSING, &mddev->flags);
+
return err ?: len;
}
static struct md_sysfs_entry md_array_state =
@@ -4915,6 +4934,35 @@ static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
mddev_lock_nointr(mddev);
}
+void md_idle_sync_thread(struct mddev *mddev)
+{
+ lockdep_assert_held(&mddev->reconfig_mutex);
+
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ stop_sync_thread(mddev, true, true);
+}
+EXPORT_SYMBOL_GPL(md_idle_sync_thread);
+
+void md_frozen_sync_thread(struct mddev *mddev)
+{
+ lockdep_assert_held(&mddev->reconfig_mutex);
+
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ stop_sync_thread(mddev, true, false);
+}
+EXPORT_SYMBOL_GPL(md_frozen_sync_thread);
+
+void md_unfrozen_sync_thread(struct mddev *mddev)
+{
+ lockdep_assert_held(&mddev->reconfig_mutex);
+
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
+}
+EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread);
+
static void idle_sync_thread(struct mddev *mddev)
{
mutex_lock(&mddev->sync_mutex);
@@ -5706,6 +5754,51 @@ static const struct kobj_type md_ktype = {
int mdp_major = 0;
+/* stack the limits of all rdevs into lim */
+void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim)
+{
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, mddev) {
+ queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset,
+ mddev->gendisk->disk_name);
+ }
+}
+EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits);
+
+/* apply the extra stacking limits from a new rdev into mddev */
+int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev)
+{
+ struct queue_limits lim;
+
+ if (mddev_is_dm(mddev))
+ return 0;
+
+ lim = queue_limits_start_update(mddev->gendisk->queue);
+ queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset,
+ mddev->gendisk->disk_name);
+ return queue_limits_commit_update(mddev->gendisk->queue, &lim);
+}
+EXPORT_SYMBOL_GPL(mddev_stack_new_rdev);
+
+/* update the optimal I/O size after a reshape */
+void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes)
+{
+ struct queue_limits lim;
+
+ if (mddev_is_dm(mddev))
+ return;
+
+ /* don't bother updating io_opt if we can't suspend the array */
+ if (mddev_suspend(mddev, false) < 0)
+ return;
+ lim = queue_limits_start_update(mddev->gendisk->queue);
+ lim.io_opt = lim.io_min * nr_stripes;
+ queue_limits_commit_update(mddev->gendisk->queue, &lim);
+ mddev_resume(mddev);
+}
+EXPORT_SYMBOL_GPL(mddev_update_io_opt);
+
static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
@@ -5770,10 +5863,11 @@ struct mddev *md_alloc(dev_t dev, char *name)
*/
mddev->hold_active = UNTIL_STOP;
- error = -ENOMEM;
- disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!disk)
+ disk = blk_alloc_disk(NULL, NUMA_NO_NODE);
+ if (IS_ERR(disk)) {
+ error = PTR_ERR(disk);
goto out_free_mddev;
+ }
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -5787,9 +5881,7 @@ struct mddev *md_alloc(dev_t dev, char *name)
disk->fops = &md_fops;
disk->private_data = mddev;
- mddev->queue = disk->queue;
- blk_set_stacking_limits(&mddev->queue->limits);
- blk_queue_write_cache(mddev->queue, true, true);
+ blk_queue_write_cache(disk->queue, true, true);
disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
error = add_disk(disk);
@@ -5931,7 +6023,7 @@ int md_run(struct mddev *mddev)
invalidate_bdev(rdev->bdev);
if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
mddev->ro = MD_RDONLY;
- if (mddev->gendisk)
+ if (!mddev_is_dm(mddev))
set_disk_ro(mddev->gendisk, 1);
}
@@ -6034,7 +6126,10 @@ int md_run(struct mddev *mddev)
pr_warn("True protection against single-disk failure might be compromised.\n");
}
- mddev->recovery = 0;
+	/* dm-raid expects the sync_thread to be frozen until resume */
+ if (mddev->gendisk)
+ mddev->recovery = 0;
+
/* may be over-ridden by personality */
mddev->resync_max_sectors = mddev->dev_sectors;
@@ -6090,7 +6185,8 @@ int md_run(struct mddev *mddev)
}
}
- if (mddev->queue) {
+ if (!mddev_is_dm(mddev)) {
+ struct request_queue *q = mddev->gendisk->queue;
bool nonrot = true;
rdev_for_each(rdev, mddev) {
@@ -6102,14 +6198,14 @@ int md_run(struct mddev *mddev)
if (mddev->degraded)
nonrot = false;
if (nonrot)
- blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
else
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
- blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q);
/* Set the NOWAIT flags if all underlying devices support it */
if (nowait)
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -6188,7 +6284,6 @@ int do_md_run(struct mddev *mddev)
/* run start up tasks that require md_thread */
md_start(mddev);
- md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
@@ -6209,7 +6304,6 @@ int md_start(struct mddev *mddev)
if (mddev->pers->start) {
set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
ret = mddev->pers->start(mddev);
clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
md_wakeup_thread(mddev->sync_thread);
@@ -6254,7 +6348,6 @@ static int restart_array(struct mddev *mddev)
pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
/* Kick recovery or resync if necessary */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
@@ -6274,7 +6367,15 @@ static void md_clean(struct mddev *mddev)
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
- mddev->flags = 0;
+ /*
+ * Don't clear MD_CLOSING, or mddev can be opened again.
+ * 'hold_active != 0' means mddev is still in the creation
+ * process and will be used later.
+ */
+ if (mddev->hold_active)
+ mddev->flags = 0;
+ else
+ mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
mddev->sb_flags = 0;
mddev->ro = MD_RDWR;
mddev->metadata_type[0] = 0;
@@ -6311,7 +6412,6 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
- stop_sync_thread(mddev, true, false);
del_timer_sync(&mddev->safemode_timer);
if (mddev->pers && mddev->pers->quiesce) {
@@ -6336,6 +6436,8 @@ static void __md_stop_writes(struct mddev *mddev)
void md_stop_writes(struct mddev *mddev)
{
mddev_lock_nointr(mddev);
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ stop_sync_thread(mddev, true, false);
__md_stop_writes(mddev);
mddev_unlock(mddev);
}
@@ -6349,8 +6451,10 @@ static void mddev_detach(struct mddev *mddev)
mddev->pers->quiesce(mddev, 0);
}
md_unregister_thread(mddev, &mddev->thread);
- if (mddev->queue)
- blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+
+ /* the unplug fn references 'conf' */
+ if (!mddev_is_dm(mddev))
+ blk_sync_queue(mddev->gendisk->queue);
}
static void __md_stop(struct mddev *mddev)
@@ -6387,7 +6491,8 @@ void md_stop(struct mddev *mddev)
EXPORT_SYMBOL_GPL(md_stop);
-static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
+/* ensure 'mddev->pers' exists before calling md_set_readonly() */
+static int md_set_readonly(struct mddev *mddev)
{
int err = 0;
int did_freeze = 0;
@@ -6398,7 +6503,6 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
}
stop_sync_thread(mddev, false, false);
@@ -6406,36 +6510,29 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
mddev_lock_nointr(mddev);
- mutex_lock(&mddev->open_mutex);
- if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
- mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
pr_warn("md: %s still in use.\n",mdname(mddev));
err = -EBUSY;
goto out;
}
- if (mddev->pers) {
- __md_stop_writes(mddev);
-
- if (mddev->ro == MD_RDONLY) {
- err = -ENXIO;
- goto out;
- }
+ __md_stop_writes(mddev);
- mddev->ro = MD_RDONLY;
- set_disk_ro(mddev->gendisk, 1);
+ if (mddev->ro == MD_RDONLY) {
+ err = -ENXIO;
+ goto out;
}
+ mddev->ro = MD_RDONLY;
+ set_disk_ro(mddev->gendisk, 1);
+
out:
- if ((mddev->pers && !err) || did_freeze) {
+ if (!err || did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- mutex_unlock(&mddev->open_mutex);
return err;
}
@@ -6443,8 +6540,7 @@ out:
 * 0 - completely stop and disassemble array
* 2 - stop but do not disassemble array
*/
-static int do_md_stop(struct mddev *mddev, int mode,
- struct block_device *bdev)
+static int do_md_stop(struct mddev *mddev, int mode)
{
struct gendisk *disk = mddev->gendisk;
struct md_rdev *rdev;
@@ -6453,22 +6549,16 @@ static int do_md_stop(struct mddev *mddev, int mode,
if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
did_freeze = 1;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
}
stop_sync_thread(mddev, true, false);
- mutex_lock(&mddev->open_mutex);
- if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
- mddev->sysfs_active ||
- mddev->sync_thread ||
+ if (mddev->sysfs_active ||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
pr_warn("md: %s still in use.\n",mdname(mddev));
- mutex_unlock(&mddev->open_mutex);
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
}
return -EBUSY;
}
@@ -6487,13 +6577,11 @@ static int do_md_stop(struct mddev *mddev, int mode,
sysfs_unlink_rdev(mddev, rdev);
set_capacity_and_notify(disk, 0);
- mutex_unlock(&mddev->open_mutex);
mddev->changed = 1;
if (!md_is_rdwr(mddev))
mddev->ro = MD_RDWR;
- } else
- mutex_unlock(&mddev->open_mutex);
+ }
/*
* Free resources if final stop
*/
@@ -6539,7 +6627,7 @@ static void autorun_array(struct mddev *mddev)
err = do_md_run(mddev);
if (err) {
pr_warn("md: do_md_run() returned %d\n", err);
- do_md_stop(mddev, 0, NULL);
+ do_md_stop(mddev, 0);
}
}
@@ -7009,9 +7097,7 @@ kick_rdev:
md_kick_rdev_from_array(rdev);
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
- if (mddev->thread)
- md_wakeup_thread(mddev->thread);
- else
+ if (!mddev->thread)
md_update_sb(mddev, 1);
md_new_event();
@@ -7086,14 +7172,13 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
if (!bdev_nowait(rdev->bdev)) {
pr_info("%s: Disabling nowait because %pg does not support nowait\n",
mdname(mddev), rdev->bdev);
- blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->gendisk->queue);
}
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_wakeup_thread(mddev->thread);
md_new_event();
return 0;
@@ -7307,8 +7392,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
* of each device. If num_sectors is zero, we find the largest size
* that fits.
*/
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- mddev->sync_thread)
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
if (!md_is_rdwr(mddev))
return -EROFS;
@@ -7325,10 +7409,9 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
if (!rv) {
if (mddev_is_clustered(mddev))
md_cluster_ops->update_size(mddev, old_dev_sectors);
- else if (mddev->queue) {
+ else if (!mddev_is_dm(mddev))
set_capacity_and_notify(mddev->gendisk,
mddev->array_sectors);
- }
}
return rv;
}
@@ -7345,8 +7428,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
if (raid_disks <= 0 ||
(mddev->max_disks && raid_disks >= mddev->max_disks))
return -EINVAL;
- if (mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
mddev->reshape_position != MaxSector)
return -EBUSY;
@@ -7542,16 +7624,17 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-static inline bool md_ioctl_valid(unsigned int cmd)
+static inline int md_ioctl_valid(unsigned int cmd)
{
switch (cmd) {
- case ADD_NEW_DISK:
case GET_ARRAY_INFO:
- case GET_BITMAP_FILE:
case GET_DISK_INFO:
+ case RAID_VERSION:
+ return 0;
+ case ADD_NEW_DISK:
+ case GET_BITMAP_FILE:
case HOT_ADD_DISK:
case HOT_REMOVE_DISK:
- case RAID_VERSION:
case RESTART_ARRAY_RW:
case RUN_ARRAY:
case SET_ARRAY_INFO:
@@ -7560,9 +7643,11 @@ static inline bool md_ioctl_valid(unsigned int cmd)
case STOP_ARRAY:
case STOP_ARRAY_RO:
case CLUSTERED_DISK_NACK:
- return true;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ return 0;
default:
- return false;
+ return -ENOTTY;
}
}
@@ -7620,31 +7705,17 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
int err = 0;
void __user *argp = (void __user *)arg;
struct mddev *mddev = NULL;
- bool did_set_md_closing = false;
-
- if (!md_ioctl_valid(cmd))
- return -ENOTTY;
- switch (cmd) {
- case RAID_VERSION:
- case GET_ARRAY_INFO:
- case GET_DISK_INFO:
- break;
- default:
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- }
+ err = md_ioctl_valid(cmd);
+ if (err)
+ return err;
/*
* Commands dealing with the RAID driver but not any
* particular array:
*/
- switch (cmd) {
- case RAID_VERSION:
- err = get_version(argp);
- goto out;
- default:;
- }
+ if (cmd == RAID_VERSION)
+ return get_version(argp);
/*
* Commands creating/starting a new array:
@@ -7652,35 +7723,23 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
mddev = bdev->bd_disk->private_data;
- if (!mddev) {
- BUG();
- goto out;
- }
-
	/* Some actions do not require the mutex */
switch (cmd) {
case GET_ARRAY_INFO:
if (!mddev->raid_disks && !mddev->external)
- err = -ENODEV;
- else
- err = get_array_info(mddev, argp);
- goto out;
+ return -ENODEV;
+ return get_array_info(mddev, argp);
case GET_DISK_INFO:
if (!mddev->raid_disks && !mddev->external)
- err = -ENODEV;
- else
- err = get_disk_info(mddev, argp);
- goto out;
+ return -ENODEV;
+ return get_disk_info(mddev, argp);
case SET_DISK_FAULTY:
- err = set_disk_faulty(mddev, new_decode_dev(arg));
- goto out;
+ return set_disk_faulty(mddev, new_decode_dev(arg));
case GET_BITMAP_FILE:
- err = get_bitmap_file(mddev, argp);
- goto out;
-
+ return get_bitmap_file(mddev, argp);
}
if (cmd == HOT_REMOVE_DISK)
@@ -7693,20 +7752,9 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
/* Need to flush page cache, and ensure no-one else opens
* and writes
*/
- mutex_lock(&mddev->open_mutex);
- if (mddev->pers && atomic_read(&mddev->openers) > 1) {
- mutex_unlock(&mddev->open_mutex);
- err = -EBUSY;
- goto out;
- }
- if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
- mutex_unlock(&mddev->open_mutex);
- err = -EBUSY;
- goto out;
- }
- did_set_md_closing = true;
- mutex_unlock(&mddev->open_mutex);
- sync_blockdev(bdev);
+ err = mddev_set_closing_and_sync_blockdev(mddev, 1);
+ if (err)
+ return err;
}
if (!md_is_rdwr(mddev))
@@ -7747,11 +7795,12 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
goto unlock;
case STOP_ARRAY:
- err = do_md_stop(mddev, 0, bdev);
+ err = do_md_stop(mddev, 0);
goto unlock;
case STOP_ARRAY_RO:
- err = md_set_readonly(mddev, bdev);
+ if (mddev->pers)
+ err = md_set_readonly(mddev);
goto unlock;
case HOT_REMOVE_DISK:
@@ -7846,7 +7895,7 @@ unlock:
mddev_unlock(mddev);
out:
- if(did_set_md_closing)
+ if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY))
clear_bit(MD_CLOSING, &mddev->flags);
return err;
}
@@ -8683,10 +8732,7 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
bio_chain(discard_bio, bio);
bio_clone_blkg_association(discard_bio, bio);
- if (mddev->gendisk)
- trace_block_bio_remap(discard_bio,
- disk_devt(mddev->gendisk),
- bio->bi_iter.bi_sector);
+ mddev_trace_remap(mddev, discard_bio, bio->bi_iter.bi_sector);
submit_bio_noacct(discard_bio);
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);
@@ -8733,6 +8779,23 @@ void md_account_bio(struct mddev *mddev, struct bio **bio)
}
EXPORT_SYMBOL_GPL(md_account_bio);
+void md_free_cloned_bio(struct bio *bio)
+{
+ struct md_io_clone *md_io_clone = bio->bi_private;
+ struct bio *orig_bio = md_io_clone->orig_bio;
+ struct mddev *mddev = md_io_clone->mddev;
+
+ if (bio->bi_status && !orig_bio->bi_status)
+ orig_bio->bi_status = bio->bi_status;
+
+ if (md_io_clone->start_time)
+ bio_end_io_acct(orig_bio, md_io_clone->start_time);
+
+ bio_put(bio);
+ percpu_ref_put(&mddev->active_io);
+}
+EXPORT_SYMBOL_GPL(md_free_cloned_bio);
+
/* md_allow_write(mddev)
* Calling this ensures that the array is marked 'active' so that writes
* may proceed without blocking. It is important to call this before
@@ -8788,12 +8851,16 @@ void md_do_sync(struct md_thread *thread)
int ret;
	/* just in case the thread restarts... */
- if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
- test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
- if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */
+
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ goto skip;
+
+ if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
+ !md_is_rdwr(mddev)) {/* never try to sync a read-only array */
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- return;
+ goto skip;
}
if (mddev_is_clustered(mddev)) {
@@ -9162,7 +9229,7 @@ void md_do_sync(struct md_thread *thread)
mddev->delta_disks > 0 &&
mddev->pers->finish_reshape &&
mddev->pers->size &&
- mddev->queue) {
+ !mddev_is_dm(mddev)) {
mddev_lock_nointr(mddev);
md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
mddev_unlock(mddev);
@@ -9262,9 +9329,14 @@ static bool md_spares_need_change(struct mddev *mddev)
{
struct md_rdev *rdev;
- rdev_for_each(rdev, mddev)
- if (rdev_removeable(rdev) || rdev_addable(rdev))
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev) {
+ if (rdev_removeable(rdev) || rdev_addable(rdev)) {
+ rcu_read_unlock();
return true;
+ }
+ }
+ rcu_read_unlock();
return false;
}
@@ -9368,13 +9440,19 @@ static void md_start_sync(struct work_struct *ws)
struct mddev *mddev = container_of(ws, struct mddev, sync_work);
int spares = 0;
bool suspend = false;
+ char *name;
- if (md_spares_need_change(mddev))
+ /*
+ * If reshape is still in progress, spares won't be added or removed
+ * from conf until reshape is done.
+ */
+ if (mddev->reshape_position == MaxSector &&
+ md_spares_need_change(mddev)) {
suspend = true;
+ mddev_suspend(mddev, false);
+ }
- suspend ? mddev_suspend_and_lock_nointr(mddev) :
- mddev_lock_nointr(mddev);
-
+ mddev_lock_nointr(mddev);
if (!md_is_rdwr(mddev)) {
/*
* On a read-only array we can:
@@ -9400,8 +9478,10 @@ static void md_start_sync(struct work_struct *ws)
if (spares)
md_bitmap_write_all(mddev->bitmap);
+ name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
+ "reshape" : "resync";
rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "resync"));
+ md_register_thread(md_do_sync, mddev, name));
if (!mddev->sync_thread) {
pr_warn("%s: could not start resync thread...\n",
mdname(mddev));
@@ -9445,6 +9525,20 @@ not_running:
sysfs_notify_dirent_safe(mddev->sysfs_action);
}
+static void unregister_sync_thread(struct mddev *mddev)
+{
+ if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
+ /* resync/recovery still happening */
+ clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ return;
+ }
+
+ if (WARN_ON_ONCE(!mddev->sync_thread))
+ return;
+
+ md_reap_sync_thread(mddev);
+}
+
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
@@ -9469,9 +9563,6 @@ not_running:
*/
void md_check_recovery(struct mddev *mddev)
{
- if (READ_ONCE(mddev->suspended))
- return;
-
if (mddev->bitmap)
md_bitmap_daemon_work(mddev);
@@ -9485,7 +9576,8 @@ void md_check_recovery(struct mddev *mddev)
}
if (!md_is_rdwr(mddev) &&
- !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_DONE, &mddev->recovery))
return;
if ( ! (
(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
@@ -9507,8 +9599,7 @@ void md_check_recovery(struct mddev *mddev)
struct md_rdev *rdev;
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- /* sync_work already queued. */
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ unregister_sync_thread(mddev);
goto unlock;
}
@@ -9571,16 +9662,7 @@ void md_check_recovery(struct mddev *mddev)
* still set.
*/
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
- if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
- /* resync/recovery still happening */
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- goto unlock;
- }
-
- if (WARN_ON_ONCE(!mddev->sync_thread))
- goto unlock;
-
- md_reap_sync_thread(mddev);
+ unregister_sync_thread(mddev);
goto unlock;
}
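
The md.c ioctl rework above converts md_ioctl_valid() from a bool predicate into a function returning 0 or a specific errno, letting the -EACCES/-ENOTTY distinction live in one switch instead of two. A compact userspace sketch of the same shape, assuming invented command names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum cmd { CMD_GET_INFO, CMD_STOP, CMD_BOGUS };

static bool capable_admin(void) { return false; }	/* pretend unprivileged */

static int ioctl_valid(enum cmd cmd)
{
	switch (cmd) {
	case CMD_GET_INFO:		/* read-only: anyone may call */
		return 0;
	case CMD_STOP:			/* state-changing: admin only */
		return capable_admin() ? 0 : -EACCES;
	default:
		return -ENOTTY;		/* unknown command */
	}
}

int main(void)
{
	printf("GET_INFO -> %d\n", ioctl_valid(CMD_GET_INFO));
	printf("STOP     -> %d\n", ioctl_valid(CMD_STOP));
	printf("BOGUS    -> %d\n", ioctl_valid(CMD_BOGUS));
	return 0;
}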
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8d881cc59799..097d9dbd69b8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -18,6 +18,7 @@
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <trace/events/block.h>
#include "md-cluster.h"
#define MaxSector (~(sector_t)0)
@@ -59,7 +60,7 @@ struct md_rdev {
*/
struct block_device *meta_bdev;
struct block_device *bdev; /* block device handle */
- struct bdev_handle *bdev_handle; /* Handle from open for bdev */
+ struct file *bdev_file; /* Handle from open for bdev */
struct page *sb_page, *bb_page;
int sb_loaded;
@@ -207,6 +208,7 @@ enum flag_bits {
* check if there is collision between raid1
* serial bios.
*/
+ Nonrot, /* non-rotational device (SSD) */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@@ -222,6 +224,16 @@ static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
}
return 0;
}
+
+static inline int rdev_has_badblock(struct md_rdev *rdev, sector_t s,
+ int sectors)
+{
+ sector_t first_bad;
+ int bad_sectors;
+
+ return is_badblock(rdev, s, sectors, &first_bad, &bad_sectors);
+}
+
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
@@ -468,7 +480,6 @@ struct mddev {
struct timer_list safemode_timer;
struct percpu_ref writes_pending;
int sync_checkers; /* # of threads checking writes_pending */
- struct request_queue *queue; /* for plugging ... */
struct bitmap *bitmap; /* the bitmap for the device */
struct {
@@ -558,6 +569,37 @@ enum recovery_flags {
MD_RESYNCING_REMOTE, /* remote node is running resync thread */
};
+enum md_ro_state {
+ MD_RDWR,
+ MD_RDONLY,
+ MD_AUTO_READ,
+ MD_MAX_STATE
+};
+
+static inline bool md_is_rdwr(struct mddev *mddev)
+{
+ return (mddev->ro == MD_RDWR);
+}
+
+static inline bool reshape_interrupted(struct mddev *mddev)
+{
+	/* reshape never started */
+ if (mddev->reshape_position == MaxSector)
+ return false;
+
+ /* interrupted */
+ if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ return true;
+
+ /* running reshape will be interrupted soon. */
+ if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+ return true;
+
+ return false;
+}
+
static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
@@ -617,6 +659,7 @@ struct md_personality
int (*start_reshape) (struct mddev *mddev);
void (*finish_reshape) (struct mddev *mddev);
void (*update_reshape_pos) (struct mddev *mddev);
+ void (*prepare_suspend) (struct mddev *mddev);
/* quiesce suspends or resumes internal processing.
* 1 - stop new actions and wait for action io to complete
* 0 - return to normal behaviour
@@ -750,6 +793,7 @@ extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio, sector_t start, sector_t size);
void md_account_bio(struct mddev *mddev, struct bio **bio);
+void md_free_cloned_bio(struct bio *bio);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
@@ -778,9 +822,12 @@ extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);
-extern void md_handle_request(struct mddev *mddev, struct bio *bio);
+extern bool md_handle_request(struct mddev *mddev, struct bio *bio);
extern int mddev_suspend(struct mddev *mddev, bool interruptible);
extern void mddev_resume(struct mddev *mddev);
+extern void md_idle_sync_thread(struct mddev *mddev);
+extern void md_frozen_sync_thread(struct mddev *mddev);
+extern void md_unfrozen_sync_thread(struct mddev *mddev);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
@@ -821,7 +868,7 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
{
if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors)
- mddev->queue->limits.max_write_zeroes_sectors = 0;
+ mddev->gendisk->queue->limits.max_write_zeroes_sectors = 0;
}
static inline int mddev_suspend_and_lock(struct mddev *mddev)
@@ -860,7 +907,31 @@ void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
int do_md_run(struct mddev *mddev);
+void mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim);
+int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev);
+void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes);
extern const struct block_device_operations md_fops;
+/*
+ * MD devices can be used underneath DM, in which case ->gendisk is NULL.
+ */
+static inline bool mddev_is_dm(struct mddev *mddev)
+{
+ return !mddev->gendisk;
+}
+
+static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
+ sector_t sector)
+{
+ if (!mddev_is_dm(mddev))
+ trace_block_bio_remap(bio, disk_devt(mddev->gendisk), sector);
+}
+
+#define mddev_add_trace_msg(mddev, fmt, args...) \
+do { \
+ if (!mddev_is_dm(mddev)) \
+ blk_add_trace_msg((mddev)->gendisk->queue, fmt, ##args); \
+} while (0)
+
#endif /* _MD_MD_H */
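
md.h above adds rdev_has_badblock(), a thin wrapper that hides is_badblock()'s out-parameters from the many callers that only want a yes/no answer. The pattern in isolation — one full-detail primitive plus a boolean convenience wrapper — with is_badblock() stubbed for the demo:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/* Stubbed full-detail primitive: reports the overlapping bad range. */
static int is_badblock(sector_t s, int sectors,
		       sector_t *first_bad, int *bad_sectors)
{
	if (s < 110 && s + sectors > 100) {	/* pretend 100..109 is bad */
		*first_bad = 100;
		*bad_sectors = 10;
		return 1;
	}
	return 0;
}

/* Convenience wrapper: callers that only need a boolean skip the details. */
static int rdev_has_badblock(sector_t s, int sectors)
{
	sector_t first_bad;
	int bad_sectors;

	return is_badblock(s, sectors, &first_bad, &bad_sectors);
}

int main(void)
{
	printf("range 90+20: %d\n", rdev_has_badblock(90, 20));	/* overlaps */
	printf("range  0+50: %d\n", rdev_has_badblock(0, 50));	/* clean */
	return 0;
}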
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 0e010e1204aa..b17b54df673b 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -656,7 +656,7 @@ EXPORT_SYMBOL_GPL(dm_bm_checksum);
/*----------------------------------------------------------------*/
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_DESCRIPTION("Immutable metadata library for dm");
/*----------------------------------------------------------------*/
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c50a7abda744..c5d4aeb68404 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -379,6 +379,19 @@ static void raid0_free(struct mddev *mddev, void *priv)
free_conf(mddev, conf);
}
+static int raid0_set_limits(struct mddev *mddev)
+{
+ struct queue_limits lim;
+
+ blk_set_stacking_limits(&lim);
+ lim.max_hw_sectors = mddev->chunk_sectors;
+ lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.io_min = mddev->chunk_sectors << 9;
+ lim.io_opt = lim.io_min * mddev->raid_disks;
+ mddev_stack_rdev_limits(mddev, &lim);
+ return queue_limits_set(mddev->gendisk->queue, &lim);
+}
+
static int raid0_run(struct mddev *mddev)
{
struct r0conf *conf;
@@ -399,20 +412,10 @@ static int raid0_run(struct mddev *mddev)
mddev->private = conf;
}
conf = mddev->private;
- if (mddev->queue) {
- struct md_rdev *rdev;
-
- blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
-
- blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
- blk_queue_io_opt(mddev->queue,
- (mddev->chunk_sectors << 9) * mddev->raid_disks);
-
- rdev_for_each(rdev, mddev) {
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- }
+ if (!mddev_is_dm(mddev)) {
+ ret = raid0_set_limits(mddev);
+ if (ret)
+ goto out_free_conf;
}
/* calculate array device size */
@@ -426,8 +429,10 @@ static int raid0_run(struct mddev *mddev)
ret = md_integrity_register(mddev);
if (ret)
- free_conf(mddev, conf);
-
+ goto out_free_conf;
+ return 0;
+out_free_conf:
+ free_conf(mddev, conf);
return ret;
}
@@ -578,10 +583,7 @@ static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio)
bio_set_dev(bio, tmp_dev->bdev);
bio->bi_iter.bi_sector = sector + zone->dev_start +
tmp_dev->data_offset;
-
- if (mddev->gendisk)
- trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
- bio_sector);
+ mddev_trace_remap(mddev, bio, bio_sector);
mddev_check_write_zeroes(mddev, bio);
submit_bio_noacct(bio);
}
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 512746551f36..2ea1710a3b70 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -227,3 +227,72 @@ static inline bool exceed_read_errors(struct mddev *mddev, struct md_rdev *rdev)
return false;
}
+
+/**
+ * raid1_check_read_range() - check a given read range for bad blocks and
+ * return the readable length;
+ * @rdev: the rdev to read;
+ * @this_sector: read position;
+ * @len: read length;
+ *
+ * helper function for read_balance()
+ *
+ * 1) If there are no bad blocks in the range, @len is returned;
+ * 2) If the range consists entirely of bad blocks, 0 is returned;
+ * 3) If the range contains partial bad blocks:
+ *  - If the bad block range starts after @this_sector, the length of the
+ *    first good region is returned;
+ *  - If the bad block range starts before @this_sector, 0 is returned and
+ *    @len is updated to the number of sectors until the good blocks begin;
+ */
+static inline int raid1_check_read_range(struct md_rdev *rdev,
+ sector_t this_sector, int *len)
+{
+ sector_t first_bad;
+ int bad_sectors;
+
+ /* no bad block overlap */
+ if (!is_badblock(rdev, this_sector, *len, &first_bad, &bad_sectors))
+ return *len;
+
+ /*
+ * bad block range starts partway into our range, so we can return the
+ * number of good sectors before the bad blocks start.
+ */
+ if (first_bad > this_sector)
+ return first_bad - this_sector;
+
+ /* read range is fully consumed by bad blocks. */
+ if (this_sector + *len <= first_bad + bad_sectors)
+ return 0;
+
+ /*
+ * final case: the bad block range starts at or before the start of our
+ * range but does not cover it entirely, so we still return 0 and update
+ * the length to the number of sectors before we get to the good ones.
+ */
+ *len = first_bad + bad_sectors - this_sector;
+ return 0;
+}
+
+/*
+ * Check if read should choose the first rdev.
+ *
+ * Balance on the whole device if no resync is going on (recovery is ok) or
+ * below the resync window. Otherwise, take the first readable disk.
+ */
+static inline bool raid1_should_read_first(struct mddev *mddev,
+ sector_t this_sector, int len)
+{
+ if (mddev->recovery_cp < this_sector + len)
+ return true;
+
+ if (mddev_is_clustered(mddev) &&
+ md_cluster_ops->area_resyncing(mddev, READ, this_sector,
+ this_sector + len))
+ return true;
+
+ return false;
+}
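raid1_check_read_range() collapses the old open-coded bad-block handling into the three cases documented above. The following self-contained sketch exercises those cases against a single fake bad-block extent; the toy is_badblock() below is an assumption for illustration only, whereas the real one walks the rdev's bad-block list.

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* One hypothetical bad extent: sectors [100, 108). */
    static sector_t bb_start = 100, bb_len = 8;

    static int is_badblock(sector_t sector, int len, sector_t *first_bad,
                           int *bad_sectors)
    {
        if (sector + len <= bb_start || sector >= bb_start + bb_len)
            return 0;
        *first_bad = bb_start;
        *bad_sectors = bb_len;
        return 1;
    }

    static int check_read_range(sector_t this_sector, int *len)
    {
        sector_t first_bad;
        int bad_sectors;

        if (!is_badblock(this_sector, *len, &first_bad, &bad_sectors))
            return *len;                        /* case 1: fully good */
        if (first_bad > this_sector)
            return first_bad - this_sector;     /* case 3a: good prefix */
        if (this_sector + *len <= first_bad + bad_sectors)
            return 0;                           /* case 2: fully bad */
        *len = first_bad + bad_sectors - this_sector; /* case 3b */
        return 0;
    }

    int main(void)
    {
        int len;

        len = 16; printf("90+16  -> %d (len=%d)\n", check_read_range(90, &len), len);
        len = 4;  printf("102+4  -> %d (len=%d)\n", check_read_range(102, &len), len);
        len = 32; printf("100+32 -> %d (len=%d)\n", check_read_range(100, &len), len);
        return 0;
    }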
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 286f8b16c7bd..be8ac24f50b6 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -46,9 +46,6 @@
static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
-#define raid1_log(md, fmt, args...) \
- do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
-
#define RAID_1_10_NAME "raid1"
#include "raid1-10.c"
@@ -498,9 +495,6 @@ static void raid1_end_write_request(struct bio *bio)
* to user-side. So if something waits for IO, then it
* will wait for the 'master' bio.
*/
- sector_t first_bad;
- int bad_sectors;
-
r1_bio->bios[mirror] = NULL;
to_put = bio;
/*
@@ -516,8 +510,8 @@ static void raid1_end_write_request(struct bio *bio)
set_bit(R1BIO_Uptodate, &r1_bio->state);
/* Maybe we can clear some bad blocks. */
- if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
- &first_bad, &bad_sectors) && !discard_error) {
+ if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
+ !discard_error) {
r1_bio->bios[mirror] = IO_MADE_GOOD;
set_bit(R1BIO_MadeGood, &r1_bio->state);
}
@@ -582,211 +576,312 @@ static sector_t align_to_barrier_unit_end(sector_t start_sector,
return len;
}
-/*
- * This routine returns the disk from which the requested read should
- * be done. There is a per-array 'next expected sequential IO' sector
- * number - if this matches on the next IO then we use the last disk.
- * There is also a per-disk 'last know head position' sector that is
- * maintained from IRQ contexts, both the normal and the resync IO
- * completion handlers update this position correctly. If there is no
- * perfect sequential match then we pick the disk whose head is closest.
- *
- * If there are 2 mirrors in the same 2 devices, performance degrades
- * because position is mirror, not device based.
- *
- * The rdev for the device selected will have nr_pending incremented.
- */
-static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
+static void update_read_sectors(struct r1conf *conf, int disk,
+ sector_t this_sector, int len)
{
- const sector_t this_sector = r1_bio->sector;
- int sectors;
- int best_good_sectors;
- int best_disk, best_dist_disk, best_pending_disk;
- int has_nonrot_disk;
+ struct raid1_info *info = &conf->mirrors[disk];
+
+ atomic_inc(&info->rdev->nr_pending);
+ if (info->next_seq_sect != this_sector)
+ info->seq_start = this_sector;
+ info->next_seq_sect = this_sector + len;
+}
+
+static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio,
+ int *max_sectors)
+{
+ sector_t this_sector = r1_bio->sector;
+ int len = r1_bio->sectors;
int disk;
- sector_t best_dist;
- unsigned int min_pending;
- struct md_rdev *rdev;
- int choose_first;
- int choose_next_idle;
- /*
- * Check if we can balance. We can balance on the whole
- * device if no resync is going on, or below the resync window.
- * We take the first readable disk when above the resync window.
- */
- retry:
- sectors = r1_bio->sectors;
- best_disk = -1;
- best_dist_disk = -1;
- best_dist = MaxSector;
- best_pending_disk = -1;
- min_pending = UINT_MAX;
- best_good_sectors = 0;
- has_nonrot_disk = 0;
- choose_next_idle = 0;
- clear_bit(R1BIO_FailFast, &r1_bio->state);
+ for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+ struct md_rdev *rdev;
+ int read_len;
- if ((conf->mddev->recovery_cp < this_sector + sectors) ||
- (mddev_is_clustered(conf->mddev) &&
- md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
- this_sector + sectors)))
- choose_first = 1;
- else
- choose_first = 0;
+ if (r1_bio->bios[disk] == IO_BLOCKED)
+ continue;
+
+ rdev = conf->mirrors[disk].rdev;
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ continue;
+
+ /* choose the first disk even if it has some bad blocks. */
+ read_len = raid1_check_read_range(rdev, this_sector, &len);
+ if (read_len > 0) {
+ update_read_sectors(conf, disk, this_sector, read_len);
+ *max_sectors = read_len;
+ return disk;
+ }
+ }
+
+ return -1;
+}
+
+static int choose_bb_rdev(struct r1conf *conf, struct r1bio *r1_bio,
+ int *max_sectors)
+{
+ sector_t this_sector = r1_bio->sector;
+ int best_disk = -1;
+ int best_len = 0;
+ int disk;
for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
- sector_t dist;
- sector_t first_bad;
- int bad_sectors;
- unsigned int pending;
- bool nonrot;
+ struct md_rdev *rdev;
+ int len;
+ int read_len;
+
+ if (r1_bio->bios[disk] == IO_BLOCKED)
+ continue;
rdev = conf->mirrors[disk].rdev;
- if (r1_bio->bios[disk] == IO_BLOCKED
- || rdev == NULL
- || test_bit(Faulty, &rdev->flags))
+ if (!rdev || test_bit(Faulty, &rdev->flags) ||
+ test_bit(WriteMostly, &rdev->flags))
continue;
- if (!test_bit(In_sync, &rdev->flags) &&
- rdev->recovery_offset < this_sector + sectors)
+
+ /* keep track of the disk with the most readable sectors. */
+ len = r1_bio->sectors;
+ read_len = raid1_check_read_range(rdev, this_sector, &len);
+ if (read_len > best_len) {
+ best_disk = disk;
+ best_len = read_len;
+ }
+ }
+
+ if (best_disk != -1) {
+ *max_sectors = best_len;
+ update_read_sectors(conf, best_disk, this_sector, best_len);
+ }
+
+ return best_disk;
+}
+
+static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio,
+ int *max_sectors)
+{
+ sector_t this_sector = r1_bio->sector;
+ int bb_disk = -1;
+ int bb_read_len = 0;
+ int disk;
+
+ for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+ struct md_rdev *rdev;
+ int len;
+ int read_len;
+
+ if (r1_bio->bios[disk] == IO_BLOCKED)
continue;
- if (test_bit(WriteMostly, &rdev->flags)) {
- /* Don't balance among write-mostly, just
- * use the first as a last resort */
- if (best_dist_disk < 0) {
- if (is_badblock(rdev, this_sector, sectors,
- &first_bad, &bad_sectors)) {
- if (first_bad <= this_sector)
- /* Cannot use this */
- continue;
- best_good_sectors = first_bad - this_sector;
- } else
- best_good_sectors = sectors;
- best_dist_disk = disk;
- best_pending_disk = disk;
- }
+
+ rdev = conf->mirrors[disk].rdev;
+ if (!rdev || test_bit(Faulty, &rdev->flags) ||
+ !test_bit(WriteMostly, &rdev->flags))
continue;
+
+ /* if the whole read range has no bad blocks, use this disk */
+ len = r1_bio->sectors;
+ read_len = raid1_check_read_range(rdev, this_sector, &len);
+ if (read_len == r1_bio->sectors) {
+ update_read_sectors(conf, disk, this_sector, read_len);
+ return disk;
}
- /* This is a reasonable device to use. It might
- * even be best.
+
+ /*
+ * there are partial bad blocks, choose the rdev with the largest
+ * readable length.
*/
- if (is_badblock(rdev, this_sector, sectors,
- &first_bad, &bad_sectors)) {
- if (best_dist < MaxSector)
- /* already have a better device */
- continue;
- if (first_bad <= this_sector) {
- /* cannot read here. If this is the 'primary'
- * device, then we must not read beyond
- * bad_sectors from another device..
- */
- bad_sectors -= (this_sector - first_bad);
- if (choose_first && sectors > bad_sectors)
- sectors = bad_sectors;
- if (best_good_sectors > sectors)
- best_good_sectors = sectors;
-
- } else {
- sector_t good_sectors = first_bad - this_sector;
- if (good_sectors > best_good_sectors) {
- best_good_sectors = good_sectors;
- best_disk = disk;
- }
- if (choose_first)
- break;
- }
- continue;
- } else {
- if ((sectors > best_good_sectors) && (best_disk >= 0))
- best_disk = -1;
- best_good_sectors = sectors;
+ if (read_len > bb_read_len) {
+ bb_disk = disk;
+ bb_read_len = read_len;
}
+ }
+
+ if (bb_disk != -1) {
+ *max_sectors = bb_read_len;
+ update_read_sectors(conf, bb_disk, this_sector, bb_read_len);
+ }
+
+ return bb_disk;
+}
+
+static bool is_sequential(struct r1conf *conf, int disk, struct r1bio *r1_bio)
+{
+ /* TODO: address issues with this check and concurrency. */
+ return conf->mirrors[disk].next_seq_sect == r1_bio->sector ||
+ conf->mirrors[disk].head_position == r1_bio->sector;
+}
+
+/*
+ * If the buffered sequential IO size exceeds the optimal iosize, check
+ * whether there is an idle disk. If so, choose the idle disk.
+ */
+static bool should_choose_next(struct r1conf *conf, int disk)
+{
+ struct raid1_info *mirror = &conf->mirrors[disk];
+ int opt_iosize;
+
+ if (!test_bit(Nonrot, &mirror->rdev->flags))
+ return false;
+
+ opt_iosize = bdev_io_opt(mirror->rdev->bdev) >> 9;
+ return opt_iosize > 0 && mirror->seq_start != MaxSector &&
+ mirror->next_seq_sect > opt_iosize &&
+ mirror->next_seq_sect - opt_iosize >= mirror->seq_start;
+}
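should_choose_next() only fires for non-rotational disks: once the buffered sequential run on a mirror exceeds its optimal IO size, read_balance is allowed to migrate to an idle disk. Here is a runnable sketch of just that arithmetic with invented sector values; struct mirror is a stand-in for struct raid1_info and opt_iosize plays the role of bdev_io_opt() in sectors.

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long long sector_t;
    #define MaxSector (~0ULL)

    struct mirror {
        sector_t seq_start;     /* where the current sequential run began */
        sector_t next_seq_sect; /* next expected sector of the run */
    };

    /* Assumes a non-rotational disk, as in the kernel check. */
    static bool should_choose_next(const struct mirror *m, int opt_iosize)
    {
        return opt_iosize > 0 && m->seq_start != MaxSector &&
               m->next_seq_sect > opt_iosize &&
               m->next_seq_sect - opt_iosize >= m->seq_start;
    }

    int main(void)
    {
        struct mirror m = { .seq_start = 1000, .next_seq_sect = 1300 };

        /* A 300-sector run exceeds a 256-sector optimal IO size. */
        printf("switch? %d\n", should_choose_next(&m, 256)); /* 1 */
        m.next_seq_sect = 1100;                               /* 100-sector run */
        printf("switch? %d\n", should_choose_next(&m, 256)); /* 0 */
        return 0;
    }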
+
+static bool rdev_readable(struct md_rdev *rdev, struct r1bio *r1_bio)
+{
+ if (!rdev || test_bit(Faulty, &rdev->flags))
+ return false;
+
+ /* still in recovery */
+ if (!test_bit(In_sync, &rdev->flags) &&
+ rdev->recovery_offset < r1_bio->sector + r1_bio->sectors)
+ return false;
+
+ /* don't read from a slow disk unless we have to */
+ if (test_bit(WriteMostly, &rdev->flags))
+ return false;
+
+ /* don't split IO for bad blocks unless we have to */
+ if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors))
+ return false;
+
+ return true;
+}
+
+struct read_balance_ctl {
+ sector_t closest_dist;
+ int closest_dist_disk;
+ int min_pending;
+ int min_pending_disk;
+ int sequential_disk;
+ int readable_disks;
+};
+
+static int choose_best_rdev(struct r1conf *conf, struct r1bio *r1_bio)
+{
+ int disk;
+ struct read_balance_ctl ctl = {
+ .closest_dist_disk = -1,
+ .closest_dist = MaxSector,
+ .min_pending_disk = -1,
+ .min_pending = UINT_MAX,
+ .sequential_disk = -1,
+ };
+
+ for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+ struct md_rdev *rdev;
+ sector_t dist;
+ unsigned int pending;
- if (best_disk >= 0)
- /* At least two disks to choose from so failfast is OK */
+ if (r1_bio->bios[disk] == IO_BLOCKED)
+ continue;
+
+ rdev = conf->mirrors[disk].rdev;
+ if (!rdev_readable(rdev, r1_bio))
+ continue;
+
+ /* At least two disks to choose from so failfast is OK */
+ if (ctl.readable_disks++ == 1)
set_bit(R1BIO_FailFast, &r1_bio->state);
- nonrot = bdev_nonrot(rdev->bdev);
- has_nonrot_disk |= nonrot;
pending = atomic_read(&rdev->nr_pending);
- dist = abs(this_sector - conf->mirrors[disk].head_position);
- if (choose_first) {
- best_disk = disk;
- break;
- }
+ dist = abs(r1_bio->sector - conf->mirrors[disk].head_position);
+
/* Don't change to another disk for sequential reads */
- if (conf->mirrors[disk].next_seq_sect == this_sector
- || dist == 0) {
- int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
- struct raid1_info *mirror = &conf->mirrors[disk];
+ if (is_sequential(conf, disk, r1_bio)) {
+ if (!should_choose_next(conf, disk))
+ return disk;
- best_disk = disk;
/*
- * If buffered sequential IO size exceeds optimal
- * iosize, check if there is idle disk. If yes, choose
- * the idle disk. read_balance could already choose an
- * idle disk before noticing it's a sequential IO in
- * this disk. This doesn't matter because this disk
- * will idle, next time it will be utilized after the
- * first disk has IO size exceeds optimal iosize. In
- * this way, iosize of the first disk will be optimal
- * iosize at least. iosize of the second disk might be
- * small, but not a big deal since when the second disk
- * starts IO, the first disk is likely still busy.
+ * Add 'pending' to avoid choosing this disk if
+ * there is other idle disk.
*/
- if (nonrot && opt_iosize > 0 &&
- mirror->seq_start != MaxSector &&
- mirror->next_seq_sect > opt_iosize &&
- mirror->next_seq_sect - opt_iosize >=
- mirror->seq_start) {
- choose_next_idle = 1;
- continue;
- }
- break;
+ pending++;
+ /*
+ * If there is no other idle disk, this disk
+ * will be chosen.
+ */
+ ctl.sequential_disk = disk;
}
- if (choose_next_idle)
- continue;
-
- if (min_pending > pending) {
- min_pending = pending;
- best_pending_disk = disk;
+ if (ctl.min_pending > pending) {
+ ctl.min_pending = pending;
+ ctl.min_pending_disk = disk;
}
- if (dist < best_dist) {
- best_dist = dist;
- best_dist_disk = disk;
+ if (ctl.closest_dist > dist) {
+ ctl.closest_dist = dist;
+ ctl.closest_dist_disk = disk;
}
}
/*
+ * The sequential IO size exceeds the optimal iosize, but there is no
+ * other idle disk, so choose the sequential disk.
+ */
+ if (ctl.sequential_disk != -1 && ctl.min_pending != 0)
+ return ctl.sequential_disk;
+
+ /*
* If all disks are rotational, choose the closest disk. If any disk is
* non-rotational, choose the disk with fewer pending requests even if the
* disk is rotational, which might or might not be optimal for raids with
* mixed rotational/non-rotational disks depending on workload.
*/
- if (best_disk == -1) {
- if (has_nonrot_disk || min_pending == 0)
- best_disk = best_pending_disk;
- else
- best_disk = best_dist_disk;
- }
+ if (ctl.min_pending_disk != -1 &&
+ (READ_ONCE(conf->nonrot_disks) || ctl.min_pending == 0))
+ return ctl.min_pending_disk;
+ else
+ return ctl.closest_dist_disk;
+}
- if (best_disk >= 0) {
- rdev = conf->mirrors[best_disk].rdev;
- if (!rdev)
- goto retry;
- atomic_inc(&rdev->nr_pending);
- sectors = best_good_sectors;
+/*
+ * This routine returns the disk from which the requested read should be done.
+ *
+ * 1) If resync is in progress, find the first usable disk and use it even if it
+ * has some bad blocks.
+ *
+ * 2) Now that there is no resync, loop through all disks, skipping slow
+ * disks and disks with bad blocks for now, and pick the best disk based
+ * on sequential access, head position and pending IO.
+ *
+ * 3) If we've made it this far, now look for disks with bad blocks and choose
+ * the one with the most readable sectors.
+ *
+ * 4) If we are all the way at the end, we have no choice but to use a disk
+ * even if it is write-mostly.
+ *
+ * The rdev for the device selected will have nr_pending incremented.
+ */
+static int read_balance(struct r1conf *conf, struct r1bio *r1_bio,
+ int *max_sectors)
+{
+ int disk;
- if (conf->mirrors[best_disk].next_seq_sect != this_sector)
- conf->mirrors[best_disk].seq_start = this_sector;
+ clear_bit(R1BIO_FailFast, &r1_bio->state);
+
+ if (raid1_should_read_first(conf->mddev, r1_bio->sector,
+ r1_bio->sectors))
+ return choose_first_rdev(conf, r1_bio, max_sectors);
- conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
+ disk = choose_best_rdev(conf, r1_bio);
+ if (disk >= 0) {
+ *max_sectors = r1_bio->sectors;
+ update_read_sectors(conf, disk, r1_bio->sector,
+ r1_bio->sectors);
+ return disk;
}
- *max_sectors = sectors;
- return best_disk;
+ /*
+ * If we are here it means we didn't find a perfectly good disk so
+ * now spend a bit more time trying to find one with the most good
+ * sectors.
+ */
+ disk = choose_bb_rdev(conf, r1_bio, max_sectors);
+ if (disk >= 0)
+ return disk;
+
+ return choose_slow_rdev(conf, r1_bio, max_sectors);
}
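After the refactor, read_balance() is a strict four-stage cascade, with each helper returning a disk number or -1. A compact sketch of that control flow follows; the stubs are hypothetical placeholders, since the real helpers take conf/r1_bio and update per-mirror state.

    #include <stdio.h>

    /* Hypothetical stubs standing in for the kernel helpers. */
    static int choose_first_rdev(void) { return -1; }
    static int choose_best_rdev(void)  { return -1; }
    static int choose_bb_rdev(void)    { return  2; }
    static int choose_slow_rdev(void)  { return  0; }

    static int read_balance_sketch(int read_first)
    {
        if (read_first)
            return choose_first_rdev(); /* 1) resync: first usable disk */

        int disk = choose_best_rdev(); /* 2) healthy, fast disks only */
        if (disk >= 0)
            return disk;

        disk = choose_bb_rdev();       /* 3) most readable sectors wins */
        if (disk >= 0)
            return disk;

        return choose_slow_rdev();     /* 4) last resort: write-mostly */
    }

    int main(void)
    {
        printf("chosen disk: %d\n", read_balance_sketch(0)); /* 2 */
        return 0;
    }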
static void wake_up_barrier(struct r1conf *conf)
@@ -1098,7 +1193,7 @@ static void freeze_array(struct r1conf *conf, int extra)
*/
spin_lock_irq(&conf->resync_lock);
conf->array_frozen = 1;
- raid1_log(conf->mddev, "wait freeze");
+ mddev_add_trace_msg(conf->mddev, "raid1 wait freeze");
wait_event_lock_irq_cmd(
conf->wait_barrier,
get_unqueued_pending(conf) == extra,
@@ -1287,7 +1382,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
* Reading from a write-mostly device must take care not to
* over-take any writes that are 'behind'
*/
- raid1_log(mddev, "wait behind writes");
+ mddev_add_trace_msg(mddev, "raid1 wait behind writes");
wait_event(bitmap->behind_wait,
atomic_read(&bitmap->behind_writes) == 0);
}
@@ -1320,11 +1415,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
test_bit(R1BIO_FailFast, &r1_bio->state))
read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r1_bio;
-
- if (mddev->gendisk)
- trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
- r1_bio->sector);
-
+ mddev_trace_remap(mddev, read_bio, r1_bio->sector);
submit_bio_noacct(read_bio);
}
@@ -1474,7 +1565,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
bio_wouldblock_error(bio);
return;
}
- raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
+ mddev_add_trace_msg(mddev, "raid1 wait rdev %d blocked",
+ blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf, bio->bi_iter.bi_sector, false);
goto retry_write;
@@ -1557,10 +1649,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
-
- if (mddev->gendisk)
- trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
- r1_bio->sector);
+ mddev_trace_remap(mddev, mbio, r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_bdev = (void *)rdev;
if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug, disks)) {
@@ -1760,6 +1849,52 @@ static int raid1_spare_active(struct mddev *mddev)
return count;
}
+static bool raid1_add_conf(struct r1conf *conf, struct md_rdev *rdev, int disk,
+ bool replacement)
+{
+ struct raid1_info *info = conf->mirrors + disk;
+
+ if (replacement)
+ info += conf->raid_disks;
+
+ if (info->rdev)
+ return false;
+
+ if (bdev_nonrot(rdev->bdev)) {
+ set_bit(Nonrot, &rdev->flags);
+ WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks + 1);
+ }
+
+ rdev->raid_disk = disk;
+ info->head_position = 0;
+ info->seq_start = MaxSector;
+ WRITE_ONCE(info->rdev, rdev);
+
+ return true;
+}
+
+static bool raid1_remove_conf(struct r1conf *conf, int disk)
+{
+ struct raid1_info *info = conf->mirrors + disk;
+ struct md_rdev *rdev = info->rdev;
+
+ if (!rdev || test_bit(In_sync, &rdev->flags) ||
+ atomic_read(&rdev->nr_pending))
+ return false;
+
+ /* Only remove non-faulty devices if recovery is not possible. */
+ if (!test_bit(Faulty, &rdev->flags) &&
+ rdev->mddev->recovery_disabled != conf->recovery_disabled &&
+ rdev->mddev->degraded < conf->raid_disks)
+ return false;
+
+ if (test_and_clear_bit(Nonrot, &rdev->flags))
+ WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks - 1);
+
+ WRITE_ONCE(info->rdev, NULL);
+ return true;
+}
+
static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r1conf *conf = mddev->private;
@@ -1791,19 +1926,16 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
for (mirror = first; mirror <= last; mirror++) {
p = conf->mirrors + mirror;
if (!p->rdev) {
- if (mddev->gendisk)
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
+ err = mddev_stack_new_rdev(mddev, rdev);
+ if (err)
+ return err;
- p->head_position = 0;
- rdev->raid_disk = mirror;
- err = 0;
+ raid1_add_conf(conf, rdev, mirror, false);
/* As all devices are equivalent, we don't need a full recovery
* if this was recently any drive of the array
*/
if (rdev->saved_raid_disk < 0)
conf->fullsync = 1;
- WRITE_ONCE(p->rdev, rdev);
break;
}
if (test_bit(WantReplacement, &p->rdev->flags) &&
@@ -1813,13 +1945,11 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (err && repl_slot >= 0) {
/* Add this device as a replacement */
- p = conf->mirrors + repl_slot;
clear_bit(In_sync, &rdev->flags);
set_bit(Replacement, &rdev->flags);
- rdev->raid_disk = repl_slot;
+ raid1_add_conf(conf, rdev, repl_slot, true);
err = 0;
conf->fullsync = 1;
- WRITE_ONCE(p[conf->raid_disks].rdev, rdev);
}
print_conf(conf);
@@ -1836,27 +1966,20 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (unlikely(number >= conf->raid_disks))
goto abort;
- if (rdev != p->rdev)
- p = conf->mirrors + conf->raid_disks + number;
+ if (rdev != p->rdev) {
+ number += conf->raid_disks;
+ p = conf->mirrors + number;
+ }
print_conf(conf);
if (rdev == p->rdev) {
- if (test_bit(In_sync, &rdev->flags) ||
- atomic_read(&rdev->nr_pending)) {
+ if (!raid1_remove_conf(conf, number)) {
err = -EBUSY;
goto abort;
}
- /* Only remove non-faulty devices if recovery
- * is not possible.
- */
- if (!test_bit(Faulty, &rdev->flags) &&
- mddev->recovery_disabled != conf->recovery_disabled &&
- mddev->degraded < conf->raid_disks) {
- err = -EBUSY;
- goto abort;
- }
- WRITE_ONCE(p->rdev, NULL);
- if (conf->mirrors[conf->raid_disks + number].rdev) {
+
+ if (number < conf->raid_disks &&
+ conf->mirrors[conf->raid_disks + number].rdev) {
/* We just removed a device that is being replaced.
* Move down the replacement. We drain all IO before
* doing this to avoid confusion.
@@ -1944,8 +2067,6 @@ static void end_sync_write(struct bio *bio)
struct r1bio *r1_bio = get_resync_r1bio(bio);
struct mddev *mddev = r1_bio->mddev;
struct r1conf *conf = mddev->private;
- sector_t first_bad;
- int bad_sectors;
struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
if (!uptodate) {
@@ -1955,14 +2076,11 @@ static void end_sync_write(struct bio *bio)
set_bit(MD_RECOVERY_NEEDED, &
mddev->recovery);
set_bit(R1BIO_WriteError, &r1_bio->state);
- } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
- &first_bad, &bad_sectors) &&
- !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
- r1_bio->sector,
- r1_bio->sectors,
- &first_bad, &bad_sectors)
- )
+ } else if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
+ !rdev_has_badblock(conf->mirrors[r1_bio->read_disk].rdev,
+ r1_bio->sector, r1_bio->sectors)) {
set_bit(R1BIO_MadeGood, &r1_bio->state);
+ }
put_sync_write_buf(r1_bio, uptodate);
}
@@ -2279,16 +2397,12 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
s = PAGE_SIZE >> 9;
do {
- sector_t first_bad;
- int bad_sectors;
-
rdev = conf->mirrors[d].rdev;
if (rdev &&
(test_bit(In_sync, &rdev->flags) ||
(!test_bit(Faulty, &rdev->flags) &&
rdev->recovery_offset >= sect + s)) &&
- is_badblock(rdev, sect, s,
- &first_bad, &bad_sectors) == 0) {
+ rdev_has_badblock(rdev, sect, s) == 0) {
atomic_inc(&rdev->nr_pending);
if (sync_page_io(rdev, sect, s<<9,
conf->tmppage, REQ_OP_READ, false))
@@ -3006,23 +3120,17 @@ static struct r1conf *setup_conf(struct mddev *mddev)
err = -EINVAL;
spin_lock_init(&conf->device_lock);
+ conf->raid_disks = mddev->raid_disks;
rdev_for_each(rdev, mddev) {
int disk_idx = rdev->raid_disk;
- if (disk_idx >= mddev->raid_disks
- || disk_idx < 0)
+
+ if (disk_idx >= conf->raid_disks || disk_idx < 0)
continue;
- if (test_bit(Replacement, &rdev->flags))
- disk = conf->mirrors + mddev->raid_disks + disk_idx;
- else
- disk = conf->mirrors + disk_idx;
- if (disk->rdev)
+ if (!raid1_add_conf(conf, rdev, disk_idx,
+ test_bit(Replacement, &rdev->flags)))
goto abort;
- disk->rdev = rdev;
- disk->head_position = 0;
- disk->seq_start = MaxSector;
}
- conf->raid_disks = mddev->raid_disks;
conf->mddev = mddev;
INIT_LIST_HEAD(&conf->retry_list);
INIT_LIST_HEAD(&conf->bio_end_io_list);
@@ -3086,12 +3194,21 @@ static struct r1conf *setup_conf(struct mddev *mddev)
return ERR_PTR(err);
}
+static int raid1_set_limits(struct mddev *mddev)
+{
+ struct queue_limits lim;
+
+ blk_set_stacking_limits(&lim);
+ lim.max_write_zeroes_sectors = 0;
+ mddev_stack_rdev_limits(mddev, &lim);
+ return queue_limits_set(mddev->gendisk->queue, &lim);
+}
+
static void raid1_free(struct mddev *mddev, void *priv);
static int raid1_run(struct mddev *mddev)
{
struct r1conf *conf;
int i;
- struct md_rdev *rdev;
int ret;
if (mddev->level != 1) {
@@ -3118,14 +3235,10 @@ static int raid1_run(struct mddev *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
- if (mddev->queue)
- blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-
- rdev_for_each(rdev, mddev) {
- if (!mddev->gendisk)
- continue;
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
+ if (!mddev_is_dm(mddev)) {
+ ret = raid1_set_limits(mddev);
+ if (ret)
+ goto abort;
}
mddev->degraded = 0;
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 14d4211a123a..5300cbaa58a4 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -71,6 +71,7 @@ struct r1conf {
* allow for replacements.
*/
int raid_disks;
+ int nonrot_disks;
spinlock_t device_lock;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7412066ea22c..a4556d2e46bf 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -76,9 +76,6 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio);
static void end_reshape(struct r10conf *conf);
-#define raid10_log(md, fmt, args...) \
- do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
-
#include "raid1-10.c"
#define NULL_CMD
@@ -518,11 +515,7 @@ static void raid10_end_write_request(struct bio *bio)
* The 'master' represents the composite IO operation to
* user-side. So if something waits for IO, then it will
* wait for the 'master' bio.
- */
- sector_t first_bad;
- int bad_sectors;
-
- /*
+ *
* Do not set R10BIO_Uptodate if the current device is
* rebuilding or Faulty. This is because we cannot use
* such device for properly reading the data back (we could
@@ -535,10 +528,9 @@ static void raid10_end_write_request(struct bio *bio)
set_bit(R10BIO_Uptodate, &r10_bio->state);
/* Maybe we can clear some bad blocks. */
- if (is_badblock(rdev,
- r10_bio->devs[slot].addr,
- r10_bio->sectors,
- &first_bad, &bad_sectors) && !discard_error) {
+ if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
+ r10_bio->sectors) &&
+ !discard_error) {
bio_put(bio);
if (repl)
r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
@@ -753,17 +745,8 @@ static struct md_rdev *read_balance(struct r10conf *conf,
best_good_sectors = 0;
do_balance = 1;
clear_bit(R10BIO_FailFast, &r10_bio->state);
- /*
- * Check if we can balance. We can balance on the whole
- * device if no resync is going on (recovery is ok), or below
- * the resync window. We take the first readable disk when
- * above the resync window.
- */
- if ((conf->mddev->recovery_cp < MaxSector
- && (this_sector + sectors >= conf->next_resync)) ||
- (mddev_is_clustered(conf->mddev) &&
- md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
- this_sector + sectors)))
+
+ if (raid1_should_read_first(conf->mddev, this_sector, sectors))
do_balance = 0;
for (slot = 0; slot < conf->copies ; slot++) {
@@ -1033,7 +1016,7 @@ static bool wait_barrier(struct r10conf *conf, bool nowait)
ret = false;
} else {
conf->nr_waiting++;
- raid10_log(conf->mddev, "wait barrier");
+ mddev_add_trace_msg(conf->mddev, "raid10 wait barrier");
wait_event_barrier(conf, stop_waiting_barrier(conf));
conf->nr_waiting--;
}
@@ -1152,7 +1135,7 @@ static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
bio_wouldblock_error(bio);
return false;
}
- raid10_log(conf->mddev, "wait reshape");
+ mddev_add_trace_msg(conf->mddev, "raid10 wait reshape");
wait_event(conf->wait_barrier,
conf->reshape_progress <= bio->bi_iter.bi_sector ||
conf->reshape_progress >= bio->bi_iter.bi_sector +
@@ -1249,10 +1232,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
test_bit(R10BIO_FailFast, &r10_bio->state))
read_bio->bi_opf |= MD_FAILFAST;
read_bio->bi_private = r10_bio;
-
- if (mddev->gendisk)
- trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
- r10_bio->sector);
+ mddev_trace_remap(mddev, read_bio, r10_bio->sector);
submit_bio_noacct(read_bio);
return;
}
@@ -1288,10 +1268,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
&& enough(conf, devnum))
mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r10_bio;
-
- if (conf->mddev->gendisk)
- trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
- r10_bio->sector);
+ mddev_trace_remap(mddev, mbio, r10_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_bdev = (void *)rdev;
@@ -1330,10 +1307,7 @@ retry_wait:
}
if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
- sector_t first_bad;
sector_t dev_sector = r10_bio->devs[i].addr;
- int bad_sectors;
- int is_bad;
/*
* Discard request doesn't care the write result
@@ -1342,9 +1316,8 @@ retry_wait:
if (!r10_bio->sectors)
continue;
- is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors,
- &first_bad, &bad_sectors);
- if (is_bad < 0) {
+ if (rdev_has_badblock(rdev, dev_sector,
+ r10_bio->sectors) < 0) {
/*
* Mustn't write here until the bad block
* is acknowledged
@@ -1360,8 +1333,9 @@ retry_wait:
if (unlikely(blocked_rdev)) {
/* Have to wait for this device to get unblocked, then retry */
allow_barrier(conf);
- raid10_log(conf->mddev, "%s wait rdev %d blocked",
- __func__, blocked_rdev->raid_disk);
+ mddev_add_trace_msg(conf->mddev,
+ "raid10 %s wait rdev %d blocked",
+ __func__, blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf, false);
goto retry_wait;
@@ -1416,7 +1390,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
bio_wouldblock_error(bio);
return;
}
- raid10_log(conf->mddev, "wait reshape metadata");
+ mddev_add_trace_msg(conf->mddev,
+ "raid10 wait reshape metadata");
wait_event(mddev->sb_wait,
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
@@ -2131,10 +2106,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
continue;
}
- if (mddev->gendisk)
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
-
+ err = mddev_stack_new_rdev(mddev, rdev);
+ if (err)
+ return err;
p->head_position = 0;
p->recovery_disabled = mddev->recovery_disabled - 1;
rdev->raid_disk = mirror;
@@ -2150,10 +2124,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
clear_bit(In_sync, &rdev->flags);
set_bit(Replacement, &rdev->flags);
rdev->raid_disk = repl_slot;
- err = 0;
- if (mddev->gendisk)
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
+ err = mddev_stack_new_rdev(mddev, rdev);
+ if (err)
+ return err;
conf->fullsync = 1;
WRITE_ONCE(p->replacement, rdev);
}
@@ -2290,8 +2263,6 @@ static void end_sync_write(struct bio *bio)
struct mddev *mddev = r10_bio->mddev;
struct r10conf *conf = mddev->private;
int d;
- sector_t first_bad;
- int bad_sectors;
int slot;
int repl;
struct md_rdev *rdev = NULL;
@@ -2312,11 +2283,10 @@ static void end_sync_write(struct bio *bio)
&rdev->mddev->recovery);
set_bit(R10BIO_WriteError, &r10_bio->state);
}
- } else if (is_badblock(rdev,
- r10_bio->devs[slot].addr,
- r10_bio->sectors,
- &first_bad, &bad_sectors))
+ } else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
+ r10_bio->sectors)) {
set_bit(R10BIO_MadeGood, &r10_bio->state);
+ }
rdev_dec_pending(rdev, mddev);
@@ -2597,11 +2567,8 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
int sectors, struct page *page, enum req_op op)
{
- sector_t first_bad;
- int bad_sectors;
-
- if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
- && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
+ if (rdev_has_badblock(rdev, sector, sectors) &&
+ (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
return -1;
if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
/* success */
@@ -2658,16 +2625,14 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
s = PAGE_SIZE >> 9;
do {
- sector_t first_bad;
- int bad_sectors;
-
d = r10_bio->devs[sl].devnum;
rdev = conf->mirrors[d].rdev;
if (rdev &&
test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags) &&
- is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
- &first_bad, &bad_sectors) == 0) {
+ rdev_has_badblock(rdev,
+ r10_bio->devs[sl].addr + sect,
+ s) == 0) {
atomic_inc(&rdev->nr_pending);
success = sync_page_io(rdev,
r10_bio->devs[sl].addr +
@@ -4002,14 +3967,26 @@ static struct r10conf *setup_conf(struct mddev *mddev)
return ERR_PTR(err);
}
-static void raid10_set_io_opt(struct r10conf *conf)
+static unsigned int raid10_nr_stripes(struct r10conf *conf)
{
- int raid_disks = conf->geo.raid_disks;
+ unsigned int raid_disks = conf->geo.raid_disks;
+
+ if (conf->geo.raid_disks % conf->geo.near_copies)
+ return raid_disks;
+ return raid_disks / conf->geo.near_copies;
+}
- if (!(conf->geo.raid_disks % conf->geo.near_copies))
- raid_disks /= conf->geo.near_copies;
- blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
- raid_disks);
+static int raid10_set_queue_limits(struct mddev *mddev)
+{
+ struct r10conf *conf = mddev->private;
+ struct queue_limits lim;
+
+ blk_set_stacking_limits(&lim);
+ lim.max_write_zeroes_sectors = 0;
+ lim.io_min = mddev->chunk_sectors << 9;
+ lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
+ mddev_stack_rdev_limits(mddev, &lim);
+ return queue_limits_set(mddev->gendisk->queue, &lim);
}
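raid10_nr_stripes() keeps the rule from the old raid10_set_io_opt(): the optimal IO size spans all raid disks unless near_copies divides them evenly, in which case one stripe only covers raid_disks / near_copies devices. A quick numeric sketch, with purely illustrative geometry:

    #include <stdio.h>

    static unsigned int nr_stripes(unsigned int raid_disks,
                                   unsigned int near_copies)
    {
        if (raid_disks % near_copies)
            return raid_disks;
        return raid_disks / near_copies;
    }

    int main(void)
    {
        unsigned int chunk_bytes = 512 << 9; /* 512 sectors -> 256 KiB */

        /* near=2 layout on 4 disks: 2 effective stripes per chunk row. */
        printf("io_opt=%u\n", chunk_bytes * nr_stripes(4, 2)); /* 524288 */
        /* 5 disks, near=2: not divisible, fall back to all disks. */
        printf("io_opt=%u\n", chunk_bytes * nr_stripes(5, 2)); /* 1310720 */
        return 0;
    }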
static int raid10_run(struct mddev *mddev)
@@ -4021,6 +3998,7 @@ static int raid10_run(struct mddev *mddev)
sector_t size;
sector_t min_offset_diff = 0;
int first = 1;
+ int ret = -EIO;
if (mddev->private == NULL) {
conf = setup_conf(mddev);
@@ -4047,12 +4025,6 @@ static int raid10_run(struct mddev *mddev)
}
}
- if (mddev->queue) {
- blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
- blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
- raid10_set_io_opt(conf);
- }
-
rdev_for_each(rdev, mddev) {
long long diff;
@@ -4081,14 +4053,16 @@ static int raid10_run(struct mddev *mddev)
if (first || diff < min_offset_diff)
min_offset_diff = diff;
- if (mddev->gendisk)
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
-
disk->head_position = 0;
first = 0;
}
+ if (!mddev_is_dm(conf->mddev)) {
+ ret = raid10_set_queue_limits(mddev);
+ if (ret)
+ goto out_free_conf;
+ }
+
/* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) {
pr_err("md/raid10:%s: not enough operational mirrors.\n",
@@ -4175,11 +4149,7 @@ static int raid10_run(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread)
- goto out_free_conf;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
return 0;
@@ -4189,7 +4159,7 @@ out_free_conf:
raid10_free_conf(conf);
mddev->private = NULL;
out:
- return -EIO;
+ return ret;
}
static void raid10_free(struct mddev *mddev, void *priv)
@@ -4573,16 +4543,8 @@ out:
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread) {
- ret = -EAGAIN;
- goto abort;
- }
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
conf->reshape_checkpoint = jiffies;
- md_wakeup_thread(mddev->sync_thread);
md_new_event();
return 0;
@@ -4966,8 +4928,7 @@ static void end_reshape(struct r10conf *conf)
conf->reshape_safe = MaxSector;
spin_unlock_irq(&conf->device_lock);
- if (conf->mddev->queue)
- raid10_set_io_opt(conf);
+ mddev_update_io_opt(conf->mddev, raid10_nr_stripes(conf));
conf->fullsync = 0;
}
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index da4ba736c4f0..a70cbec12ed0 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1393,7 +1393,8 @@ int ppl_init_log(struct r5conf *conf)
ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
ppl_conf->block_size = 512;
} else {
- ppl_conf->block_size = queue_logical_block_size(mddev->queue);
+ ppl_conf->block_size =
+ queue_logical_block_size(mddev->gendisk->queue);
}
for (i = 0; i < ppl_conf->count; i++) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8497880135ee..d874abfc1836 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -36,6 +36,7 @@
*/
#include <linux/blkdev.h>
+#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
@@ -760,6 +761,7 @@ enum stripe_result {
STRIPE_RETRY,
STRIPE_SCHEDULE_AND_RETRY,
STRIPE_FAIL,
+ STRIPE_WAIT_RESHAPE,
};
struct stripe_request_ctx {
@@ -1210,10 +1212,8 @@ again:
*/
while (op_is_write(op) && rdev &&
test_bit(WriteErrorSeen, &rdev->flags)) {
- sector_t first_bad;
- int bad_sectors;
- int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
- &first_bad, &bad_sectors);
+ int bad = rdev_has_badblock(rdev, sh->sector,
+ RAID5_STRIPE_SECTORS(conf));
if (!bad)
break;
@@ -1295,10 +1295,7 @@ again:
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
- if (conf->mddev->gendisk)
- trace_block_bio_remap(bi,
- disk_devt(conf->mddev->gendisk),
- sh->dev[i].sector);
+ mddev_trace_remap(conf->mddev, bi, sh->dev[i].sector);
if (should_defer && op_is_write(op))
bio_list_add(&pending_bios, bi);
else
@@ -1342,10 +1339,7 @@ again:
*/
if (op == REQ_OP_DISCARD)
rbi->bi_vcnt = 0;
- if (conf->mddev->gendisk)
- trace_block_bio_remap(rbi,
- disk_devt(conf->mddev->gendisk),
- sh->dev[i].sector);
+ mddev_trace_remap(conf->mddev, rbi, sh->dev[i].sector);
if (should_defer && op_is_write(op))
bio_list_add(&pending_bios, rbi);
else
@@ -2412,7 +2406,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
atomic_inc(&conf->active_stripes);
raid5_release_stripe(sh);
- conf->max_nr_stripes++;
+ WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1);
return 1;
}
@@ -2422,12 +2416,12 @@ static int grow_stripes(struct r5conf *conf, int num)
size_t namelen = sizeof(conf->cache_name[0]);
int devs = max(conf->raid_disks, conf->previous_raid_disks);
- if (conf->mddev->gendisk)
+ if (mddev_is_dm(conf->mddev))
snprintf(conf->cache_name[0], namelen,
- "raid%d-%s", conf->level, mdname(conf->mddev));
+ "raid%d-%p", conf->level, conf->mddev);
else
snprintf(conf->cache_name[0], namelen,
- "raid%d-%p", conf->level, conf->mddev);
+ "raid%d-%s", conf->level, mdname(conf->mddev));
snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
conf->active_name = 0;
@@ -2707,7 +2701,7 @@ static int drop_one_stripe(struct r5conf *conf)
shrink_buffers(sh);
free_stripe(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes);
- conf->max_nr_stripes--;
+ WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1);
return 1;
}
@@ -2855,8 +2849,6 @@ static void raid5_end_write_request(struct bio *bi)
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
struct md_rdev *rdev;
- sector_t first_bad;
- int bad_sectors;
int replacement = 0;
for (i = 0 ; i < disks; i++) {
@@ -2888,9 +2880,8 @@ static void raid5_end_write_request(struct bio *bi)
if (replacement) {
if (bi->bi_status)
md_error(conf->mddev, rdev);
- else if (is_badblock(rdev, sh->sector,
- RAID5_STRIPE_SECTORS(conf),
- &first_bad, &bad_sectors))
+ else if (rdev_has_badblock(rdev, sh->sector,
+ RAID5_STRIPE_SECTORS(conf)))
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else {
if (bi->bi_status) {
@@ -2900,9 +2891,8 @@ static void raid5_end_write_request(struct bio *bi)
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED,
&rdev->mddev->recovery);
- } else if (is_badblock(rdev, sh->sector,
- RAID5_STRIPE_SECTORS(conf),
- &first_bad, &bad_sectors)) {
+ } else if (rdev_has_badblock(rdev, sh->sector,
+ RAID5_STRIPE_SECTORS(conf))) {
set_bit(R5_MadeGood, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags))
/* That was a successful write so make
@@ -4205,10 +4195,9 @@ static int handle_stripe_dirtying(struct r5conf *conf,
set_bit(STRIPE_HANDLE, &sh->state);
if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
- if (conf->mddev->queue)
- blk_add_trace_msg(conf->mddev->queue,
- "raid5 rmw %llu %d",
- (unsigned long long)sh->sector, rmw);
+ mddev_add_trace_msg(conf->mddev, "raid5 rmw %llu %d",
+ sh->sector, rmw);
+
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (test_bit(R5_InJournal, &dev->flags) &&
@@ -4285,10 +4274,11 @@ static int handle_stripe_dirtying(struct r5conf *conf,
set_bit(STRIPE_DELAYED, &sh->state);
}
}
- if (rcw && conf->mddev->queue)
- blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
- (unsigned long long)sh->sector,
- rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
+ if (rcw && !mddev_is_dm(conf->mddev))
+ blk_add_trace_msg(conf->mddev->gendisk->queue,
+ "raid5 rcw %llu %d %d %d",
+ (unsigned long long)sh->sector, rcw, qread,
+ test_bit(STRIPE_DELAYED, &sh->state));
}
if (rcw > disks && rmw > disks &&
@@ -4674,8 +4664,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
/* Now to look around and see what can be done */
for (i=disks; i--; ) {
struct md_rdev *rdev;
- sector_t first_bad;
- int bad_sectors;
int is_bad = 0;
dev = &sh->dev[i];
@@ -4719,8 +4707,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
rdev = conf->disks[i].replacement;
if (rdev && !test_bit(Faulty, &rdev->flags) &&
rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) &&
- !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
- &first_bad, &bad_sectors))
+ !rdev_has_badblock(rdev, sh->sector,
+ RAID5_STRIPE_SECTORS(conf)))
set_bit(R5_ReadRepl, &dev->flags);
else {
if (rdev && !test_bit(Faulty, &rdev->flags))
@@ -4733,8 +4721,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (rdev && test_bit(Faulty, &rdev->flags))
rdev = NULL;
if (rdev) {
- is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
- &first_bad, &bad_sectors);
+ is_bad = rdev_has_badblock(rdev, sh->sector,
+ RAID5_STRIPE_SECTORS(conf));
if (s->blocked_rdev == NULL
&& (test_bit(Blocked, &rdev->flags)
|| is_bad < 0)) {
@@ -5463,8 +5451,8 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
struct r5conf *conf = mddev->private;
struct bio *align_bio;
struct md_rdev *rdev;
- sector_t sector, end_sector, first_bad;
- int bad_sectors, dd_idx;
+ sector_t sector, end_sector;
+ int dd_idx;
bool did_inc;
if (!in_chunk_boundary(mddev, raid_bio)) {
@@ -5493,8 +5481,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
atomic_inc(&rdev->nr_pending);
- if (is_badblock(rdev, sector, bio_sectors(raid_bio), &first_bad,
- &bad_sectors)) {
+ if (rdev_has_badblock(rdev, sector, bio_sectors(raid_bio))) {
rdev_dec_pending(rdev, mddev);
return 0;
}
@@ -5530,9 +5517,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
spin_unlock_irq(&conf->device_lock);
}
- if (mddev->gendisk)
- trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
- raid_bio->bi_iter.bi_sector);
+ mddev_trace_remap(mddev, align_bio, raid_bio->bi_iter.bi_sector);
submit_bio_noacct(align_bio);
return 1;
}
@@ -5701,8 +5686,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
}
release_inactive_stripe_list(conf, cb->temp_inactive_list,
NR_STRIPE_HASH_LOCKS);
- if (mddev->queue)
- trace_block_unplug(mddev->queue, cnt, !from_schedule);
+ if (!mddev_is_dm(mddev))
+ trace_block_unplug(mddev->gendisk->queue, cnt, !from_schedule);
kfree(cb);
}
@@ -5946,7 +5931,8 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
if (ahead_of_reshape(mddev, logical_sector,
conf->reshape_safe)) {
spin_unlock_irq(&conf->device_lock);
- return STRIPE_SCHEDULE_AND_RETRY;
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out;
}
}
spin_unlock_irq(&conf->device_lock);
@@ -6025,6 +6011,12 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
out_release:
raid5_release_stripe(sh);
+out:
+ if (ret == STRIPE_SCHEDULE_AND_RETRY && reshape_interrupted(mddev)) {
+ bi->bi_status = BLK_STS_RESOURCE;
+ ret = STRIPE_WAIT_RESHAPE;
+ pr_err_ratelimited("dm-raid456: io across reshape position while reshape can't make progress\n");
+ }
return ret;
}
@@ -6146,7 +6138,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
while (1) {
res = make_stripe_request(mddev, conf, &ctx, logical_sector,
bi);
- if (res == STRIPE_FAIL)
+ if (res == STRIPE_FAIL || res == STRIPE_WAIT_RESHAPE)
break;
if (res == STRIPE_RETRY)
@@ -6184,6 +6176,11 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
if (rw == WRITE)
md_write_end(mddev);
+ if (res == STRIPE_WAIT_RESHAPE) {
+ md_free_cloned_bio(bi);
+ return false;
+ }
+
bio_endio(bi);
return true;
}
@@ -6773,7 +6770,18 @@ static void raid5d(struct md_thread *thread)
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
+
+ /*
+ * Waiting on MD_SB_CHANGE_PENDING below may deadlock
+ * seeing md_check_recovery() is needed to clear
+ * the flag when using mdmon.
+ */
+ continue;
}
+
+ wait_event_lock_irq(mddev->sb_wait,
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
+ conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);
@@ -6820,7 +6828,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
if (size <= 16 || size > 32768)
return -EINVAL;
- conf->min_nr_stripes = size;
+ WRITE_ONCE(conf->min_nr_stripes, size);
mutex_lock(&conf->cache_size_mutex);
while (size < conf->max_nr_stripes &&
drop_one_stripe(conf))
@@ -6832,7 +6840,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
mutex_lock(&conf->cache_size_mutex);
while (size > conf->max_nr_stripes)
if (!grow_one_stripe(conf, GFP_KERNEL)) {
- conf->min_nr_stripes = conf->max_nr_stripes;
+ WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes);
result = -ENOMEM;
break;
}
@@ -6967,10 +6975,8 @@ raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len)
pr_debug("md/raid: change stripe_size from %lu to %lu\n",
conf->stripe_size, new);
- if (mddev->sync_thread ||
- test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- mddev->reshape_position != MaxSector ||
- mddev->sysfs_active) {
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+ mddev->reshape_position != MaxSector || mddev->sysfs_active) {
err = -EBUSY;
goto out_unlock;
}
@@ -7084,7 +7090,7 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
if (!conf)
err = -ENODEV;
else if (new != conf->skip_copy) {
- struct request_queue *q = mddev->queue;
+ struct request_queue *q = mddev->gendisk->queue;
conf->skip_copy = new;
if (new)
@@ -7390,11 +7396,13 @@ static unsigned long raid5_cache_count(struct shrinker *shrink,
struct shrink_control *sc)
{
struct r5conf *conf = shrink->private_data;
+ int max_stripes = READ_ONCE(conf->max_nr_stripes);
+ int min_stripes = READ_ONCE(conf->min_nr_stripes);
- if (conf->max_nr_stripes < conf->min_nr_stripes)
+ if (max_stripes < min_stripes)
/* unlikely, but not impossible */
return 0;
- return conf->max_nr_stripes - conf->min_nr_stripes;
+ return max_stripes - min_stripes;
}
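The shrinker callback runs without conf->cache_size_mutex, so it now snapshots max_nr_stripes and min_nr_stripes exactly once each, paired with the WRITE_ONCE() updates in grow_one_stripe(), drop_one_stripe() and raid5_set_cache_size(). A userspace analogue of the pattern, using C11 relaxed atomics as a stand-in for READ_ONCE/WRITE_ONCE (names and values are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical counters, updated by writers, read locklessly. */
    static _Atomic int max_nr_stripes = 256;
    static _Atomic int min_nr_stripes = 64;

    static int cache_count(void)
    {
        /* Snapshot each value once so the subtraction is self-consistent. */
        int max = atomic_load_explicit(&max_nr_stripes, memory_order_relaxed);
        int min = atomic_load_explicit(&min_nr_stripes, memory_order_relaxed);

        if (max < min) /* unlikely, but possible mid-resize */
            return 0;
        return max - min;
    }

    int main(void)
    {
        printf("%d\n", cache_count()); /* 192 */
        return 0;
    }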
static struct r5conf *setup_conf(struct mddev *mddev)
@@ -7684,10 +7692,65 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
return 0;
}
-static void raid5_set_io_opt(struct r5conf *conf)
+static int raid5_set_limits(struct mddev *mddev)
{
- blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
- (conf->raid_disks - conf->max_degraded));
+ struct r5conf *conf = mddev->private;
+ struct queue_limits lim;
+ int data_disks, stripe;
+ struct md_rdev *rdev;
+
+ /*
+ * The read-ahead size must cover two whole stripes, which is
+ * 2 * (data disks) * chunksize, where 'data disks' is the number of
+ * raid devices minus the maximum degraded count.
+ */
+ data_disks = conf->previous_raid_disks - conf->max_degraded;
+
+ /*
+ * We can only discard a whole stripe. It doesn't make sense to
+ * discard the data disks but still write the parity disk.
+ */
+ stripe = roundup_pow_of_two(data_disks * (mddev->chunk_sectors << 9));
+
+ blk_set_stacking_limits(&lim);
+ lim.io_min = mddev->chunk_sectors << 9;
+ lim.io_opt = lim.io_min * (conf->raid_disks - conf->max_degraded);
+ lim.raid_partial_stripes_expensive = 1;
+ lim.discard_granularity = stripe;
+ lim.max_write_zeroes_sectors = 0;
+ mddev_stack_rdev_limits(mddev, &lim);
+ rdev_for_each(rdev, mddev)
+ queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset,
+ mddev->gendisk->disk_name);
+
+ /*
+ * Zeroing is required for discard, otherwise data could be lost.
+ *
+ * Consider a scenario: discard a stripe (the stripe could be
+ * inconsistent if discard_zeroes_data is 0); write one disk of the
+ * stripe (the stripe could be inconsistent again depending on which
+ * disks are used to calculate parity); the disk is broken; The stripe
+ * data of this disk is lost.
+ *
+ * We only allow DISCARD if the sysadmin has confirmed that only safe
+ * devices are in use by setting a module parameter. A better idea
+ * might be to turn DISCARD into WRITE_ZEROES requests, as that is
+ * required to be safe.
+ */
+ if (!devices_handle_discard_safely ||
+ lim.max_discard_sectors < (stripe >> 9) ||
+ lim.discard_granularity < stripe)
+ lim.max_hw_discard_sectors = 0;
+
+ /*
+ * Requests require having a bitmap for each stripe.
+ * Limit the max sectors based on this.
+ */
+ lim.max_hw_sectors = RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf);
+
+ /* No restrictions on the number of segments in the request */
+ lim.max_segments = USHRT_MAX;
+
+ return queue_limits_set(mddev->gendisk->queue, &lim);
}
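raid5_set_limits() keeps the old rule that discards must cover a whole stripe: the granularity is the data-disk span of one chunk row rounded up to a power of two, and discard is disabled outright unless the stacked limits can honor it. A sketch of that computation with invented geometry; the helper below is a userspace stand-in for the kernel's roundup_pow_of_two():

    #include <stdio.h>

    /* Round v up to the next power of two (v > 0). */
    static unsigned long roundup_pow_of_two(unsigned long v)
    {
        unsigned long p = 1;

        while (p < v)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned int chunk_sectors = 128;       /* 64 KiB chunks */
        int raid_disks = 6, max_degraded = 2;   /* raid6-like geometry */
        int data_disks = raid_disks - max_degraded;

        unsigned long stripe =
            roundup_pow_of_two(data_disks * (chunk_sectors << 9));

        /* 4 * 65536 = 262144, already a power of two. */
        printf("discard_granularity=%lu bytes\n", stripe);
        return 0;
    }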
static int raid5_run(struct mddev *mddev)
@@ -7700,6 +7763,7 @@ static int raid5_run(struct mddev *mddev)
int i;
long long min_offset_diff = 0;
int first = 1;
+ int ret = -EIO;
if (mddev->recovery_cp != MaxSector)
pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
@@ -7936,11 +8000,7 @@ static int raid5_run(struct mddev *mddev)
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread)
- goto abort;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
/* Ok, everything is just fine now */
@@ -7952,66 +8012,10 @@ static int raid5_run(struct mddev *mddev)
mdname(mddev));
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
- if (mddev->queue) {
- int chunk_size;
- /* read-ahead size must cover two whole stripes, which
- * is 2 * (datadisks) * chunksize where 'n' is the
- * number of raid devices
- */
- int data_disks = conf->previous_raid_disks - conf->max_degraded;
- int stripe = data_disks *
- ((mddev->chunk_sectors << 9) / PAGE_SIZE);
-
- chunk_size = mddev->chunk_sectors << 9;
- blk_queue_io_min(mddev->queue, chunk_size);
- raid5_set_io_opt(conf);
- mddev->queue->limits.raid_partial_stripes_expensive = 1;
- /*
- * We can only discard a whole stripe. It doesn't make sense to
- * discard data disk but write parity disk
- */
- stripe = stripe * PAGE_SIZE;
- stripe = roundup_pow_of_two(stripe);
- mddev->queue->limits.discard_granularity = stripe;
-
- blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-
- rdev_for_each(rdev, mddev) {
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->new_data_offset << 9);
- }
-
- /*
- * zeroing is required, otherwise data
- * could be lost. Consider a scenario: discard a stripe
- * (the stripe could be inconsistent if
- * discard_zeroes_data is 0); write one disk of the
- * stripe (the stripe could be inconsistent again
- * depending on which disks are used to calculate
- * parity); the disk is broken; The stripe data of this
- * disk is lost.
- *
- * We only allow DISCARD if the sysadmin has confirmed that
- * only safe devices are in use by setting a module parameter.
- * A better idea might be to turn DISCARD into WRITE_ZEROES
- * requests, as that is required to be safe.
- */
- if (!devices_handle_discard_safely ||
- mddev->queue->limits.max_discard_sectors < (stripe >> 9) ||
- mddev->queue->limits.discard_granularity < stripe)
- blk_queue_max_discard_sectors(mddev->queue, 0);
-
- /*
- * Requests require having a bitmap for each stripe.
- * Limit the max sectors based on this.
- */
- blk_queue_max_hw_sectors(mddev->queue,
- RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf));
-
- /* No restrictions on the number of segments in the request */
- blk_queue_max_segments(mddev->queue, USHRT_MAX);
+ if (!mddev_is_dm(mddev)) {
+ ret = raid5_set_limits(mddev);
+ if (ret)
+ goto abort;
}
if (log_init(conf, journal_dev, raid5_has_ppl(conf)))
@@ -8024,7 +8028,7 @@ abort:
free_conf(conf);
mddev->private = NULL;
pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
- return -EIO;
+ return ret;
}
static void raid5_free(struct mddev *mddev, void *priv)
@@ -8506,29 +8510,8 @@ static int raid5_start_reshape(struct mddev *mddev)
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
- set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- rcu_assign_pointer(mddev->sync_thread,
- md_register_thread(md_do_sync, mddev, "reshape"));
- if (!mddev->sync_thread) {
- mddev->recovery = 0;
- spin_lock_irq(&conf->device_lock);
- write_seqcount_begin(&conf->gen_lock);
- mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
- mddev->new_chunk_sectors =
- conf->chunk_sectors = conf->prev_chunk_sectors;
- mddev->new_layout = conf->algorithm = conf->prev_algo;
- rdev_for_each(rdev, mddev)
- rdev->new_data_offset = rdev->data_offset;
- smp_wmb();
- conf->generation --;
- conf->reshape_progress = MaxSector;
- mddev->reshape_position = MaxSector;
- write_seqcount_end(&conf->gen_lock);
- spin_unlock_irq(&conf->device_lock);
- return -EAGAIN;
- }
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
conf->reshape_checkpoint = jiffies;
- md_wakeup_thread(mddev->sync_thread);
md_new_event();
return 0;
}
@@ -8556,8 +8539,8 @@ static void end_reshape(struct r5conf *conf)
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
- if (conf->mddev->queue)
- raid5_set_io_opt(conf);
+ mddev_update_io_opt(conf->mddev,
+ conf->raid_disks - conf->max_degraded);
}
}
@@ -8934,6 +8917,18 @@ static int raid5_start(struct mddev *mddev)
return r5l_start(conf->log);
}
+/*
+ * This is only used for dm-raid456, where the caller has already frozen the
+ * sync_thread. Hence, if reshape is still in progress, IO that is waiting for
+ * reshape can never complete now, so wake up and handle that IO.
+ */
+static void raid5_prepare_suspend(struct mddev *mddev)
+{
+ struct r5conf *conf = mddev->private;
+
+ wake_up(&conf->wait_for_overlap);
+}
+
static struct md_personality raid6_personality =
{
.name = "raid6",
@@ -8957,6 +8952,7 @@ static struct md_personality raid6_personality =
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
+ .prepare_suspend = raid5_prepare_suspend,
};
static struct md_personality raid5_personality =
{
@@ -8981,6 +8977,7 @@ static struct md_personality raid5_personality =
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
+ .prepare_suspend = raid5_prepare_suspend,
};
static struct md_personality raid4_personality =
@@ -9006,6 +9003,7 @@ static struct md_personality raid4_personality =
.quiesce = raid5_quiesce,
.takeover = raid4_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
+ .prepare_suspend = raid5_prepare_suspend,
};
static int __init raid5_init(void)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index aebd3c12020b..c381c22135a2 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -725,6 +725,9 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
unsigned int i;
u32 status;
+ if (!rkisp1->irqs_enabled)
+ return IRQ_NONE;
+
status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
if (!status)
return IRQ_NONE;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index 4b6b28c05b89..b757f75edecf 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -450,6 +450,7 @@ struct rkisp1_debug {
* @debug: debug params to be exposed on debugfs
* @info: version-specific ISP information
* @irqs: IRQ line numbers
+ * @irqs_enabled: the hardware is enabled and can cause interrupts
*/
struct rkisp1_device {
void __iomem *base_addr;
@@ -471,6 +472,7 @@ struct rkisp1_device {
struct rkisp1_debug debug;
const struct rkisp1_info *info;
int irqs[RKISP1_NUM_IRQS];
+ bool irqs_enabled;
};
/*
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
index b6e47e2f1b94..4202642e0523 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
@@ -196,6 +196,9 @@ irqreturn_t rkisp1_csi_isr(int irq, void *ctx)
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 val, status;
+ if (!rkisp1->irqs_enabled)
+ return IRQ_NONE;
+
status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
if (!status)
return IRQ_NONE;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index f96f821a7b50..73cf08a74011 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -305,6 +305,24 @@ static int __maybe_unused rkisp1_runtime_suspend(struct device *dev)
{
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+ rkisp1->irqs_enabled = false;
+ /* Make sure the IRQ handler will see the above */
+ mb();
+
+ /*
+ * Wait until any running IRQ handler has returned. The IRQ handler
+ * may get called even after this (as it's a shared interrupt line)
+ * but the 'irqs_enabled' flag will make the handler return immediately.
+ */
+ for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il) {
+ if (rkisp1->irqs[il] == -1)
+ continue;
+
+ /* Skip if the irq line is the same as previous */
+ if (il == 0 || rkisp1->irqs[il - 1] != rkisp1->irqs[il])
+ synchronize_irq(rkisp1->irqs[il]);
+ }
+
clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks);
return pinctrl_pm_select_sleep_state(dev);
}
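
The suspend path above is the standard shared-IRQ quiesce sequence: clear the flag, fence it, then synchronize_irq() each distinct line, collapsing consecutive duplicates because several slots in the irqs[] array may map to one line. A self-contained sketch of just the de-duplication loop, with the kernel call replaced by a stub so it compiles anywhere:

/* Sketch: synchronize each distinct IRQ line once, skipping unused
 * (-1) slots and consecutive duplicates, as the suspend path above. */
#include <stdio.h>

#define NUM_IRQS 3

static void synchronize_irq_stub(int irq)   /* stands in for synchronize_irq() */
{
    printf("synchronize_irq(%d)\n", irq);
}

int main(void)
{
    int irqs[NUM_IRQS] = { 37, 37, -1 };    /* two slots share one line */

    for (unsigned int il = 0; il < NUM_IRQS; il++) {
        if (irqs[il] == -1)
            continue;
        /* only sync when this slot differs from the previous one */
        if (il == 0 || irqs[il - 1] != irqs[il])
            synchronize_irq_stub(irqs[il]);
    }
    return 0;   /* prints a single line for IRQ 37 */
}
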
@@ -321,6 +339,10 @@ static int __maybe_unused rkisp1_runtime_resume(struct device *dev)
if (ret)
return ret;
+ rkisp1->irqs_enabled = true;
+ /* Make sure the IRQ handler will see the above */
+ mb();
+
return 0;
}
@@ -559,7 +581,7 @@ static int rkisp1_probe(struct platform_device *pdev)
rkisp1->irqs[il] = irq;
}
- ret = devm_request_irq(dev, irq, info->isrs[i].isr, 0,
+ ret = devm_request_irq(dev, irq, info->isrs[i].isr, IRQF_SHARED,
dev_driver_string(dev), dev);
if (ret) {
dev_err(dev, "request irq failed: %d\n", ret);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index f00873d31c42..78a1f7a1499b 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -976,6 +976,9 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 status, isp_err;
+ if (!rkisp1->irqs_enabled)
+ return IRQ_NONE;
+
status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
if (!status)
return IRQ_NONE;
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 2afe67ffa285..74d69ce22a33 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -319,6 +319,7 @@ config IR_PWM_TX
tristate "PWM IR transmitter"
depends on LIRC
depends on PWM
+ depends on HIGH_RES_TIMERS
depends on OF
help
Say Y if you want to use a PWM based IR transmitter. This is
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index fe17c7f98e81..2f7564f26445 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -110,7 +110,7 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_trace_printk:
- if (perfmon_capable())
+ if (bpf_token_capable(prog->aux->token, CAP_PERFMON))
return bpf_get_trace_printk_proto();
fallthrough;
default:
@@ -253,7 +253,7 @@ int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
if (attr->attach_flags)
return -EINVAL;
- rcdev = rc_dev_get_from_fd(attr->target_fd);
+ rcdev = rc_dev_get_from_fd(attr->target_fd, true);
if (IS_ERR(rcdev))
return PTR_ERR(rcdev);
@@ -278,7 +278,7 @@ int lirc_prog_detach(const union bpf_attr *attr)
if (IS_ERR(prog))
return PTR_ERR(prog);
- rcdev = rc_dev_get_from_fd(attr->target_fd);
+ rcdev = rc_dev_get_from_fd(attr->target_fd, true);
if (IS_ERR(rcdev)) {
bpf_prog_put(prog);
return PTR_ERR(rcdev);
@@ -303,7 +303,7 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
if (attr->query.query_flags)
return -EINVAL;
- rcdev = rc_dev_get_from_fd(attr->query.target_fd);
+ rcdev = rc_dev_get_from_fd(attr->query.target_fd, false);
if (IS_ERR(rcdev))
return PTR_ERR(rcdev);
diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
index 196806709259..69e630d85262 100644
--- a/drivers/media/rc/ir_toy.c
+++ b/drivers/media/rc/ir_toy.c
@@ -332,6 +332,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
sizeof(COMMAND_SMODE_EXIT), STATE_COMMAND_NO_RESP);
if (err) {
dev_err(irtoy->dev, "exit sample mode: %d\n", err);
+ kfree(buf);
return err;
}
@@ -339,6 +340,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
if (err) {
dev_err(irtoy->dev, "enter sample mode: %d\n", err);
+ kfree(buf);
return err;
}
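
The two hunks above plug leaks by adding kfree() before each newly handled early return. An often less error-prone shape for the same function is a single exit label; a hedged sketch of that alternative (stubbed, not the driver's actual code):

/* Sketch: single exit label so no early return can leak the buffer. */
#include <stdlib.h>

static int send_cmd(const char *cmd)    /* stub for the device command */
{
    (void)cmd;
    return 0;
}

static int tx(const unsigned int *txbuf, unsigned int count)
{
    void *buf;
    int err;

    (void)txbuf;
    buf = malloc(count * sizeof(*txbuf));
    if (!buf)
        return -12;         /* -ENOMEM */

    err = send_cmd("exit sample mode");
    if (err)
        goto out_free;      /* one cleanup path for every error */

    err = send_cmd("enter sample mode");
    if (err)
        goto out_free;

    /* ... encode txbuf into buf and transmit ... */
    err = 0;
out_free:
    free(buf);
    return err;
}

int main(void)
{
    unsigned int data[2] = { 100, 200 };

    return tx(data, 2);
}
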
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index a537734832c5..caad59f76793 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -814,7 +814,7 @@ void __exit lirc_dev_exit(void)
unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX);
}
-struct rc_dev *rc_dev_get_from_fd(int fd)
+struct rc_dev *rc_dev_get_from_fd(int fd, bool write)
{
struct fd f = fdget(fd);
struct lirc_fh *fh;
@@ -828,6 +828,9 @@ struct rc_dev *rc_dev_get_from_fd(int fd)
return ERR_PTR(-EINVAL);
}
+ if (write && !(f.file->f_mode & FMODE_WRITE)) {
+ fdput(f);
+ return ERR_PTR(-EPERM);
+ }
+
fh = f.file->private_data;
dev = fh->rc;
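
With the extra parameter, attach and detach demand a file descriptor opened for writing while query accepts a read-only one. A userspace analog of gating a mutating operation on the fd's access mode, using fcntl() (illustrative only, not the kernel's FMODE_WRITE path):

/* Sketch: refuse a mutating operation unless the fd allows writing. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int check_write_access(int fd)
{
    int flags = fcntl(fd, F_GETFL);

    if (flags < 0)
        return -errno;
    if ((flags & O_ACCMODE) == O_RDONLY)
        return -EPERM;      /* read-only fd: no attach/detach */
    return 0;
}

int main(void)
{
    int fd = open("/dev/null", O_RDONLY);

    printf("write allowed: %d\n", check_write_access(fd));  /* -EPERM */
    close(fd);
    return 0;
}
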
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index ef1e95e1af7f..7df949fc65e2 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -325,7 +325,7 @@ void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev);
void lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc);
int lirc_register(struct rc_dev *dev);
void lirc_unregister(struct rc_dev *dev);
-struct rc_dev *rc_dev_get_from_fd(int fd);
+struct rc_dev *rc_dev_get_from_fd(int fd, bool write);
#else
static inline int lirc_dev_init(void) { return 0; }
static inline void lirc_dev_exit(void) {}
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 434982545be6..8c5ad5c025fa 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -72,7 +72,6 @@ static DEFINE_SPINLOCK(emif_lock);
static unsigned long irq_state;
static LIST_HEAD(device_list);
-#ifdef CONFIG_DEBUG_FS
static void do_emif_regdump_show(struct seq_file *s, struct emif_data *emif,
struct emif_regs *regs)
{
@@ -140,31 +139,24 @@ static int emif_mr4_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(emif_mr4);
-static int __init_or_module emif_debugfs_init(struct emif_data *emif)
+static void emif_debugfs_init(struct emif_data *emif)
{
- emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
- debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
- &emif_regdump_fops);
- debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
- &emif_mr4_fops);
- return 0;
-}
-
-static void __exit emif_debugfs_exit(struct emif_data *emif)
-{
- debugfs_remove_recursive(emif->debugfs_root);
- emif->debugfs_root = NULL;
-}
-#else
-static inline int __init_or_module emif_debugfs_init(struct emif_data *emif)
-{
- return 0;
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ emif->debugfs_root = debugfs_create_dir(dev_name(emif->dev), NULL);
+ debugfs_create_file("regcache_dump", S_IRUGO, emif->debugfs_root, emif,
+ &emif_regdump_fops);
+ debugfs_create_file("mr4", S_IRUGO, emif->debugfs_root, emif,
+ &emif_mr4_fops);
+ }
}
-static inline void __exit emif_debugfs_exit(struct emif_data *emif)
+static void emif_debugfs_exit(struct emif_data *emif)
{
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ debugfs_remove_recursive(emif->debugfs_root);
+ emif->debugfs_root = NULL;
+ }
}
-#endif
/*
* Get bus width used by EMIF. Note that this may be different from the
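
The debugfs rework above swaps #ifdef stubs for IS_ENABLED(), so both branches are always compiled and type-checked while the disabled branch is still discarded as dead code. A standalone sketch of the idiom, with a simplified 0/1 stand-in for the kernel macro:

/* Sketch: IS_ENABLED-style compile-time gating. The real kernel macro
 * expands CONFIG_* symbols; here a plain 0/1 define stands in. */
#include <stdio.h>

#define CONFIG_DEBUG_FS 0       /* flip to 1 to enable */
#define IS_ENABLED(x) (x)       /* simplified stand-in */

static void debugfs_init(void)
{
    if (IS_ENABLED(CONFIG_DEBUG_FS)) {
        /* Always parsed and type-checked, but the compiler drops
         * this branch entirely when the option is 0. */
        printf("creating debugfs entries\n");
    }
}

int main(void)
{
    debugfs_init();
    return 0;
}
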
@@ -679,7 +671,7 @@ static void disable_and_clear_all_interrupts(struct emif_data *emif)
clear_all_interrupts(emif);
}
-static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
+static int setup_interrupts(struct emif_data *emif, u32 irq)
{
u32 interrupts, type;
void __iomem *base = emif->base;
@@ -710,7 +702,7 @@ static int __init_or_module setup_interrupts(struct emif_data *emif, u32 irq)
}
-static void __init_or_module emif_onetime_settings(struct emif_data *emif)
+static void emif_onetime_settings(struct emif_data *emif)
{
u32 pwr_mgmt_ctrl, zq, temp_alert_cfg;
void __iomem *base = emif->base;
@@ -834,8 +826,7 @@ static int is_custom_config_valid(struct emif_custom_configs *cust_cfgs,
return valid;
}
-#if defined(CONFIG_OF)
-static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
+static void of_get_custom_configs(struct device_node *np_emif,
struct emif_data *emif)
{
struct emif_custom_configs *cust_cfgs = NULL;
@@ -884,7 +875,7 @@ static void __init_or_module of_get_custom_configs(struct device_node *np_emif,
emif->plat_data->custom_configs = cust_cfgs;
}
-static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
+static void of_get_ddr_info(struct device_node *np_emif,
struct device_node *np_ddr,
struct ddr_device_info *dev_info)
{
@@ -918,7 +909,7 @@ static void __init_or_module of_get_ddr_info(struct device_node *np_emif,
dev_info->io_width = __fls(io_width) - 1;
}
-static struct emif_data * __init_or_module of_get_memory_device_details(
+static struct emif_data *of_get_memory_device_details(
struct device_node *np_emif, struct device *dev)
{
struct emif_data *emif = NULL;
@@ -991,16 +982,7 @@ out:
return emif;
}
-#else
-
-static struct emif_data * __init_or_module of_get_memory_device_details(
- struct device_node *np_emif, struct device *dev)
-{
- return NULL;
-}
-#endif
-
-static struct emif_data *__init_or_module get_device_details(
+static struct emif_data *get_device_details(
struct platform_device *pdev)
{
u32 size;
@@ -1104,7 +1086,7 @@ error:
return NULL;
}
-static int __init_or_module emif_probe(struct platform_device *pdev)
+static int emif_probe(struct platform_device *pdev)
{
struct emif_data *emif;
int irq, ret;
@@ -1159,7 +1141,7 @@ error:
return -ENODEV;
}
-static void __exit emif_remove(struct platform_device *pdev)
+static void emif_remove(struct platform_device *pdev)
{
struct emif_data *emif = platform_get_drvdata(pdev);
@@ -1183,7 +1165,8 @@ MODULE_DEVICE_TABLE(of, emif_of_match);
#endif
static struct platform_driver emif_driver = {
- .remove_new = __exit_p(emif_remove),
+ .probe = emif_probe,
+ .remove_new = emif_remove,
.shutdown = emif_shutdown,
.driver = {
.name = "emif",
@@ -1191,7 +1174,7 @@ static struct platform_driver emif_driver = {
},
};
-module_platform_driver_probe(emif_driver, emif_probe);
+module_platform_driver(emif_driver);
MODULE_DESCRIPTION("TI EMIF SDRAM Controller Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c
index 47d0ea5f1616..1c63eeacd071 100644
--- a/drivers/memory/stm32-fmc2-ebi.c
+++ b/drivers/memory/stm32-fmc2-ebi.c
@@ -11,6 +11,7 @@
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -20,8 +21,15 @@
#define FMC2_BCR(x) ((x) * 0x8 + FMC2_BCR1)
#define FMC2_BTR(x) ((x) * 0x8 + FMC2_BTR1)
#define FMC2_PCSCNTR 0x20
+#define FMC2_CFGR 0x20
+#define FMC2_SR 0x84
#define FMC2_BWTR1 0x104
#define FMC2_BWTR(x) ((x) * 0x8 + FMC2_BWTR1)
+#define FMC2_SECCFGR 0x300
+#define FMC2_CIDCFGR0 0x30c
+#define FMC2_CIDCFGR(x) ((x) * 0x8 + FMC2_CIDCFGR0)
+#define FMC2_SEMCR0 0x310
+#define FMC2_SEMCR(x) ((x) * 0x8 + FMC2_SEMCR0)
/* Register: FMC2_BCR1 */
#define FMC2_BCR1_CCLKEN BIT(20)
@@ -42,6 +50,7 @@
#define FMC2_BCR_ASYNCWAIT BIT(15)
#define FMC2_BCR_CPSIZE GENMASK(18, 16)
#define FMC2_BCR_CBURSTRW BIT(19)
+#define FMC2_BCR_CSCOUNT GENMASK(21, 20)
#define FMC2_BCR_NBLSET GENMASK(23, 22)
/* Register: FMC2_BTRx/FMC2_BWTRx */
@@ -58,8 +67,28 @@
#define FMC2_PCSCNTR_CSCOUNT GENMASK(15, 0)
#define FMC2_PCSCNTR_CNTBEN(x) BIT((x) + 16)
+/* Register: FMC2_CFGR */
+#define FMC2_CFGR_CLKDIV GENMASK(19, 16)
+#define FMC2_CFGR_CCLKEN BIT(20)
+#define FMC2_CFGR_FMC2EN BIT(31)
+
+/* Register: FMC2_SR */
+#define FMC2_SR_ISOST GENMASK(1, 0)
+
+/* Register: FMC2_CIDCFGR */
+#define FMC2_CIDCFGR_CFEN BIT(0)
+#define FMC2_CIDCFGR_SEMEN BIT(1)
+#define FMC2_CIDCFGR_SCID GENMASK(6, 4)
+#define FMC2_CIDCFGR_SEMWLC1 BIT(17)
+
+/* Register: FMC2_SEMCR */
+#define FMC2_SEMCR_SEM_MUTEX BIT(0)
+#define FMC2_SEMCR_SEMCID GENMASK(6, 4)
+
#define FMC2_MAX_EBI_CE 4
#define FMC2_MAX_BANKS 5
+#define FMC2_MAX_RESOURCES 6
+#define FMC2_CID1 1
#define FMC2_BCR_CPSIZE_0 0x0
#define FMC2_BCR_CPSIZE_128 0x1
@@ -74,6 +103,11 @@
#define FMC2_BCR_MTYP_PSRAM 0x1
#define FMC2_BCR_MTYP_NOR 0x2
+#define FMC2_BCR_CSCOUNT_0 0x0
+#define FMC2_BCR_CSCOUNT_1 0x1
+#define FMC2_BCR_CSCOUNT_64 0x2
+#define FMC2_BCR_CSCOUNT_256 0x3
+
#define FMC2_BXTR_EXTMOD_A 0x0
#define FMC2_BXTR_EXTMOD_B 0x1
#define FMC2_BXTR_EXTMOD_C 0x2
@@ -88,6 +122,7 @@
#define FMC2_BTR_CLKDIV_MAX 0xf
#define FMC2_BTR_DATLAT_MAX 0xf
#define FMC2_PCSCNTR_CSCOUNT_MAX 0xff
+#define FMC2_CFGR_CLKDIV_MAX 0xf
enum stm32_fmc2_ebi_bank {
FMC2_EBI1 = 0,
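
The new CFGR/CIDCFGR/SEMCR fields are all built and parsed with GENMASK()/FIELD_PREP()/FIELD_GET(). A compact sketch of what those helpers compute, using simplified stand-ins (the real macros in linux/bits.h and linux/bitfield.h add type and constant checking):

/* Sketch: simplified GENMASK/FIELD_PREP/FIELD_GET arithmetic. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
/* __builtin_ctz finds the field's shift from its mask's lowest set bit */
#define FIELD_PREP(mask, val)   (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)    (((reg) & (mask)) >> __builtin_ctz(mask))

#define CFGR_CLKDIV GENMASK(19, 16)     /* 4-bit divider field */

int main(void)
{
    uint32_t reg = FIELD_PREP(CFGR_CLKDIV, 0x5);

    printf("reg    = 0x%08x\n", reg);                       /* 0x00050000 */
    printf("clkdiv = %u\n", FIELD_GET(CFGR_CLKDIV, reg));   /* 5 */
    return 0;
}
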
@@ -101,7 +136,8 @@ enum stm32_fmc2_ebi_register_type {
FMC2_REG_BCR = 1,
FMC2_REG_BTR,
FMC2_REG_BWTR,
- FMC2_REG_PCSCNTR
+ FMC2_REG_PCSCNTR,
+ FMC2_REG_CFGR
};
enum stm32_fmc2_ebi_transaction_type {
@@ -132,16 +168,42 @@ enum stm32_fmc2_ebi_cpsize {
FMC2_CPSIZE_1024 = 1024
};
+enum stm32_fmc2_ebi_cscount {
+ FMC2_CSCOUNT_0 = 0,
+ FMC2_CSCOUNT_1 = 1,
+ FMC2_CSCOUNT_64 = 64,
+ FMC2_CSCOUNT_256 = 256
+};
+
+struct stm32_fmc2_ebi;
+
+struct stm32_fmc2_ebi_data {
+ const struct stm32_fmc2_prop *child_props;
+ unsigned int nb_child_props;
+ u32 fmc2_enable_reg;
+ u32 fmc2_enable_bit;
+ int (*nwait_used_by_ctrls)(struct stm32_fmc2_ebi *ebi);
+ void (*set_setup)(struct stm32_fmc2_ebi *ebi);
+ int (*save_setup)(struct stm32_fmc2_ebi *ebi);
+ int (*check_rif)(struct stm32_fmc2_ebi *ebi, u32 resource);
+ void (*put_sems)(struct stm32_fmc2_ebi *ebi);
+ void (*get_sems)(struct stm32_fmc2_ebi *ebi);
+};
+
struct stm32_fmc2_ebi {
struct device *dev;
struct clk *clk;
struct regmap *regmap;
+ const struct stm32_fmc2_ebi_data *data;
u8 bank_assigned;
+ u8 sem_taken;
+ bool access_granted;
u32 bcr[FMC2_MAX_EBI_CE];
u32 btr[FMC2_MAX_EBI_CE];
u32 bwtr[FMC2_MAX_EBI_CE];
u32 pcscntr;
+ u32 cfgr;
};
/*
@@ -181,8 +243,11 @@ static int stm32_fmc2_ebi_check_mux(struct stm32_fmc2_ebi *ebi,
int cs)
{
u32 bcr;
+ int ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
if (bcr & FMC2_BCR_MTYP)
return 0;
@@ -195,8 +260,11 @@ static int stm32_fmc2_ebi_check_waitcfg(struct stm32_fmc2_ebi *ebi,
int cs)
{
u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR);
+ int ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN)
return 0;
@@ -209,8 +277,11 @@ static int stm32_fmc2_ebi_check_sync_trans(struct stm32_fmc2_ebi *ebi,
int cs)
{
u32 bcr;
+ int ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
if (bcr & FMC2_BCR_BURSTEN)
return 0;
@@ -218,13 +289,43 @@ static int stm32_fmc2_ebi_check_sync_trans(struct stm32_fmc2_ebi *ebi,
return -EINVAL;
}
+static int stm32_fmc2_ebi_mp25_check_cclk(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ if (!ebi->access_granted)
+ return -EACCES;
+
+ return stm32_fmc2_ebi_check_sync_trans(ebi, prop, cs);
+}
+
+static int stm32_fmc2_ebi_mp25_check_clk_period(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs)
+{
+ u32 cfgr;
+ int ret;
+
+ ret = regmap_read(ebi->regmap, FMC2_CFGR, &cfgr);
+ if (ret)
+ return ret;
+
+ if (cfgr & FMC2_CFGR_CCLKEN && !ebi->access_granted)
+ return -EACCES;
+
+ return stm32_fmc2_ebi_check_sync_trans(ebi, prop, cs);
+}
+
static int stm32_fmc2_ebi_check_async_trans(struct stm32_fmc2_ebi *ebi,
const struct stm32_fmc2_prop *prop,
int cs)
{
u32 bcr;
+ int ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
if (!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW))
return 0;
@@ -237,8 +338,11 @@ static int stm32_fmc2_ebi_check_cpsize(struct stm32_fmc2_ebi *ebi,
int cs)
{
u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM);
+ int ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN)
return 0;
@@ -251,12 +355,18 @@ static int stm32_fmc2_ebi_check_address_hold(struct stm32_fmc2_ebi *ebi,
int cs)
{
u32 bcr, bxtr, val = FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D);
+ int ret;
+
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
if (prop->reg_type == FMC2_REG_BWTR)
- regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
+ ret = regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
else
- regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
+ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
+ if (ret)
+ return ret;
if ((!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW)) &&
((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN))
@@ -270,12 +380,19 @@ static int stm32_fmc2_ebi_check_clk_period(struct stm32_fmc2_ebi *ebi,
int cs)
{
u32 bcr, bcr1;
+ int ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
- if (cs)
- regmap_read(ebi->regmap, FMC2_BCR1, &bcr1);
- else
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
+
+ if (cs) {
+ ret = regmap_read(ebi->regmap, FMC2_BCR1, &bcr1);
+ if (ret)
+ return ret;
+ } else {
bcr1 = bcr;
+ }
if (bcr & FMC2_BCR_BURSTEN && (!cs || !(bcr1 & FMC2_BCR1_CCLKEN)))
return 0;
@@ -307,18 +424,48 @@ static u32 stm32_fmc2_ebi_ns_to_clk_period(struct stm32_fmc2_ebi *ebi,
{
u32 nb_clk_cycles = stm32_fmc2_ebi_ns_to_clock_cycles(ebi, cs, setup);
u32 bcr, btr, clk_period;
+ int ret;
+
+ ret = regmap_read(ebi->regmap, FMC2_BCR1, &bcr);
+ if (ret)
+ return ret;
- regmap_read(ebi->regmap, FMC2_BCR1, &bcr);
if (bcr & FMC2_BCR1_CCLKEN || !cs)
- regmap_read(ebi->regmap, FMC2_BTR1, &btr);
+ ret = regmap_read(ebi->regmap, FMC2_BTR1, &btr);
else
- regmap_read(ebi->regmap, FMC2_BTR(cs), &btr);
+ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &btr);
+ if (ret)
+ return ret;
clk_period = FIELD_GET(FMC2_BTR_CLKDIV, btr) + 1;
return DIV_ROUND_UP(nb_clk_cycles, clk_period);
}
+static u32 stm32_fmc2_ebi_mp25_ns_to_clk_period(struct stm32_fmc2_ebi *ebi,
+ int cs, u32 setup)
+{
+ u32 nb_clk_cycles = stm32_fmc2_ebi_ns_to_clock_cycles(ebi, cs, setup);
+ u32 cfgr, btr, clk_period;
+ int ret;
+
+ ret = regmap_read(ebi->regmap, FMC2_CFGR, &cfgr);
+ if (ret)
+ return ret;
+
+ if (cfgr & FMC2_CFGR_CCLKEN) {
+ clk_period = FIELD_GET(FMC2_CFGR_CLKDIV, cfgr) + 1;
+ } else {
+ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &btr);
+ if (ret)
+ return ret;
+
+ clk_period = FIELD_GET(FMC2_BTR_CLKDIV, btr) + 1;
+ }
+
+ return DIV_ROUND_UP(nb_clk_cycles, clk_period);
+}
+
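
Worked arithmetic for the helper above: with CCLKEN set the divider comes from CFGR, and the nanosecond-derived cycle count is rounded up to whole controller clock periods. For example, 10 cycles with a CLKDIV field of 3 (a period of 4 cycles) gives DIV_ROUND_UP(10, 4) = 3. A minimal sketch:

/* Sketch: round a cycle count up to whole clock periods, as the
 * mp25 ns-to-clk-period helper does after reading CLKDIV. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int nb_clk_cycles = 10;    /* from the ns conversion */
    unsigned int clkdiv_field = 3;      /* FIELD_GET(FMC2_CFGR_CLKDIV, cfgr) */
    unsigned int clk_period = clkdiv_field + 1;

    printf("%u periods\n", DIV_ROUND_UP(nb_clk_cycles, clk_period)); /* 3 */
    return 0;
}
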
static int stm32_fmc2_ebi_get_reg(int reg_type, int cs, u32 *reg)
{
switch (reg_type) {
@@ -334,6 +481,9 @@ static int stm32_fmc2_ebi_get_reg(int reg_type, int cs, u32 *reg)
case FMC2_REG_PCSCNTR:
*reg = FMC2_PCSCNTR;
break;
+ case FMC2_REG_CFGR:
+ *reg = FMC2_CFGR;
+ break;
default:
return -EINVAL;
}
@@ -571,11 +721,16 @@ static int stm32_fmc2_ebi_set_address_setup(struct stm32_fmc2_ebi *ebi,
if (ret)
return ret;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
+
if (prop->reg_type == FMC2_REG_BWTR)
- regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
+ ret = regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr);
else
- regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
+ ret = regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr);
+ if (ret)
+ return ret;
if ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN)
val = clamp_val(setup, 1, FMC2_BXTR_ADDSET_MAX);
@@ -675,6 +830,30 @@ static int stm32_fmc2_ebi_set_clk_period(struct stm32_fmc2_ebi *ebi,
return 0;
}
+static int stm32_fmc2_ebi_mp25_set_clk_period(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val, cfgr;
+ int ret;
+
+ ret = regmap_read(ebi->regmap, FMC2_CFGR, &cfgr);
+ if (ret)
+ return ret;
+
+ if (cfgr & FMC2_CFGR_CCLKEN) {
+ val = setup ? clamp_val(setup - 1, 1, FMC2_CFGR_CLKDIV_MAX) : 1;
+ val = FIELD_PREP(FMC2_CFGR_CLKDIV, val);
+ regmap_update_bits(ebi->regmap, FMC2_CFGR, FMC2_CFGR_CLKDIV, val);
+ } else {
+ val = setup ? clamp_val(setup - 1, 1, FMC2_BTR_CLKDIV_MAX) : 1;
+ val = FIELD_PREP(FMC2_BTR_CLKDIV, val);
+ regmap_update_bits(ebi->regmap, FMC2_BTR(cs), FMC2_BTR_CLKDIV, val);
+ }
+
+ return 0;
+}
+
static int stm32_fmc2_ebi_set_data_latency(struct stm32_fmc2_ebi *ebi,
const struct stm32_fmc2_prop *prop,
int cs, u32 setup)
@@ -693,11 +872,14 @@ static int stm32_fmc2_ebi_set_max_low_pulse(struct stm32_fmc2_ebi *ebi,
int cs, u32 setup)
{
u32 old_val, new_val, pcscntr;
+ int ret;
if (setup < 1)
return 0;
- regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr);
+ ret = regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr);
+ if (ret)
+ return ret;
/* Enable counter for the bank */
regmap_update_bits(ebi->regmap, FMC2_PCSCNTR,
@@ -717,6 +899,27 @@ static int stm32_fmc2_ebi_set_max_low_pulse(struct stm32_fmc2_ebi *ebi,
return 0;
}
+static int stm32_fmc2_ebi_mp25_set_max_low_pulse(struct stm32_fmc2_ebi *ebi,
+ const struct stm32_fmc2_prop *prop,
+ int cs, u32 setup)
+{
+ u32 val;
+
+ if (setup == FMC2_CSCOUNT_0)
+ val = FIELD_PREP(FMC2_BCR_CSCOUNT, FMC2_BCR_CSCOUNT_0);
+ else if (setup == FMC2_CSCOUNT_1)
+ val = FIELD_PREP(FMC2_BCR_CSCOUNT, FMC2_BCR_CSCOUNT_1);
+ else if (setup <= FMC2_CSCOUNT_64)
+ val = FIELD_PREP(FMC2_BCR_CSCOUNT, FMC2_BCR_CSCOUNT_64);
+ else
+ val = FIELD_PREP(FMC2_BCR_CSCOUNT, FMC2_BCR_CSCOUNT_256);
+
+ regmap_update_bits(ebi->regmap, FMC2_BCR(cs),
+ FMC2_BCR_CSCOUNT, val);
+
+ return 0;
+}
+
static const struct stm32_fmc2_prop stm32_fmc2_child_props[] = {
/* st,fmc2-ebi-cs-trans-type must be the first property */
{
@@ -882,6 +1085,275 @@ static const struct stm32_fmc2_prop stm32_fmc2_child_props[] = {
},
};
+static const struct stm32_fmc2_prop stm32_fmc2_mp25_child_props[] = {
+ /* st,fmc2-ebi-cs-trans-type must be the first property */
+ {
+ .name = "st,fmc2-ebi-cs-transaction-type",
+ .mprop = true,
+ .set = stm32_fmc2_ebi_set_trans_type,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-cclk-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_CFGR,
+ .reg_mask = FMC2_CFGR_CCLKEN,
+ .check = stm32_fmc2_ebi_mp25_check_cclk,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-mux-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_MUXEN,
+ .check = stm32_fmc2_ebi_check_mux,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-buswidth",
+ .reset_val = FMC2_BUSWIDTH_16,
+ .set = stm32_fmc2_ebi_set_buswidth,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-waitpol-high",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_WAITPOL,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-waitcfg-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_WAITCFG,
+ .check = stm32_fmc2_ebi_check_waitcfg,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-wait-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_WAITEN,
+ .check = stm32_fmc2_ebi_check_sync_trans,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-asyncwait-enable",
+ .bprop = true,
+ .reg_type = FMC2_REG_BCR,
+ .reg_mask = FMC2_BCR_ASYNCWAIT,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .set = stm32_fmc2_ebi_set_bit_field,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-cpsize",
+ .check = stm32_fmc2_ebi_check_cpsize,
+ .set = stm32_fmc2_ebi_set_cpsize,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-byte-lane-setup-ns",
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_bl_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-address-setup-ns",
+ .reg_type = FMC2_REG_BTR,
+ .reset_val = FMC2_BXTR_ADDSET_MAX,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_address_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-address-hold-ns",
+ .reg_type = FMC2_REG_BTR,
+ .reset_val = FMC2_BXTR_ADDHLD_MAX,
+ .check = stm32_fmc2_ebi_check_address_hold,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_address_hold,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-data-setup-ns",
+ .reg_type = FMC2_REG_BTR,
+ .reset_val = FMC2_BXTR_DATAST_MAX,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_data_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-bus-turnaround-ns",
+ .reg_type = FMC2_REG_BTR,
+ .reset_val = FMC2_BXTR_BUSTURN_MAX + 1,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_bus_turnaround,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-data-hold-ns",
+ .reg_type = FMC2_REG_BTR,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_data_hold,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-clk-period-ns",
+ .reset_val = FMC2_CFGR_CLKDIV_MAX + 1,
+ .check = stm32_fmc2_ebi_mp25_check_clk_period,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_mp25_set_clk_period,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-data-latency-ns",
+ .check = stm32_fmc2_ebi_check_sync_trans,
+ .calculate = stm32_fmc2_ebi_mp25_ns_to_clk_period,
+ .set = stm32_fmc2_ebi_set_data_latency,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-address-setup-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .reset_val = FMC2_BXTR_ADDSET_MAX,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_address_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-address-hold-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .reset_val = FMC2_BXTR_ADDHLD_MAX,
+ .check = stm32_fmc2_ebi_check_address_hold,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_address_hold,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-data-setup-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .reset_val = FMC2_BXTR_DATAST_MAX,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_data_setup,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-bus-turnaround-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .reset_val = FMC2_BXTR_BUSTURN_MAX + 1,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_bus_turnaround,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-write-data-hold-ns",
+ .reg_type = FMC2_REG_BWTR,
+ .check = stm32_fmc2_ebi_check_async_trans,
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_set_data_hold,
+ },
+ {
+ .name = "st,fmc2-ebi-cs-max-low-pulse-ns",
+ .calculate = stm32_fmc2_ebi_ns_to_clock_cycles,
+ .set = stm32_fmc2_ebi_mp25_set_max_low_pulse,
+ },
+};
+
+static int stm32_fmc2_ebi_mp25_check_rif(struct stm32_fmc2_ebi *ebi, u32 resource)
+{
+ u32 seccfgr, cidcfgr, semcr;
+ int cid, ret;
+
+ if (resource >= FMC2_MAX_RESOURCES)
+ return -EINVAL;
+
+ ret = regmap_read(ebi->regmap, FMC2_SECCFGR, &seccfgr);
+ if (ret)
+ return ret;
+
+ if (seccfgr & BIT(resource)) {
+ if (resource)
+ dev_err(ebi->dev, "resource %d is configured as secure\n",
+ resource);
+
+ return -EACCES;
+ }
+
+ ret = regmap_read(ebi->regmap, FMC2_CIDCFGR(resource), &cidcfgr);
+ if (ret)
+ return ret;
+
+ if (!(cidcfgr & FMC2_CIDCFGR_CFEN))
+ /* CID filtering is turned off: access granted */
+ return 0;
+
+ if (!(cidcfgr & FMC2_CIDCFGR_SEMEN)) {
+ /* Static CID mode */
+ cid = FIELD_GET(FMC2_CIDCFGR_SCID, cidcfgr);
+ if (cid != FMC2_CID1) {
+ if (resource)
+ dev_err(ebi->dev, "static CID%d set for resource %d\n",
+ cid, resource);
+
+ return -EACCES;
+ }
+
+ return 0;
+ }
+
+ /* Pass-list with semaphore mode */
+ if (!(cidcfgr & FMC2_CIDCFGR_SEMWLC1)) {
+ if (resource)
+ dev_err(ebi->dev, "CID1 is block-listed for resource %d\n",
+ resource);
+
+ return -EACCES;
+ }
+
+ ret = regmap_read(ebi->regmap, FMC2_SEMCR(resource), &semcr);
+ if (ret)
+ return ret;
+
+ if (!(semcr & FMC2_SEMCR_SEM_MUTEX)) {
+ regmap_update_bits(ebi->regmap, FMC2_SEMCR(resource),
+ FMC2_SEMCR_SEM_MUTEX, FMC2_SEMCR_SEM_MUTEX);
+
+ ret = regmap_read(ebi->regmap, FMC2_SEMCR(resource), &semcr);
+ if (ret)
+ return ret;
+ }
+
+ cid = FIELD_GET(FMC2_SEMCR_SEMCID, semcr);
+ if (cid != FMC2_CID1) {
+ if (resource)
+ dev_err(ebi->dev, "resource %d is already used by CID%d\n",
+ resource, cid);
+
+ return -EACCES;
+ }
+
+ ebi->sem_taken |= BIT(resource);
+
+ return 0;
+}
+
+static void stm32_fmc2_ebi_mp25_put_sems(struct stm32_fmc2_ebi *ebi)
+{
+ unsigned int resource;
+
+ for (resource = 0; resource < FMC2_MAX_RESOURCES; resource++) {
+ if (!(ebi->sem_taken & BIT(resource)))
+ continue;
+
+ regmap_update_bits(ebi->regmap, FMC2_SEMCR(resource),
+ FMC2_SEMCR_SEM_MUTEX, 0);
+ }
+}
+
+static void stm32_fmc2_ebi_mp25_get_sems(struct stm32_fmc2_ebi *ebi)
+{
+ unsigned int resource;
+
+ for (resource = 0; resource < FMC2_MAX_RESOURCES; resource++) {
+ if (!(ebi->sem_taken & BIT(resource)))
+ continue;
+
+ regmap_update_bits(ebi->regmap, FMC2_SEMCR(resource),
+ FMC2_SEMCR_SEM_MUTEX, FMC2_SEMCR_SEM_MUTEX);
+ }
+}
+
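
put_sems()/get_sems() replay only the semaphores recorded in sem_taken, one bit per RIF resource, so suspend releases exactly what probe acquired and resume re-takes it. A standalone sketch of the bookkeeping, with register access reduced to an array:

/* Sketch: release and re-take only the hardware semaphores recorded
 * in a per-resource bitmask, as put_sems()/get_sems() do above. */
#include <stdio.h>

#define MAX_RESOURCES 6
#define SEM_MUTEX 0x1u

static unsigned int semcr[MAX_RESOURCES];   /* stand-in for FMC2_SEMCR(x) */

static void put_sems(unsigned int sem_taken)
{
    for (unsigned int r = 0; r < MAX_RESOURCES; r++)
        if (sem_taken & (1u << r))
            semcr[r] &= ~SEM_MUTEX;     /* drop the mutex bit */
}

static void get_sems(unsigned int sem_taken)
{
    for (unsigned int r = 0; r < MAX_RESOURCES; r++)
        if (sem_taken & (1u << r))
            semcr[r] |= SEM_MUTEX;      /* re-acquire on resume */
}

int main(void)
{
    unsigned int sem_taken = (1u << 1) | (1u << 4); /* resources 1 and 4 */

    get_sems(sem_taken);
    printf("semcr[1]=%u semcr[4]=%u semcr[0]=%u\n",
           semcr[1], semcr[4], semcr[0]);   /* 1 1 0 */
    put_sems(sem_taken);
    return 0;
}
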
static int stm32_fmc2_ebi_parse_prop(struct stm32_fmc2_ebi *ebi,
struct device_node *dev_node,
const struct stm32_fmc2_prop *prop,
@@ -944,17 +1416,48 @@ static void stm32_fmc2_ebi_disable_bank(struct stm32_fmc2_ebi *ebi, int cs)
regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_MBKEN, 0);
}
-static void stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi)
+static int stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi)
{
unsigned int cs;
+ int ret;
for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
- regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]);
- regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]);
- regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]);
+ if (!(ebi->bank_assigned & BIT(cs)))
+ continue;
+
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]);
+ ret |= regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]);
+ ret |= regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]);
+ if (ret)
+ return ret;
}
- regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr);
+ return 0;
+}
+
+static int stm32_fmc2_ebi_mp1_save_setup(struct stm32_fmc2_ebi *ebi)
+{
+ int ret;
+
+ ret = stm32_fmc2_ebi_save_setup(ebi);
+ if (ret)
+ return ret;
+
+ return regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr);
+}
+
+static int stm32_fmc2_ebi_mp25_save_setup(struct stm32_fmc2_ebi *ebi)
+{
+ int ret;
+
+ ret = stm32_fmc2_ebi_save_setup(ebi);
+ if (ret)
+ return ret;
+
+ if (ebi->access_granted)
+ ret = regmap_read(ebi->regmap, FMC2_CFGR, &ebi->cfgr);
+
+ return ret;
}
static void stm32_fmc2_ebi_set_setup(struct stm32_fmc2_ebi *ebi)
@@ -962,14 +1465,29 @@ static void stm32_fmc2_ebi_set_setup(struct stm32_fmc2_ebi *ebi)
unsigned int cs;
for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
+ if (!(ebi->bank_assigned & BIT(cs)))
+ continue;
+
regmap_write(ebi->regmap, FMC2_BCR(cs), ebi->bcr[cs]);
regmap_write(ebi->regmap, FMC2_BTR(cs), ebi->btr[cs]);
regmap_write(ebi->regmap, FMC2_BWTR(cs), ebi->bwtr[cs]);
}
+}
+static void stm32_fmc2_ebi_mp1_set_setup(struct stm32_fmc2_ebi *ebi)
+{
+ stm32_fmc2_ebi_set_setup(ebi);
regmap_write(ebi->regmap, FMC2_PCSCNTR, ebi->pcscntr);
}
+static void stm32_fmc2_ebi_mp25_set_setup(struct stm32_fmc2_ebi *ebi)
+{
+ stm32_fmc2_ebi_set_setup(ebi);
+
+ if (ebi->access_granted)
+ regmap_write(ebi->regmap, FMC2_CFGR, ebi->cfgr);
+}
+
static void stm32_fmc2_ebi_disable_banks(struct stm32_fmc2_ebi *ebi)
{
unsigned int cs;
@@ -983,33 +1501,48 @@ static void stm32_fmc2_ebi_disable_banks(struct stm32_fmc2_ebi *ebi)
}
/* NWAIT signal can not be connected to EBI controller and NAND controller */
-static bool stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi)
+static int stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi)
{
+ struct device *dev = ebi->dev;
unsigned int cs;
u32 bcr;
+ int ret;
for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) {
if (!(ebi->bank_assigned & BIT(cs)))
continue;
- regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ ret = regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr);
+ if (ret)
+ return ret;
+
if ((bcr & FMC2_BCR_WAITEN || bcr & FMC2_BCR_ASYNCWAIT) &&
- ebi->bank_assigned & BIT(FMC2_NAND))
- return true;
+ ebi->bank_assigned & BIT(FMC2_NAND)) {
+ dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n");
+ return -EINVAL;
+ }
}
- return false;
+ return 0;
}
static void stm32_fmc2_ebi_enable(struct stm32_fmc2_ebi *ebi)
{
- regmap_update_bits(ebi->regmap, FMC2_BCR1,
- FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN);
+ if (!ebi->access_granted)
+ return;
+
+ regmap_update_bits(ebi->regmap, ebi->data->fmc2_enable_reg,
+ ebi->data->fmc2_enable_bit,
+ ebi->data->fmc2_enable_bit);
}
static void stm32_fmc2_ebi_disable(struct stm32_fmc2_ebi *ebi)
{
- regmap_update_bits(ebi->regmap, FMC2_BCR1, FMC2_BCR1_FMC2EN, 0);
+ if (!ebi->access_granted)
+ return;
+
+ regmap_update_bits(ebi->regmap, ebi->data->fmc2_enable_reg,
+ ebi->data->fmc2_enable_bit, 0);
}
static int stm32_fmc2_ebi_setup_cs(struct stm32_fmc2_ebi *ebi,
@@ -1021,8 +1554,8 @@ static int stm32_fmc2_ebi_setup_cs(struct stm32_fmc2_ebi *ebi,
stm32_fmc2_ebi_disable_bank(ebi, cs);
- for (i = 0; i < ARRAY_SIZE(stm32_fmc2_child_props); i++) {
- const struct stm32_fmc2_prop *p = &stm32_fmc2_child_props[i];
+ for (i = 0; i < ebi->data->nb_child_props; i++) {
+ const struct stm32_fmc2_prop *p = &ebi->data->child_props[i];
ret = stm32_fmc2_ebi_parse_prop(ebi, dev_node, p, cs);
if (ret) {
@@ -1066,6 +1599,15 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
return -EINVAL;
}
+ if (ebi->data->check_rif) {
+ ret = ebi->data->check_rif(ebi, bank + 1);
+ if (ret) {
+ dev_err(dev, "bank access failed: %d\n", bank);
+ of_node_put(child);
+ return ret;
+ }
+ }
+
if (bank < FMC2_MAX_EBI_CE) {
ret = stm32_fmc2_ebi_setup_cs(ebi, child, bank);
if (ret) {
@@ -1085,9 +1627,10 @@ static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi)
return -ENODEV;
}
- if (stm32_fmc2_ebi_nwait_used_by_ctrls(ebi)) {
- dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n");
- return -EINVAL;
+ if (ebi->data->nwait_used_by_ctrls) {
+ ret = ebi->data->nwait_used_by_ctrls(ebi);
+ if (ret)
+ return ret;
}
stm32_fmc2_ebi_enable(ebi);
@@ -1107,6 +1650,11 @@ static int stm32_fmc2_ebi_probe(struct platform_device *pdev)
return -ENOMEM;
ebi->dev = dev;
+ platform_set_drvdata(pdev, ebi);
+
+ ebi->data = of_device_get_match_data(dev);
+ if (!ebi->data)
+ return -EINVAL;
ebi->regmap = device_node_to_regmap(dev->of_node);
if (IS_ERR(ebi->regmap))
@@ -1120,28 +1668,57 @@ static int stm32_fmc2_ebi_probe(struct platform_device *pdev)
if (PTR_ERR(rstc) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- ret = clk_prepare_enable(ebi->clk);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
if (!IS_ERR(rstc)) {
reset_control_assert(rstc);
reset_control_deassert(rstc);
}
+ /* Check if CFGR register can be modified */
+ ebi->access_granted = true;
+ if (ebi->data->check_rif) {
+ ret = ebi->data->check_rif(ebi, 0);
+ if (ret) {
+ u32 sr;
+
+ ebi->access_granted = false;
+
+ ret = regmap_read(ebi->regmap, FMC2_SR, &sr);
+ if (ret)
+ goto err_release;
+
+ /* If CFGR is secure, just check that the FMC2 is enabled */
+ if (sr & FMC2_SR_ISOST) {
+ dev_err(dev, "FMC2 is not ready to be used.\n");
+ ret = -EACCES;
+ goto err_release;
+ }
+ }
+ }
+
ret = stm32_fmc2_ebi_parse_dt(ebi);
if (ret)
goto err_release;
- stm32_fmc2_ebi_save_setup(ebi);
- platform_set_drvdata(pdev, ebi);
+ ret = ebi->data->save_setup(ebi);
+ if (ret)
+ goto err_release;
return 0;
err_release:
stm32_fmc2_ebi_disable_banks(ebi);
stm32_fmc2_ebi_disable(ebi);
- clk_disable_unprepare(ebi->clk);
+ if (ebi->data->put_sems)
+ ebi->data->put_sems(ebi);
+ pm_runtime_put_sync_suspend(dev);
return ret;
}
@@ -1153,7 +1730,25 @@ static void stm32_fmc2_ebi_remove(struct platform_device *pdev)
of_platform_depopulate(&pdev->dev);
stm32_fmc2_ebi_disable_banks(ebi);
stm32_fmc2_ebi_disable(ebi);
+ if (ebi->data->put_sems)
+ ebi->data->put_sems(ebi);
+ pm_runtime_put_sync_suspend(&pdev->dev);
+}
+
+static int __maybe_unused stm32_fmc2_ebi_runtime_suspend(struct device *dev)
+{
+ struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev);
+
clk_disable_unprepare(ebi->clk);
+
+ return 0;
+}
+
+static int __maybe_unused stm32_fmc2_ebi_runtime_resume(struct device *dev)
+{
+ struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(ebi->clk);
}
static int __maybe_unused stm32_fmc2_ebi_suspend(struct device *dev)
@@ -1161,7 +1756,9 @@ static int __maybe_unused stm32_fmc2_ebi_suspend(struct device *dev)
struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev);
stm32_fmc2_ebi_disable(ebi);
- clk_disable_unprepare(ebi->clk);
+ if (ebi->data->put_sems)
+ ebi->data->put_sems(ebi);
+ pm_runtime_put_sync_suspend(dev);
pinctrl_pm_select_sleep_state(dev);
return 0;
@@ -1174,21 +1771,55 @@ static int __maybe_unused stm32_fmc2_ebi_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- ret = clk_prepare_enable(ebi->clk);
- if (ret)
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
return ret;
- stm32_fmc2_ebi_set_setup(ebi);
+ if (ebi->data->get_sems)
+ ebi->data->get_sems(ebi);
+ ebi->data->set_setup(ebi);
stm32_fmc2_ebi_enable(ebi);
return 0;
}
-static SIMPLE_DEV_PM_OPS(stm32_fmc2_ebi_pm_ops, stm32_fmc2_ebi_suspend,
- stm32_fmc2_ebi_resume);
+static const struct dev_pm_ops stm32_fmc2_ebi_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm32_fmc2_ebi_runtime_suspend,
+ stm32_fmc2_ebi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(stm32_fmc2_ebi_suspend, stm32_fmc2_ebi_resume)
+};
+
+static const struct stm32_fmc2_ebi_data stm32_fmc2_ebi_mp1_data = {
+ .child_props = stm32_fmc2_child_props,
+ .nb_child_props = ARRAY_SIZE(stm32_fmc2_child_props),
+ .fmc2_enable_reg = FMC2_BCR1,
+ .fmc2_enable_bit = FMC2_BCR1_FMC2EN,
+ .nwait_used_by_ctrls = stm32_fmc2_ebi_nwait_used_by_ctrls,
+ .set_setup = stm32_fmc2_ebi_mp1_set_setup,
+ .save_setup = stm32_fmc2_ebi_mp1_save_setup,
+};
+
+static const struct stm32_fmc2_ebi_data stm32_fmc2_ebi_mp25_data = {
+ .child_props = stm32_fmc2_mp25_child_props,
+ .nb_child_props = ARRAY_SIZE(stm32_fmc2_mp25_child_props),
+ .fmc2_enable_reg = FMC2_CFGR,
+ .fmc2_enable_bit = FMC2_CFGR_FMC2EN,
+ .set_setup = stm32_fmc2_ebi_mp25_set_setup,
+ .save_setup = stm32_fmc2_ebi_mp25_save_setup,
+ .check_rif = stm32_fmc2_ebi_mp25_check_rif,
+ .put_sems = stm32_fmc2_ebi_mp25_put_sems,
+ .get_sems = stm32_fmc2_ebi_mp25_get_sems,
+};
static const struct of_device_id stm32_fmc2_ebi_match[] = {
- {.compatible = "st,stm32mp1-fmc2-ebi"},
+ {
+ .compatible = "st,stm32mp1-fmc2-ebi",
+ .data = &stm32_fmc2_ebi_mp1_data,
+ },
+ {
+ .compatible = "st,stm32mp25-fmc2-ebi",
+ .data = &stm32_fmc2_ebi_mp25_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, stm32_fmc2_ebi_match);
diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
index abff87f917cb..5f57cea48b62 100644
--- a/drivers/memory/tegra/tegra234.c
+++ b/drivers/memory/tegra/tegra234.c
@@ -92,6 +92,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0RDB,
.name = "dla0rdb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -102,6 +104,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0RDB1,
.name = "dla0rdb1",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -112,6 +116,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0WRB,
.name = "dla0wrb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -121,7 +127,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1RDB,
- .name = "dla0rdb",
+ .name = "dla1rdb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -407,7 +415,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1RDB1,
- .name = "dla0rdb1",
+ .name = "dla1rdb1",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -417,7 +427,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1WRB,
- .name = "dla0wrb",
+ .name = "dla1wrb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -539,7 +551,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
.bpmp_id = TEGRA_ICC_BPMP_NVJPG_0,
.type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVJPG,
- .regs = {
+ .regs = {
.sid = {
.override = 0x3f8,
.security = 0x3fc,
@@ -660,6 +672,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0RDA,
.name = "dla0rda",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -670,6 +684,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0FALRDB,
.name = "dla0falrdb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -680,6 +696,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0WRA,
.name = "dla0wra",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -690,6 +708,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0FALWRB,
.name = "dla0falwrb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -699,7 +719,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1RDA,
- .name = "dla0rda",
+ .name = "dla1rda",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -709,7 +731,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1FALRDB,
- .name = "dla0falrdb",
+ .name = "dla1falrdb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -719,7 +743,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1WRA,
- .name = "dla0wra",
+ .name = "dla1wra",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -729,7 +755,9 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1FALWRB,
- .name = "dla0falwrb",
+ .name = "dla1falwrb",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_1,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -908,6 +936,8 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA0RDA1,
.name = "dla0rda1",
+ .bpmp_id = TEGRA_ICC_BPMP_DLA_0,
+ .type = TEGRA_ICC_NISO,
.sid = TEGRA234_SID_NVDLA0,
.regs = {
.sid = {
@@ -917,7 +947,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1RDA1,
- .name = "dla0rda1",
+ .name = "dla1rda1",
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index bbfaf6536903..23fea51ecbdd 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -164,7 +164,7 @@ static struct attribute *memstick_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(memstick_dev);
-static struct bus_type memstick_bus_type = {
+static const struct bus_type memstick_bus_type = {
.name = "memstick",
.dev_groups = memstick_dev_groups,
.match = memstick_bus_match,
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 04115cd92433..47a314a4eb6f 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2078,6 +2078,12 @@ static const struct blk_mq_ops msb_mq_ops = {
static int msb_init_disk(struct memstick_dev *card)
{
struct msb_data *msb = memstick_get_drvdata(card);
+ struct queue_limits lim = {
+ .logical_block_size = msb->page_size,
+ .max_hw_sectors = MS_BLOCK_MAX_PAGES,
+ .max_segments = MS_BLOCK_MAX_SEGS,
+ .max_segment_size = MS_BLOCK_MAX_PAGES * msb->page_size,
+ };
int rc;
unsigned long capacity;
@@ -2093,19 +2099,13 @@ static int msb_init_disk(struct memstick_dev *card)
if (rc)
goto out_release_id;
- msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
+ msb->disk = blk_mq_alloc_disk(&msb->tag_set, &lim, card);
if (IS_ERR(msb->disk)) {
rc = PTR_ERR(msb->disk);
goto out_free_tag_set;
}
msb->queue = msb->disk->queue;
- blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
- blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
- blk_queue_max_segment_size(msb->queue,
- MS_BLOCK_MAX_PAGES * msb->page_size);
- blk_queue_logical_block_size(msb->queue, msb->page_size);
-
sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
msb->disk->fops = &msb_bdops;
msb->disk->private_data = msb;
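
The conversion above hands the limits to blk_mq_alloc_disk() as one initialized struct instead of applying them with per-field setters after allocation. A hedged, much-reduced sketch of the shape of that API change (struct and values illustrative):

/* Sketch: pass configuration as an initialized limits struct at
 * allocation time rather than via post-hoc setter calls. */
#include <stdio.h>

struct queue_limits {
    unsigned int logical_block_size;
    unsigned int max_hw_sectors;
    unsigned int max_segments;
    unsigned int max_segment_size;
};

static int alloc_disk(const struct queue_limits *lim)
{
    /* The allocator can validate and apply everything atomically. */
    printf("lbs=%u hw=%u segs=%u segsz=%u\n",
           lim->logical_block_size, lim->max_hw_sectors,
           lim->max_segments, lim->max_segment_size);
    return 0;
}

int main(void)
{
    unsigned int page_size = 512;
    struct queue_limits lim = {
        .logical_block_size = page_size,
        .max_hw_sectors = 256,          /* illustrative values */
        .max_segments = 16,
        .max_segment_size = 256 * page_size,
    };

    return alloc_disk(&lim);
}
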
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 5a69ed33999b..49accfdc89d6 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1103,6 +1103,12 @@ static const struct blk_mq_ops mspro_mq_ops = {
static int mspro_block_init_disk(struct memstick_dev *card)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
+ struct queue_limits lim = {
+ .logical_block_size = msb->page_size,
+ .max_hw_sectors = MSPRO_BLOCK_MAX_PAGES,
+ .max_segments = MSPRO_BLOCK_MAX_SEGS,
+ .max_segment_size = MSPRO_BLOCK_MAX_PAGES * msb->page_size,
+ };
struct mspro_devinfo *dev_info = NULL;
struct mspro_sys_info *sys_info = NULL;
struct mspro_sys_attr *s_attr = NULL;
@@ -1138,18 +1144,13 @@ static int mspro_block_init_disk(struct memstick_dev *card)
if (rc)
goto out_release_id;
- msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
+ msb->disk = blk_mq_alloc_disk(&msb->tag_set, &lim, card);
if (IS_ERR(msb->disk)) {
rc = PTR_ERR(msb->disk);
goto out_free_tag_set;
}
msb->queue = msb->disk->queue;
- blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
- blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS);
- blk_queue_max_segment_size(msb->queue,
- MSPRO_BLOCK_MAX_PAGES * msb->page_size);
-
msb->disk->major = major;
msb->disk->first_minor = disk_id << MSPRO_BLOCK_PART_SHIFT;
msb->disk->minors = 1 << MSPRO_BLOCK_PART_SHIFT;
@@ -1158,8 +1159,6 @@ static int mspro_block_init_disk(struct memstick_dev *card)
sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
- blk_queue_logical_block_size(msb->queue, msb->page_size);
-
capacity = be16_to_cpu(sys_info->user_block_count);
capacity *= be16_to_cpu(sys_info->block_size);
capacity *= msb->page_size >> 9;
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 03319a1fa97f..dbd26c3b245b 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -263,7 +263,6 @@ struct fastrpc_channel_ctx {
int domain_id;
int sesscount;
int vmcount;
- u64 perms;
struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
struct rpmsg_device *rpdev;
struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
@@ -1279,9 +1278,11 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
/* Map if we have any heap VMIDs associated with this ADSP Static Process. */
if (fl->cctx->vmcount) {
+ u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
(u64)fl->cctx->remote_heap->size,
- &fl->cctx->perms,
+ &src_perms,
fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
@@ -1915,8 +1916,10 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
/* Add memory to static PD pool, protection thru hypervisor */
if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
+ u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
- &fl->cctx->perms, fl->cctx->vmperms, fl->cctx->vmcount);
+ &src_perms, fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
buf->phys, buf->size, err);
@@ -2290,7 +2293,6 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
if (vmcount) {
data->vmcount = vmcount;
- data->perms = BIT(QCOM_SCM_VMID_HLOS);
for (i = 0; i < data->vmcount; i++) {
data->vmperms[i].vmid = vmids[i];
data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index c6eb27d46cb0..15119584473c 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -198,8 +198,14 @@ static int lis3lv02d_i2c_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+ /* Turn on for wakeup if turned off by runtime suspend */
+ if (lis3->pdata && lis3->pdata->wakeup_flags) {
+ if (pm_runtime_suspended(dev))
+ lis3lv02d_poweron(lis3);
+ /* For non-wakeup, turn off if not already turned off by runtime suspend */
+ } else if (!pm_runtime_suspended(dev))
lis3lv02d_poweroff(lis3);
+
return 0;
}
@@ -208,13 +214,12 @@ static int lis3lv02d_i2c_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- /*
- * pm_runtime documentation says that devices should always
- * be powered on at resume. Pm_runtime turns them off after system
- * wide resume is complete.
- */
- if (!lis3->pdata || !lis3->pdata->wakeup_flags ||
- pm_runtime_suspended(dev))
+ /* Turn back off if turned on for wakeup and runtime suspended */
+ if (lis3->pdata && lis3->pdata->wakeup_flags) {
+ if (pm_runtime_suspended(dev))
+ lis3lv02d_poweroff(lis3);
+ /* For non-wakeup, turn back on if not runtime suspended */
+ } else if (!pm_runtime_suspended(dev))
lis3lv02d_poweron(lis3);
return 0;
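
The rewritten suspend/resume pair is a small decision table over two booleans: whether wakeup is configured and whether the device is already runtime-suspended. A sketch encoding the suspend-time half of that table (enum and names are illustrative):

/* Sketch: the suspend-time power decision as a truth table.
 * wakeup && runtime-suspended  -> power on (needed as wakeup source)
 * wakeup && running            -> leave on
 * !wakeup && runtime-suspended -> already off, nothing to do
 * !wakeup && running           -> power off                         */
#include <stdbool.h>
#include <stdio.h>

enum action { LEAVE, POWER_ON, POWER_OFF };

static enum action suspend_action(bool wakeup, bool rt_suspended)
{
    if (wakeup)
        return rt_suspended ? POWER_ON : LEAVE;
    return rt_suspended ? LEAVE : POWER_OFF;
}

int main(void)
{
    static const char *name[] = { "leave", "power-on", "power-off" };

    for (int w = 0; w <= 1; w++)
        for (int s = 0; s <= 1; s++)
            printf("wakeup=%d rt_suspended=%d -> %s\n",
                   w, s, name[suspend_action(w, s)]);
    return 0;
}
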
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index b080eb2335eb..b92767d6bdd2 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -294,10 +294,11 @@ static void lkdtm_SPINLOCKUP(void)
__release(&lock_me_up);
}
-static void lkdtm_HUNG_TASK(void)
+static void __noreturn lkdtm_HUNG_TASK(void)
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
+ BUG();
}
static volatile unsigned int huge = INT_MAX - 2;
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 0772e4a4757e..5732fd59a227 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -153,12 +153,17 @@ static const struct crashtype *find_crashtype(const char *name)
/*
* This is forced noinline just so it distinctly shows up in the stackdump
* which makes validation of expected lkdtm crashes easier.
+ *
+ * NOTE: having a valid return value helps prevent the compiler from doing
+ * tail call optimizations and taking this out of the stack trace.
*/
-static noinline void lkdtm_do_action(const struct crashtype *crashtype)
+static noinline int lkdtm_do_action(const struct crashtype *crashtype)
{
if (WARN_ON(!crashtype || !crashtype->func))
- return;
+ return -EINVAL;
crashtype->func();
+
+ return 0;
}
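
As the NOTE says, propagating a return value keeps the crashtype call out of tail position, so the compiler cannot sibling-call it away and lkdtm_do_action() stays in the backtrace. A minimal sketch of the idea (whether a given compiler would actually perform the tail call in the void version is, of course, not guaranteed):

/* Sketch: the returned value forces work after the call, so the
 * call cannot be optimized into a tail jump out of do_action(). */
#include <stdio.h>

static void action(void)
{
    puts("crash action runs here");
}

__attribute__((noinline)) static int do_action(void (*func)(void))
{
    if (!func)
        return -22;     /* mirrors the -EINVAL guard */
    func();
    return 0;           /* statement after the call: no tail position */
}

int main(void)
{
    return do_action(action);   /* do_action stays in the backtrace */
}
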
static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
@@ -167,10 +172,8 @@ static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
int ret;
/* If this doesn't have a symbol, just call immediately. */
- if (!crashpoint->kprobe.symbol_name) {
- lkdtm_do_action(crashtype);
- return 0;
- }
+ if (!crashpoint->kprobe.symbol_name)
+ return lkdtm_do_action(crashtype);
if (lkdtm_kprobe != NULL)
unregister_kprobe(lkdtm_kprobe);
@@ -216,7 +219,7 @@ static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
spin_unlock_irqrestore(&crash_count_lock, flags);
if (do_it)
- lkdtm_do_action(lkdtm_crashtype);
+ return lkdtm_do_action(lkdtm_crashtype);
return 0;
}
@@ -303,6 +306,7 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
{
const struct crashtype *crashtype;
char *buf;
+ int err;
if (count >= PAGE_SIZE)
return -EINVAL;
@@ -326,9 +330,11 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
return -EINVAL;
pr_info("Performing direct entry %s\n", crashtype->name);
- lkdtm_do_action(crashtype);
+ err = lkdtm_do_action(crashtype);
*off += count;
+ if (err)
+ return err;
return count;
}
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 4f467d3972a6..b1b316f99703 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -48,7 +48,7 @@ static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
* correctly.
*
* This should get caught by either memory tagging, KASan, or by using
- * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
+ * CONFIG_SLUB_DEBUG=y and slab_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
*/
static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
diff --git a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
index be52b113aea9..89364bdbb129 100644
--- a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
+++ b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
@@ -96,7 +96,8 @@ static const struct component_master_ops mei_component_master_ops = {
*
* The function checks if the device is pci device and
* Intel VGA adapter, the subcomponent is SW Proxy
- * and the parent of MEI PCI and the parent of VGA are the same PCH device.
+ * and the VGA adapter is on bus 0, which is reserved for built-in
+ * devices, in order to reject discrete GFX.
*
* @dev: master device
* @subcomponent: subcomponent to match (I915_COMPONENT_SWPROXY)
@@ -123,7 +124,8 @@ static int mei_gsc_proxy_component_match(struct device *dev, int subcomponent,
if (subcomponent != I915_COMPONENT_GSC_PROXY)
return 0;
- return component_compare_dev(dev->parent, ((struct device *)data)->parent);
+ /* Only built-in GFX */
+ return (pdev->bus->number == 0);
}
static int mei_gsc_proxy_probe(struct mei_cl_device *cldev,
@@ -146,7 +148,7 @@ static int mei_gsc_proxy_probe(struct mei_cl_device *cldev,
}
component_match_add_typed(&cldev->dev, &master_match,
- mei_gsc_proxy_component_match, cldev->dev.parent);
+ mei_gsc_proxy_component_match, NULL);
if (IS_ERR_OR_NULL(master_match)) {
ret = -ENOMEM;
goto err_exit;
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 961e5d53a27a..aac36750d2c5 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -112,6 +112,8 @@
#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */
#define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */
+#define MEI_DEV_ID_ARL_S 0x7F68 /* Arrow Lake Point S */
+#define MEI_DEV_ID_ARL_H 0x7770 /* Arrow Lake Point H */
/*
* MEI HW Section
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 676d566f38dd..8cf636c54032 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -119,6 +119,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)},
/* required last entry */
{0, }
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index 6f4a4be6ccb5..55f7db490d3b 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -535,6 +535,7 @@ static const struct acpi_device_id vsc_tp_acpi_ids[] = {
{ "INTC1009" }, /* Raptor Lake */
{ "INTC1058" }, /* Tiger Lake */
{ "INTC1094" }, /* Alder Lake */
+ { "INTC10D0" }, /* Meteor Lake */
{}
};
MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
index f50d22882476..3964d9e5a39b 100644
--- a/drivers/misc/vmw_vmci/vmci_datagram.c
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -224,8 +224,8 @@ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
return VMCI_ERROR_NO_MEM;
}
- dg_info = kmalloc(sizeof(*dg_info) +
- (size_t) dg->payload_size, GFP_ATOMIC);
+ dg_info = kmalloc(struct_size(dg_info, msg_payload, dg->payload_size),
+ GFP_ATOMIC);
if (!dg_info) {
atomic_dec(&delayed_dg_host_queue_size);
vmci_resource_put(resource);
@@ -234,7 +234,8 @@ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
dg_info->in_dg_host_queue = true;
dg_info->entry = dst_entry;
- memcpy(&dg_info->msg, dg, dg_size);
+ dg_info->msg = *dg;
+ memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
@@ -377,7 +378,8 @@ int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
dg_info->in_dg_host_queue = false;
dg_info->entry = dst_entry;
- memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
+ dg_info->msg = *dg;
+ memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
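The vmci_datagram conversion above swaps an open-coded header-plus-payload size for struct_size(), which guards the addition against overflow, and splits the single memcpy() into a struct assignment for the header and a memcpy() for the flexible-array payload. A minimal sketch of the same pattern, using hypothetical names (msg_hdr, msg_info) rather than the VMCI structures:

	#include <linux/overflow.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct msg_hdr {
		u64 payload_size;
	};

	struct msg_info {
		struct msg_hdr msg;
		u8 msg_payload[];		/* flexible array member */
	};

	static struct msg_info *msg_info_dup(const struct msg_hdr *src)
	{
		struct msg_info *info;

		/* struct_size() = sizeof(*info) + payload, overflow-checked */
		info = kmalloc(struct_size(info, msg_payload, src->payload_size),
			       GFP_ATOMIC);
		if (!info)
			return NULL;

		info->msg = *src;		/* header by assignment */
		memcpy(info->msg_payload, src + 1, src->payload_size);
		return info;
	}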
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 32d49100dff5..64a3492e8002 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -144,7 +144,7 @@ struct mmc_blk_data {
static dev_t mmc_rpmb_devt;
/* Bus type for RPMB character devices */
-static struct bus_type mmc_rpmb_bus_type = {
+static const struct bus_type mmc_rpmb_bus_type = {
.name = "mmc_rpmb",
};
@@ -206,7 +206,7 @@ static void mmc_blk_kref_release(struct kref *ref)
int devidx;
devidx = mmc_get_devidx(md->disk);
- ida_simple_remove(&mmc_blk_ida, devidx);
+ ida_free(&mmc_blk_ida, devidx);
mutex_lock(&open_lock);
md->disk->private_data = NULL;
@@ -874,10 +874,11 @@ static const struct block_device_operations mmc_bdops = {
static int mmc_blk_part_switch_pre(struct mmc_card *card,
unsigned int part_type)
{
- const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
+ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if ((part_type & mask) == mask) {
+ if ((part_type & mask) == rpmb) {
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (ret)
@@ -892,10 +893,11 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
static int mmc_blk_part_switch_post(struct mmc_card *card,
unsigned int part_type)
{
- const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
+ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if ((part_type & mask) == mask) {
+ if ((part_type & mask) == rpmb) {
mmc_retune_unpause(card->host);
if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
ret = mmc_cmdq_enable(card);
@@ -2467,7 +2469,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
bool cache_enabled = false;
bool fua_enabled = false;
- devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
+ devidx = ida_alloc_max(&mmc_blk_ida, max_devices - 1, GFP_KERNEL);
if (devidx < 0) {
/*
* We get -ENOSPC because there are no more any available
@@ -2577,7 +2579,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
err_kfree:
kfree(md);
out:
- ida_simple_remove(&mmc_blk_ida, devidx);
+ ida_free(&mmc_blk_ida, devidx);
return ERR_PTR(ret);
}
@@ -2703,7 +2705,7 @@ static void mmc_blk_rpmb_device_release(struct device *dev)
{
struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
- ida_simple_remove(&mmc_rpmb_ida, rpmb->id);
+ ida_free(&mmc_rpmb_ida, rpmb->id);
kfree(rpmb);
}
@@ -2719,13 +2721,13 @@ static int mmc_blk_alloc_rpmb_part(struct mmc_card *card,
struct mmc_rpmb_data *rpmb;
/* This creates the minor number for the RPMB char device */
- devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL);
+ devidx = ida_alloc_max(&mmc_rpmb_ida, max_devices - 1, GFP_KERNEL);
if (devidx < 0)
return devidx;
rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
if (!rpmb) {
- ida_simple_remove(&mmc_rpmb_ida, devidx);
+ ida_free(&mmc_rpmb_ida, devidx);
return -ENOMEM;
}
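The block.c hunks above are part of the tree-wide move from the deprecated ida_simple_*() helpers to the plain IDA API. The bounds convention differs: ida_simple_get(ida, 0, max, gfp) treated max as exclusive, while ida_alloc_max() takes an inclusive maximum, hence the max_devices - 1. A short sketch of the mapping:

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	static int example_get_id(unsigned int max_devices)
	{
		/* was: ida_simple_get(&example_ida, 0, max_devices, GFP_KERNEL) */
		return ida_alloc_max(&example_ida, max_devices - 1, GFP_KERNEL);
	}

	static void example_put_id(int id)
	{
		/* was: ida_simple_remove(&example_ida, id) */
		ida_free(&example_ida, id);
	}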
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 0af96548e7da..0ddaee0eae54 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -214,7 +214,7 @@ static const struct dev_pm_ops mmc_bus_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume)
};
-static struct bus_type mmc_bus_type = {
+static const struct bus_type mmc_bus_type = {
.name = "mmc",
.dev_groups = mmc_dev_groups,
.uevent = mmc_bus_uevent,
@@ -272,7 +272,7 @@ static void mmc_release_card(struct device *dev)
/*
* Allocate and initialise a new MMC card structure.
*/
-struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type)
+struct mmc_card *mmc_alloc_card(struct mmc_host *host, const struct device_type *type)
{
struct mmc_card *card;
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 3996b191b68d..cfd0d02d3420 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -23,7 +23,7 @@ static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *a
static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
struct mmc_card *mmc_alloc_card(struct mmc_host *host,
- struct device_type *type);
+ const struct device_type *type);
int mmc_add_card(struct mmc_card *card);
void mmc_remove_card(struct mmc_card *card);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index cf396e8f34e9..8f8781d6c25e 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -76,7 +76,7 @@ static void mmc_host_classdev_release(struct device *dev)
struct mmc_host *host = cls_dev_to_mmc_host(dev);
wakeup_source_unregister(host->ws);
if (of_alias_get_id(host->parent->of_node, "mmc") < 0)
- ida_simple_remove(&mmc_host_ida, host->index);
+ ida_free(&mmc_host_ida, host->index);
kfree(host);
}
@@ -88,7 +88,7 @@ static int mmc_host_classdev_shutdown(struct device *dev)
return 0;
}
-static struct class mmc_host_class = {
+static const struct class mmc_host_class = {
.name = "mmc_host",
.dev_release = mmc_host_classdev_release,
.shutdown_pre = mmc_host_classdev_shutdown,
@@ -234,10 +234,8 @@ static void mmc_of_parse_timing_phase(struct device *dev, const char *prop,
}
void
-mmc_of_parse_clk_phase(struct mmc_host *host, struct mmc_clk_phase_map *map)
+mmc_of_parse_clk_phase(struct device *dev, struct mmc_clk_phase_map *map)
{
- struct device *dev = host->parent;
-
mmc_of_parse_timing_phase(dev, "clk-phase-legacy",
&map->phase[MMC_TIMING_LEGACY]);
mmc_of_parse_timing_phase(dev, "clk-phase-mmc-hs",
@@ -538,7 +536,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
min_idx = mmc_first_nonreserved_index();
max_idx = 0;
- index = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL);
+ index = ida_alloc_range(&mmc_host_ida, min_idx, max_idx - 1,
+ GFP_KERNEL);
if (index < 0) {
kfree(host);
return NULL;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f410bee50132..5b2f7c285461 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -883,7 +883,7 @@ static struct attribute *mmc_std_attrs[] = {
};
ATTRIBUTE_GROUPS(mmc_std);
-static struct device_type mmc_type = {
+static const struct device_type mmc_type = {
.groups = mmc_std_groups,
};
@@ -1015,10 +1015,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_8,
EXT_CSD_BUS_WIDTH_4,
+ EXT_CSD_BUS_WIDTH_1,
};
static unsigned bus_widths[] = {
MMC_BUS_WIDTH_8,
MMC_BUS_WIDTH_4,
+ MMC_BUS_WIDTH_1,
};
struct mmc_host *host = card->host;
unsigned idx, bus_width = 0;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index a0a2412f62a7..241cdc2b2a2a 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -174,8 +174,8 @@ static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
return sg;
}
-static void mmc_queue_setup_discard(struct request_queue *q,
- struct mmc_card *card)
+static void mmc_queue_setup_discard(struct mmc_card *card,
+ struct queue_limits *lim)
{
unsigned max_discard;
@@ -183,15 +183,17 @@ static void mmc_queue_setup_discard(struct request_queue *q,
if (!max_discard)
return;
- blk_queue_max_discard_sectors(q, max_discard);
- q->limits.discard_granularity = card->pref_erase << 9;
- /* granularity must not be greater than max. discard */
- if (card->pref_erase > max_discard)
- q->limits.discard_granularity = SECTOR_SIZE;
+ lim->max_hw_discard_sectors = max_discard;
if (mmc_can_secure_erase_trim(card))
- blk_queue_max_secure_erase_sectors(q, max_discard);
+ lim->max_secure_erase_sectors = max_discard;
if (mmc_can_trim(card) && card->erased_byte == 0)
- blk_queue_max_write_zeroes_sectors(q, max_discard);
+ lim->max_write_zeroes_sectors = max_discard;
+
+ /* granularity must not be greater than max. discard */
+ if (card->pref_erase > max_discard)
+ lim->discard_granularity = SECTOR_SIZE;
+ else
+ lim->discard_granularity = card->pref_erase << 9;
}
static unsigned short mmc_get_max_segments(struct mmc_host *host)
@@ -341,40 +343,50 @@ static const struct blk_mq_ops mmc_mq_ops = {
.timeout = mmc_mq_timed_out,
};
-static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
+ struct mmc_card *card)
{
struct mmc_host *host = card->host;
- unsigned block_size = 512;
+ struct queue_limits lim = { };
+ struct gendisk *disk;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
if (mmc_can_erase(card))
- mmc_queue_setup_discard(mq->queue, card);
-
- if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
- blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
- blk_queue_max_hw_sectors(mq->queue,
- min(host->max_blk_count, host->max_req_size / 512));
- if (host->can_dma_map_merge)
- WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
- mmc_dev(host)),
- "merging was advertised but not possible");
- blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
-
- if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
- block_size = card->ext_csd.data_sector_size;
- WARN_ON(block_size != 512 && block_size != 4096);
- }
+ mmc_queue_setup_discard(card, &lim);
+
+ lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512);
+
+ if (mmc_card_mmc(card) && card->ext_csd.data_sector_size)
+ lim.logical_block_size = card->ext_csd.data_sector_size;
+ else
+ lim.logical_block_size = 512;
+
+ WARN_ON_ONCE(lim.logical_block_size != 512 &&
+ lim.logical_block_size != 4096);
- blk_queue_logical_block_size(mq->queue, block_size);
/*
- * After blk_queue_can_use_dma_map_merging() was called with succeed,
- * since it calls blk_queue_virt_boundary(), the mmc should not call
- * both blk_queue_max_segment_size().
+ * Setting a virt_boundary implicitly sets a max_segment_size, so try
+ * to set the hardware one here.
*/
- if (!host->can_dma_map_merge)
- blk_queue_max_segment_size(mq->queue,
- round_down(host->max_seg_size, block_size));
+ if (host->can_dma_map_merge) {
+ lim.virt_boundary_mask = dma_get_merge_boundary(mmc_dev(host));
+ lim.max_segments = MMC_DMA_MAP_MERGE_SEGMENTS;
+ } else {
+ lim.max_segment_size =
+ round_down(host->max_seg_size, lim.logical_block_size);
+ lim.max_segments = host->max_segs;
+ }
+
+ disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq);
+ if (IS_ERR(disk))
+ return disk;
+ mq->queue = disk->queue;
+
+ if (mmc_host_is_spi(host) && host->use_spi_crc)
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
+
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
@@ -386,6 +398,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
init_waitqueue_head(&mq->wait);
mmc_crypto_setup_queue(mq->queue, host);
+ return disk;
}
static inline bool mmc_merge_capable(struct mmc_host *host)
@@ -447,18 +460,9 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
return ERR_PTR(ret);
- disk = blk_mq_alloc_disk(&mq->tag_set, mq);
- if (IS_ERR(disk)) {
+ disk = mmc_alloc_disk(mq, card);
+ if (IS_ERR(disk))
blk_mq_free_tag_set(&mq->tag_set);
- return disk;
- }
- mq->queue = disk->queue;
-
- if (mmc_host_is_spi(host) && host->use_spi_crc)
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
- blk_queue_rq_timeout(mq->queue, 60 * HZ);
-
- mmc_setup_queue(mq, card);
return disk;
}
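The queue.c rework above follows the block layer's queue_limits convention: instead of allocating the disk and then adjusting its queue with blk_queue_*() setters, the limits are collected in a struct queue_limits and handed to blk_mq_alloc_disk() in one go, matching the three-argument form used in this series. A minimal sketch of that shape (the limit values are placeholders, not the MMC ones):

	#include <linux/blk-mq.h>

	static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set,
						  void *queuedata)
	{
		struct queue_limits lim = { };

		/* declare hardware limits up front... */
		lim.logical_block_size = 512;
		lim.max_hw_sectors = 256;
		lim.max_segments = 128;

		/* ...so the queue is created with them already applied */
		return blk_mq_alloc_disk(set, &lim, queuedata);
	}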
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c3e554344c99..1c8148cdda50 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -805,7 +805,7 @@ static const struct attribute_group sd_std_group = {
};
__ATTRIBUTE_GROUPS(sd_std);
-struct device_type sd_type = {
+const struct device_type sd_type = {
.groups = sd_std_groups,
};
diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h
index 1af5a038bae9..fe6dd46927a4 100644
--- a/drivers/mmc/core/sd.h
+++ b/drivers/mmc/core/sd.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
-extern struct device_type sd_type;
+extern const struct device_type sd_type;
struct mmc_host;
struct mmc_card;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 5914516df2f7..4fb247fde5c0 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -66,7 +66,7 @@ static struct attribute *sdio_std_attrs[] = {
};
ATTRIBUTE_GROUPS(sdio_std);
-static struct device_type sdio_type = {
+static const struct device_type sdio_type = {
.groups = sdio_std_groups,
};
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 47a48e902a24..71d885fbc228 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -244,7 +244,7 @@ static const struct dev_pm_ops sdio_bus_pm_ops = {
)
};
-static struct bus_type sdio_bus_type = {
+static const struct bus_type sdio_bus_type = {
.name = "sdio",
.dev_groups = sdio_dev_groups,
.match = sdio_bus_match,
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 2a2d949a9344..39f45c2b6de8 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -75,11 +75,15 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_irq);
int mmc_gpio_get_ro(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
+ int cansleep;
if (!ctx || !ctx->ro_gpio)
return -ENOSYS;
- return gpiod_get_value_cansleep(ctx->ro_gpio);
+ cansleep = gpiod_cansleep(ctx->ro_gpio);
+ return cansleep ?
+ gpiod_get_value_cansleep(ctx->ro_gpio) :
+ gpiod_get_value(ctx->ro_gpio);
}
EXPORT_SYMBOL(mmc_gpio_get_ro);
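mmc_gpio_get_ro() above now selects the accessor at run time: gpiod_cansleep() reports whether reading the descriptor may sleep (a GPIO expander behind I2C, say), and the matching getter is used so the non-sleeping fast path stays available. The idiom in isolation:

	#include <linux/gpio/consumer.h>

	static int example_read_level(struct gpio_desc *desc)
	{
		/* sleeping GPIOs must use the _cansleep variant */
		if (gpiod_cansleep(desc))
			return gpiod_get_value_cansleep(desc);
		return gpiod_get_value(desc);
	}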
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 81f2c4e05287..aebc587f77a7 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -798,6 +798,15 @@ config MMC_DW_HI3798CV200
Synopsys DesignWare Memory Card Interface driver. Select this option
for platforms based on HiSilicon Hi3798CV200 SoC.
+config MMC_DW_HI3798MV200
+ tristate "Hi3798MV200 specific extensions for Synopsys DW Memory Card Interface"
+ depends on MMC_DW
+ select MMC_DW_PLTFM
+ help
+ This selects support for HiSilicon Hi3798MV200 SoC specific extensions to the
+ Synopsys DesignWare Memory Card Interface driver. Select this option
+ for platforms based on HiSilicon Hi3798MV200 SoC.
+
config MMC_DW_K3
tristate "K3 specific extensions for Synopsys DW Memory Card Interface"
depends on MMC_DW
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index d0be4465f3ec..f53f86d200ac 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o
obj-$(CONFIG_MMC_DW_BLUEFIELD) += dw_mmc-bluefield.o
obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o
obj-$(CONFIG_MMC_DW_HI3798CV200) += dw_mmc-hi3798cv200.o
+obj-$(CONFIG_MMC_DW_HI3798MV200) += dw_mmc-hi3798mv200.o
obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o
obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
obj-$(CONFIG_MMC_DW_ROCKCHIP) += dw_mmc-rockchip.o
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index ee3b1a4e0848..8bd938919687 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -180,12 +180,6 @@ struct mmc_davinci_host {
#define DAVINCI_MMC_DATADIR_WRITE 2
unsigned char data_dir;
- /* buffer is used during PIO of one scatterlist segment, and
- * is updated along with buffer_bytes_left. bytes_left applies
- * to all N blocks of the PIO transfer.
- */
- u8 *buffer;
- u32 buffer_bytes_left;
u32 bytes_left;
struct dma_chan *dma_tx;
@@ -196,8 +190,8 @@ struct mmc_davinci_host {
bool active_request;
/* For PIO we walk scatterlists one segment at a time. */
+ struct sg_mapping_iter sg_miter;
unsigned int sg_len;
- struct scatterlist *sg;
/* Version of the MMC/SD controller */
u8 version;
@@ -213,30 +207,22 @@ struct mmc_davinci_host {
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);
/* PIO only */
-static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
-{
- host->buffer_bytes_left = sg_dma_len(host->sg);
- host->buffer = sg_virt(host->sg);
- if (host->buffer_bytes_left > host->bytes_left)
- host->buffer_bytes_left = host->bytes_left;
-}
-
static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
unsigned int n)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
u8 *p;
unsigned int i;
- if (host->buffer_bytes_left == 0) {
- host->sg = sg_next(host->data->sg);
- mmc_davinci_sg_to_buf(host);
+ /*
+ * By adjusting sgm->consumed, sg_miter_next() will return a pointer
+ * to the current position in the scatterlist.
+ */
+ if (!sg_miter_next(sgm)) {
+ dev_err(mmc_dev(host->mmc), "ran out of sglist prematurely\n");
+ return;
}
-
- p = host->buffer;
- if (n > host->buffer_bytes_left)
- n = host->buffer_bytes_left;
- host->buffer_bytes_left -= n;
- host->bytes_left -= n;
+ p = sgm->addr;
/* NOTE: we never transfer more than rw_threshold bytes
* to/from the fifo here; there's no I/O overlap.
@@ -261,7 +247,9 @@ static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
p = p + (n & 3);
}
}
- host->buffer = p;
+
+ sgm->consumed = n;
+ host->bytes_left -= n;
}
static void mmc_davinci_start_command(struct mmc_davinci_host *host,
@@ -517,6 +505,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
int timeout;
struct mmc_data *data = req->data;
+ unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */
if (host->version == MMC_CTLR_VERSION_2)
fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;
@@ -545,12 +534,14 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
/* Configure the FIFO */
if (data->flags & MMC_DATA_WRITE) {
+ flags |= SG_MITER_FROM_SG;
host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
host->base + DAVINCI_MMCFIFOCTL);
} else {
+ flags |= SG_MITER_TO_SG;
host->data_dir = DAVINCI_MMC_DATADIR_READ;
writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
host->base + DAVINCI_MMCFIFOCTL);
@@ -558,7 +549,6 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
host->base + DAVINCI_MMCFIFOCTL);
}
- host->buffer = NULL;
host->bytes_left = data->blocks * data->blksz;
/* For now we try to use DMA whenever we won't need partial FIFO
@@ -576,8 +566,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
} else {
/* Revert to CPU Copy */
host->sg_len = data->sg_len;
- host->sg = host->data->sg;
- mmc_davinci_sg_to_buf(host);
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
}
@@ -843,6 +832,8 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
mmc_davinci_reset_ctrl(host, 1);
mmc_davinci_reset_ctrl(host, 0);
+ if (!host->do_dma)
+ sg_miter_stop(&host->sg_miter);
}
static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
@@ -919,11 +910,13 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
if (qstatus & MMCST0_DATDNE) {
/* All blocks sent/received, and CRC checks passed */
if (data != NULL) {
- if ((host->do_dma == 0) && (host->bytes_left > 0)) {
- /* if datasize < rw_threshold
- * no RX ints are generated
- */
- davinci_fifo_data_trans(host, host->bytes_left);
+ if (!host->do_dma) {
+ if (host->bytes_left > 0)
+ /* if datasize < rw_threshold
+ * no RX ints are generated
+ */
+ davinci_fifo_data_trans(host, host->bytes_left);
+ sg_miter_stop(&host->sg_miter);
}
end_transfer = 1;
data->bytes_xfered = data->blocks * data->blksz;
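The davinci conversion above, like the moxart, mvsdio, mxcmmc and omap ones later in this series, replaces hand-rolled scatterlist walking (cur_sg/buffer/bytes_left bookkeeping) with struct sg_mapping_iter. The lifecycle is sg_miter_start() at setup, sg_miter_next() per chunk with sgm->consumed recording how much was used, and sg_miter_stop() at completion or abort. A condensed sketch of a PIO read under those assumptions (example_* names are illustrative):

	#include <linux/scatterlist.h>
	#include <linux/minmax.h>
	#include <linux/io.h>

	static void example_pio_start(struct sg_mapping_iter *sgm,
				      struct scatterlist *sg,
				      unsigned int sg_len, bool write)
	{
		unsigned int flags = SG_MITER_ATOMIC;	/* used from IRQ */

		flags |= write ? SG_MITER_FROM_SG : SG_MITER_TO_SG;
		sg_miter_start(sgm, sg, sg_len, flags);
	}

	static unsigned int example_pio_read(struct sg_mapping_iter *sgm,
					     void __iomem *fifo, unsigned int n)
	{
		if (!sg_miter_next(sgm))	/* maps the next chunk */
			return 0;

		n = min_t(unsigned int, n, sgm->length);
		memcpy_fromio(sgm->addr, fifo, n);
		sgm->consumed = n;		/* advance by what we used */
		return n;
	}

	/* ...and sg_miter_stop(sgm) once the transfer ends or is aborted. */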
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 698408e8bad0..6dc057718d2c 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -11,7 +11,6 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
diff --git a/drivers/mmc/host/dw_mmc-hi3798cv200.c b/drivers/mmc/host/dw_mmc-hi3798cv200.c
index e9470c50a348..61923a518369 100644
--- a/drivers/mmc/host/dw_mmc-hi3798cv200.c
+++ b/drivers/mmc/host/dw_mmc-hi3798cv200.c
@@ -201,4 +201,3 @@ module_platform_driver(dw_mci_hi3798cv200_driver);
MODULE_DESCRIPTION("HiSilicon Hi3798CV200 Specific DW-MSHC Driver Extension");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:dwmmc_hi3798cv200");
diff --git a/drivers/mmc/host/dw_mmc-hi3798mv200.c b/drivers/mmc/host/dw_mmc-hi3798mv200.c
new file mode 100644
index 000000000000..989ae8dda722
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc-hi3798mv200.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Modified from dw_mmc-hi3798cv200.c
+ *
+ * Copyright (c) 2024 Yang Xiwen <forbidden405@outlook.com>
+ * Copyright (c) 2018 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dw_mmc.h"
+#include "dw_mmc-pltfm.h"
+
+#define SDMMC_TUNING_CTRL 0x118
+#define SDMMC_TUNING_FIND_EDGE BIT(5)
+
+#define ALL_INT_CLR 0x1ffff
+
+/* DLL ctrl reg */
+#define SAP_DLL_CTRL_DLLMODE BIT(16)
+
+struct dw_mci_hi3798mv200_priv {
+ struct clk *sample_clk;
+ struct clk *drive_clk;
+ struct regmap *crg_reg;
+ u32 sap_dll_offset;
+ struct mmc_clk_phase_map phase_map;
+};
+
+static void dw_mci_hi3798mv200_set_ios(struct dw_mci *host, struct mmc_ios *ios)
+{
+ struct dw_mci_hi3798mv200_priv *priv = host->priv;
+ struct mmc_clk_phase phase = priv->phase_map.phase[ios->timing];
+ u32 val;
+
+ val = mci_readl(host, ENABLE_SHIFT);
+ if (ios->timing == MMC_TIMING_MMC_DDR52
+ || ios->timing == MMC_TIMING_UHS_DDR50)
+ val |= SDMMC_ENABLE_PHASE;
+ else
+ val &= ~SDMMC_ENABLE_PHASE;
+ mci_writel(host, ENABLE_SHIFT, val);
+
+ val = mci_readl(host, DDR_REG);
+ if (ios->timing == MMC_TIMING_MMC_HS400)
+ val |= SDMMC_DDR_HS400;
+ else
+ val &= ~SDMMC_DDR_HS400;
+ mci_writel(host, DDR_REG, val);
+
+ if (clk_set_rate(host->ciu_clk, ios->clock))
+ dev_warn(host->dev, "Failed to set rate to %u\n", ios->clock);
+ else
+ /*
+ * CLK_MUX_ROUND_NEAREST is enabled for this clock.
+ * The actual clock rate is not what we set but a rounded value,
+ * so we should read the rate back once again.
+ */
+ host->bus_hz = clk_get_rate(host->ciu_clk);
+
+ if (phase.valid) {
+ clk_set_phase(priv->drive_clk, phase.out_deg);
+ clk_set_phase(priv->sample_clk, phase.in_deg);
+ } else {
+ dev_warn(host->dev,
+ "The phase entry for timing mode %d is missing in device tree.\n",
+ ios->timing);
+ }
+}
+
+static inline int dw_mci_hi3798mv200_enable_tuning(struct dw_mci_slot *slot)
+{
+ struct dw_mci_hi3798mv200_priv *priv = slot->host->priv;
+
+ return regmap_clear_bits(priv->crg_reg, priv->sap_dll_offset, SAP_DLL_CTRL_DLLMODE);
+}
+
+static inline int dw_mci_hi3798mv200_disable_tuning(struct dw_mci_slot *slot)
+{
+ struct dw_mci_hi3798mv200_priv *priv = slot->host->priv;
+
+ return regmap_set_bits(priv->crg_reg, priv->sap_dll_offset, SAP_DLL_CTRL_DLLMODE);
+}
+
+static int dw_mci_hi3798mv200_execute_tuning_mix_mode(struct dw_mci_slot *slot,
+ u32 opcode)
+{
+ static const int degrees[] = { 0, 45, 90, 135, 180, 225, 270, 315 };
+ struct dw_mci *host = slot->host;
+ struct dw_mci_hi3798mv200_priv *priv = host->priv;
+ int raise_point = -1, fall_point = -1, mid;
+ int err, prev_err = -1;
+ int found = 0;
+ int regval;
+ int i;
+ int ret;
+
+ ret = dw_mci_hi3798mv200_enable_tuning(slot);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(degrees); i++) {
+ clk_set_phase(priv->sample_clk, degrees[i]);
+ mci_writel(host, RINTSTS, ALL_INT_CLR);
+
+ /*
+ * HiSilicon implements its own tuning mechanism, which needs
+ * special interaction with the DLL.
+ *
+ * Treat a detected edge (flip) as an error too.
+ */
+ err = mmc_send_tuning(slot->mmc, opcode, NULL);
+ regval = mci_readl(host, TUNING_CTRL);
+ if (err || (regval & SDMMC_TUNING_FIND_EDGE))
+ err = 1;
+ else
+ found = 1;
+
+ if (i > 0) {
+ if (err && !prev_err)
+ fall_point = i - 1;
+ if (!err && prev_err)
+ raise_point = i;
+ }
+
+ if (raise_point != -1 && fall_point != -1)
+ goto tuning_out;
+
+ prev_err = err;
+ err = 0;
+ }
+
+tuning_out:
+ ret = dw_mci_hi3798mv200_disable_tuning(slot);
+ if (ret < 0)
+ return ret;
+
+ if (found) {
+ if (raise_point == -1)
+ raise_point = 0;
+ if (fall_point == -1)
+ fall_point = ARRAY_SIZE(degrees) - 1;
+ if (fall_point < raise_point) {
+ if ((raise_point + fall_point) >
+ (ARRAY_SIZE(degrees) - 1))
+ mid = fall_point / 2;
+ else
+ mid = (raise_point + ARRAY_SIZE(degrees) - 1) / 2;
+ } else {
+ mid = (raise_point + fall_point) / 2;
+ }
+
+ /*
+ * We don't care what timing we are tuning for,
+ * simply use the same phase for every timing that needs tuning.
+ */
+ priv->phase_map.phase[MMC_TIMING_MMC_HS200].in_deg = degrees[mid];
+ priv->phase_map.phase[MMC_TIMING_MMC_HS400].in_deg = degrees[mid];
+ priv->phase_map.phase[MMC_TIMING_UHS_SDR104].in_deg = degrees[mid];
+
+ clk_set_phase(priv->sample_clk, degrees[mid]);
+ dev_dbg(host->dev, "Tuning clk_sample[%d, %d], set[%d]\n",
+ raise_point, fall_point, degrees[mid]);
+ ret = 0;
+ } else {
+ dev_err(host->dev, "No valid clk_sample shift!\n");
+ ret = -EINVAL;
+ }
+
+ mci_writel(host, RINTSTS, ALL_INT_CLR);
+
+ return ret;
+}
+
+static int dw_mci_hi3798mv200_init(struct dw_mci *host)
+{
+ struct dw_mci_hi3798mv200_priv *priv;
+ struct device_node *np = host->dev->of_node;
+ int ret;
+
+ priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mmc_of_parse_clk_phase(host->dev, &priv->phase_map);
+
+ priv->sample_clk = devm_clk_get_enabled(host->dev, "ciu-sample");
+ if (IS_ERR(priv->sample_clk))
+ return dev_err_probe(host->dev, PTR_ERR(priv->sample_clk),
+ "failed to get enabled ciu-sample clock\n");
+
+ priv->drive_clk = devm_clk_get_enabled(host->dev, "ciu-drive");
+ if (IS_ERR(priv->drive_clk))
+ return dev_err_probe(host->dev, PTR_ERR(priv->drive_clk),
+ "failed to get enabled ciu-drive clock\n");
+
+ priv->crg_reg = syscon_regmap_lookup_by_phandle(np, "hisilicon,sap-dll-reg");
+ if (IS_ERR(priv->crg_reg))
+ return dev_err_probe(host->dev, PTR_ERR(priv->crg_reg),
+ "failed to get CRG reg\n");
+
+ ret = of_property_read_u32_index(np, "hisilicon,sap-dll-reg", 1, &priv->sap_dll_offset);
+ if (ret)
+ return dev_err_probe(host->dev, ret, "failed to get sample DLL register offset\n");
+
+ host->priv = priv;
+ return 0;
+}
+
+static const struct dw_mci_drv_data hi3798mv200_data = {
+ .common_caps = MMC_CAP_CMD23,
+ .init = dw_mci_hi3798mv200_init,
+ .set_ios = dw_mci_hi3798mv200_set_ios,
+ .execute_tuning = dw_mci_hi3798mv200_execute_tuning_mix_mode,
+};
+
+static const struct of_device_id dw_mci_hi3798mv200_match[] = {
+ { .compatible = "hisilicon,hi3798mv200-dw-mshc" },
+ {},
+};
+
+static int dw_mci_hi3798mv200_probe(struct platform_device *pdev)
+{
+ return dw_mci_pltfm_register(pdev, &hi3798mv200_data);
+}
+
+static void dw_mci_hi3798mv200_remove(struct platform_device *pdev)
+{
+ dw_mci_pltfm_remove(pdev);
+}
+
+MODULE_DEVICE_TABLE(of, dw_mci_hi3798mv200_match);
+static struct platform_driver dw_mci_hi3798mv200_driver = {
+ .probe = dw_mci_hi3798mv200_probe,
+ .remove_new = dw_mci_hi3798mv200_remove,
+ .driver = {
+ .name = "dwmmc_hi3798mv200",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = dw_mci_hi3798mv200_match,
+ },
+};
+module_platform_driver(dw_mci_hi3798mv200_driver);
+
+MODULE_DESCRIPTION("HiSilicon Hi3798MV200 Specific DW-MSHC Driver Extension");
+MODULE_LICENSE("GPL");
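To make the windowing in dw_mci_hi3798mv200_execute_tuning_mix_mode() concrete, a worked example: with the eight 45-degree steps above, suppose tuning fails at indices 0-2, passes at 3-6, and fails again at 7. The pass/fail transitions give raise_point = 3 and fall_point = 6, so mid = (3 + 6) / 2 = 4 and the driver settles on degrees[4] = 180. The fall_point < raise_point branch handles the case where the valid window wraps around the end of the table.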
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 829af2c98a44..8e2d676b9239 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -35,7 +35,6 @@
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
#include "dw_mmc.h"
diff --git a/drivers/mmc/host/meson-mx-sdhc-clkc.c b/drivers/mmc/host/meson-mx-sdhc-clkc.c
index 19200b7079a6..cbd17a596cd2 100644
--- a/drivers/mmc/host/meson-mx-sdhc-clkc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-clkc.c
@@ -71,12 +71,23 @@ static int meson_mx_sdhc_clk_hw_register(struct device *dev,
static int meson_mx_sdhc_gate_clk_hw_register(struct device *dev,
const char *name_suffix,
struct clk_hw *parent,
- struct clk_hw *hw)
+ struct clk_hw *hw,
+ struct clk_bulk_data *clk_bulk_data,
+ u8 bulk_index)
{
struct clk_parent_data parent_data = { .hw = parent };
+ int ret;
+
+ ret = meson_mx_sdhc_clk_hw_register(dev, name_suffix, &parent_data, 1,
+ &clk_gate_ops, hw);
+ if (ret)
+ return ret;
+
+ clk_bulk_data[bulk_index].clk = devm_clk_hw_get_clk(dev, hw, name_suffix);
+ if (IS_ERR(clk_bulk_data[bulk_index].clk))
+ return PTR_ERR(clk_bulk_data[bulk_index].clk);
- return meson_mx_sdhc_clk_hw_register(dev, name_suffix, &parent_data, 1,
- &clk_gate_ops, hw);
+ return 0;
}
int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
@@ -115,7 +126,8 @@ int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
clkc_data->mod_clk_en.bit_idx = 15;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "mod_clk_on",
&clkc_data->div.hw,
- &clkc_data->mod_clk_en.hw);
+ &clkc_data->mod_clk_en.hw,
+ clk_bulk_data, 0);
if (ret)
return ret;
@@ -123,7 +135,8 @@ int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
clkc_data->tx_clk_en.bit_idx = 14;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "tx_clk_on",
&clkc_data->div.hw,
- &clkc_data->tx_clk_en.hw);
+ &clkc_data->tx_clk_en.hw,
+ clk_bulk_data, 1);
if (ret)
return ret;
@@ -131,7 +144,8 @@ int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
clkc_data->rx_clk_en.bit_idx = 13;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "rx_clk_on",
&clkc_data->div.hw,
- &clkc_data->rx_clk_en.hw);
+ &clkc_data->rx_clk_en.hw,
+ clk_bulk_data, 2);
if (ret)
return ret;
@@ -139,18 +153,7 @@ int meson_mx_sdhc_register_clkc(struct device *dev, void __iomem *base,
clkc_data->sd_clk_en.bit_idx = 12;
ret = meson_mx_sdhc_gate_clk_hw_register(dev, "sd_clk_on",
&clkc_data->div.hw,
- &clkc_data->sd_clk_en.hw);
- if (ret)
- return ret;
-
- /*
- * TODO: Replace clk_hw.clk with devm_clk_hw_get_clk() once that is
- * available.
- */
- clk_bulk_data[0].clk = clkc_data->mod_clk_en.hw.clk;
- clk_bulk_data[1].clk = clkc_data->sd_clk_en.hw.clk;
- clk_bulk_data[2].clk = clkc_data->tx_clk_en.hw.clk;
- clk_bulk_data[3].clk = clkc_data->rx_clk_en.hw.clk;
-
- return 0;
+ &clkc_data->sd_clk_en.hw,
+ clk_bulk_data, 3);
+ return ret;
}
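The meson-mx-sdhc-clkc change above resolves the old TODO by fetching consumer clk handles through devm_clk_hw_get_clk() at registration time instead of reaching into clk_hw.clk directly. The helper in isolation, with a hypothetical wrapper name:

	#include <linux/clk-provider.h>
	#include <linux/err.h>

	static int example_fill_bulk(struct device *dev, struct clk_hw *hw,
				     const char *con_id,
				     struct clk_bulk_data *bulk)
	{
		/* device-managed consumer handle for a clk we just registered */
		bulk->clk = devm_clk_hw_get_clk(dev, hw, con_id);
		return PTR_ERR_OR_ZERO(bulk->clk);
	}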
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
index 1ed9731e77ef..31f750301dc1 100644
--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -65,10 +65,8 @@ static const struct regmap_config meson_mx_sdhc_regmap_config = {
.max_register = MESON_SDHC_CLK2,
};
-static void meson_mx_sdhc_hw_reset(struct mmc_host *mmc)
+static void meson_mx_sdhc_reset(struct meson_mx_sdhc_host *host)
{
- struct meson_mx_sdhc_host *host = mmc_priv(mmc);
-
regmap_write(host->regmap, MESON_SDHC_SRST, MESON_SDHC_SRST_MAIN_CTRL |
MESON_SDHC_SRST_RXFIFO | MESON_SDHC_SRST_TXFIFO |
MESON_SDHC_SRST_DPHY_RX | MESON_SDHC_SRST_DPHY_TX |
@@ -116,7 +114,7 @@ static void meson_mx_sdhc_wait_cmd_ready(struct mmc_host *mmc)
dev_warn(mmc_dev(mmc),
"Failed to poll for CMD_BUSY while processing CMD%d\n",
host->cmd->opcode);
- meson_mx_sdhc_hw_reset(mmc);
+ meson_mx_sdhc_reset(host);
}
ret = regmap_read_poll_timeout(host->regmap, MESON_SDHC_ESTA, esta,
@@ -127,7 +125,7 @@ static void meson_mx_sdhc_wait_cmd_ready(struct mmc_host *mmc)
dev_warn(mmc_dev(mmc),
"Failed to poll for ESTA[13:11] while processing CMD%d\n",
host->cmd->opcode);
- meson_mx_sdhc_hw_reset(mmc);
+ meson_mx_sdhc_reset(host);
}
}
@@ -495,7 +493,6 @@ static int meson_mx_sdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
static const struct mmc_host_ops meson_mx_sdhc_ops = {
- .card_hw_reset = meson_mx_sdhc_hw_reset,
.request = meson_mx_sdhc_request,
.set_ios = meson_mx_sdhc_set_ios,
.card_busy = meson_mx_sdhc_card_busy,
@@ -618,7 +615,7 @@ static irqreturn_t meson_mx_sdhc_irq_thread(int irq, void *irq_data)
}
if (cmd->error == -EIO || cmd->error == -ETIMEDOUT)
- meson_mx_sdhc_hw_reset(host->mmc);
+ meson_mx_sdhc_reset(host);
else if (cmd->data)
/*
* Clear the FIFOs after completing data transfers to prevent
@@ -728,7 +725,7 @@ static void meson_mx_sdhc_init_hw(struct mmc_host *mmc)
{
struct meson_mx_sdhc_host *host = mmc_priv(mmc);
- meson_mx_sdhc_hw_reset(mmc);
+ meson_mx_sdhc_reset(host);
regmap_write(host->regmap, MESON_SDHC_CTRL,
FIELD_PREP(MESON_SDHC_CTRL_RX_PERIOD, 0xf) |
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index bf35761f783a..09d7a6a0dc1a 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
-#include <linux/dma-direction.h>
#include <linux/crc7.h>
#include <linux/crc-itu-t.h>
#include <linux/scatterlist.h>
@@ -510,10 +509,7 @@ mmc_spi_command_send(struct mmc_spi_host *host,
* so we explicitly initialize it to all ones on RX paths.
*/
static void
-mmc_spi_setup_data_message(
- struct mmc_spi_host *host,
- bool multiple,
- enum dma_data_direction direction)
+mmc_spi_setup_data_message(struct mmc_spi_host *host, bool multiple, bool write)
{
struct spi_transfer *t;
struct scratch *scratch = host->data;
@@ -523,7 +519,7 @@ mmc_spi_setup_data_message(
/* for reads, readblock() skips 0xff bytes before finding
* the token; for writes, this transfer issues that token.
*/
- if (direction == DMA_TO_DEVICE) {
+ if (write) {
t = &host->token;
memset(t, 0, sizeof(*t));
t->len = 1;
@@ -547,7 +543,7 @@ mmc_spi_setup_data_message(
t = &host->crc;
memset(t, 0, sizeof(*t));
t->len = 2;
- if (direction == DMA_TO_DEVICE) {
+ if (write) {
/* the actual CRC may get written later */
t->tx_buf = &scratch->crc_val;
} else {
@@ -570,10 +566,10 @@ mmc_spi_setup_data_message(
* the next token (next data block, or STOP_TRAN). We can try to
* minimize I/O ops by using a single read to collect end-of-busy.
*/
- if (multiple || direction == DMA_TO_DEVICE) {
+ if (multiple || write) {
t = &host->early_status;
memset(t, 0, sizeof(*t));
- t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
+ t->len = write ? sizeof(scratch->status) : 1;
t->tx_buf = host->ones;
t->rx_buf = scratch->status;
t->cs_change = 1;
@@ -777,15 +773,15 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
{
struct spi_device *spi = host->spi;
struct spi_transfer *t;
- enum dma_data_direction direction = mmc_get_dma_dir(data);
struct scatterlist *sg;
unsigned n_sg;
bool multiple = (data->blocks > 1);
- const char *write_or_read = (direction == DMA_TO_DEVICE) ? "write" : "read";
+ bool write = (data->flags & MMC_DATA_WRITE);
+ const char *write_or_read = write ? "write" : "read";
u32 clock_rate;
unsigned long timeout;
- mmc_spi_setup_data_message(host, multiple, direction);
+ mmc_spi_setup_data_message(host, multiple, write);
t = &host->t;
if (t->speed_hz)
@@ -807,7 +803,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
/* allow pio too; we don't allow highmem */
kmap_addr = kmap(sg_page(sg));
- if (direction == DMA_TO_DEVICE)
+ if (write)
t->tx_buf = kmap_addr + sg->offset;
else
t->rx_buf = kmap_addr + sg->offset;
@@ -818,7 +814,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
dev_dbg(&spi->dev, " %s block, %d bytes\n", write_or_read, t->len);
- if (direction == DMA_TO_DEVICE)
+ if (write)
status = mmc_spi_writeblock(host, t, timeout);
else
status = mmc_spi_readblock(host, t, timeout);
@@ -833,7 +829,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
}
/* discard mappings */
- if (direction == DMA_FROM_DEVICE)
+ if (write)
+ /* nothing to do */;
+ else
flush_dcache_page(sg_page(sg));
kunmap(sg_page(sg));
@@ -850,7 +848,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
* that can affect the STOP_TRAN logic. Complete (and current)
* MMC specs should sort that out before Linux starts using CMD23.
*/
- if (direction == DMA_TO_DEVICE && multiple) {
+ if (write && multiple) {
struct scratch *scratch = host->data;
int tmp;
const unsigned statlen = sizeof(scratch->status);
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index 35067e1e6cd8..f5da7f9baa52 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -225,6 +225,8 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
struct scatterlist *sg;
int i;
+ host->dma_in_progress = true;
+
if (!host->variant->dma_lli || data->sg_len == 1 ||
idma->use_bounce_buffer) {
u32 dma_addr;
@@ -263,9 +265,30 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
return 0;
}
+static void sdmmc_idma_error(struct mmci_host *host)
+{
+ struct mmc_data *data = host->data;
+ struct sdmmc_idma *idma = host->dma_priv;
+
+ if (!dma_inprogress(host))
+ return;
+
+ writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+ host->dma_in_progress = false;
+ data->host_cookie = 0;
+
+ if (!idma->use_bounce_buffer)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
+}
+
static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
+ if (!dma_inprogress(host))
+ return;
+
writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
+ host->dma_in_progress = false;
if (!data->host_cookie)
sdmmc_idma_unprep_data(host, data, 0);
@@ -676,6 +699,7 @@ static struct mmci_host_ops sdmmc_variant_ops = {
.dma_setup = sdmmc_idma_setup,
.dma_start = sdmmc_idma_start,
.dma_finalize = sdmmc_idma_finalize,
+ .dma_error = sdmmc_idma_error,
.set_clkreg = mmci_sdmmc_set_clkreg,
.set_pwrreg = mmci_sdmmc_set_pwrreg,
.busy_complete = sdmmc_busy_complete,
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 5cfdd3a86e54..b88d6dec209f 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -131,12 +131,10 @@ struct moxart_host {
struct dma_async_tx_descriptor *tx_desc;
struct mmc_host *mmc;
struct mmc_request *mrq;
- struct scatterlist *cur_sg;
struct completion dma_complete;
struct completion pio_complete;
- u32 num_sg;
- u32 data_remain;
+ struct sg_mapping_iter sg_miter;
u32 data_len;
u32 fifo_width;
u32 timeout;
@@ -148,35 +146,6 @@ struct moxart_host {
bool is_removed;
};
-static inline void moxart_init_sg(struct moxart_host *host,
- struct mmc_data *data)
-{
- host->cur_sg = data->sg;
- host->num_sg = data->sg_len;
- host->data_remain = host->cur_sg->length;
-
- if (host->data_remain > host->data_len)
- host->data_remain = host->data_len;
-}
-
-static inline int moxart_next_sg(struct moxart_host *host)
-{
- int remain;
- struct mmc_data *data = host->mrq->cmd->data;
-
- host->cur_sg++;
- host->num_sg--;
-
- if (host->num_sg > 0) {
- host->data_remain = host->cur_sg->length;
- remain = host->data_len - data->bytes_xfered;
- if (remain > 0 && remain < host->data_remain)
- host->data_remain = remain;
- }
-
- return host->num_sg;
-}
-
static int moxart_wait_for_status(struct moxart_host *host,
u32 mask, u32 *status)
{
@@ -254,6 +223,11 @@ static void moxart_dma_complete(void *param)
complete(&host->dma_complete);
}
+static bool moxart_use_dma(struct moxart_host *host)
+{
+ return (host->data_len > host->fifo_width) && host->have_dma;
+}
+
static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
u32 len, dir_slave;
@@ -291,11 +265,11 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
dma_async_issue_pending(dma_chan);
}
- data->bytes_xfered += host->data_remain;
-
wait_for_completion_interruptible_timeout(&host->dma_complete,
host->timeout);
+ data->bytes_xfered = host->data_len;
+
dma_unmap_sg(dma_chan->device->dev,
data->sg, data->sg_len,
mmc_get_dma_dir(data));
@@ -304,14 +278,28 @@ static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
static void moxart_transfer_pio(struct moxart_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct mmc_data *data = host->mrq->cmd->data;
u32 *sgp, len = 0, remain, status;
if (host->data_len == data->bytes_xfered)
return;
- sgp = sg_virt(host->cur_sg);
- remain = host->data_remain;
+ /*
+ * By updating sgm->consumed, this will get a proper pointer into
+ * the buffer at any time.
+ */
+ if (!sg_miter_next(sgm)) {
+ /* This should not happen */
+ dev_err(mmc_dev(host->mmc), "ran out of scatterlist prematurely\n");
+ data->error = -EINVAL;
+ complete(&host->pio_complete);
+ return;
+ }
+ sgp = sgm->addr;
+ remain = sgm->length;
+ if (remain > host->data_len)
+ remain = host->data_len;
if (data->flags & MMC_DATA_WRITE) {
while (remain > 0) {
@@ -326,6 +314,7 @@ static void moxart_transfer_pio(struct moxart_host *host)
sgp++;
len += 4;
}
+ sgm->consumed += len;
remain -= len;
}
@@ -342,22 +331,22 @@ static void moxart_transfer_pio(struct moxart_host *host)
sgp++;
len += 4;
}
+ sgm->consumed += len;
remain -= len;
}
}
- data->bytes_xfered += host->data_remain - remain;
- host->data_remain = remain;
-
- if (host->data_len != data->bytes_xfered)
- moxart_next_sg(host);
- else
+ data->bytes_xfered += sgm->consumed;
+ if (host->data_len == data->bytes_xfered) {
complete(&host->pio_complete);
+ return;
+ }
}
static void moxart_prepare_data(struct moxart_host *host)
{
struct mmc_data *data = host->mrq->cmd->data;
+ unsigned int flags = SG_MITER_ATOMIC; /* Used from IRQ */
u32 datactrl;
int blksz_bits;
@@ -368,15 +357,19 @@ static void moxart_prepare_data(struct moxart_host *host)
blksz_bits = ffs(data->blksz) - 1;
BUG_ON(1 << blksz_bits != data->blksz);
- moxart_init_sg(host, data);
-
datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE);
- if (data->flags & MMC_DATA_WRITE)
+ if (data->flags & MMC_DATA_WRITE) {
+ flags |= SG_MITER_FROM_SG;
datactrl |= DCR_DATA_WRITE;
+ } else {
+ flags |= SG_MITER_TO_SG;
+ }
- if ((host->data_len > host->fifo_width) && host->have_dma)
+ if (moxart_use_dma(host))
datactrl |= DCR_DMA_EN;
+ else
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL);
writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR);
@@ -407,7 +400,7 @@ static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
moxart_send_command(host, host->mrq->cmd);
if (mrq->cmd->data) {
- if ((host->data_len > host->fifo_width) && host->have_dma) {
+ if (moxart_use_dma(host)) {
writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK);
@@ -449,6 +442,9 @@ static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
request_done:
+ if (!moxart_use_dma(host))
+ sg_miter_stop(&host->sg_miter);
+
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
}
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index ca01b7d204ba..af7f21888e27 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -38,8 +38,9 @@ struct mvsd_host {
unsigned int xfer_mode;
unsigned int intr_en;
unsigned int ctrl;
+ bool use_pio;
+ struct sg_mapping_iter sg_miter;
unsigned int pio_size;
- void *pio_ptr;
unsigned int sg_frags;
unsigned int ns_per_clk;
unsigned int clock;
@@ -114,11 +115,18 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
* data when the buffer is not aligned on a 64 byte
* boundary.
*/
+ unsigned int miter_flags = SG_MITER_ATOMIC; /* Used from IRQ */
+
+ if (data->flags & MMC_DATA_READ)
+ miter_flags |= SG_MITER_TO_SG;
+ else
+ miter_flags |= SG_MITER_FROM_SG;
+
host->pio_size = data->blocks * data->blksz;
- host->pio_ptr = sg_virt(data->sg);
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, miter_flags);
if (!nodma)
- dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n",
- host->pio_ptr, host->pio_size);
+ dev_dbg(host->dev, "fallback to PIO for data\n");
+ host->use_pio = true;
return 1;
} else {
dma_addr_t phys_addr;
@@ -129,6 +137,7 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
phys_addr = sg_dma_address(data->sg);
mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16);
+ host->use_pio = false;
return 0;
}
}
@@ -288,8 +297,8 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
{
void __iomem *iobase = host->base;
- if (host->pio_ptr) {
- host->pio_ptr = NULL;
+ if (host->use_pio) {
+ sg_miter_stop(&host->sg_miter);
host->pio_size = 0;
} else {
dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
@@ -344,9 +353,12 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
static irqreturn_t mvsd_irq(int irq, void *dev)
{
struct mvsd_host *host = dev;
+ struct sg_mapping_iter *sgm = &host->sg_miter;
void __iomem *iobase = host->base;
u32 intr_status, intr_done_mask;
int irq_handled = 0;
+ u16 *p;
+ int s;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n",
@@ -370,15 +382,36 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
spin_lock(&host->lock);
/* PIO handling, if needed. Messy business... */
- if (host->pio_size &&
+ if (host->use_pio) {
+ /*
+ * As we set sgm->consumed, this always gives a valid buffer
+ * position.
+ */
+ if (!sg_miter_next(sgm)) {
+ /* This should not happen */
+ dev_err(host->dev, "ran out of scatter segments\n");
+ spin_unlock(&host->lock);
+ host->intr_en &=
+ ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W |
+ MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
+ mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
+ return IRQ_HANDLED;
+ }
+ p = sgm->addr;
+ s = sgm->length;
+ if (s > host->pio_size)
+ s = host->pio_size;
+ }
+
+ if (host->use_pio &&
(intr_status & host->intr_en &
(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) {
- u16 *p = host->pio_ptr;
- int s = host->pio_size;
+
while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) {
readsw(iobase + MVSD_FIFO, p, 16);
p += 16;
s -= 32;
+ sgm->consumed += 32;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
/*
@@ -391,6 +424,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
put_unaligned(mvsd_read(MVSD_FIFO), p++);
put_unaligned(mvsd_read(MVSD_FIFO), p++);
s -= 4;
+ sgm->consumed += 4;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) {
@@ -398,10 +432,13 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
val[0] = mvsd_read(MVSD_FIFO);
val[1] = mvsd_read(MVSD_FIFO);
memcpy(p, ((void *)&val) + 4 - s, s);
+ sgm->consumed += s;
s = 0;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
- if (s == 0) {
+ /* PIO transfer done */
+ host->pio_size -= sgm->consumed;
+ if (host->pio_size == 0) {
host->intr_en &=
~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W);
mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
@@ -413,14 +450,10 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
}
dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
s, intr_status, mvsd_read(MVSD_HW_STATE));
- host->pio_ptr = p;
- host->pio_size = s;
irq_handled = 1;
- } else if (host->pio_size &&
+ } else if (host->use_pio &&
(intr_status & host->intr_en &
(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) {
- u16 *p = host->pio_ptr;
- int s = host->pio_size;
/*
* The TX_FIFO_8W bit is unreliable. When set, bursting
* 16 halfwords all at once in the FIFO drops data. Actually
@@ -431,6 +464,7 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
mvsd_write(MVSD_FIFO, get_unaligned(p++));
mvsd_write(MVSD_FIFO, get_unaligned(p++));
s -= 4;
+ sgm->consumed += 4;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
if (s < 4) {
@@ -439,10 +473,13 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
memcpy(((void *)&val) + 4 - s, p, s);
mvsd_write(MVSD_FIFO, val[0]);
mvsd_write(MVSD_FIFO, val[1]);
+ sgm->consumed += s;
s = 0;
intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
}
- if (s == 0) {
+ /* PIO transfer done */
+ host->pio_size -= sgm->consumed;
+ if (host->pio_size == 0) {
host->intr_en &=
~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
@@ -450,8 +487,6 @@ static irqreturn_t mvsd_irq(int irq, void *dev)
}
dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
s, intr_status, mvsd_read(MVSD_HW_STATE));
- host->pio_ptr = p;
- host->pio_size = s;
irq_handled = 1;
}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 5b3ab0e20505..1edf65291354 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -266,11 +266,18 @@ static inline void buffer_swap32(u32 *buf, int len)
static void mxcmci_swap_buffers(struct mmc_data *data)
{
- struct scatterlist *sg;
- int i;
+ struct sg_mapping_iter sgm;
+ u32 *buf;
+
+ sg_miter_start(&sgm, data->sg, data->sg_len,
+ SG_MITER_TO_SG | SG_MITER_FROM_SG);
+
+ while (sg_miter_next(&sgm)) {
+ buf = sgm.addr;
+ buffer_swap32(buf, sgm.length);
+ }
- for_each_sg(data->sg, sg, data->sg_len, i)
- buffer_swap32(sg_virt(sg), sg->length);
+ sg_miter_stop(&sgm);
}
#else
static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
@@ -526,10 +533,9 @@ static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)
} while (1);
}
-static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
+static int mxcmci_pull(struct mxcmci_host *host, u32 *buf, int bytes)
{
unsigned int stat;
- u32 *buf = _buf;
while (bytes > 3) {
stat = mxcmci_poll_status(host,
@@ -555,10 +561,9 @@ static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)
return 0;
}
-static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
+static int mxcmci_push(struct mxcmci_host *host, u32 *buf, int bytes)
{
unsigned int stat;
- u32 *buf = _buf;
while (bytes > 3) {
stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);
@@ -586,31 +591,39 @@ static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)
static int mxcmci_transfer_data(struct mxcmci_host *host)
{
struct mmc_data *data = host->req->data;
- struct scatterlist *sg;
- int stat, i;
+ struct sg_mapping_iter sgm;
+ int stat;
+ u32 *buf;
host->data = data;
host->datasize = 0;
+ sg_miter_start(&sgm, data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
if (data->flags & MMC_DATA_READ) {
- for_each_sg(data->sg, sg, data->sg_len, i) {
- stat = mxcmci_pull(host, sg_virt(sg), sg->length);
+ while (sg_miter_next(&sgm)) {
+ buf = sgm.addr;
+ stat = mxcmci_pull(host, buf, sgm.length);
if (stat)
- return stat;
- host->datasize += sg->length;
+ goto transfer_error;
+ host->datasize += sgm.length;
}
} else {
- for_each_sg(data->sg, sg, data->sg_len, i) {
- stat = mxcmci_push(host, sg_virt(sg), sg->length);
+ while (sg_miter_next(&sgm)) {
+ buf = sgm.addr;
+ stat = mxcmci_push(host, buf, sgm.length);
if (stat)
- return stat;
- host->datasize += sg->length;
+ goto transfer_error;
+ host->datasize += sgm.length;
}
stat = mxcmci_poll_status(host, STATUS_WRITE_OP_DONE);
if (stat)
- return stat;
+ goto transfer_error;
}
- return 0;
+
+transfer_error:
+ sg_miter_stop(&sgm);
+ return stat;
}
static void mxcmci_datawork(struct work_struct *work)
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 9fb8995b43a1..088f8ed4fdc4 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -148,10 +148,8 @@ struct mmc_omap_host {
struct work_struct send_stop_work;
struct mmc_data *stop_data;
+ struct sg_mapping_iter sg_miter;
unsigned int sg_len;
- int sg_idx;
- u16 * buffer;
- u32 buffer_bytes_left;
u32 total_bytes_left;
unsigned features;
@@ -456,6 +454,8 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
{
if (host->dma_in_use)
mmc_omap_release_dma(host, data, data->error);
+ else
+ sg_miter_stop(&host->sg_miter);
host->data = NULL;
host->sg_len = 0;
@@ -651,19 +651,6 @@ mmc_omap_cmd_timer(struct timer_list *t)
spin_unlock_irqrestore(&host->slot_lock, flags);
}
-/* PIO only */
-static void
-mmc_omap_sg_to_buf(struct mmc_omap_host *host)
-{
- struct scatterlist *sg;
-
- sg = host->data->sg + host->sg_idx;
- host->buffer_bytes_left = sg->length;
- host->buffer = sg_virt(sg);
- if (host->buffer_bytes_left > host->total_bytes_left)
- host->buffer_bytes_left = host->total_bytes_left;
-}
-
static void
mmc_omap_clk_timer(struct timer_list *t)
{
@@ -676,33 +663,37 @@ mmc_omap_clk_timer(struct timer_list *t)
static void
mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
int n, nwords;
+ u16 *buffer;
- if (host->buffer_bytes_left == 0) {
- host->sg_idx++;
- BUG_ON(host->sg_idx == host->sg_len);
- mmc_omap_sg_to_buf(host);
+ if (!sg_miter_next(sgm)) {
+ /* This should not happen */
+ dev_err(mmc_dev(host->mmc), "ran out of scatterlist prematurely\n");
+ return;
}
+ buffer = sgm->addr;
+
n = 64;
- if (n > host->buffer_bytes_left)
- n = host->buffer_bytes_left;
+ if (n > sgm->length)
+ n = sgm->length;
+ if (n > host->total_bytes_left)
+ n = host->total_bytes_left;
/* Round up to handle odd number of bytes to transfer */
nwords = DIV_ROUND_UP(n, 2);
- host->buffer_bytes_left -= n;
+ sgm->consumed = n;
host->total_bytes_left -= n;
host->data->bytes_xfered += n;
if (write) {
__raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
- host->buffer, nwords);
+ buffer, nwords);
} else {
__raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
- host->buffer, nwords);
+ buffer, nwords);
}
-
- host->buffer += nwords;
}
#ifdef CONFIG_MMC_DEBUG
@@ -956,6 +947,7 @@ static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_reque
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
+ unsigned int miter_flags = SG_MITER_ATOMIC; /* Used from IRQ */
struct mmc_data *data = req->data;
int i, use_dma = 1, block_size;
struct scatterlist *sg;
@@ -990,7 +982,6 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
}
}
- host->sg_idx = 0;
if (use_dma) {
enum dma_data_direction dma_data_dir;
struct dma_async_tx_descriptor *tx;
@@ -1071,7 +1062,11 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
OMAP_MMC_WRITE(host, BUF, 0x1f1f);
host->total_bytes_left = data->blocks * block_size;
host->sg_len = sg_len;
- mmc_omap_sg_to_buf(host);
+ if (data->flags & MMC_DATA_READ)
+ miter_flags |= SG_MITER_TO_SG;
+ else
+ miter_flags |= SG_MITER_FROM_SG;
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, miter_flags);
host->dma_in_use = 0;
}
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index c1fb9740eab0..586f94d4dbfd 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -9,6 +9,7 @@
#ifndef RENESAS_SDHI_H
#define RENESAS_SDHI_H
+#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include "tmio_mmc.h"
@@ -63,7 +64,7 @@ struct renesas_sdhi_of_data_with_quirks {
struct renesas_sdhi_dma {
unsigned long end_flags;
enum dma_slave_buswidth dma_buswidth;
- bool (*filter)(struct dma_chan *chan, void *arg);
+ dma_filter_fn filter;
void (*enable)(struct tmio_mmc_host *host, bool enable);
struct completion dma_dataend;
struct tasklet_struct dma_complete;
diff --git a/drivers/mmc/host/sdhci-esdhc-mcf.c b/drivers/mmc/host/sdhci-esdhc-mcf.c
index a07f8333cd6b..c97363e2d86c 100644
--- a/drivers/mmc/host/sdhci-esdhc-mcf.c
+++ b/drivers/mmc/host/sdhci-esdhc-mcf.c
@@ -299,9 +299,8 @@ static void esdhc_mcf_pltfm_set_bus_width(struct sdhci_host *host, int width)
static void esdhc_mcf_request_done(struct sdhci_host *host,
struct mmc_request *mrq)
{
- struct scatterlist *sg;
+ struct sg_mapping_iter sgm;
u32 *buffer;
- int i;
if (!mrq->data || !mrq->data->bytes_xfered)
goto exit_done;
@@ -313,10 +312,13 @@ static void esdhc_mcf_request_done(struct sdhci_host *host,
* On mcf5441x there is no hw sdma option/flag to select the dma
 * transfer endianness. A swap after the transfer is needed.
*/
- for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i) {
- buffer = (u32 *)sg_virt(sg);
- esdhc_mcf_buffer_swap32(buffer, sg->length);
+ sg_miter_start(&sgm, mrq->data->sg, mrq->data->sg_len,
+ SG_MITER_ATOMIC | SG_MITER_TO_SG | SG_MITER_FROM_SG);
+ while (sg_miter_next(&sgm)) {
+ buffer = sgm.addr;
+ esdhc_mcf_buffer_swap32(buffer, sgm.length);
}
+ sg_miter_stop(&sgm);
exit_done:
mmc_request_done(host->mmc, mrq);
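
The SG_MITER_TO_SG | SG_MITER_FROM_SG combination above declares each mapped chunk as both read and written, which fits an in-place endianness fixup. The swap helper itself already exists in this driver as esdhc_mcf_buffer_swap32(); the body below is only a hedged sketch of what such a helper does, not the driver's code:

#include <linux/kernel.h>
#include <linux/swab.h>

/* Illustrative in-place 32-bit byte swap over a mapped chunk */
static void example_buffer_swap32(u32 *buf, u32 len)
{
	u32 i, words = DIV_ROUND_UP(len, 4);

	for (i = 0; i < words; i++)
		buf[i] = swab32(buf[i]);
}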
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 42d54532cabe..430c1f90037b 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -435,7 +435,7 @@ static int aspeed_sdhci_probe(struct platform_device *pdev)
goto err_sdhci_add;
if (dev->phase_desc)
- mmc_of_parse_clk_phase(host->mmc, &dev->phase_map);
+ mmc_of_parse_clk_phase(&pdev->dev, &dev->phase_map);
ret = sdhci_add_host(host);
if (ret)
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index a1f57af6acfb..ab4b964d4058 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -52,6 +52,20 @@
#define AT_CTRL_SWIN_TH_VAL_MASK GENMASK(31, 24) /* bits [31:24] */
#define AT_CTRL_SWIN_TH_VAL 0x9 /* sampling window threshold */
+/* Sophgo CV18XX specific Registers */
+#define CV18XX_SDHCI_MSHC_CTRL 0x00
+#define CV18XX_EMMC_FUNC_EN BIT(0)
+#define CV18XX_LATANCY_1T BIT(1)
+#define CV18XX_SDHCI_PHY_TX_RX_DLY 0x40
+#define CV18XX_PHY_TX_DLY_MSK GENMASK(6, 0)
+#define CV18XX_PHY_TX_SRC_MSK GENMASK(9, 8)
+#define CV18XX_PHY_TX_SRC_INVERT_CLK_TX 0x1
+#define CV18XX_PHY_RX_DLY_MSK GENMASK(22, 16)
+#define CV18XX_PHY_RX_SRC_MSK GENMASK(25, 24)
+#define CV18XX_PHY_RX_SRC_INVERT_RX_CLK 0x1
+#define CV18XX_SDHCI_PHY_CONFIG 0x4c
+#define CV18XX_PHY_TX_BPS BIT(0)
+
/* Rockchip specific Registers */
#define DWCMSHC_EMMC_DLL_CTRL 0x800
#define DWCMSHC_EMMC_DLL_RXCLK 0x804
@@ -642,6 +656,35 @@ static void th1520_sdhci_reset(struct sdhci_host *host, u8 mask)
}
}
+static void cv18xx_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
+ u32 val, emmc_caps = MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
+
+ sdhci_reset(host, mask);
+
+ if ((host->mmc->caps2 & emmc_caps) == emmc_caps) {
+ val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+ val |= CV18XX_EMMC_FUNC_EN;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+ }
+
+ val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+ val |= CV18XX_LATANCY_1T;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_MSHC_CTRL);
+
+ val = sdhci_readl(host, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
+ val |= CV18XX_PHY_TX_BPS;
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_CONFIG);
+
+ val = (FIELD_PREP(CV18XX_PHY_TX_DLY_MSK, 0) |
+ FIELD_PREP(CV18XX_PHY_TX_SRC_MSK, CV18XX_PHY_TX_SRC_INVERT_CLK_TX) |
+ FIELD_PREP(CV18XX_PHY_RX_DLY_MSK, 0) |
+ FIELD_PREP(CV18XX_PHY_RX_SRC_MSK, CV18XX_PHY_RX_SRC_INVERT_RX_CLK));
+ sdhci_writel(host, val, priv->vendor_specific_area1 + CV18XX_SDHCI_PHY_TX_RX_DLY);
+}
+
static const struct sdhci_ops sdhci_dwcmshc_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -671,6 +714,15 @@ static const struct sdhci_ops sdhci_dwcmshc_th1520_ops = {
.platform_execute_tuning = &th1520_execute_tuning,
};
+static const struct sdhci_ops sdhci_dwcmshc_cv18xx_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = dwcmshc_set_uhs_signaling,
+ .get_max_clock = dwcmshc_get_max_clock,
+ .reset = cv18xx_sdhci_reset,
+ .adma_write_desc = dwcmshc_adma_write_desc,
+};
+
static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
.ops = &sdhci_dwcmshc_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
@@ -700,6 +752,12 @@ static const struct sdhci_pltfm_data sdhci_dwcmshc_th1520_pdata = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
};
+static const struct sdhci_pltfm_data sdhci_dwcmshc_cv18xx_pdata = {
+ .ops = &sdhci_dwcmshc_cv18xx_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+};
+
static int dwcmshc_rk35xx_init(struct sdhci_host *host, struct dwcmshc_priv *dwc_priv)
{
int err;
@@ -769,6 +827,14 @@ static const struct of_device_id sdhci_dwcmshc_dt_ids[] = {
.data = &sdhci_dwcmshc_pdata,
},
{
+ .compatible = "sophgo,cv1800b-dwcmshc",
+ .data = &sdhci_dwcmshc_cv18xx_pdata,
+ },
+ {
+ .compatible = "sophgo,sg2002-dwcmshc",
+ .data = &sdhci_dwcmshc_cv18xx_pdata,
+ },
+ {
.compatible = "thead,th1520-dwcmshc",
.data = &sdhci_dwcmshc_th1520_pdata,
},
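
The CV18XX PHY delay word above is built with the bitfield helpers from <linux/bitfield.h>. A small standalone illustration of the same FIELD_PREP pattern, with a hypothetical mask and value:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_RX_DLY_MSK	GENMASK(22, 16)	/* hypothetical field */

static u32 example_pack_delay(u8 dly)
{
	/* Shifts dly into bits 22:16; FIELD_GET() reverses the operation */
	return FIELD_PREP(EXAMPLE_RX_DLY_MSK, dly);
}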
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 7bfee28116af..d4a02184784a 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -693,6 +693,35 @@ static int sdhci_pci_o2_init_sd_express(struct mmc_host *mmc, struct mmc_ios *io
return 0;
}
+static void sdhci_pci_o2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
+{
+ struct sdhci_pci_chip *chip;
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ u32 scratch_32 = 0;
+ u8 scratch_8 = 0;
+
+ chip = slot->chip;
+
+ if (mode == MMC_POWER_OFF) {
+ /* UnLock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 &= 0x7f;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+
+ /* Set PCR 0x354[16] to switch Clock Source back to OPE Clock */
+ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32);
+ scratch_32 &= ~(O2_SD_SEL_DLL);
+ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32);
+
+ /* Lock WP */
+ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8);
+ scratch_8 |= 0x80;
+ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8);
+ }
+
+ sdhci_set_power(host, mode, vdd);
+}
+
static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
struct sdhci_pci_chip *chip;
@@ -1051,6 +1080,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_pci_o2_set_power,
};
const struct sdhci_pci_fixes sdhci_o2 = {
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index 8cf3a375de65..cc9d28b75eb9 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/ktime.h>
+#include <linux/iopoll.h>
#include <linux/of_address.h>
#include "sdhci-pltfm.h"
@@ -109,6 +110,8 @@
#define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18)
#define XENON_LOGIC_TIMING_VALUE 0x00AA8977
+#define XENON_MAX_PHY_TIMEOUT_LOOPS 100
+
/*
* List offset of PHY registers and some special register values
* in eMMC PHY 5.0 or eMMC PHY 5.1
@@ -216,6 +219,19 @@ static int xenon_alloc_emmc_phy(struct sdhci_host *host)
return 0;
}
+static int xenon_check_stability_internal_clk(struct sdhci_host *host)
+{
+ u32 reg;
+ int err;
+
+ err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE,
+ 1100, 20000, false, host, SDHCI_CLOCK_CONTROL);
+ if (err)
+ dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n");
+
+ return err;
+}
+
/*
* eMMC 5.0/5.1 PHY init/re-init.
* eMMC PHY init should be executed after:
@@ -232,6 +248,11 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs;
+ int ret = xenon_check_stability_internal_clk(host);
+
+ if (ret)
+ return ret;
+
reg = sdhci_readl(host, phy_regs->timing_adj);
reg |= XENON_PHY_INITIALIZAION;
sdhci_writel(host, reg, phy_regs->timing_adj);
@@ -259,18 +280,27 @@ static int xenon_emmc_phy_init(struct sdhci_host *host)
/* get the wait time */
wait /= clock;
wait++;
- /* wait for host eMMC PHY init completes */
- udelay(wait);
- reg = sdhci_readl(host, phy_regs->timing_adj);
- reg &= XENON_PHY_INITIALIZAION;
- if (reg) {
+ /*
+ * The AC5X spec says the bit must be polled until it reads zero.
+ * On AC5X we see cases where this takes longer than the standard
+ * calculated timeout, which is expected given the spec note above:
+ * we must wait for as long as it takes for the bit to clear.
+ * Cap the wait at 100 delay loops so we won't get stuck here
+ * forever:
+ */
+
+ ret = read_poll_timeout(sdhci_readl, reg,
+ !(reg & XENON_PHY_INITIALIZAION),
+ wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait,
+ false, host, phy_regs->timing_adj);
+ if (ret)
dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n",
- wait);
- return -ETIMEDOUT;
- }
+ wait * XENON_MAX_PHY_TIMEOUT_LOOPS);
- return 0;
+ return ret;
}
#define ARMADA_3700_SOC_PAD_1_8V 0x1
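
Both polls above rely on read_poll_timeout() from <linux/iopoll.h>, which re-evaluates a read until a condition holds or a timeout elapses and returns 0 or -ETIMEDOUT. A minimal sketch with a hypothetical status register and ready bit:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

static int example_wait_ready(void __iomem *status_reg)
{
	u32 reg;

	/* reg = readl(status_reg) is retried every 100us, for up to 20ms */
	return read_poll_timeout(readl, reg, reg & BIT(0),
				 100, 20000, false, status_reg);
}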
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 077d711e964e..08b4312af94e 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -227,14 +227,12 @@ struct sh_mmcif_host {
bool dying;
long timeout;
void __iomem *addr;
- u32 *pio_ptr;
spinlock_t lock; /* protect sh_mmcif_host::state */
enum sh_mmcif_state state;
enum sh_mmcif_wait_for wait_for;
struct delayed_work timeout_work;
size_t blocksize;
- int sg_idx;
- int sg_blkidx;
+ struct sg_mapping_iter sg_miter;
bool power;
bool ccs_enable; /* Command Completion Signal support */
bool clk_ctrl2_enable;
@@ -600,32 +598,17 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
return ret;
}
-static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
-{
- struct mmc_data *data = host->mrq->data;
-
- host->sg_blkidx += host->blocksize;
-
- /* data->sg->length must be a multiple of host->blocksize? */
- BUG_ON(host->sg_blkidx > data->sg->length);
-
- if (host->sg_blkidx == data->sg->length) {
- host->sg_blkidx = 0;
- if (++host->sg_idx < data->sg_len)
- host->pio_ptr = sg_virt(++data->sg);
- } else {
- host->pio_ptr = p;
- }
-
- return host->sg_idx != data->sg_len;
-}
-
static void sh_mmcif_single_read(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct mmc_data *data = mrq->data;
+
host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK) + 3;
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len,
+ SG_MITER_TO_SG);
+
host->wait_for = MMCIF_WAIT_FOR_READ;
/* buf read enable */
@@ -634,20 +617,32 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host,
static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
- u32 *p = sg_virt(data->sg);
+ u32 *p;
int i;
if (host->sd_error) {
+ sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
+ if (!sg_miter_next(sgm)) {
+ /* This should not happen on single blocks */
+ sg_miter_stop(sgm);
+ return false;
+ }
+
+ p = sgm->addr;
+
for (i = 0; i < host->blocksize / 4; i++)
*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+ sg_miter_stop(&host->sg_miter);
+
/* buffer read end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
host->wait_for = MMCIF_WAIT_FOR_READ_END;
@@ -658,6 +653,7 @@ static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct mmc_data *data = mrq->data;
if (!data->sg_len || !data->sg->length)
@@ -666,46 +662,63 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK;
+ sg_miter_start(sgm, data->sg, data->sg_len,
+ SG_MITER_TO_SG);
+
+ /* Advance to the first sglist entry */
+ if (!sg_miter_next(sgm)) {
+ sg_miter_stop(sgm);
+ return;
+ }
+
host->wait_for = MMCIF_WAIT_FOR_MREAD;
- host->sg_idx = 0;
- host->sg_blkidx = 0;
- host->pio_ptr = sg_virt(data->sg);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}
static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
- u32 *p = host->pio_ptr;
+ u32 *p;
int i;
if (host->sd_error) {
+ sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
- BUG_ON(!data->sg->length);
+ p = sgm->addr;
for (i = 0; i < host->blocksize / 4; i++)
*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
- if (!sh_mmcif_next_block(host, p))
- return false;
+ sgm->consumed = host->blocksize;
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+ if (!sg_miter_next(sgm)) {
+ sg_miter_stop(sgm);
+ return false;
+ }
+
return true;
}
static void sh_mmcif_single_write(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct mmc_data *data = mrq->data;
+
host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK) + 3;
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len,
+ SG_MITER_FROM_SG);
+
host->wait_for = MMCIF_WAIT_FOR_WRITE;
/* buf write enable */
@@ -714,20 +727,32 @@ static void sh_mmcif_single_write(struct sh_mmcif_host *host,
static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
- u32 *p = sg_virt(data->sg);
+ u32 *p;
int i;
if (host->sd_error) {
+ sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
+ if (!sg_miter_next(sgm)) {
+ /* This should not happen on single blocks */
+ sg_miter_stop(sgm);
+ return false;
+ }
+
+ p = sgm->addr;
+
for (i = 0; i < host->blocksize / 4; i++)
sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+ sg_miter_stop(&host->sg_miter);
+
/* buffer write end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
@@ -738,6 +763,7 @@ static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct mmc_data *data = mrq->data;
if (!data->sg_len || !data->sg->length)
@@ -746,34 +772,46 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
BLOCK_SIZE_MASK;
+ sg_miter_start(sgm, data->sg, data->sg_len,
+ SG_MITER_FROM_SG);
+
+ /* Advance to the first sglist entry */
+ if (!sg_miter_next(sgm)) {
+ sg_miter_stop(sgm);
+ return;
+ }
+
host->wait_for = MMCIF_WAIT_FOR_MWRITE;
- host->sg_idx = 0;
- host->sg_blkidx = 0;
- host->pio_ptr = sg_virt(data->sg);
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}
static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
+ struct sg_mapping_iter *sgm = &host->sg_miter;
struct device *dev = sh_mmcif_host_to_dev(host);
struct mmc_data *data = host->mrq->data;
- u32 *p = host->pio_ptr;
+ u32 *p;
int i;
if (host->sd_error) {
+ sg_miter_stop(sgm);
data->error = sh_mmcif_error_manage(host);
dev_dbg(dev, "%s(): %d\n", __func__, data->error);
return false;
}
- BUG_ON(!data->sg->length);
+ p = sgm->addr;
for (i = 0; i < host->blocksize / 4; i++)
sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
- if (!sh_mmcif_next_block(host, p))
+ sgm->consumed = host->blocksize;
+
+ if (!sg_miter_next(sgm)) {
+ sg_miter_stop(sgm);
return false;
+ }
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index be7f18fd4836..93e912afd3ae 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -259,6 +259,8 @@ static void tmio_mmc_reset_work(struct work_struct *work)
else
mrq->cmd->error = -ETIMEDOUT;
+ /* No new calls yet, but disallow concurrent tmio_mmc_done_work() */
+ host->mrq = ERR_PTR(-EBUSY);
host->cmd = NULL;
host->data = NULL;
@@ -970,6 +972,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
return;
}
+ /* Disallow new mrqs and work handlers to run */
host->mrq = ERR_PTR(-EBUSY);
spin_unlock_irqrestore(&host->lock, flags);
@@ -1004,8 +1007,9 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
"%s.%d: IOS interrupted: clk %u, mode %u",
current->comm, task_pid_nr(current),
ios->clock, ios->power_mode);
- host->mrq = NULL;
+ /* Ready for new mrqs */
+ host->mrq = NULL;
host->clk_cache = ios->clock;
mutex_unlock(&host->ios_lock);
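
The ERR_PTR(-EBUSY) stores above act as a sentinel: host->mrq is then neither NULL (idle) nor a valid request pointer, so concurrent completion paths can detect the fenced-off state. A minimal sketch of the consumer side, with a hypothetical host structure and handler:

#include <linux/err.h>
#include <linux/mmc/core.h>

struct example_host {
	struct mmc_request *mrq;
};

static void example_done_work(struct example_host *host)
{
	struct mmc_request *mrq = host->mrq;

	/* Bail out whether idle (NULL) or fenced off with ERR_PTR(-EBUSY) */
	if (IS_ERR_OR_NULL(mrq))
		return;

	/* ... normal request completion ... */
}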
diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
index 001a468bc149..f0562f712d98 100644
--- a/drivers/mmc/host/wbsd.c
+++ b/drivers/mmc/host/wbsd.c
@@ -1284,8 +1284,6 @@ static int wbsd_scan(struct wbsd_host *host)
continue;
for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
- id = 0xFFFF;
-
host->config = config_ports[i];
host->unlock_code = unlock_codes[j];
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 77d5f1d24489..860380931b6c 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -883,7 +883,6 @@ static void wmt_mci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
- struct resource *res;
u32 reg_tmp;
mmc = platform_get_drvdata(pdev);
@@ -911,9 +910,6 @@ static void wmt_mci_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk_sdmmc);
clk_put(priv->clk_sdmmc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
mmc_free_host(mmc);
dev_info(&pdev->dev, "WMT MCI device removed\n");
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index aa44a23ec045..97a00ec9a4d4 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -37,7 +37,7 @@
/* Info for the block device */
struct block2mtd_dev {
struct list_head list;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct mtd_info mtd;
struct mutex write_mutex;
};
@@ -55,8 +55,7 @@ static struct page *page_read(struct address_space *mapping, pgoff_t index)
/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
- struct address_space *mapping =
- dev->bdev_handle->bdev->bd_inode->i_mapping;
+ struct address_space *mapping = dev->bdev_file->f_mapping;
struct page *page;
pgoff_t index = to >> PAGE_SHIFT; // page index
int pages = len >> PAGE_SHIFT;
@@ -106,8 +105,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
- struct address_space *mapping =
- dev->bdev_handle->bdev->bd_inode->i_mapping;
+ struct address_space *mapping = dev->bdev_file->f_mapping;
struct page *page;
pgoff_t index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
@@ -142,8 +140,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
loff_t to, size_t len, size_t *retlen)
{
struct page *page;
- struct address_space *mapping =
- dev->bdev_handle->bdev->bd_inode->i_mapping;
+ struct address_space *mapping = dev->bdev_file->f_mapping;
pgoff_t index = to >> PAGE_SHIFT; // page index
int offset = to & ~PAGE_MASK; // page offset
int cpylen;
@@ -198,7 +195,7 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
- sync_blockdev(dev->bdev_handle->bdev);
+ sync_blockdev(file_bdev(dev->bdev_file));
return;
}
@@ -210,10 +207,9 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
kfree(dev->mtd.name);
- if (dev->bdev_handle) {
- invalidate_mapping_pages(
- dev->bdev_handle->bdev->bd_inode->i_mapping, 0, -1);
- bdev_release(dev->bdev_handle);
+ if (dev->bdev_file) {
+ invalidate_mapping_pages(dev->bdev_file->f_mapping, 0, -1);
+ fput(dev->bdev_file);
}
kfree(dev);
@@ -223,10 +219,10 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
* This function is marked __ref because it calls the __init marked
* early_lookup_bdev when called from the early boot code.
*/
-static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
+static struct file __ref *mdtblock_early_get_bdev(const char *devname,
blk_mode_t mode, int timeout, struct block2mtd_dev *dev)
{
- struct bdev_handle *bdev_handle = ERR_PTR(-ENODEV);
+ struct file *bdev_file = ERR_PTR(-ENODEV);
#ifndef MODULE
int i;
@@ -234,7 +230,7 @@ static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
* We can't use early_lookup_bdev from a running system.
*/
if (system_state >= SYSTEM_RUNNING)
- return bdev_handle;
+ return bdev_file;
/*
* We might not have the root device mounted at this point.
@@ -253,20 +249,20 @@ static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
wait_for_device_probe();
if (!early_lookup_bdev(devname, &devt)) {
- bdev_handle = bdev_open_by_dev(devt, mode, dev, NULL);
- if (!IS_ERR(bdev_handle))
+ bdev_file = bdev_file_open_by_dev(devt, mode, dev, NULL);
+ if (!IS_ERR(bdev_file))
break;
}
}
#endif
- return bdev_handle;
+ return bdev_file;
}
static struct block2mtd_dev *add_device(char *devname, int erase_size,
char *label, int timeout)
{
const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct block_device *bdev;
struct block2mtd_dev *dev;
char *name;
@@ -279,16 +275,16 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
return NULL;
/* Get a handle on the device */
- bdev_handle = bdev_open_by_path(devname, mode, dev, NULL);
- if (IS_ERR(bdev_handle))
- bdev_handle = mdtblock_early_get_bdev(devname, mode, timeout,
+ bdev_file = bdev_file_open_by_path(devname, mode, dev, NULL);
+ if (IS_ERR(bdev_file))
+ bdev_file = mdtblock_early_get_bdev(devname, mode, timeout,
dev);
- if (IS_ERR(bdev_handle)) {
+ if (IS_ERR(bdev_file)) {
pr_err("error: cannot open device %s\n", devname);
goto err_free_block2mtd;
}
- dev->bdev_handle = bdev_handle;
- bdev = bdev_handle->bdev;
+ dev->bdev_file = bdev_file;
+ bdev = file_bdev(bdev_file);
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
pr_err("attempting to use an MTD device as a block device\n");
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f0526dcc2162..3caa0717d46c 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -277,6 +277,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
struct mtd_blktrans_ops *tr = new->tr;
struct mtd_blktrans_dev *d;
+ struct queue_limits lim = { };
int last_devnum = -1;
struct gendisk *gd;
int ret;
@@ -331,9 +332,13 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
if (ret)
goto out_kfree_tag_set;
+
+ lim.logical_block_size = tr->blksize;
+ if (tr->discard)
+ lim.max_hw_discard_sectors = UINT_MAX;
/* Create gendisk */
- gd = blk_mq_alloc_disk(new->tag_set, new);
+ gd = blk_mq_alloc_disk(new->tag_set, &lim, new);
if (IS_ERR(gd)) {
ret = PTR_ERR(gd);
goto out_free_tag_set;
@@ -371,14 +376,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (tr->flush)
blk_queue_write_cache(new->rq, true, false);
- blk_queue_logical_block_size(new->rq, tr->blksize);
-
blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);
- if (tr->discard)
- blk_queue_max_discard_sectors(new->rq, UINT_MAX);
-
gd->queue = new->rq;
if (new->readonly)
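
Queue limits are now handed to blk_mq_alloc_disk() at allocation time rather than set one by one on the live queue afterwards. The general pattern, as a sketch with illustrative field values:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set,
					  void *queuedata)
{
	struct queue_limits lim = {
		.logical_block_size	= 512,		/* illustrative */
		.max_hw_discard_sectors	= UINT_MAX,	/* enable discard */
	};

	/* Limits are applied atomically while the queue is created */
	return blk_mq_alloc_disk(set, &lim, queuedata);
}

The ubiblock conversion further below uses the same call, passing .max_segments instead.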
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e451b28840d5..5887feb347a4 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -621,6 +621,7 @@ static void mtd_check_of_node(struct mtd_info *mtd)
if (plen == mtd_name_len &&
!strncmp(mtd->name, pname + offset, plen)) {
mtd_set_of_node(mtd, mtd_dn);
+ of_node_put(mtd_dn);
break;
}
}
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index a46698744850..5b0f5a9cef81 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -290,16 +290,13 @@ static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,32, 30),
MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,64, 30),
- MARVELL_LAYOUT( 2048, 512, 12, 3, 2, 704, 0, 30,640, 0, 30),
- MARVELL_LAYOUT( 2048, 512, 16, 5, 4, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 2048, 512, 16, 4, 4, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
- MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
- MARVELL_LAYOUT( 4096, 512, 12, 6, 5, 704, 0, 30,576, 32, 30),
- MARVELL_LAYOUT( 4096, 512, 16, 9, 8, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 4096, 512, 8, 4, 4, 1024, 0, 30, 0, 64, 30),
+ MARVELL_LAYOUT( 4096, 512, 16, 8, 8, 512, 0, 30, 0, 32, 30),
MARVELL_LAYOUT( 8192, 512, 4, 4, 4, 2048, 0, 30, 0, 0, 0),
- MARVELL_LAYOUT( 8192, 512, 8, 9, 8, 1024, 0, 30, 0, 160, 30),
- MARVELL_LAYOUT( 8192, 512, 12, 12, 11, 704, 0, 30,448, 64, 30),
- MARVELL_LAYOUT( 8192, 512, 16, 17, 16, 512, 0, 30, 0, 32, 30),
+ MARVELL_LAYOUT( 8192, 512, 8, 8, 8, 1024, 0, 30, 0, 160, 30),
+ MARVELL_LAYOUT( 8192, 512, 16, 16, 16, 512, 0, 30, 0, 32, 30),
};
/**
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index 987710e09441..6023cba748bb 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -186,7 +186,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
{
u8 status2;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
- &status2);
+ spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@@ -207,6 +207,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
* report the maximum of 4 in this case
*/
/* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
+ status2 = *(spinand->scratchbuf);
return ((status & STATUS_ECC_MASK) >> 2) |
((status2 & STATUS_ECC_MASK) >> 4);
@@ -228,7 +229,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
{
u8 status2;
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
- &status2);
+ spinand->scratchbuf);
int ret;
switch (status & STATUS_ECC_MASK) {
@@ -248,6 +249,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
* 1 ... 4 bits are flipped (and corrected)
*/
/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
+ status2 = *(spinand->scratchbuf);
return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
case STATUS_ECC_UNCOR_ERROR:
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 654bd7372cd8..5c8fdcc088a0 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -348,6 +348,9 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
int ubiblock_create(struct ubi_volume_info *vi)
{
+ struct queue_limits lim = {
+ .max_segments = UBI_MAX_SG_COUNT,
+ };
struct ubiblock *dev;
struct gendisk *gd;
u64 disk_capacity;
@@ -393,7 +396,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
/* Initialize the gendisk of this ubiblock device */
- gd = blk_mq_alloc_disk(&dev->tag_set, dev);
+ gd = blk_mq_alloc_disk(&dev->tag_set, &lim, dev);
if (IS_ERR(gd)) {
ret = PTR_ERR(gd);
goto out_free_tags;
@@ -416,7 +419,6 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->gd = gd;
dev->rq = gd->queue;
- blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
list_add_tail(&dev->list, &ubiblock_devices);
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 68e79b1272f6..6d15ab3bfbbc 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -3063,15 +3063,10 @@ static int amt_dev_init(struct net_device *dev)
int err;
amt->dev = dev;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
err = gro_cells_init(&amt->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
return 0;
}
@@ -3081,7 +3076,6 @@ static void amt_dev_uninit(struct net_device *dev)
struct amt_dev *amt = netdev_priv(dev);
gro_cells_destroy(&amt->gro_cells);
- free_percpu(dev->tstats);
}
static const struct net_device_ops amt_netdev_ops = {
@@ -3090,7 +3084,6 @@ static const struct net_device_ops amt_netdev_ops = {
.ndo_open = amt_dev_open,
.ndo_stop = amt_dev_stop,
.ndo_start_xmit = amt_dev_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
};
static void amt_link_setup(struct net_device *dev)
@@ -3111,6 +3104,7 @@ static void amt_link_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
eth_hw_addr_random(dev);
eth_zero_addr(dev->broadcast);
ether_setup(dev);
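
Setting dev->pcpu_stat_type to NETDEV_PCPU_STAT_TSTATS makes the core allocate and free dev->tstats and answer ndo_get_stats64 itself, which is what allows the driver-side allocation, free_percpu() and dev_get_tstats64 hook above to be deleted. The driver-visible change reduces to one line in the setup callback; a sketch with a hypothetical device:

#include <linux/netdevice.h>

static void example_link_setup(struct net_device *dev)
{
	/* Core now owns the per-CPU tstats lifetime and stats reporting */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}

The bareudp hunk further below performs the identical conversion.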
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index 8c651fdee039..57f1729066f2 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -186,4 +186,5 @@ static void __exit arcnet_raw_exit(void)
module_init(arcnet_raw_init);
module_exit(arcnet_raw_exit);
+MODULE_DESCRIPTION("ARCnet raw mode packet interface module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 8c3ccc7c83cd..53d10a04d1bd 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -312,6 +312,7 @@ module_param(node, int, 0);
module_param(io, int, 0);
module_param(irq, int, 0);
module_param_string(device, device, sizeof(device), 0);
+MODULE_DESCRIPTION("ARCnet COM90xx RIM I chipset driver");
MODULE_LICENSE("GPL");
static struct net_device *my_dev;
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index d9e052c49ba1..166bfc3c8e6c 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -108,6 +108,7 @@ static int go_tx(struct net_device *dev);
static int debug = ARCNET_DEBUG;
module_param(debug, int, 0);
+MODULE_DESCRIPTION("ARCnet core driver");
MODULE_LICENSE("GPL");
static int __init arcnet_init(void)
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index c09b567845e1..7a0a79973769 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -265,4 +265,5 @@ static void __exit capmode_module_exit(void)
module_init(capmode_module_init);
module_exit(capmode_module_exit);
+MODULE_DESCRIPTION("ARCnet CAP mode packet interface module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 7b5c8bb02f11..c5e571ec94c9 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -61,6 +61,7 @@ module_param(timeout, int, 0);
module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset PCI driver");
MODULE_LICENSE("GPL");
static void led_tx_set(struct led_classdev *led_cdev,
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 06e1651b594b..a0053e3992a3 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -399,6 +399,7 @@ EXPORT_SYMBOL(com20020_found);
EXPORT_SYMBOL(com20020_netdev_ops);
#endif
+MODULE_DESCRIPTION("ARCnet COM20020 chipset core driver");
MODULE_LICENSE("GPL");
#ifdef MODULE
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index dc3253b318da..75f08aa7528b 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -97,6 +97,7 @@ module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset PCMCIA driver");
MODULE_LICENSE("GPL");
/*====================================================================*/
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 37b47749fc8b..3b463fbc6402 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -350,6 +350,7 @@ static char device[9]; /* use eg. device=arc1 to change name */
module_param_hw(io, int, ioport, 0);
module_param_hw(irq, int, irq, 0);
module_param_string(device, device, sizeof(device), 0);
+MODULE_DESCRIPTION("ARCnet COM90xx IO mapped chipset driver");
MODULE_LICENSE("GPL");
#ifndef MODULE
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index f49dae194284..b3b287c16561 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -645,6 +645,7 @@ static void com90xx_copy_from_card(struct net_device *dev, int bufnum,
TIME(dev, "memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
}
+MODULE_DESCRIPTION("ARCnet COM90xx normal chipset driver");
MODULE_LICENSE("GPL");
static int __init com90xx_init(void)
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index a7752a5b647f..46519ca63a0a 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -78,6 +78,7 @@ static void __exit arcnet_rfc1051_exit(void)
module_init(arcnet_rfc1051_init);
module_exit(arcnet_rfc1051_exit);
+MODULE_DESCRIPTION("ARCNet packet format (RFC 1051) module");
MODULE_LICENSE("GPL");
/* Determine a packet's protocol ID.
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index a4c856282674..0edf35d971c5 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -35,6 +35,7 @@
#include "arcdevice.h"
+MODULE_DESCRIPTION("ARCNet packet format (RFC 1201) module");
MODULE_LICENSE("GPL");
static __be16 type_trans(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 31377bb1cc97..339db6e4a1d5 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -194,15 +194,10 @@ static int bareudp_init(struct net_device *dev)
struct bareudp_dev *bareudp = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&bareudp->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
+
return 0;
}
@@ -211,7 +206,6 @@ static void bareudp_uninit(struct net_device *dev)
struct bareudp_dev *bareudp = netdev_priv(dev);
gro_cells_destroy(&bareudp->gro_cells);
- free_percpu(dev->tstats);
}
static struct socket *bareudp_create_sock(struct net *net, __be16 port)
@@ -529,7 +523,6 @@ static const struct net_device_ops bareudp_netdev_ops = {
.ndo_open = bareudp_open,
.ndo_stop = bareudp_stop,
.ndo_start_xmit = bareudp_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
};
@@ -567,6 +560,7 @@ static void bareudp_setup(struct net_device *dev)
netif_keep_dst(dev);
dev->priv_flags |= IFF_NO_QUEUE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}
static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -760,23 +754,18 @@ static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
unregister_netdevice_queue(bareudp->dev, head);
}
-static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
+static void __net_exit bareudp_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_kill_list)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
- bareudp_destroy_tunnels(net, &list);
-
- /* unregister the devices gathered above */
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ bareudp_destroy_tunnels(net, dev_kill_list);
}
static struct pernet_operations bareudp_net_ops = {
.init = bareudp_init_net,
- .exit_batch = bareudp_exit_batch_net,
+ .exit_batch_rtnl = bareudp_exit_batch_rtnl,
.id = &bareudp_net_id,
.size = sizeof(struct bareudp_net),
};
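
The new exit_batch_rtnl pernet hook runs with the RTNL already held and is passed a shared kill list; the caller takes care of unregister_netdevice_many() and the unlock, so the per-driver lock/unregister boilerplate disappears. The shape of such a hook, sketched with hypothetical per-net data:

#include <linux/netdevice.h>
#include <net/net_namespace.h>

static void __net_exit example_exit_batch_rtnl(struct list_head *net_list,
					       struct list_head *dev_kill_list)
{
	struct net *net;

	/* RTNL is held by the caller across the whole batch */
	list_for_each_entry(net, net_list, exit_list) {
		/* queue this namespace's devices on dev_kill_list, e.g.
		 * unregister_netdevice_queue(dev, dev_kill_list);
		 */
	}
}

static struct pernet_operations example_net_ops = {
	.exit_batch_rtnl = example_exit_batch_rtnl,
};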
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c99ffe6c683a..c6807e473ab7 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -82,10 +82,6 @@ enum ad_link_speed_type {
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
-static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
- 0, 0, 0, 0, 0, 0
-};
-
static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
@@ -106,6 +102,9 @@ static void ad_agg_selection_logic(struct aggregator *aggregator,
static void ad_clear_agg(struct aggregator *aggregator);
static void ad_initialize_agg(struct aggregator *aggregator);
static void ad_initialize_port(struct port *port, int lacp_fast);
+static void ad_enable_collecting(struct port *port);
+static void ad_disable_distributing(struct port *port,
+ bool *update_slave_arr);
static void ad_enable_collecting_distributing(struct port *port,
bool *update_slave_arr);
static void ad_disable_collecting_distributing(struct port *port,
@@ -172,8 +171,37 @@ static inline int __agg_has_partner(struct aggregator *agg)
}
/**
+ * __disable_distributing_port - disable the port's slave for distributing.
+ * Port will still be able to collect.
+ * @port: the port we're looking at
+ *
+ * This will disable only distributing on the port's slave.
+ */
+static void __disable_distributing_port(struct port *port)
+{
+ bond_set_slave_tx_disabled_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
+}
+
+/**
+ * __enable_collecting_port - enable the port's slave for collecting,
+ * if it's up
+ * @port: the port we're looking at
+ *
+ * This will enable only collecting on the port's slave.
+ */
+static void __enable_collecting_port(struct port *port)
+{
+ struct slave *slave = port->slave;
+
+ if (slave->link == BOND_LINK_UP && bond_slave_is_up(slave))
+ bond_set_slave_rx_enabled_flags(slave, BOND_SLAVE_NOTIFY_LATER);
+}
+
+/**
* __disable_port - disable the port's slave
* @port: the port we're looking at
+ *
+ * This will disable both collecting and distributing on the port's slave.
*/
static inline void __disable_port(struct port *port)
{
@@ -183,6 +211,8 @@ static inline void __disable_port(struct port *port)
/**
* __enable_port - enable the port's slave, if it's up
* @port: the port we're looking at
+ *
+ * This will enable both collecting and distributing on the port's slave.
*/
static inline void __enable_port(struct port *port)
{
@@ -193,10 +223,27 @@ static inline void __enable_port(struct port *port)
}
/**
- * __port_is_enabled - check if the port's slave is in active state
+ * __port_move_to_attached_state - check if port should transition back to attached
+ * state.
+ * @port: the port we're looking at
+ */
+static bool __port_move_to_attached_state(struct port *port)
+{
+ if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
+ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION))
+ port->sm_mux_state = AD_MUX_ATTACHED;
+
+ return port->sm_mux_state == AD_MUX_ATTACHED;
+}
+
+/**
+ * __port_is_collecting_distributing - check if the port's slave is in the
+ * combined collecting/distributing state
* @port: the port we're looking at
*/
-static inline int __port_is_enabled(struct port *port)
+static int __port_is_collecting_distributing(struct port *port)
{
return bond_is_active_slave(port->slave);
}
@@ -942,6 +989,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
*/
static void ad_mux_machine(struct port *port, bool *update_slave_arr)
{
+ struct bonding *bond = __get_bond_by_port(port);
mux_states_t last_state;
/* keep current State Machine state to compare later if it was
@@ -999,9 +1047,13 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if ((port->sm_vars & AD_PORT_SELECTED) &&
(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
!__check_agg_selection_timer(port)) {
- if (port->aggregator->is_active)
- port->sm_mux_state =
- AD_MUX_COLLECTING_DISTRIBUTING;
+ if (port->aggregator->is_active) {
+ int state = AD_MUX_COLLECTING_DISTRIBUTING;
+
+ if (!bond->params.coupled_control)
+ state = AD_MUX_COLLECTING;
+ port->sm_mux_state = state;
+ }
} else if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY)) {
/* if UNSELECTED or STANDBY */
@@ -1019,11 +1071,45 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
}
break;
case AD_MUX_COLLECTING_DISTRIBUTING:
+ if (!__port_move_to_attached_state(port)) {
+ /* if port state hasn't changed make
+ * sure that a collecting distributing
+ * port in an active aggregator is enabled
+ */
+ if (port->aggregator->is_active &&
+ !__port_is_collecting_distributing(port)) {
+ __enable_port(port);
+ *update_slave_arr = true;
+ }
+ }
+ break;
+ case AD_MUX_COLLECTING:
+ if (!__port_move_to_attached_state(port)) {
+ if ((port->sm_vars & AD_PORT_SELECTED) &&
+ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
+ (port->partner_oper.port_state & LACP_STATE_COLLECTING)) {
+ port->sm_mux_state = AD_MUX_DISTRIBUTING;
+ } else {
+ /* If port state hasn't changed, make sure that a collecting
+ * port is enabled for an active aggregator.
+ */
+ struct slave *slave = port->slave;
+
+ if (port->aggregator->is_active &&
+ bond_is_slave_rx_disabled(slave)) {
+ ad_enable_collecting(port);
+ *update_slave_arr = true;
+ }
+ }
+ }
+ break;
+ case AD_MUX_DISTRIBUTING:
if (!(port->sm_vars & AD_PORT_SELECTED) ||
(port->sm_vars & AD_PORT_STANDBY) ||
+ !(port->partner_oper.port_state & LACP_STATE_COLLECTING) ||
!(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
!(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) {
- port->sm_mux_state = AD_MUX_ATTACHED;
+ port->sm_mux_state = AD_MUX_COLLECTING;
} else {
/* if port state hasn't changed make
* sure that a collecting distributing
@@ -1031,7 +1117,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
*/
if (port->aggregator &&
port->aggregator->is_active &&
- !__port_is_enabled(port)) {
+ !__port_is_collecting_distributing(port)) {
__enable_port(port);
*update_slave_arr = true;
}
@@ -1082,6 +1168,20 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
update_slave_arr);
port->ntt = true;
break;
+ case AD_MUX_COLLECTING:
+ port->actor_oper_port_state |= LACP_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
+ ad_enable_collecting(port);
+ ad_disable_distributing(port, update_slave_arr);
+ port->ntt = true;
+ break;
+ case AD_MUX_DISTRIBUTING:
+ port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
+ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
+ ad_enable_collecting_distributing(port,
+ update_slave_arr);
+ break;
default:
break;
}
@@ -1484,7 +1584,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
(aggregator->partner_system_priority == port->partner_oper.system_priority) &&
(aggregator->partner_oper_aggregator_key == port->partner_oper.key)
) &&
- ((!MAC_ADDRESS_EQUAL(&(port->partner_oper.system), &(null_mac_addr)) && /* partner answers */
+ ((__agg_has_partner(aggregator) && /* partner answers */
!aggregator->is_individual) /* but is not individual OR */
)
) {
@@ -1907,6 +2007,43 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
}
/**
+ * ad_enable_collecting - enable a port's receive
+ * @port: the port we're looking at
+ *
+ * Enable @port if it's in an active aggregator
+ */
+static void ad_enable_collecting(struct port *port)
+{
+ if (port->aggregator->is_active) {
+ struct slave *slave = port->slave;
+
+ slave_dbg(slave->bond->dev, slave->dev,
+ "Enabling collecting on port %d (LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
+ __enable_collecting_port(port);
+ }
+}
+
+/**
+ * ad_disable_distributing - disable a port's transmit
+ * @port: the port we're looking at
+ * @update_slave_arr: Does slave array need update?
+ */
+static void ad_disable_distributing(struct port *port, bool *update_slave_arr)
+{
+ if (port->aggregator && __agg_has_partner(port->aggregator)) {
+ slave_dbg(port->slave->bond->dev, port->slave->dev,
+ "Disabling distributing on port %d (LAG %d)\n",
+ port->actor_port_number,
+ port->aggregator->aggregator_identifier);
+ __disable_distributing_port(port);
+ /* Slave array needs an update */
+ *update_slave_arr = true;
+ }
+}
+
+/**
* ad_enable_collecting_distributing - enable a port's transmit/receive
* @port: the port we're looking at
* @update_slave_arr: Does slave array need update?
@@ -1935,9 +2072,7 @@ static void ad_enable_collecting_distributing(struct port *port,
static void ad_disable_collecting_distributing(struct port *port,
bool *update_slave_arr)
{
- if (port->aggregator &&
- !MAC_ADDRESS_EQUAL(&(port->aggregator->partner_system),
- &(null_mac_addr))) {
+ if (port->aggregator && __agg_has_partner(port->aggregator)) {
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Disabling port %d (LAG %d)\n",
port->actor_port_number,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4e0600c7b050..2c5ed0a7cb18 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1811,7 +1811,7 @@ void bond_xdp_set_features(struct net_device *bond_dev)
ASSERT_RTNL();
- if (!bond_xdp_check(bond)) {
+ if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) {
xdp_clear_features_flag(bond_dev);
return;
}
@@ -1819,6 +1819,8 @@ void bond_xdp_set_features(struct net_device *bond_dev)
bond_for_each_slave(bond, slave, iter)
val &= slave->dev->xdp_features;
+ val &= ~NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
xdp_set_features_flag(bond_dev, val);
}
@@ -2609,7 +2611,7 @@ static int bond_miimon_inspect(struct bonding *bond)
bond_propose_link_state(slave, BOND_LINK_FAIL);
commit++;
slave->delay = bond->params.downdelay;
- if (slave->delay) {
+ if (slave->delay && net_ratelimit()) {
slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
(BOND_MODE(bond) ==
BOND_MODE_ACTIVEBACKUP) ?
@@ -2623,9 +2625,10 @@ static int bond_miimon_inspect(struct bonding *bond)
/* recovered before downdelay expired */
bond_propose_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
- slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
- (bond->params.downdelay - slave->delay) *
- bond->params.miimon);
+ if (net_ratelimit())
+ slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
+ (bond->params.downdelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@ -2647,7 +2650,7 @@ static int bond_miimon_inspect(struct bonding *bond)
commit++;
slave->delay = bond->params.updelay;
- if (slave->delay) {
+ if (slave->delay && net_ratelimit()) {
slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
ignore_updelay ? 0 :
bond->params.updelay *
@@ -2657,9 +2660,10 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_BACK:
if (!link_state) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
- slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
- (bond->params.updelay - slave->delay) *
- bond->params.miimon);
+ if (net_ratelimit())
+ slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
+ (bond->params.updelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@ -5909,9 +5913,6 @@ void bond_setup(struct net_device *bond_dev)
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
-
- if (bond_xdp_check(bond))
- bond_dev->xdp_features = NETDEV_XDP_ACT_MASK;
}
/* Destroy a bonding device.
@@ -6306,6 +6307,7 @@ static int __init bond_check_params(struct bond_params *params)
params->ad_actor_sys_prio = ad_actor_sys_prio;
eth_zero_addr(params->ad_actor_system);
params->ad_user_port_key = ad_user_port_key;
+ params->coupled_control = 1;
if (packets_per_slave > 0) {
params->reciprocal_packets_per_slave =
reciprocal_value(packets_per_slave);
@@ -6415,28 +6417,41 @@ static int __net_init bond_net_init(struct net *net)
return 0;
}
-static void __net_exit bond_net_exit_batch(struct list_head *net_list)
+/* According to commit 69b0216ac255 ("bonding: fix bonding_masters
+ * race condition in bond unloading") we need to remove sysfs files
+ * before we remove our devices (done later in bond_net_exit_batch_rtnl())
+ */
+static void __net_exit bond_net_pre_exit(struct net *net)
+{
+ struct bond_net *bn = net_generic(net, bond_net_id);
+
+ bond_destroy_sysfs(bn);
+}
+
+static void __net_exit bond_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_kill_list)
{
struct bond_net *bn;
struct net *net;
- LIST_HEAD(list);
-
- list_for_each_entry(net, net_list, exit_list) {
- bn = net_generic(net, bond_net_id);
- bond_destroy_sysfs(bn);
- }
/* Kill off any bonds created after unregistering bond rtnl ops */
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
struct bonding *bond, *tmp_bond;
bn = net_generic(net, bond_net_id);
list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, &list);
+ unregister_netdevice_queue(bond->dev, dev_kill_list);
}
- unregister_netdevice_many(&list);
- rtnl_unlock();
+}
+
+/* According to commit 23fa5c2caae0 ("bonding: destroy proc directory
+ * only after all bonds are gone") bond_destroy_proc_dir() is called
+ * after bond_net_exit_batch_rtnl() has completed.
+ */
+static void __net_exit bond_net_exit_batch(struct list_head *net_list)
+{
+ struct bond_net *bn;
+ struct net *net;
list_for_each_entry(net, net_list, exit_list) {
bn = net_generic(net, bond_net_id);
@@ -6446,6 +6461,8 @@ static void __net_exit bond_net_exit_batch(struct list_head *net_list)
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
+ .pre_exit = bond_net_pre_exit,
+ .exit_batch_rtnl = bond_net_exit_batch_rtnl,
.exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index cfa74cf8bb1a..29b4c3d1b9b6 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -122,6 +122,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
[IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
[IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
+ [IFLA_BOND_COUPLED_CONTROL] = { .type = NLA_U8 },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -549,6 +550,16 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
return err;
}
+ if (data[IFLA_BOND_COUPLED_CONTROL]) {
+ int coupled_control = nla_get_u8(data[IFLA_BOND_COUPLED_CONTROL]);
+
+ bond_opt_initval(&newval, coupled_control);
+ err = __bond_opt_set(bond, BOND_OPT_COUPLED_CONTROL, &newval,
+ data[IFLA_BOND_COUPLED_CONTROL], extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -615,6 +626,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
/* IFLA_BOND_NS_IP6_TARGET */
nla_total_size(sizeof(struct nlattr)) +
nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_COUPLED_CONTROL */
0;
}
@@ -774,6 +786,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.missed_max))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_COUPLED_CONTROL,
+ bond->params.coupled_control))
+ goto nla_put_failure;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index f3f27f0bd2a6..4cdbc7e084f4 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -84,7 +84,8 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_missed_max_set(struct bonding *bond,
const struct bond_opt_value *newval);
-
+static int bond_option_coupled_control_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
{ "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
@@ -232,6 +233,12 @@ static const struct bond_opt_value bond_missed_max_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_coupled_control_tbl[] = {
+ { "on", 1, BOND_VALFLAG_DEFAULT},
+ { "off", 0, 0},
+ { NULL, -1, 0},
+};
+
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -496,6 +503,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.desc = "Delay between each peer notification on failover event, in milliseconds",
.values = bond_peer_notif_delay_tbl,
.set = bond_option_peer_notif_delay_set
+ },
+ [BOND_OPT_COUPLED_CONTROL] = {
+ .id = BOND_OPT_COUPLED_CONTROL,
+ .name = "coupled_control",
+ .desc = "Opt into using coupled control MUX for LACP states",
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+ .flags = BOND_OPTFLAG_IFDOWN,
+ .values = bond_coupled_control_tbl,
+ .set = bond_option_coupled_control_set,
}
};
@@ -1692,3 +1708,13 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
bond->params.ad_user_port_key = newval->value;
return 0;
}
+
+static int bond_option_coupled_control_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_info(bond->dev, "Setting coupled_control to %s (%llu)\n",
+ newval->string, newval->value);
+
+ bond->params.coupled_control = newval->value;
+ return 0;
+}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index eb410714afc2..2e31db55d927 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -168,6 +168,8 @@ config CAN_KVASER_PCIEFD
Kvaser Mini PCI Express 2xHS v2
Kvaser Mini PCI Express 1xCAN v3
Kvaser Mini PCI Express 2xCAN v3
+ Kvaser M.2 PCIe 4xCAN
+ Kvaser PCIe 8xCAN
config CAN_SLCAN
tristate "Serial / USB serial CAN Adaptors (slcan)"
@@ -218,6 +220,7 @@ config CAN_XILINXCAN
source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
source "drivers/net/can/ctucanfd/Kconfig"
+source "drivers/net/can/esd/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index ff8f76295d13..4669cd51e7bf 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_CAN_VXCAN) += vxcan.o
obj-$(CONFIG_CAN_SLCAN) += slcan/
obj-y += dev/
+obj-y += esd/
obj-y += rcar/
obj-y += spi/
obj-y += usb/
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 036d85ef07f5..dfdc039d92a6 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -346,7 +346,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
/* Neither of TDC parameters nor TDC flags are
* provided: do calculation
*/
- can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming,
+ can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt,
&priv->ctrlmode, priv->ctrlmode_supported);
} /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
* turned off. TDC is disabled: do nothing
diff --git a/drivers/net/can/esd/Kconfig b/drivers/net/can/esd/Kconfig
new file mode 100644
index 000000000000..54bfc366634c
--- /dev/null
+++ b/drivers/net/can/esd/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CAN_ESD_402_PCI
+ tristate "esd electronics gmbh CAN-PCI(e)/402 family"
+ depends on PCI && HAS_DMA
+ help
+ Support for the C402 card family from esd electronics gmbh.
+ This card family is based on the ESDACC CAN controller and is
+ available in several form factors: PCI, PCIe, PCIe Mini,
+ M.2 PCIe, CPCIserial, PMC, XMC (see https://esd.eu/en).
+
+ This driver can also be built as a module. In this case the
+ module will be called esd_402_pci.
diff --git a/drivers/net/can/esd/Makefile b/drivers/net/can/esd/Makefile
new file mode 100644
index 000000000000..5dd2d470c286
--- /dev/null
+++ b/drivers/net/can/esd/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for esd gmbh ESDACC controller driver
+#
+esd_402_pci-objs := esdacc.o esd_402_pci-core.o
+
+obj-$(CONFIG_CAN_ESD_402_PCI) += esd_402_pci.o
diff --git a/drivers/net/can/esd/esd_402_pci-core.c b/drivers/net/can/esd/esd_402_pci-core.c
new file mode 100644
index 000000000000..b7cdcffd0e45
--- /dev/null
+++ b/drivers/net/can/esd/esd_402_pci-core.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include <linux/can/dev.h>
+#include <linux/can.h>
+#include <linux/can/netlink.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "esdacc.h"
+
+#define ESD_PCI_DEVICE_ID_PCIE402 0x0402
+
+#define PCI402_FPGA_VER_MIN 0x003d
+#define PCI402_MAX_CORES 6
+#define PCI402_BAR 0
+#define PCI402_IO_OV_OFFS 0
+#define PCI402_IO_PCIEP_OFFS 0x10000
+#define PCI402_IO_LEN_TOTAL 0x20000
+#define PCI402_IO_LEN_CORE 0x2000
+#define PCI402_PCICFG_MSICAP 0x50
+
+#define PCI402_DMA_MASK DMA_BIT_MASK(32)
+#define PCI402_DMA_SIZE ALIGN(0x10000, PAGE_SIZE)
+
+#define PCI402_PCIEP_OF_INT_ENABLE 0x0050
+#define PCI402_PCIEP_OF_BM_ADDR_LO 0x1000
+#define PCI402_PCIEP_OF_BM_ADDR_HI 0x1004
+#define PCI402_PCIEP_OF_MSI_ADDR_LO 0x1008
+#define PCI402_PCIEP_OF_MSI_ADDR_HI 0x100c
+
+struct pci402_card {
+ /* Actually mapped I/O space; all other iomem pointers are derived from this */
+ void __iomem *addr;
+ void __iomem *addr_pciep;
+
+ void *dma_buf;
+ dma_addr_t dma_hnd;
+
+ struct acc_ov ov;
+ struct acc_core *cores;
+
+ bool msi_enabled;
+};
+
+/* The BTR register capabilities described by the can_bittiming_const structures
+ * below are valid since esdACC version 0x0032.
+ */
+
+/* Used if the esdACC FPGA is built as CAN-Classic version. */
+static const struct can_bittiming_const pci402_bittiming_const = {
+ .name = "esd_402",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+/* Used if the esdACC FPGA is built as CAN-FD version. */
+static const struct can_bittiming_const pci402_bittiming_const_canfd = {
+ .name = "esd_402fd",
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static const struct net_device_ops pci402_acc_netdev_ops = {
+ .ndo_open = acc_open,
+ .ndo_stop = acc_close,
+ .ndo_start_xmit = acc_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
+};
+
+static const struct ethtool_ops pci402_acc_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
+static irqreturn_t pci402_interrupt(int irq, void *dev_id)
+{
+ struct pci_dev *pdev = dev_id;
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ irqreturn_t irq_status;
+
+ irq_status = acc_card_interrupt(&card->ov, card->cores);
+
+ return irq_status;
+}
+
+static int pci402_set_msiconfig(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ u32 addr_lo_offs = 0;
+ u32 addr_lo = 0;
+ u32 addr_hi = 0;
+ u32 data = 0;
+ u16 csr = 0;
+ int err;
+
+ /* The FPGA hard IP PCIe core implements a 64-bit MSI Capability
+ * Register Format
+ */
+ err = pci_read_config_word(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_FLAGS, &csr);
+ if (err)
+ goto failed;
+
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_LO,
+ &addr_lo);
+ if (err)
+ goto failed;
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_HI,
+ &addr_hi);
+ if (err)
+ goto failed;
+
+ err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_DATA_64,
+ &data);
+ if (err)
+ goto failed;
+
+ addr_lo_offs = addr_lo & 0x0000ffff;
+ addr_lo &= 0xffff0000;
+
+ if (addr_hi)
+ addr_lo |= 1; /* To enable 64-Bit addressing in PCIe endpoint */
+
+ if (!(csr & PCI_MSI_FLAGS_ENABLE)) {
+ err = -EINVAL;
+ goto failed;
+ }
+
+ iowrite32(addr_lo, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_LO);
+ iowrite32(addr_hi, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_HI);
+ acc_ov_write32(&card->ov, ACC_OV_OF_MSI_ADDRESSOFFSET, addr_lo_offs);
+ acc_ov_write32(&card->ov, ACC_OV_OF_MSI_DATA, data);
+
+ return 0;
+
+failed:
+ pci_warn(pdev, "Error while setting MSI configuration:\n"
+ "CSR: 0x%.4x, addr: 0x%.8x%.8x, offs: 0x%.4x, data: 0x%.8x\n",
+ csr, addr_hi, addr_lo, addr_lo_offs, data);
+
+ return err;
+}
+
+static int pci402_init_card(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ card->ov.addr = card->addr + PCI402_IO_OV_OFFS;
+ card->addr_pciep = card->addr + PCI402_IO_PCIEP_OFFS;
+
+ acc_reset_fpga(&card->ov);
+ acc_init_ov(&card->ov, &pdev->dev);
+
+ if (card->ov.version < PCI402_FPGA_VER_MIN) {
+ pci_err(pdev,
+ "esdACC version (0x%.4x) outdated, please update\n",
+ card->ov.version);
+ return -EINVAL;
+ }
+
+ if (card->ov.timestamp_frequency != ACC_TS_FREQ_80MHZ) {
+ pci_err(pdev,
+ "esdACC timestamp frequency of %uHz not supported by driver. Aborted.\n",
+ card->ov.timestamp_frequency);
+ return -EINVAL;
+ }
+
+ if (card->ov.active_cores > PCI402_MAX_CORES) {
+ pci_err(pdev,
+ "Card with %u active cores not supported by driver. Aborted.\n",
+ card->ov.active_cores);
+ return -EINVAL;
+ }
+ card->cores = devm_kcalloc(&pdev->dev, card->ov.active_cores,
+ sizeof(struct acc_core), GFP_KERNEL);
+ if (!card->cores)
+ return -ENOMEM;
+
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD) {
+ pci_warn(pdev,
+ "esdACC with CAN-FD feature detected. This driver doesn't support CAN-FD yet.\n");
+ }
+
+#ifdef __LITTLE_ENDIAN
+ /* Have the card convert all bus-mastered data to little endian for us: */
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE);
+#endif
+
+ return 0;
+}
+
+static int pci402_init_interrupt(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+
+ err = pci_enable_msi(pdev);
+ if (!err) {
+ err = pci402_set_msiconfig(pdev);
+ if (!err) {
+ card->msi_enabled = true;
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_dbg(pdev, "MSI preparation done\n");
+ }
+ }
+
+ err = devm_request_irq(&pdev->dev, pdev->irq, pci402_interrupt,
+ IRQF_SHARED, dev_name(&pdev->dev), pdev);
+ if (err)
+ goto failure_msidis;
+
+ iowrite32(1, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE);
+
+ return 0;
+
+failure_msidis:
+ if (card->msi_enabled) {
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_disable_msi(pdev);
+ card->msi_enabled = false;
+ }
+
+ return err;
+}
+
+static void pci402_finish_interrupt(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE);
+ devm_free_irq(&pdev->dev, pdev->irq, pdev);
+
+ if (card->msi_enabled) {
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_MSI_ENABLE);
+ pci_disable_msi(pdev);
+ card->msi_enabled = false;
+ }
+}
+
+static int pci402_init_dma(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+
+ err = dma_set_coherent_mask(&pdev->dev, PCI402_DMA_MASK);
+ if (err) {
+ pci_err(pdev, "DMA set mask failed!\n");
+ return err;
+ }
+
+ /* The esdACC DMA engine needs the DMA buffer aligned to a 64k
+ * boundary. The DMA API guarantees to align the returned buffer to the
+ * smallest PAGE_SIZE order which is greater than or equal to the
+ * requested size. With PCI402_DMA_SIZE == 64kB this suffices here.
+ */
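+ /* Editor's note (illustration): with 4 KiB pages a 64 KiB request is
+ * served from an order-4 allocation, so the returned buffer is
+ * naturally aligned to 64 KiB as the esdACC engine requires.
+ */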
+ card->dma_buf = dma_alloc_coherent(&pdev->dev, PCI402_DMA_SIZE,
+ &card->dma_hnd, GFP_KERNEL);
+ if (!card->dma_buf)
+ return -ENOMEM;
+
+ acc_init_bm_ptr(&card->ov, card->cores, card->dma_buf);
+
+ iowrite32(card->dma_hnd,
+ card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO);
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI);
+
+ pci_set_master(pdev);
+
+ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_BM_ENABLE);
+
+ return 0;
+}
+
+static void pci402_finish_dma(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int i;
+
+ acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_BM_ENABLE);
+
+ pci_clear_master(pdev);
+
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO);
+ iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI);
+
+ card->ov.bmfifo.messages = NULL;
+ card->ov.bmfifo.irq_cnt = NULL;
+ for (i = 0; i < card->ov.active_cores; i++) {
+ struct acc_core *core = &card->cores[i];
+
+ core->bmfifo.messages = NULL;
+ core->bmfifo.irq_cnt = NULL;
+ }
+
+ dma_free_coherent(&pdev->dev, PCI402_DMA_SIZE, card->dma_buf,
+ card->dma_hnd);
+ card->dma_buf = NULL;
+}
+
+static void pci402_unregister_core(struct acc_core *core)
+{
+ netdev_info(core->netdev, "unregister\n");
+ unregister_candev(core->netdev);
+
+ free_candev(core->netdev);
+ core->netdev = NULL;
+}
+
+static int pci402_init_cores(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int err;
+ int i;
+
+ for (i = 0; i < card->ov.active_cores; i++) {
+ struct acc_core *core = &card->cores[i];
+ struct acc_net_priv *priv;
+ struct net_device *netdev;
+ u32 fifo_config;
+
+ core->addr = card->ov.addr + (i + 1) * PCI402_IO_LEN_CORE;
+
+ fifo_config = acc_read32(core, ACC_CORE_OF_TXFIFO_CONFIG);
+ core->tx_fifo_size = (fifo_config >> 24);
+ if (core->tx_fifo_size <= 1) {
+ pci_err(pdev, "Invalid tx_fifo_size!\n");
+ err = -EINVAL;
+ goto failure;
+ }
+
+ netdev = alloc_candev(sizeof(*priv), core->tx_fifo_size);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto failure;
+ }
+ core->netdev = netdev;
+
+ netdev->flags |= IFF_ECHO;
+ netdev->dev_port = i;
+ netdev->netdev_ops = &pci402_acc_netdev_ops;
+ netdev->ethtool_ops = &pci402_acc_ethtool_ops;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ priv = netdev_priv(netdev);
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_CC_LEN8_DLC;
+
+ priv->can.clock.freq = card->ov.core_frequency;
+ if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD)
+ priv->can.bittiming_const = &pci402_bittiming_const_canfd;
+ else
+ priv->can.bittiming_const = &pci402_bittiming_const;
+ priv->can.do_set_bittiming = acc_set_bittiming;
+ priv->can.do_set_mode = acc_set_mode;
+ priv->can.do_get_berr_counter = acc_get_berr_counter;
+
+ priv->core = core;
+ priv->ov = &card->ov;
+
+ err = register_candev(netdev);
+ if (err) {
+ free_candev(core->netdev);
+ core->netdev = NULL;
+ goto failure;
+ }
+
+ netdev_info(netdev, "registered\n");
+ }
+
+ return 0;
+
+failure:
+ for (i--; i >= 0; i--)
+ pci402_unregister_core(&card->cores[i]);
+
+ return err;
+}
+
+static void pci402_finish_cores(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < card->ov.active_cores; i++)
+ pci402_unregister_core(&card->cores[i]);
+}
+
+static int pci402_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct pci402_card *card = NULL;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card) {
+ err = -ENOMEM;
+ goto failure_disable_pci;
+ }
+
+ pci_set_drvdata(pdev, card);
+
+ err = pci_request_regions(pdev, pci_name(pdev));
+ if (err)
+ goto failure_disable_pci;
+
+ card->addr = pci_iomap(pdev, PCI402_BAR, PCI402_IO_LEN_TOTAL);
+ if (!card->addr) {
+ err = -ENOMEM;
+ goto failure_release_regions;
+ }
+
+ err = pci402_init_card(pdev);
+ if (err)
+ goto failure_unmap;
+
+ err = pci402_init_dma(pdev);
+ if (err)
+ goto failure_unmap;
+
+ err = pci402_init_interrupt(pdev);
+ if (err)
+ goto failure_finish_dma;
+
+ err = pci402_init_cores(pdev);
+ if (err)
+ goto failure_finish_interrupt;
+
+ return 0;
+
+failure_finish_interrupt:
+ pci402_finish_interrupt(pdev);
+
+failure_finish_dma:
+ pci402_finish_dma(pdev);
+
+failure_unmap:
+ pci_iounmap(pdev, card->addr);
+
+failure_release_regions:
+ pci_release_regions(pdev);
+
+failure_disable_pci:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void pci402_remove(struct pci_dev *pdev)
+{
+ struct pci402_card *card = pci_get_drvdata(pdev);
+
+ pci402_finish_interrupt(pdev);
+ pci402_finish_cores(pdev);
+ pci402_finish_dma(pdev);
+ pci_iounmap(pdev, card->addr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id pci402_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_ESDGMBH,
+ .device = ESD_PCI_DEVICE_ID_PCIE402,
+ .subvendor = PCI_VENDOR_ID_ESDGMBH,
+ .subdevice = PCI_ANY_ID,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, pci402_tbl);
+
+static struct pci_driver pci402_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = pci402_tbl,
+ .probe = pci402_probe,
+ .remove = pci402_remove,
+};
+module_pci_driver(pci402_driver);
+
+MODULE_DESCRIPTION("Socket-CAN driver for esd CAN 402 card family with esdACC core on PCIe");
+MODULE_AUTHOR("Thomas Körper <socketcan@esd.eu>");
+MODULE_AUTHOR("Stefan Mätje <stefan.maetje@esd.eu>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/esd/esdacc.c b/drivers/net/can/esd/esdacc.c
new file mode 100644
index 000000000000..121cbbf81458
--- /dev/null
+++ b/drivers/net/can/esd/esdacc.c
@@ -0,0 +1,764 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include "esdacc.h"
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+
+/* esdACC ID register layout */
+#define ACC_ID_ID_MASK GENMASK(28, 0)
+#define ACC_ID_EFF_FLAG BIT(29)
+
+/* esdACC DLC register layout */
+#define ACC_DLC_DLC_MASK GENMASK(3, 0)
+#define ACC_DLC_RTR_FLAG BIT(4)
+#define ACC_DLC_TXD_FLAG BIT(5)
+
+/* The ECC value of the esdACC matches the SJA1000's ECC register layout */
+#define ACC_ECC_SEG 0x1f
+#define ACC_ECC_DIR 0x20
+#define ACC_ECC_BIT 0x00
+#define ACC_ECC_FORM 0x40
+#define ACC_ECC_STUFF 0x80
+#define ACC_ECC_MASK 0xc0
+
+/* esdACC Status Register bits. Unused bits not documented. */
+#define ACC_REG_STATUS_MASK_STATUS_ES BIT(17)
+#define ACC_REG_STATUS_MASK_STATUS_EP BIT(18)
+#define ACC_REG_STATUS_MASK_STATUS_BS BIT(19)
+
+/* esdACC Overview Module BM_IRQ_Mask register related defines */
+/* Two bit wide command masks to mask or unmask a single core IRQ */
+#define ACC_BM_IRQ_UNMASK BIT(0)
+#define ACC_BM_IRQ_MASK (ACC_BM_IRQ_UNMASK << 1)
+/* Command to unmask all IRQ sources. Created by shifting
+ * and ORing the two-bit-wide ACC_BM_IRQ_UNMASK 16 times.
+ */
+#define ACC_BM_IRQ_UNMASK_ALL 0x55555555U
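+/* Illustration (editor's note): sixteen copies of the two-bit
+ * ACC_BM_IRQ_UNMASK pattern (0b01) placed side by side yield
+ * 0101...0101b == 0x55555555.
+ */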
+
+static void acc_resetmode_enter(struct acc_core *core)
+{
+ acc_set_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+
+ /* Read back reset mode bit to flush PCI write posting */
+ acc_resetmode_entered(core);
+}
+
+static void acc_resetmode_leave(struct acc_core *core)
+{
+ acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_MODE_RESETMODE);
+
+ /* Read back reset mode bit to flush PCI write posting */
+ acc_resetmode_entered(core);
+}
+
+static void acc_txq_put(struct acc_core *core, u32 acc_id, u8 acc_dlc,
+ const void *data)
+{
+ acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1,
+ *((const u32 *)(data + 4)));
+ acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_0,
+ *((const u32 *)data));
+ acc_write32(core, ACC_CORE_OF_TXFIFO_DLC, acc_dlc);
+ /* The CAN ID must be written last; this write starts the transmission. */
+ acc_write32(core, ACC_CORE_OF_TXFIFO_ID, acc_id);
+}
+
+static u8 acc_tx_fifo_next(struct acc_core *core, u8 tx_fifo_idx)
+{
+ ++tx_fifo_idx;
+ if (tx_fifo_idx >= core->tx_fifo_size)
+ tx_fifo_idx = 0U;
+ return tx_fifo_idx;
+}
+
+/* Convert timestamp from esdACC time stamp ticks to ns
+ *
+ * The conversion factor ts2ns from time stamp counts to ns is basically
+ * ts2ns = NSEC_PER_SEC / timestamp_frequency
+ *
+ * We handle here only a fixed timestamp frequency of 80MHz. The
+ * resulting ts2ns factor would be 12.5.
+ *
+ * At the end we multiply by 12 and add half of the HW timestamp
+ * to get an effective multiplication by 12.5. This way any overflow is
+ * avoided until ktime_t itself overflows.
+ */
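+/* Worked example (editor's illustration): ts = 4 ticks yields
+ * 4 * 12 + (4 >> 1) = 50 ns, exactly 4 * 12.5 ns.
+ */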
+#define ACC_TS_FACTOR (NSEC_PER_SEC / ACC_TS_FREQ_80MHZ)
+#define ACC_TS_80MHZ_SHIFT 1
+
+static ktime_t acc_ts2ktime(struct acc_ov *ov, u64 ts)
+{
+ u64 ns;
+
+ ns = (ts * ACC_TS_FACTOR) + (ts >> ACC_TS_80MHZ_SHIFT);
+
+ return ns_to_ktime(ns);
+}
+
+#undef ACC_TS_FACTOR
+#undef ACC_TS_80MHZ_SHIFT
+
+void acc_init_ov(struct acc_ov *ov, struct device *dev)
+{
+ u32 temp;
+
+ temp = acc_ov_read32(ov, ACC_OV_OF_VERSION);
+ ov->version = temp;
+ ov->features = (temp >> 16);
+
+ temp = acc_ov_read32(ov, ACC_OV_OF_INFO);
+ ov->total_cores = temp;
+ ov->active_cores = (temp >> 8);
+
+ ov->core_frequency = acc_ov_read32(ov, ACC_OV_OF_CANCORE_FREQ);
+ ov->timestamp_frequency = acc_ov_read32(ov, ACC_OV_OF_TS_FREQ_LO);
+
+ /* Depending on esdACC feature NEW_PSC enable the new prescaler
+ * or adjust core_frequency according to the implicit division by 2.
+ */
+ if (ov->features & ACC_OV_REG_FEAT_MASK_NEW_PSC) {
+ acc_ov_set_bits(ov, ACC_OV_OF_MODE,
+ ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE);
+ } else {
+ ov->core_frequency /= 2;
+ }
+
+ dev_dbg(dev,
+ "esdACC v%u, freq: %u/%u, feat/strap: 0x%x/0x%x, cores: %u/%u\n",
+ ov->version, ov->core_frequency, ov->timestamp_frequency,
+ ov->features, acc_ov_read32(ov, ACC_OV_OF_INFO) >> 16,
+ ov->active_cores, ov->total_cores);
+}
+
+void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores, const void *mem)
+{
+ unsigned int u;
+
+ /* DMA buffer layout is as follows, where N is the number of CAN cores
+ * implemented in the FPGA, i.e. N = ov->total_cores
+ *
+ * Section Layout Section size
+ * ----------------------------------------------
+ * FIFO Card/Overview ACC_CORE_DMABUF_SIZE
+ * FIFO Core0 ACC_CORE_DMABUF_SIZE
+ * ... ...
+ * FIFO CoreN ACC_CORE_DMABUF_SIZE
+ * irq_cnt Card/Overview sizeof(u32)
+ * irq_cnt Core0 sizeof(u32)
+ * ... ...
+ * irq_cnt CoreN sizeof(u32)
+ */
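+ /* Example (editor's illustration): with total_cores == 4 and
+ * ACC_CORE_DMABUF_SIZE == 8 KiB, core 0's FIFO starts at mem + 8 KiB
+ * and the irq_cnt array at mem + 40 KiB (overview entry first).
+ */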
+ ov->bmfifo.messages = mem;
+ ov->bmfifo.irq_cnt = mem + (ov->total_cores + 1U) * ACC_CORE_DMABUF_SIZE;
+
+ for (u = 0U; u < ov->active_cores; u++) {
+ struct acc_core *core = &cores[u];
+
+ core->bmfifo.messages = mem + (u + 1U) * ACC_CORE_DMABUF_SIZE;
+ core->bmfifo.irq_cnt = ov->bmfifo.irq_cnt + (u + 1U);
+ }
+}
+
+int acc_open(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+ u32 tx_fifo_status;
+ u32 ctrl_mode;
+ int err;
+
+ /* Re-enter RESET mode if the state got out of sync. */
+ if (priv->can.state != CAN_STATE_STOPPED) {
+ netdev_warn(netdev, "Entered %s() with bad can.state: %s\n",
+ __func__, can_get_state_str(priv->can.state));
+ acc_resetmode_enter(core);
+ priv->can.state = CAN_STATE_STOPPED;
+ }
+
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ ctrl_mode = ACC_REG_CONTROL_MASK_IE_RXTX |
+ ACC_REG_CONTROL_MASK_IE_TXERROR |
+ ACC_REG_CONTROL_MASK_IE_ERRWARN |
+ ACC_REG_CONTROL_MASK_IE_OVERRUN |
+ ACC_REG_CONTROL_MASK_IE_ERRPASS;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ ctrl_mode |= ACC_REG_CONTROL_MASK_IE_BUSERR;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ ctrl_mode |= ACC_REG_CONTROL_MASK_MODE_LOM;
+
+ acc_set_bits(core, ACC_CORE_OF_CTRL_MODE, ctrl_mode);
+
+ acc_resetmode_leave(core);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Resync TX FIFO indices to HW state after (re-)start. */
+ tx_fifo_status = acc_read32(core, ACC_CORE_OF_TXFIFO_STATUS);
+ core->tx_fifo_head = tx_fifo_status & 0xff;
+ core->tx_fifo_tail = (tx_fifo_status >> 8) & 0xff;
+
+ netif_start_queue(netdev);
+ return 0;
+}
+
+int acc_close(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+
+ acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_IE_RXTX |
+ ACC_REG_CONTROL_MASK_IE_TXERROR |
+ ACC_REG_CONTROL_MASK_IE_ERRWARN |
+ ACC_REG_CONTROL_MASK_IE_OVERRUN |
+ ACC_REG_CONTROL_MASK_IE_ERRPASS |
+ ACC_REG_CONTROL_MASK_IE_BUSERR);
+
+ netif_stop_queue(netdev);
+ acc_resetmode_enter(core);
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Mark pending TX requests to be aborted after controller restart. */
+ acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
+
+ /* ACC_REG_CONTROL_MASK_MODE_LOM is only accessible in RESET mode */
+ acc_clear_bits(core, ACC_CORE_OF_CTRL_MODE,
+ ACC_REG_CONTROL_MASK_MODE_LOM);
+
+ close_candev(netdev);
+ return 0;
+}
+
+netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ struct acc_core *core = priv->core;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u8 tx_fifo_head = core->tx_fifo_head;
+ int fifo_usage;
+ u32 acc_id;
+ u8 acc_dlc;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* Access core->tx_fifo_tail only once because it may be changed
+ * from the interrupt level.
+ */
+ fifo_usage = tx_fifo_head - core->tx_fifo_tail;
+ if (fifo_usage < 0)
+ fifo_usage += core->tx_fifo_size;
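+ /* Example (editor's illustration): head = 2, tail = 5, size = 8 gives
+ * 2 - 5 = -3, corrected to 5 frames in flight. One slot is always kept
+ * free, so the queue is stopped below once this frame would raise the
+ * occupancy to size - 1.
+ */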
+
+ if (fifo_usage >= core->tx_fifo_size - 1) {
+ netdev_err(core->netdev,
+ "BUG: TX ring full when queue awake!\n");
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (fifo_usage == core->tx_fifo_size - 2)
+ netif_stop_queue(netdev);
+
+ acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
+ if (cf->can_id & CAN_RTR_FLAG)
+ acc_dlc |= ACC_DLC_RTR_FLAG;
+
+ if (cf->can_id & CAN_EFF_FLAG) {
+ acc_id = cf->can_id & CAN_EFF_MASK;
+ acc_id |= ACC_ID_EFF_FLAG;
+ } else {
+ acc_id = cf->can_id & CAN_SFF_MASK;
+ }
+
+ can_put_echo_skb(skb, netdev, core->tx_fifo_head, 0);
+
+ core->tx_fifo_head = acc_tx_fifo_next(core, tx_fifo_head);
+
+ acc_txq_put(core, acc_id, acc_dlc, cf->data);
+
+ return NETDEV_TX_OK;
+}
+
+int acc_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ u32 core_status = acc_read32(priv->core, ACC_CORE_OF_STATUS);
+
+ bec->txerr = (core_status >> 8) & 0xff;
+ bec->rxerr = core_status & 0xff;
+
+ return 0;
+}
+
+int acc_set_mode(struct net_device *netdev, enum can_mode mode)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+
+ switch (mode) {
+ case CAN_MODE_START:
+ /* Paranoid FIFO index check. */
+ {
+ const u32 tx_fifo_status =
+ acc_read32(priv->core, ACC_CORE_OF_TXFIFO_STATUS);
+ const u8 hw_fifo_head = tx_fifo_status;
+
+ if (hw_fifo_head != priv->core->tx_fifo_head ||
+ hw_fifo_head != priv->core->tx_fifo_tail) {
+ netdev_warn(netdev,
+ "TX FIFO mismatch: T %2u H %2u; TFHW %#08x\n",
+ priv->core->tx_fifo_tail,
+ priv->core->tx_fifo_head,
+ tx_fifo_status);
+ }
+ }
+ acc_resetmode_leave(priv->core);
+ /* To leave the bus-off state the esdACC controller here begins
+ * a grace period where it counts 128 "idle conditions" (each
+ * of 11 consecutive recessive bits) on the bus as required
+ * by the CAN spec.
+ *
+ * During this time the TX FIFO may still contain already
+ * aborted "zombie" frames that are only drained from the FIFO
+ * at the end of the grace period.
+ *
+ * To avoid interfering with this drain process we don't
+ * call netif_wake_queue() here. When the controller reaches
+ * the error-active state again, it informs us about that
+ * with an acc_bmmsg_errstatechange message. Then
+ * netif_wake_queue() is called from
+ * handle_core_msg_errstatechange() instead.
+ */
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
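+/* Worked example (editor's sketch, illustrative values only): with a 40 MHz
+ * core clock, brp = 4, prop_seg + phase_seg1 = 15 and phase_seg2 = 4 give
+ * 20 time quanta of 100 ns per bit, i.e. 500 kbit/s. The register fields
+ * then hold BRP = 3, TSEG1 = 14 and TSEG2 = 3 (each "value minus one").
+ */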
+int acc_set_bittiming(struct net_device *netdev)
+{
+ struct acc_net_priv *priv = netdev_priv(netdev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 brp;
+ u32 btr;
+
+ if (priv->ov->features & ACC_OV_REG_FEAT_MASK_CANFD) {
+ u32 fbtr = 0;
+
+ netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
+ bt->brp, bt->prop_seg,
+ bt->phase_seg1, bt->phase_seg2, bt->sjw);
+
+ brp = FIELD_PREP(ACC_REG_BRP_FD_MASK_BRP, bt->brp - 1);
+
+ btr = FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG2, bt->phase_seg2 - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_SJW, bt->sjw - 1);
+
+ /* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
+ acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
+ acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
+
+ netdev_dbg(netdev, "esdACC: BRP %u, NBTR 0x%08x, DBTR 0x%08x",
+ brp, btr, fbtr);
+ } else {
+ netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
+ bt->brp, bt->prop_seg,
+ bt->phase_seg1, bt->phase_seg2, bt->sjw);
+
+ brp = FIELD_PREP(ACC_REG_BRP_CL_MASK_BRP, bt->brp - 1);
+
+ btr = FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG2, bt->phase_seg2 - 1);
+ btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_SJW, bt->sjw - 1);
+
+ /* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
+ acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
+ acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
+
+ netdev_dbg(netdev, "esdACC: BRP %u, BTR 0x%08x", brp, btr);
+ }
+
+ return 0;
+}
+
+static void handle_core_msg_rxtxdone(struct acc_core *core,
+ const struct acc_bmmsg_rxtxdone *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct sk_buff *skb;
+
+ if (msg->acc_dlc.len & ACC_DLC_TXD_FLAG) {
+ u8 tx_fifo_tail = core->tx_fifo_tail;
+
+ if (core->tx_fifo_head == tx_fifo_tail) {
+ netdev_warn(core->netdev,
+ "TX interrupt, but queue is empty!?\n");
+ return;
+ }
+
+ /* Access the echo skb directly to attach the HW timestamp. */
+ skb = priv->can.echo_skb[tx_fifo_tail];
+ if (skb) {
+ skb_hwtstamps(skb)->hwtstamp =
+ acc_ts2ktime(priv->ov, msg->ts);
+ }
+
+ stats->tx_packets++;
+ stats->tx_bytes += can_get_echo_skb(core->netdev, tx_fifo_tail,
+ NULL);
+
+ core->tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
+
+ netif_wake_queue(core->netdev);
+
+ } else {
+ struct can_frame *cf;
+
+ skb = alloc_can_skb(core->netdev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ cf->can_id = msg->id & ACC_ID_ID_MASK;
+ if (msg->id & ACC_ID_EFF_FLAG)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ can_frame_set_cc_len(cf, msg->acc_dlc.len & ACC_DLC_DLC_MASK,
+ priv->can.ctrlmode);
+
+ if (msg->acc_dlc.len & ACC_DLC_RTR_FLAG) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ memcpy(cf->data, msg->data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
+ stats->rx_packets++;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+ }
+}
+
+static void handle_core_msg_txabort(struct acc_core *core,
+ const struct acc_bmmsg_txabort *msg)
+{
+ struct net_device_stats *stats = &core->netdev->stats;
+ u8 tx_fifo_tail = core->tx_fifo_tail;
+ u32 abort_mask = msg->abort_mask; /* widened to u32 to avoid warnings below */
+
+ /* The abort_mask shows which frames were aborted in esdACC's FIFO. */
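+ /* Example (editor's illustration): with tx_fifo_tail = 3 and bit 3 set
+ * in abort_mask, echo skb 3 is dropped and the tail advances; the loop
+ * stops at the first slot whose bit is not set.
+ */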
+ while (tx_fifo_tail != core->tx_fifo_head && (abort_mask)) {
+ const u32 tail_mask = (1U << tx_fifo_tail);
+
+ if (!(abort_mask & tail_mask))
+ break;
+ abort_mask &= ~tail_mask;
+
+ can_free_echo_skb(core->netdev, tx_fifo_tail, NULL);
+ stats->tx_dropped++;
+ stats->tx_aborted_errors++;
+
+ tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
+ }
+ core->tx_fifo_tail = tx_fifo_tail;
+ if (abort_mask)
+ netdev_warn(core->netdev, "Unhandled aborted messages\n");
+
+ if (!acc_resetmode_entered(core))
+ netif_wake_queue(core->netdev);
+}
+
+static void handle_core_msg_overrun(struct acc_core *core,
+ const struct acc_bmmsg_overrun *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /* lost_cnt may be 0 if not supported by esdACC version */
+ if (msg->lost_cnt) {
+ stats->rx_errors += msg->lost_cnt;
+ stats->rx_over_errors += msg->lost_cnt;
+ } else {
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+}
+
+static void handle_core_msg_buserr(struct acc_core *core,
+ const struct acc_bmmsg_buserr *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct net_device_stats *stats = &core->netdev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ const u32 reg_status = msg->reg_status;
+ const u8 rxerr = reg_status;
+ const u8 txerr = (reg_status >> 8);
+ u8 can_err_prot_type = 0U;
+
+ priv->can.can_stats.bus_error++;
+
+ /* Error occurred during transmission? */
+ if (msg->ecc & ACC_ECC_DIR) {
+ stats->rx_errors++;
+ } else {
+ can_err_prot_type |= CAN_ERR_PROT_TX;
+ stats->tx_errors++;
+ }
+ /* Determine error type */
+ switch (msg->ecc & ACC_ECC_MASK) {
+ case ACC_ECC_BIT:
+ can_err_prot_type |= CAN_ERR_PROT_BIT;
+ break;
+ case ACC_ECC_FORM:
+ can_err_prot_type |= CAN_ERR_PROT_FORM;
+ break;
+ case ACC_ECC_STUFF:
+ can_err_prot_type |= CAN_ERR_PROT_STUFF;
+ break;
+ default:
+ can_err_prot_type |= CAN_ERR_PROT_UNSPEC;
+ break;
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
+
+ /* Set protocol error type */
+ cf->data[2] = can_err_prot_type;
+ /* Set error location */
+ cf->data[3] = msg->ecc & ACC_ECC_SEG;
+
+ /* Insert CAN TX and RX error counters. */
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+}
+
+static void
+handle_core_msg_errstatechange(struct acc_core *core,
+ const struct acc_bmmsg_errstatechange *msg)
+{
+ struct acc_net_priv *priv = netdev_priv(core->netdev);
+ struct can_frame *cf = NULL;
+ struct sk_buff *skb;
+ const u32 reg_status = msg->reg_status;
+ const u8 rxerr = reg_status;
+ const u8 txerr = (reg_status >> 8);
+ enum can_state new_state;
+
+ if (reg_status & ACC_REG_STATUS_MASK_STATUS_BS) {
+ new_state = CAN_STATE_BUS_OFF;
+ } else if (reg_status & ACC_REG_STATUS_MASK_STATUS_EP) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (reg_status & ACC_REG_STATUS_MASK_STATUS_ES) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
+ /* See comment in acc_set_mode() for CAN_MODE_START */
+ netif_wake_queue(core->netdev);
+ }
+ }
+
+ skb = alloc_can_err_skb(core->netdev, &cf);
+
+ if (new_state != priv->can.state) {
+ enum can_state tx_state, rx_state;
+
+ tx_state = (txerr >= rxerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+ rx_state = (rxerr >= txerr) ?
+ new_state : CAN_STATE_ERROR_ACTIVE;
+
+ /* Always call can_change_state() to update the state
+ * even if alloc_can_err_skb() may have failed.
+ * can_change_state() can cope with a NULL cf pointer.
+ */
+ can_change_state(core->netdev, cf, tx_state, rx_state);
+ }
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);
+
+ netif_rx(skb);
+ }
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
+ can_bus_off(core->netdev);
+ }
+}
+
+static void handle_core_interrupt(struct acc_core *core)
+{
+ u32 msg_fifo_head = core->bmfifo.local_irq_cnt & 0xff;
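+ /* Editor's note: the bus-master message ring holds 256 entries, so
+ * only the low byte of the DMA'd IRQ counter is a valid head index;
+ * msg_fifo_tail below advances modulo 256 accordingly.
+ */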
+
+ while (core->bmfifo.msg_fifo_tail != msg_fifo_head) {
+ const union acc_bmmsg *msg =
+ &core->bmfifo.messages[core->bmfifo.msg_fifo_tail];
+
+ switch (msg->msg_id) {
+ case BM_MSG_ID_RXTXDONE:
+ handle_core_msg_rxtxdone(core, &msg->rxtxdone);
+ break;
+
+ case BM_MSG_ID_TXABORT:
+ handle_core_msg_txabort(core, &msg->txabort);
+ break;
+
+ case BM_MSG_ID_OVERRUN:
+ handle_core_msg_overrun(core, &msg->overrun);
+ break;
+
+ case BM_MSG_ID_BUSERR:
+ handle_core_msg_buserr(core, &msg->buserr);
+ break;
+
+ case BM_MSG_ID_ERRPASSIVE:
+ case BM_MSG_ID_ERRWARN:
+ handle_core_msg_errstatechange(core,
+ &msg->errstatechange);
+ break;
+
+ default:
+ /* Ignore all other BM messages (like the CAN-FD messages) */
+ break;
+ }
+
+ core->bmfifo.msg_fifo_tail =
+ (core->bmfifo.msg_fifo_tail + 1) & 0xff;
+ }
+}
+
+/**
+ * acc_card_interrupt() - handle the interrupts of an esdACC FPGA
+ *
+ * @ov: overview module structure
+ * @cores: array of core structures
+ *
+ * This function handles all interrupts pending for the overview module and the
+ * CAN cores of the esdACC FPGA.
+ *
+ * It examines for all cores (the overview module core and the CAN cores)
+ * the bmfifo.irq_cnt and compares it with the previously saved
+ * bmfifo.local_irq_cnt. An IRQ is pending if they differ. The esdACC FPGA
+ * updates the bmfifo.irq_cnt values by DMA.
+ *
+ * The pending interrupts are masked by writing to the IRQ mask register at
+ * ACC_OV_OF_BM_IRQ_MASK. This register has for each core a two bit command
+ * field evaluated as follows:
+ *
+ * Define, bit pattern: meaning
+ * 00: no action
+ * ACC_BM_IRQ_UNMASK, 01: unmask interrupt
+ * ACC_BM_IRQ_MASK, 10: mask interrupt
+ * 11: no action
+ *
+ * For each CAN core with a pending IRQ handle_core_interrupt() handles all
+ * busmaster messages from the message FIFO. The last handled message (FIFO
+ * index) is written to the CAN core to acknowledge its handling.
+ *
+ * Last step is to unmask all interrupts in the FPGA using
+ * ACC_BM_IRQ_UNMASK_ALL.
+ *
+ * Return:
+ * IRQ_HANDLED, if card generated an interrupt that was handled
+ * IRQ_NONE, if the interrupt is not ours
+ */
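+/* Example (editor's illustration): a pending IRQ of CAN core 0 occupies
+ * two-bit slot 1, i.e. irqmask |= ACC_BM_IRQ_MASK << 2 == 0x8; slot 0
+ * belongs to the card/overview module itself.
+ */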
+irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores)
+{
+ u32 irqmask;
+ int i;
+
+ /* First we determine for which units interrupts are pending: the
+ * card/overview module or any of the cores. Two bits in irqmask are
+ * used for each; a two-bit field is set to ACC_BM_IRQ_MASK if an
+ * IRQ is pending.
+ */
+ irqmask = 0U;
+ if (READ_ONCE(*ov->bmfifo.irq_cnt) != ov->bmfifo.local_irq_cnt) {
+ irqmask |= ACC_BM_IRQ_MASK;
+ ov->bmfifo.local_irq_cnt = READ_ONCE(*ov->bmfifo.irq_cnt);
+ }
+
+ for (i = 0; i < ov->active_cores; i++) {
+ struct acc_core *core = &cores[i];
+
+ if (READ_ONCE(*core->bmfifo.irq_cnt) != core->bmfifo.local_irq_cnt) {
+ irqmask |= (ACC_BM_IRQ_MASK << (2 * (i + 1)));
+ core->bmfifo.local_irq_cnt = READ_ONCE(*core->bmfifo.irq_cnt);
+ }
+ }
+
+ if (!irqmask)
+ return IRQ_NONE;
+
+ /* Second, we tell the card which units we are working on by writing
+ * irqmask, call handle_{ov|core}_interrupt() and then acknowledge the
+ * interrupts by writing back the irq_cnt values:
+ */
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, irqmask);
+
+ if (irqmask & ACC_BM_IRQ_MASK) {
+ /* handle_ov_interrupt(); - no use yet. */
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_COUNTER,
+ ov->bmfifo.local_irq_cnt);
+ }
+
+ for (i = 0; i < ov->active_cores; i++) {
+ struct acc_core *core = &cores[i];
+
+ if (irqmask & (ACC_BM_IRQ_MASK << (2 * (i + 1)))) {
+ handle_core_interrupt(core);
+ acc_write32(core, ACC_CORE_OF_BM_IRQ_COUNTER,
+ core->bmfifo.local_irq_cnt);
+ }
+ }
+
+ acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, ACC_BM_IRQ_UNMASK_ALL);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/net/can/esd/esdacc.h b/drivers/net/can/esd/esdacc.h
new file mode 100644
index 000000000000..a70488b25d39
--- /dev/null
+++ b/drivers/net/can/esd/esdacc.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
+ * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
+ */
+
+#include <linux/bits.h>
+#include <linux/can/dev.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/units.h>
+
+#define ACC_TS_FREQ_80MHZ (80 * HZ_PER_MHZ)
+#define ACC_I2C_ADDON_DETECT_DELAY_MS 10
+
+/* esdACC Overview Module */
+#define ACC_OV_OF_PROBE 0x0000
+#define ACC_OV_OF_VERSION 0x0004
+#define ACC_OV_OF_INFO 0x0008
+#define ACC_OV_OF_CANCORE_FREQ 0x000c
+#define ACC_OV_OF_TS_FREQ_LO 0x0010
+#define ACC_OV_OF_TS_FREQ_HI 0x0014
+#define ACC_OV_OF_IRQ_STATUS_CORES 0x0018
+#define ACC_OV_OF_TS_CURR_LO 0x001c
+#define ACC_OV_OF_TS_CURR_HI 0x0020
+#define ACC_OV_OF_IRQ_STATUS 0x0028
+#define ACC_OV_OF_MODE 0x002c
+#define ACC_OV_OF_BM_IRQ_COUNTER 0x0070
+#define ACC_OV_OF_BM_IRQ_MASK 0x0074
+#define ACC_OV_OF_MSI_DATA 0x0080
+#define ACC_OV_OF_MSI_ADDRESSOFFSET 0x0084
+
+/* Feature flags are contained in the upper 16 bit of the version
+ * register at ACC_OV_OF_VERSION but only used with these masks after
+ * extraction into an extra variable => (xx - 16).
+ */
+#define ACC_OV_REG_FEAT_MASK_CANFD BIT(27 - 16)
+#define ACC_OV_REG_FEAT_MASK_NEW_PSC BIT(28 - 16)
+
+#define ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE BIT(0)
+#define ACC_OV_REG_MODE_MASK_BM_ENABLE BIT(1)
+#define ACC_OV_REG_MODE_MASK_MODE_LED BIT(2)
+#define ACC_OV_REG_MODE_MASK_TIMER_ENABLE BIT(4)
+#define ACC_OV_REG_MODE_MASK_TIMER_ONE_SHOT BIT(5)
+#define ACC_OV_REG_MODE_MASK_TIMER_ABSOLUTE BIT(6)
+#define ACC_OV_REG_MODE_MASK_TIMER GENMASK(6, 4)
+#define ACC_OV_REG_MODE_MASK_TS_SRC GENMASK(8, 7)
+#define ACC_OV_REG_MODE_MASK_I2C_ENABLE BIT(11)
+#define ACC_OV_REG_MODE_MASK_MSI_ENABLE BIT(14)
+#define ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE BIT(15)
+#define ACC_OV_REG_MODE_MASK_FPGA_RESET BIT(31)
+
+/* esdACC CAN Core Module */
+#define ACC_CORE_OF_CTRL_MODE 0x0000
+#define ACC_CORE_OF_STATUS_IRQ 0x0008
+#define ACC_CORE_OF_BRP 0x000c
+#define ACC_CORE_OF_BTR 0x0010
+#define ACC_CORE_OF_FBTR 0x0014
+#define ACC_CORE_OF_STATUS 0x0030
+#define ACC_CORE_OF_TXFIFO_CONFIG 0x0048
+#define ACC_CORE_OF_TXFIFO_STATUS 0x004c
+#define ACC_CORE_OF_TX_STATUS_IRQ 0x0050
+#define ACC_CORE_OF_TX_ABORT_MASK 0x0054
+#define ACC_CORE_OF_BM_IRQ_COUNTER 0x0070
+#define ACC_CORE_OF_TXFIFO_ID 0x00c0
+#define ACC_CORE_OF_TXFIFO_DLC 0x00c4
+#define ACC_CORE_OF_TXFIFO_DATA_0 0x00c8
+#define ACC_CORE_OF_TXFIFO_DATA_1 0x00cc
+
+#define ACC_REG_CONTROL_MASK_MODE_RESETMODE BIT(0)
+#define ACC_REG_CONTROL_MASK_MODE_LOM BIT(1)
+#define ACC_REG_CONTROL_MASK_MODE_STM BIT(2)
+#define ACC_REG_CONTROL_MASK_MODE_TRANSEN BIT(5)
+#define ACC_REG_CONTROL_MASK_MODE_TS BIT(6)
+#define ACC_REG_CONTROL_MASK_MODE_SCHEDULE BIT(7)
+
+#define ACC_REG_CONTROL_MASK_IE_RXTX BIT(8)
+#define ACC_REG_CONTROL_MASK_IE_TXERROR BIT(9)
+#define ACC_REG_CONTROL_MASK_IE_ERRWARN BIT(10)
+#define ACC_REG_CONTROL_MASK_IE_OVERRUN BIT(11)
+#define ACC_REG_CONTROL_MASK_IE_TSI BIT(12)
+#define ACC_REG_CONTROL_MASK_IE_ERRPASS BIT(13)
+#define ACC_REG_CONTROL_MASK_IE_ALI BIT(14)
+#define ACC_REG_CONTROL_MASK_IE_BUSERR BIT(15)
+
+/* BRP and BTR register layout for CAN-Classic version */
+#define ACC_REG_BRP_CL_MASK_BRP GENMASK(8, 0)
+#define ACC_REG_BTR_CL_MASK_TSEG1 GENMASK(3, 0)
+#define ACC_REG_BTR_CL_MASK_TSEG2 GENMASK(18, 16)
+#define ACC_REG_BTR_CL_MASK_SJW GENMASK(25, 24)
+
+/* BRP and BTR register layout for CAN-FD version */
+#define ACC_REG_BRP_FD_MASK_BRP GENMASK(7, 0)
+#define ACC_REG_BTR_FD_MASK_TSEG1 GENMASK(7, 0)
+#define ACC_REG_BTR_FD_MASK_TSEG2 GENMASK(22, 16)
+#define ACC_REG_BTR_FD_MASK_SJW GENMASK(30, 24)
+
+/* 256 BM_MSGs of 32 byte size */
+#define ACC_CORE_DMAMSG_SIZE 32U
+#define ACC_CORE_DMABUF_SIZE (256U * ACC_CORE_DMAMSG_SIZE)
+
+enum acc_bmmsg_id {
+ BM_MSG_ID_RXTXDONE = 0x01,
+ BM_MSG_ID_TXABORT = 0x02,
+ BM_MSG_ID_OVERRUN = 0x03,
+ BM_MSG_ID_BUSERR = 0x04,
+ BM_MSG_ID_ERRPASSIVE = 0x05,
+ BM_MSG_ID_ERRWARN = 0x06,
+ BM_MSG_ID_TIMESLICE = 0x07,
+ BM_MSG_ID_HWTIMER = 0x08,
+ BM_MSG_ID_HOTPLUG = 0x09,
+};
+
+/* The struct acc_bmmsg_* structure declarations that follow here provide
+ * access to the ring buffer of bus master messages maintained by the FPGA
+ * bus master engine. All bus master messages have the same size of
+ * ACC_CORE_DMAMSG_SIZE and a minimum alignment of ACC_CORE_DMAMSG_SIZE in
+ * memory.
+ *
+ * All structure members are naturally aligned. Therefore we should not need
+ * a __packed attribute. All struct acc_bmmsg_* declarations have at least
+ * reserved* members to fill the structure to the full ACC_CORE_DMAMSG_SIZE.
+ *
+ * A violation of this property due to padding is detected at compile time
+ * by static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE).
+ */
+
+struct acc_bmmsg_rxtxdone {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u32 id;
+ struct {
+ u8 len;
+ u8 txdfifo_idx;
+ u8 zeroes8;
+ u8 reserved;
+ } acc_dlc;
+ u8 data[CAN_MAX_DLEN];
+ /* Time stamps in struct acc_ov::timestamp_frequency ticks. */
+ u64 ts;
+};
+
+struct acc_bmmsg_txabort {
+ u8 msg_id;
+ u8 txfifo_level;
+ u16 abort_mask;
+ u8 txtsfifo_level;
+ u8 reserved2[1];
+ u16 abort_mask_txts;
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_overrun {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 lost_cnt;
+ u8 reserved1;
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_buserr {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 ecc;
+ u8 reserved1;
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reg_status;
+ u32 reg_btr;
+ u32 reserved3[2];
+};
+
+struct acc_bmmsg_errstatechange {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reg_status;
+ u32 reserved3[3];
+};
+
+struct acc_bmmsg_timeslice {
+ u8 msg_id;
+ u8 txfifo_level;
+ u8 reserved1[2];
+ u8 txtsfifo_level;
+ u8 reserved2[3];
+ u64 ts;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_hwtimer {
+ u8 msg_id;
+ u8 reserved1[3];
+ u32 reserved2[1];
+ u64 timer;
+ u32 reserved3[4];
+};
+
+struct acc_bmmsg_hotplug {
+ u8 msg_id;
+ u8 reserved1[3];
+ u32 reserved2[7];
+};
+
+union acc_bmmsg {
+ u8 msg_id;
+ struct acc_bmmsg_rxtxdone rxtxdone;
+ struct acc_bmmsg_txabort txabort;
+ struct acc_bmmsg_overrun overrun;
+ struct acc_bmmsg_buserr buserr;
+ struct acc_bmmsg_errstatechange errstatechange;
+ struct acc_bmmsg_timeslice timeslice;
+ struct acc_bmmsg_hwtimer hwtimer;
+};
+
+/* Check size of union acc_bmmsg to be of expected size. */
+static_assert(sizeof(union acc_bmmsg) == ACC_CORE_DMAMSG_SIZE);
+
+struct acc_bmfifo {
+ const union acc_bmmsg *messages;
+ /* irq_cnt points to a u32 value where the esdACC FPGA deposits
+ * the bm_fifo head index in coherent DMA memory. Only bits 7..0
+ * are valid. Use READ_ONCE() to access this memory location.
+ */
+ const u32 *irq_cnt;
+ u32 local_irq_cnt;
+ u32 msg_fifo_tail;
+};
+
+struct acc_core {
+ void __iomem *addr;
+ struct net_device *netdev;
+ struct acc_bmfifo bmfifo;
+ u8 tx_fifo_size;
+ u8 tx_fifo_head;
+ u8 tx_fifo_tail;
+};
+
+struct acc_ov {
+ void __iomem *addr;
+ struct acc_bmfifo bmfifo;
+ u32 timestamp_frequency;
+ u32 core_frequency;
+ u16 version;
+ u16 features;
+ u8 total_cores;
+ u8 active_cores;
+};
+
+struct acc_net_priv {
+ struct can_priv can; /* must be the first member! */
+ struct acc_core *core;
+ struct acc_ov *ov;
+};
+
+static inline u32 acc_read32(struct acc_core *core, unsigned short offs)
+{
+ return ioread32be(core->addr + offs);
+}
+
+static inline void acc_write32(struct acc_core *core,
+ unsigned short offs, u32 v)
+{
+ iowrite32be(v, core->addr + offs);
+}
+
+static inline void acc_write32_noswap(struct acc_core *core,
+ unsigned short offs, u32 v)
+{
+ iowrite32(v, core->addr + offs);
+}
+
+static inline void acc_set_bits(struct acc_core *core,
+ unsigned short offs, u32 mask)
+{
+ u32 v = acc_read32(core, offs);
+
+ v |= mask;
+ acc_write32(core, offs, v);
+}
+
+static inline void acc_clear_bits(struct acc_core *core,
+ unsigned short offs, u32 mask)
+{
+ u32 v = acc_read32(core, offs);
+
+ v &= ~mask;
+ acc_write32(core, offs, v);
+}
+
+static inline int acc_resetmode_entered(struct acc_core *core)
+{
+ u32 ctrl = acc_read32(core, ACC_CORE_OF_CTRL_MODE);
+
+ return (ctrl & ACC_REG_CONTROL_MASK_MODE_RESETMODE) != 0;
+}
+
+static inline u32 acc_ov_read32(struct acc_ov *ov, unsigned short offs)
+{
+ return ioread32be(ov->addr + offs);
+}
+
+static inline void acc_ov_write32(struct acc_ov *ov,
+ unsigned short offs, u32 v)
+{
+ iowrite32be(v, ov->addr + offs);
+}
+
+static inline void acc_ov_set_bits(struct acc_ov *ov,
+ unsigned short offs, u32 b)
+{
+ u32 v = acc_ov_read32(ov, offs);
+
+ v |= b;
+ acc_ov_write32(ov, offs, v);
+}
+
+static inline void acc_ov_clear_bits(struct acc_ov *ov,
+ unsigned short offs, u32 b)
+{
+ u32 v = acc_ov_read32(ov, offs);
+
+ v &= ~b;
+ acc_ov_write32(ov, offs, v);
+}
+
+static inline void acc_reset_fpga(struct acc_ov *ov)
+{
+ acc_ov_write32(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_FPGA_RESET);
+
+ /* (Re-)start add-on detection on the I2C bus and wait for its completion */
+ acc_ov_set_bits(ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_I2C_ENABLE);
+ mdelay(ACC_I2C_ADDON_DETECT_DELAY_MS);
+}
+
+void acc_init_ov(struct acc_ov *ov, struct device *dev);
+void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores,
+ const void *mem);
+int acc_open(struct net_device *netdev);
+int acc_close(struct net_device *netdev);
+netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+int acc_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec);
+int acc_set_mode(struct net_device *netdev, enum can_mode mode);
+int acc_set_bittiming(struct net_device *netdev);
+irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index a57005faa04f..f81b598147b3 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -27,7 +27,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256U
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
-#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4UL
+#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
#define KVASER_PCIEFD_DMA_COUNT 2U
#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
@@ -47,12 +47,19 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015
#define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016
+/* Xilinx based devices */
+#define KVASER_PCIEFD_M2_4CAN_DEVICE_ID 0x0017
+#define KVASER_PCIEFD_8CAN_DEVICE_ID 0x0019
+
/* Altera SerDes Enable 64-bit DMA address translation */
#define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0)
/* SmartFusion2 SerDes LSB address translation mask */
#define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12)
+/* Xilinx SerDes LSB address translation mask */
+#define KVASER_PCIEFD_XILINX_DMA_LSB_MASK GENMASK(31, 12)
+
/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
@@ -281,6 +288,8 @@ static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie,
dma_addr_t addr, int index);
static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
dma_addr_t addr, int index);
+static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index);
struct kvaser_pciefd_address_offset {
u32 serdes;
@@ -335,6 +344,18 @@ static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offse
.kcan_ch1 = 0x142000,
};
+static const struct kvaser_pciefd_address_offset kvaser_pciefd_xilinx_address_offset = {
+ .serdes = 0x00208,
+ .pci_ien = 0x102004,
+ .pci_irq = 0x102008,
+ .sysid = 0x100000,
+ .loopback = 0x103000,
+ .kcan_srb_fifo = 0x120000,
+ .kcan_srb = 0x121000,
+ .kcan_ch0 = 0x140000,
+ .kcan_ch1 = 0x142000,
+};
+
static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = {
.kcan_rx0 = BIT(4),
.kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) },
@@ -347,6 +368,12 @@ static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
.all = GENMASK(19, 16) | BIT(4),
};
+static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = {
+ .kcan_rx0 = BIT(4),
+ .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
+ .all = GENMASK(19, 16) | BIT(4),
+};
+
static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera,
};
@@ -355,6 +382,10 @@ static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = {
.kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2,
};
+static const struct kvaser_pciefd_dev_ops kvaser_pciefd_xilinx_dev_ops = {
+ .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_xilinx,
+};
+
static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = {
.address_offset = &kvaser_pciefd_altera_address_offset,
.irq_mask = &kvaser_pciefd_altera_irq_mask,
@@ -367,6 +398,12 @@ static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = {
.ops = &kvaser_pciefd_sf2_dev_ops,
};
+static const struct kvaser_pciefd_driver_data kvaser_pciefd_xilinx_driver_data = {
+ .address_offset = &kvaser_pciefd_xilinx_address_offset,
+ .irq_mask = &kvaser_pciefd_xilinx_irq_mask,
+ .ops = &kvaser_pciefd_xilinx_dev_ops,
+};
+
struct kvaser_pciefd_can {
struct can_priv can;
struct kvaser_pciefd *kv_pcie;
@@ -457,6 +494,14 @@ static struct pci_device_id kvaser_pciefd_id_table[] = {
.driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data,
},
{
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_M2_4CAN_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
+ },
+ {
+ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_8CAN_DEVICE_ID),
+ .driver_data = (kernel_ulong_t)&kvaser_pciefd_xilinx_driver_data,
+ },
+ {
0,
},
};
@@ -1035,6 +1080,21 @@ static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie,
iowrite32(msb, serdes_base + 0x4);
}
+static void kvaser_pciefd_write_dma_map_xilinx(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int index)
+{
+ void __iomem *serdes_base;
+ u32 lsb = addr & KVASER_PCIEFD_XILINX_DMA_LSB_MASK;
+ u32 msb = 0x0;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ msb = addr >> 32;
+#endif
+ serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index;
+ iowrite32(msb, serdes_base);
+ iowrite32(lsb, serdes_base + 0x4);
+}
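+/* Editor's note: in contrast to the SF2 variant above, this SerDes layout
+ * takes the MSB word at offset 0 and the LSB word at offset 0x4.
+ */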
+
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
int i;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 16ecc11c7f62..14b231c4d7ec 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -255,6 +255,7 @@ enum m_can_reg {
#define TXESC_TBDS_64B 0x7
/* Tx Event FIFO Configuration (TXEFC) */
+#define TXEFC_EFWM_MASK GENMASK(29, 24)
#define TXEFC_EFS_MASK GENMASK(21, 16)
/* Tx Event FIFO Status (TXEFS) */
@@ -320,6 +321,12 @@ struct id_and_dlc {
u32 dlc;
};
+struct m_can_fifo_element {
+ u32 id;
+ u32 dlc;
+ u8 data[CANFD_MAX_DLEN];
+};
+
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
{
return cdev->ops->read_reg(cdev, reg);
@@ -372,16 +379,6 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
-static inline bool _m_can_tx_fifo_full(u32 txfqs)
-{
- return !!(txfqs & TXFQS_TFQF);
-}
-
-static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
-{
- return _m_can_tx_fifo_full(m_can_read(cdev, M_CAN_TXFQS));
-}
-
static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
u32 cccr = m_can_read(cdev, M_CAN_CCCR);
@@ -416,15 +413,48 @@ static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
}
}
+static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts)
+{
+ if (cdev->active_interrupts == interrupts)
+ return;
+ cdev->ops->write_reg(cdev, M_CAN_IE, interrupts);
+ cdev->active_interrupts = interrupts;
+}
+
+static void m_can_coalescing_disable(struct m_can_classdev *cdev)
+{
+ u32 new_interrupts = cdev->active_interrupts | IR_RF0N | IR_TEFN;
+
+ if (!cdev->net->irq)
+ return;
+
+ hrtimer_cancel(&cdev->hrtimer);
+ m_can_interrupt_enable(cdev, new_interrupts);
+}
+
static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Start hrtimer\n");
+ hrtimer_start(&cdev->hrtimer,
+ ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
+ HRTIMER_MODE_REL_PINNED);
+ }
+
/* Only interrupt line 0 is used in this driver */
m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}
static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
+ m_can_coalescing_disable(cdev);
m_can_write(cdev, M_CAN_ILE, 0x0);
+ cdev->active_interrupts = 0x0;
+
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Stop hrtimer\n");
+ hrtimer_cancel(&cdev->hrtimer);
+ }
}
/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
@@ -444,18 +474,26 @@ static u32 m_can_get_timestamp(struct m_can_classdev *cdev)
static void m_can_clean(struct net_device *net)
{
struct m_can_classdev *cdev = netdev_priv(net);
+ unsigned long irqflags;
- if (cdev->tx_skb) {
- int putidx = 0;
+ if (cdev->tx_ops) {
+ for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+ if (!cdev->tx_ops[i].skb)
+ continue;
- net->stats.tx_errors++;
- if (cdev->version > 30)
- putidx = FIELD_GET(TXFQS_TFQPI_MASK,
- m_can_read(cdev, M_CAN_TXFQS));
-
- can_free_echo_skb(cdev->net, putidx, NULL);
- cdev->tx_skb = NULL;
+ net->stats.tx_errors++;
+ cdev->tx_ops[i].skb = NULL;
+ }
}
+
+ for (int i = 0; i != cdev->can.echo_skb_max; ++i)
+ can_free_echo_skb(cdev->net, i, NULL);
+
+ netdev_reset_queue(cdev->net);
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ cdev->tx_fifo_in_flight = 0;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
}
/* For peripherals, pass skb to rx-offload, which will push skb from
@@ -1007,23 +1045,60 @@ static int m_can_poll(struct napi_struct *napi, int quota)
* echo. timestamp is used for peripherals to ensure correct ordering
* by rx-offload, and is ignored for non-peripherals.
*/
-static void m_can_tx_update_stats(struct m_can_classdev *cdev,
- unsigned int msg_mark,
- u32 timestamp)
+static unsigned int m_can_tx_update_stats(struct m_can_classdev *cdev,
+ unsigned int msg_mark, u32 timestamp)
{
struct net_device *dev = cdev->net;
struct net_device_stats *stats = &dev->stats;
+ unsigned int frame_len;
if (cdev->is_peripheral)
stats->tx_bytes +=
can_rx_offload_get_echo_skb_queue_timestamp(&cdev->offload,
msg_mark,
timestamp,
- NULL);
+ &frame_len);
else
- stats->tx_bytes += can_get_echo_skb(dev, msg_mark, NULL);
+ stats->tx_bytes += can_get_echo_skb(dev, msg_mark, &frame_len);
stats->tx_packets++;
+
+ return frame_len;
+}
+
+static void m_can_finish_tx(struct m_can_classdev *cdev, int transmitted,
+ unsigned int transmitted_frame_len)
+{
+ unsigned long irqflags;
+
+ netdev_completed_queue(cdev->net, transmitted, transmitted_frame_len);
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ if (cdev->tx_fifo_in_flight >= cdev->tx_fifo_size && transmitted > 0)
+ netif_wake_queue(cdev->net);
+ cdev->tx_fifo_in_flight -= transmitted;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+}
+
+static netdev_tx_t m_can_start_tx(struct m_can_classdev *cdev)
+{
+ unsigned long irqflags;
+ int tx_fifo_in_flight;
+
+ spin_lock_irqsave(&cdev->tx_handling_spinlock, irqflags);
+ tx_fifo_in_flight = cdev->tx_fifo_in_flight + 1;
+ if (tx_fifo_in_flight >= cdev->tx_fifo_size) {
+ netif_stop_queue(cdev->net);
+ if (tx_fifo_in_flight > cdev->tx_fifo_size) {
+ netdev_err_once(cdev->net, "hard_xmit called while TX FIFO full\n");
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+ return NETDEV_TX_BUSY;
+ }
+ }
+ cdev->tx_fifo_in_flight = tx_fifo_in_flight;
+ spin_unlock_irqrestore(&cdev->tx_handling_spinlock, irqflags);
+
+ return NETDEV_TX_OK;
}
static int m_can_echo_tx_event(struct net_device *dev)
@@ -1035,6 +1110,8 @@ static int m_can_echo_tx_event(struct net_device *dev)
int i = 0;
int err = 0;
unsigned int msg_mark;
+ int processed = 0;
+ unsigned int processed_frame_len = 0;
struct m_can_classdev *cdev = netdev_priv(dev);
@@ -1063,25 +1140,62 @@ static int m_can_echo_tx_event(struct net_device *dev)
fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);
/* update stats */
- m_can_tx_update_stats(cdev, msg_mark, timestamp);
+ processed_frame_len += m_can_tx_update_stats(cdev, msg_mark,
+ timestamp);
+
+ ++processed;
}
if (ack_fgi != -1)
m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
ack_fgi));
+ m_can_finish_tx(cdev, processed, processed_frame_len);
+
return err;
}
+static void m_can_coalescing_update(struct m_can_classdev *cdev, u32 ir)
+{
+ u32 new_interrupts = cdev->active_interrupts;
+ bool enable_rx_timer = false;
+ bool enable_tx_timer = false;
+
+ if (!cdev->net->irq)
+ return;
+
+ if (cdev->rx_coalesce_usecs_irq > 0 && (ir & (IR_RF0N | IR_RF0W))) {
+ enable_rx_timer = true;
+ new_interrupts &= ~IR_RF0N;
+ }
+ if (cdev->tx_coalesce_usecs_irq > 0 && (ir & (IR_TEFN | IR_TEFW))) {
+ enable_tx_timer = true;
+ new_interrupts &= ~IR_TEFN;
+ }
+ if (!enable_rx_timer && !hrtimer_active(&cdev->hrtimer))
+ new_interrupts |= IR_RF0N;
+ if (!enable_tx_timer && !hrtimer_active(&cdev->hrtimer))
+ new_interrupts |= IR_TEFN;
+
+ m_can_interrupt_enable(cdev, new_interrupts);
+ if (enable_rx_timer | enable_tx_timer)
+ hrtimer_start(&cdev->hrtimer, cdev->irq_timer_wait,
+ HRTIMER_MODE_REL);
+}
+
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct m_can_classdev *cdev = netdev_priv(dev);
u32 ir;
- if (pm_runtime_suspended(cdev->dev))
+ if (pm_runtime_suspended(cdev->dev)) {
+ m_can_coalescing_disable(cdev);
return IRQ_NONE;
+ }
+
ir = m_can_read(cdev, M_CAN_IR);
+ m_can_coalescing_update(cdev, ir);
if (!ir)
return IRQ_NONE;
@@ -1096,13 +1210,17 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
* - state change IRQ
* - bus error IRQ and bus error reporting
*/
- if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
+ if (ir & (IR_RF0N | IR_RF0W | IR_ERR_ALL_30X)) {
cdev->irqstatus = ir;
if (!cdev->is_peripheral) {
m_can_disable_all_interrupts(cdev);
napi_schedule(&cdev->napi);
- } else if (m_can_rx_peripheral(dev, ir) < 0) {
- goto out_fail;
+ } else {
+ int pkts;
+
+ pkts = m_can_rx_peripheral(dev, ir);
+ if (pkts < 0)
+ goto out_fail;
}
}
@@ -1110,21 +1228,18 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (ir & IR_TC) {
/* Transmission Complete Interrupt */
u32 timestamp = 0;
+ unsigned int frame_len;
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
- m_can_tx_update_stats(cdev, 0, timestamp);
- netif_wake_queue(dev);
+ frame_len = m_can_tx_update_stats(cdev, 0, timestamp);
+ m_can_finish_tx(cdev, 1, frame_len);
}
} else {
- if (ir & IR_TEFN) {
+ if (ir & (IR_TEFN | IR_TEFW)) {
/* New TX Event FIFO element arrived */
if (m_can_echo_tx_event(dev) != 0)
goto out_fail;
-
- if (netif_queue_stopped(dev) &&
- !m_can_tx_fifo_full(cdev))
- netif_wake_queue(dev);
}
}
@@ -1138,6 +1253,15 @@ out_fail:
return IRQ_HANDLED;
}
+static enum hrtimer_restart m_can_coalescing_timer(struct hrtimer *timer)
+{
+ struct m_can_classdev *cdev = container_of(timer, struct m_can_classdev, hrtimer);
+
+ irq_wake_thread(cdev->net->irq, cdev->net);
+
+ return HRTIMER_NORESTART;
+}
+
static const struct can_bittiming_const m_can_bittiming_const_30X = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
@@ -1276,9 +1400,8 @@ static int m_can_chip_config(struct net_device *dev)
}
/* Disable unused interrupts */
- interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TEFW | IR_TFE |
- IR_TCF | IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N |
- IR_RF0F | IR_RF0W);
+ interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF |
+ IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F);
m_can_config_endisable(cdev, true);
@@ -1315,6 +1438,8 @@ static int m_can_chip_config(struct net_device *dev)
} else {
/* Full TX Event FIFO is used */
m_can_write(cdev, M_CAN_TXEFC,
+ FIELD_PREP(TXEFC_EFWM_MASK,
+ cdev->tx_max_coalesced_frames_irq) |
FIELD_PREP(TXEFC_EFS_MASK,
cdev->mcfg[MRAM_TXE].num) |
cdev->mcfg[MRAM_TXE].off);
@@ -1322,6 +1447,7 @@ static int m_can_chip_config(struct net_device *dev)
/* rx fifo configuration, blocking mode, fifo size 1 */
m_can_write(cdev, M_CAN_RXF0C,
+ FIELD_PREP(RXFC_FWM_MASK, cdev->rx_max_coalesced_frames_irq) |
FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
cdev->mcfg[MRAM_RXF0].off);
@@ -1380,7 +1506,7 @@ static int m_can_chip_config(struct net_device *dev)
else
interrupts &= ~(IR_ERR_LEC_31X);
}
- m_can_write(cdev, M_CAN_IE, interrupts);
+ m_can_interrupt_enable(cdev, interrupts);
/* route all interrupts to INT0 */
m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
@@ -1413,15 +1539,16 @@ static int m_can_start(struct net_device *dev)
if (ret)
return ret;
+ netdev_queue_set_dql_min_limit(netdev_get_tx_queue(cdev->net, 0),
+ cdev->tx_max_coalesced_frames);
+
cdev->can.state = CAN_STATE_ERROR_ACTIVE;
m_can_enable_all_interrupts(cdev);
- if (!dev->irq) {
- dev_dbg(cdev->dev, "Start hrtimer\n");
- hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
- HRTIMER_MODE_REL_PINNED);
- }
+ if (cdev->version > 30)
+ cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK,
+ m_can_read(cdev, M_CAN_TXFQS));
return 0;
}
@@ -1577,11 +1704,6 @@ static void m_can_stop(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
- if (!dev->irq) {
- dev_dbg(cdev->dev, "Stop hrtimer\n");
- hrtimer_cancel(&cdev->hrtimer);
- }
-
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
@@ -1605,8 +1727,9 @@ static int m_can_close(struct net_device *dev)
m_can_clk_stop(cdev);
free_irq(dev->irq, dev);
+ m_can_clean(dev);
+
if (cdev->is_peripheral) {
- cdev->tx_skb = NULL;
destroy_workqueue(cdev->tx_wq);
cdev->tx_wq = NULL;
can_rx_offload_disable(&cdev->offload);
@@ -1619,57 +1742,42 @@ static int m_can_close(struct net_device *dev)
return 0;
}
-static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
-{
- struct m_can_classdev *cdev = netdev_priv(dev);
- /*get wrap around for loopback skb index */
- unsigned int wrap = cdev->can.echo_skb_max;
- int next_idx;
-
- /* calculate next index */
- next_idx = (++putidx >= wrap ? 0 : putidx);
-
- /* check if occupied */
- return !!cdev->can.echo_skb[next_idx];
-}
-
-static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
+static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
{
- struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u8 len_padded = DIV_ROUND_UP(cf->len, 4);
+ struct m_can_fifo_element fifo_element;
struct net_device *dev = cdev->net;
- struct sk_buff *skb = cdev->tx_skb;
- struct id_and_dlc fifo_header;
u32 cccr, fdflags;
- u32 txfqs;
int err;
- int putidx;
-
- cdev->tx_skb = NULL;
+ u32 putidx;
+ unsigned int frame_len = can_skb_get_frame_len(skb);
/* Generate ID field for TX buffer Element */
/* Common to all supported M_CAN versions */
if (cf->can_id & CAN_EFF_FLAG) {
- fifo_header.id = cf->can_id & CAN_EFF_MASK;
- fifo_header.id |= TX_BUF_XTD;
+ fifo_element.id = cf->can_id & CAN_EFF_MASK;
+ fifo_element.id |= TX_BUF_XTD;
} else {
- fifo_header.id = ((cf->can_id & CAN_SFF_MASK) << 18);
+ fifo_element.id = ((cf->can_id & CAN_SFF_MASK) << 18);
}
if (cf->can_id & CAN_RTR_FLAG)
- fifo_header.id |= TX_BUF_RTR;
+ fifo_element.id |= TX_BUF_RTR;
if (cdev->version == 30) {
netif_stop_queue(dev);
- fifo_header.dlc = can_fd_len2dlc(cf->len) << 16;
+ fifo_element.dlc = can_fd_len2dlc(cf->len) << 16;
/* Write the frame ID, DLC, and payload to the FIFO element. */
- err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_header, 2);
+ err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, &fifo_element, 2);
if (err)
goto out_fail;
err = m_can_fifo_write(cdev, 0, M_CAN_FIFO_DATA,
- cf->data, DIV_ROUND_UP(cf->len, 4));
+ cf->data, len_padded);
if (err)
goto out_fail;
@@ -1690,33 +1798,15 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
}
m_can_write(cdev, M_CAN_TXBTIE, 0x1);
- can_put_echo_skb(skb, dev, 0, 0);
+ can_put_echo_skb(skb, dev, 0, frame_len);
m_can_write(cdev, M_CAN_TXBAR, 0x1);
/* End of xmit function for version 3.0.x */
} else {
/* Transmit routine for version >= v3.1.x */
- txfqs = m_can_read(cdev, M_CAN_TXFQS);
-
- /* Check if FIFO full */
- if (_m_can_tx_fifo_full(txfqs)) {
- /* This shouldn't happen */
- netif_stop_queue(dev);
- netdev_warn(dev,
- "TX queue active although FIFO is full.");
-
- if (cdev->is_peripheral) {
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- } else {
- return NETDEV_TX_BUSY;
- }
- }
-
/* get put index for frame */
- putidx = FIELD_GET(TXFQS_TFQPI_MASK, txfqs);
+ putidx = cdev->tx_fifo_putidx;
/* Construct DLC Field, with CAN-FD configuration.
* Use the put index of the fifo as the message marker,
@@ -1731,30 +1821,32 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
fdflags |= TX_BUF_BRS;
}
- fifo_header.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
+ fifo_element.dlc = FIELD_PREP(TX_BUF_MM_MASK, putidx) |
FIELD_PREP(TX_BUF_DLC_MASK, can_fd_len2dlc(cf->len)) |
fdflags | TX_BUF_EFC;
- err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, &fifo_header, 2);
- if (err)
- goto out_fail;
- err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA,
- cf->data, DIV_ROUND_UP(cf->len, 4));
+ memcpy_and_pad(fifo_element.data, CANFD_MAX_DLEN, &cf->data,
+ cf->len, 0);
+
+ err = m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID,
+ &fifo_element, 2 + len_padded);
if (err)
goto out_fail;
/* Push loopback echo.
* Will be looped back on TX interrupt based on message marker
*/
- can_put_echo_skb(skb, dev, putidx, 0);
+ can_put_echo_skb(skb, dev, putidx, frame_len);
- /* Enable TX FIFO element to start transfer */
- m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
-
- /* stop network queue if fifo full */
- if (m_can_tx_fifo_full(cdev) ||
- m_can_next_echo_skb_occupied(dev, putidx))
- netif_stop_queue(dev);
+ if (cdev->is_peripheral) {
+ /* Delay enabling TX FIFO element */
+ cdev->tx_peripheral_submit |= BIT(putidx);
+ } else {
+ /* Enable TX FIFO element to start transfer */
+ m_can_write(cdev, M_CAN_TXBAR, BIT(putidx));
+ }
+ cdev->tx_fifo_putidx = (++cdev->tx_fifo_putidx >= cdev->can.echo_skb_max ?
+ 0 : cdev->tx_fifo_putidx);
}
return NETDEV_TX_OK;
@@ -1765,46 +1857,91 @@ out_fail:
return NETDEV_TX_BUSY;
}
+static void m_can_tx_submit(struct m_can_classdev *cdev)
+{
+ if (cdev->version == 30)
+ return;
+ if (!cdev->is_peripheral)
+ return;
+
+ m_can_write(cdev, M_CAN_TXBAR, cdev->tx_peripheral_submit);
+ cdev->tx_peripheral_submit = 0;
+}
+
static void m_can_tx_work_queue(struct work_struct *ws)
{
- struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
- tx_work);
+ struct m_can_tx_op *op = container_of(ws, struct m_can_tx_op, work);
+ struct m_can_classdev *cdev = op->cdev;
+ struct sk_buff *skb = op->skb;
+
+ op->skb = NULL;
+ m_can_tx_handler(cdev, skb);
+ if (op->submit)
+ m_can_tx_submit(cdev);
+}
+
+static void m_can_tx_queue_skb(struct m_can_classdev *cdev, struct sk_buff *skb,
+ bool submit)
+{
+ cdev->tx_ops[cdev->next_tx_op].skb = skb;
+ cdev->tx_ops[cdev->next_tx_op].submit = submit;
+ queue_work(cdev->tx_wq, &cdev->tx_ops[cdev->next_tx_op].work);
+
+ ++cdev->next_tx_op;
+ if (cdev->next_tx_op >= cdev->tx_fifo_size)
+ cdev->next_tx_op = 0;
+}
+
+static netdev_tx_t m_can_start_peripheral_xmit(struct m_can_classdev *cdev,
+ struct sk_buff *skb)
+{
+ bool submit;
+
+ ++cdev->nr_txs_without_submit;
+ if (cdev->nr_txs_without_submit >= cdev->tx_max_coalesced_frames ||
+ !netdev_xmit_more()) {
+ cdev->nr_txs_without_submit = 0;
+ submit = true;
+ } else {
+ submit = false;
+ }
+ m_can_tx_queue_skb(cdev, skb, submit);
- m_can_tx_handler(cdev);
+ return NETDEV_TX_OK;
}
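
The peripheral xmit path batches doorbell writes: TXBAR is only written once
tx_max_coalesced_frames skbs have been queued or the stack's
netdev_xmit_more() hint says no further frames are coming. A minimal
standalone model of that submit decision; the batch size and call sequence
are invented for illustration.

/* Toy model of the doorbell batching decision above: submit when either
 * the configured batch size is reached or the stack has no more packets
 * queued (the xmit_more hint).
 */
#include <stdbool.h>
#include <stdio.h>

static int pending;

static bool should_submit(int batch, bool xmit_more)
{
	if (++pending >= batch || !xmit_more) {
		pending = 0;
		return true;	/* write TXBAR, ring the doorbell */
	}
	return false;		/* defer, more frames are coming */
}

int main(void)
{
	bool more[] = { true, true, true, false, true, false };

	for (int i = 0; i < 6; i++)
		printf("frame %d: %s\n", i,
		       should_submit(3, more[i]) ? "submit" : "defer");
	return 0;
}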
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ unsigned int frame_len;
+ netdev_tx_t ret;
if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
- if (cdev->is_peripheral) {
- if (cdev->tx_skb) {
- netdev_err(dev, "hard_xmit called while tx busy\n");
- return NETDEV_TX_BUSY;
- }
+ frame_len = can_skb_get_frame_len(skb);
- if (cdev->can.state == CAN_STATE_BUS_OFF) {
- m_can_clean(dev);
- } else {
- /* Need to stop the queue to avoid numerous requests
- * from being sent. Suggested improvement is to create
- * a queueing mechanism that will queue the skbs and
- * process them in order.
- */
- cdev->tx_skb = skb;
- netif_stop_queue(cdev->net);
- queue_work(cdev->tx_wq, &cdev->tx_work);
- }
- } else {
- cdev->tx_skb = skb;
- return m_can_tx_handler(cdev);
+ if (cdev->can.state == CAN_STATE_BUS_OFF) {
+ m_can_clean(cdev->net);
+ return NETDEV_TX_OK;
}
- return NETDEV_TX_OK;
+ ret = m_can_start_tx(cdev);
+ if (ret != NETDEV_TX_OK)
+ return ret;
+
+ netdev_sent_queue(dev, frame_len);
+
+ if (cdev->is_peripheral)
+ ret = m_can_start_peripheral_xmit(cdev, skb);
+ else
+ ret = m_can_tx_handler(cdev, skb);
+
+ if (ret != NETDEV_TX_OK)
+ netdev_completed_queue(dev, 1, frame_len);
+
+ return ret;
}
static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
@@ -1844,15 +1981,17 @@ static int m_can_open(struct net_device *dev)
/* register interrupt handler */
if (cdev->is_peripheral) {
- cdev->tx_skb = NULL;
- cdev->tx_wq = alloc_workqueue("mcan_wq",
- WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+ cdev->tx_wq = alloc_ordered_workqueue("mcan_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM);
if (!cdev->tx_wq) {
err = -ENOMEM;
goto out_wq_fail;
}
- INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+ for (int i = 0; i != cdev->tx_fifo_size; ++i) {
+ cdev->tx_ops[i].cdev = cdev;
+ INIT_WORK(&cdev->tx_ops[i].work, m_can_tx_work_queue);
+ }
err = request_threaded_irq(dev->irq, NULL, m_can_isr,
IRQF_ONESHOT,
@@ -1900,7 +2039,108 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static int m_can_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ ec->rx_max_coalesced_frames_irq = cdev->rx_max_coalesced_frames_irq;
+ ec->rx_coalesce_usecs_irq = cdev->rx_coalesce_usecs_irq;
+ ec->tx_max_coalesced_frames = cdev->tx_max_coalesced_frames;
+ ec->tx_max_coalesced_frames_irq = cdev->tx_max_coalesced_frames_irq;
+ ec->tx_coalesce_usecs_irq = cdev->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static int m_can_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ if (cdev->can.state != CAN_STATE_STOPPED) {
+ netdev_err(dev, "Device is in use, please shut it down first\n");
+ return -EBUSY;
+ }
+
+ if (ec->rx_max_coalesced_frames_irq > cdev->mcfg[MRAM_RXF0].num) {
+ netdev_err(dev, "rx-frames-irq %u greater than the RX FIFO %u\n",
+ ec->rx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_RXF0].num);
+ return -EINVAL;
+ }
+ if ((ec->rx_max_coalesced_frames_irq == 0) != (ec->rx_coalesce_usecs_irq == 0)) {
+ netdev_err(dev, "rx-frames-irq and rx-usecs-irq can only be set together\n");
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames-irq %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames_irq > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames-irq %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames_irq,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
+ if ((ec->tx_max_coalesced_frames_irq == 0) != (ec->tx_coalesce_usecs_irq == 0)) {
+ netdev_err(dev, "tx-frames-irq and tx-usecs-irq can only be set together\n");
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXE].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX event FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXE].num);
+ return -EINVAL;
+ }
+ if (ec->tx_max_coalesced_frames > cdev->mcfg[MRAM_TXB].num) {
+ netdev_err(dev, "tx-frames %u greater than the TX FIFO %u\n",
+ ec->tx_max_coalesced_frames,
+ cdev->mcfg[MRAM_TXB].num);
+ return -EINVAL;
+ }
+ if (ec->rx_coalesce_usecs_irq != 0 && ec->tx_coalesce_usecs_irq != 0 &&
+ ec->rx_coalesce_usecs_irq != ec->tx_coalesce_usecs_irq) {
+ netdev_err(dev, "rx-usecs-irq %u needs to be equal to tx-usecs-irq %u if both are enabled\n",
+ ec->rx_coalesce_usecs_irq,
+ ec->tx_coalesce_usecs_irq);
+ return -EINVAL;
+ }
+
+ cdev->rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
+ cdev->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+ cdev->tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+ cdev->tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
+ cdev->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+
+ if (cdev->rx_coalesce_usecs_irq)
+ cdev->irq_timer_wait =
+ ns_to_ktime(cdev->rx_coalesce_usecs_irq * NSEC_PER_USEC);
+ else
+ cdev->irq_timer_wait =
+ ns_to_ktime(cdev->tx_coalesce_usecs_irq * NSEC_PER_USEC);
+
+ return 0;
+}
+
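+
For reference, these are the parameters that `ethtool -C` drives. Below is a
hedged userspace sketch that programs them through the legacy
ETHTOOL_SCOALESCE ioctl; the interface name and values are assumptions, the
interface must be down (the driver requires CAN_STATE_STOPPED), CAP_NET_ADMIN
is needed, and a real tool would fetch current values with ETHTOOL_GCOALESCE
first rather than leaving the untouched fields zeroed.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	/* Example values chosen to satisfy the validation above:
	 * frames-irq and usecs-irq set together, rx/tx usecs equal.
	 */
	struct ethtool_coalesce ec = {
		.cmd = ETHTOOL_SCOALESCE,
		.rx_max_coalesced_frames_irq = 32,
		.rx_coalesce_usecs_irq = 500,
		.tx_max_coalesced_frames = 8,
		.tx_max_coalesced_frames_irq = 16,
		.tx_coalesce_usecs_irq = 500,
	};
	struct ifreq ifr = {};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);	/* assumed ifname */
	ifr.ifr_data = (void *)&ec;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("SIOCETHTOOL");
	close(fd);
	return 0;
}

The equivalent command line would be roughly `ethtool -C can0 rx-frames-irq
32 rx-usecs-irq 500 tx-frames 8 tx-frames-irq 16 tx-usecs-irq 500`.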
static const struct ethtool_ops m_can_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_coalesce = m_can_get_coalesce,
+ .set_coalesce = m_can_set_coalesce,
+};
+
+static const struct ethtool_ops m_can_ethtool_ops_polling = {
.get_ts_info = ethtool_op_get_ts_info,
};
@@ -1908,7 +2148,10 @@ static int register_m_can_dev(struct net_device *dev)
{
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
- dev->ethtool_ops = &m_can_ethtool_ops;
+ if (dev->irq)
+ dev->ethtool_ops = &m_can_ethtool_ops;
+ else
+ dev->ethtool_ops = &m_can_ethtool_ops_polling;
return register_candev(dev);
}
@@ -2056,12 +2299,23 @@ int m_can_class_register(struct m_can_classdev *cdev)
{
int ret;
- if (cdev->pm_clock_support) {
- ret = m_can_clk_start(cdev);
- if (ret)
- return ret;
+ cdev->tx_fifo_size = max(1, min(cdev->mcfg[MRAM_TXB].num,
+ cdev->mcfg[MRAM_TXE].num));
+ if (cdev->is_peripheral) {
+ cdev->tx_ops =
+ devm_kzalloc(cdev->dev,
+ cdev->tx_fifo_size * sizeof(*cdev->tx_ops),
+ GFP_KERNEL);
+ if (!cdev->tx_ops) {
+ dev_err(cdev->dev, "Failed to allocate tx_ops for workqueue\n");
+ return -ENOMEM;
+ }
}
+ ret = m_can_clk_start(cdev);
+ if (ret)
+ return ret;
+
if (cdev->is_peripheral) {
ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
NAPI_POLL_WEIGHT);
@@ -2069,8 +2323,15 @@ int m_can_class_register(struct m_can_classdev *cdev)
goto clk_disable;
}
- if (!cdev->net->irq)
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Polling enabled, initialize hrtimer");
+ hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
cdev->hrtimer.function = &hrtimer_callback;
+ } else {
+ hrtimer_init(&cdev->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cdev->hrtimer.function = m_can_coalescing_timer;
+ }
ret = m_can_dev_setup(cdev);
if (ret)
@@ -2121,7 +2382,15 @@ int m_can_class_suspend(struct device *dev)
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
- m_can_stop(ndev);
+
+ /* leave the chip running with rx interrupt enabled if it is
+ * used as a wake-up source.
+ */
+ if (cdev->pm_wake_source)
+ m_can_write(cdev, M_CAN_IE, IR_RF0N);
+ else
+ m_can_stop(ndev);
+
m_can_clk_stop(cdev);
}
@@ -2148,11 +2417,15 @@ int m_can_class_resume(struct device *dev)
ret = m_can_clk_start(cdev);
if (ret)
return ret;
- ret = m_can_start(ndev);
- if (ret) {
- m_can_clk_stop(cdev);
- return ret;
+ if (cdev->pm_wake_source) {
+ m_can_write(cdev, M_CAN_IE, cdev->active_interrupts);
+ } else {
+ ret = m_can_start(ndev);
+ if (ret) {
+ m_can_clk_stop(cdev);
+ return ret;
+ }
}
netif_device_attach(ndev);
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 520e14277dff..3a9edc292593 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -70,6 +70,13 @@ struct m_can_ops {
int (*init)(struct m_can_classdev *cdev);
};
+struct m_can_tx_op {
+ struct m_can_classdev *cdev;
+ struct work_struct work;
+ struct sk_buff *skb;
+ bool submit;
+};
+
struct m_can_classdev {
struct can_priv can;
struct can_rx_offload offload;
@@ -80,18 +87,42 @@ struct m_can_classdev {
struct clk *cclk;
struct workqueue_struct *tx_wq;
- struct work_struct tx_work;
- struct sk_buff *tx_skb;
struct phy *transceiver;
+ ktime_t irq_timer_wait;
+
struct m_can_ops *ops;
int version;
u32 irqstatus;
int pm_clock_support;
+ int pm_wake_source;
int is_peripheral;
+ // Cached M_CAN_IE register content
+ u32 active_interrupts;
+ u32 rx_max_coalesced_frames_irq;
+ u32 rx_coalesce_usecs_irq;
+ u32 tx_max_coalesced_frames;
+ u32 tx_max_coalesced_frames_irq;
+ u32 tx_coalesce_usecs_irq;
+
+ // Store this internally to avoid fetch delays on peripheral chips
+ u32 tx_fifo_putidx;
+
+ /* Protects shared state between start_xmit and m_can_isr */
+ spinlock_t tx_handling_spinlock;
+ int tx_fifo_in_flight;
+
+ struct m_can_tx_op *tx_ops;
+ int tx_fifo_size;
+ int next_tx_op;
+
+ int nr_txs_without_submit;
+ /* bitfield of fifo elements that will be submitted together */
+ u32 tx_peripheral_submit;
+
struct mram_cfg mcfg[MRAM_CFG_NUM];
struct hrtimer hrtimer;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index f2219aa2824b..45400de4163d 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -125,6 +125,7 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
+ mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = id->driver_data;
mcan_class->ops = &m_can_pci_ops;
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index cdb28d6a092c..df0367124b4c 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -109,10 +109,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
ret = irq;
goto probe_fail;
}
- } else {
- dev_dbg(mcan_class->dev, "Polling enabled, initialize hrtimer");
- hrtimer_init(&mcan_class->hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
}
/* message ram could be shared */
@@ -143,6 +139,7 @@ static int m_can_plat_probe(struct platform_device *pdev)
mcan_class->net->irq = irq;
mcan_class->pm_clock_support = 1;
+ mcan_class->pm_wake_source = 0;
mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
mcan_class->dev = &pdev->dev;
mcan_class->transceiver = transceiver;
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index ae8c42f5debd..a42600dac70d 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -411,6 +411,7 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv->spi = spi;
mcan_class->pm_clock_support = 0;
+ mcan_class->pm_wake_source = device_property_read_bool(&spi->dev, "wakeup-source");
mcan_class->can.clock.freq = freq;
mcan_class->dev = &spi->dev;
mcan_class->ops = &tcan4x5x_ops;
@@ -459,6 +460,9 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_power;
}
+ if (mcan_class->pm_wake_source)
+ device_init_wakeup(&spi->dev, true);
+
ret = m_can_class_register(mcan_class);
if (ret) {
dev_err(&spi->dev, "Failed registering m_can device %pe\n",
@@ -487,6 +491,29 @@ static void tcan4x5x_can_remove(struct spi_device *spi)
m_can_class_free_dev(priv->cdev.net);
}
+static int __maybe_unused tcan4x5x_suspend(struct device *dev)
+{
+ struct m_can_classdev *cdev = dev_get_drvdata(dev);
+ struct spi_device *spi = to_spi_device(dev);
+
+ if (cdev->pm_wake_source)
+ enable_irq_wake(spi->irq);
+
+ return m_can_class_suspend(dev);
+}
+
+static int __maybe_unused tcan4x5x_resume(struct device *dev)
+{
+ struct m_can_classdev *cdev = dev_get_drvdata(dev);
+ struct spi_device *spi = to_spi_device(dev);
+ int ret = m_can_class_resume(dev);
+
+ if (cdev->pm_wake_source)
+ disable_irq_wake(spi->irq);
+
+ return ret;
+}
+
static const struct of_device_id tcan4x5x_of_match[] = {
{
.compatible = "ti,tcan4x5x",
@@ -505,11 +532,15 @@ static const struct spi_device_id tcan4x5x_id_table[] = {
};
MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table);
+static const struct dev_pm_ops tcan4x5x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tcan4x5x_suspend, tcan4x5x_resume)
+};
+
static struct spi_driver tcan4x5x_can_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = tcan4x5x_of_match,
- .pm = NULL,
+ .pm = &tcan4x5x_pm_ops,
},
.id_table = tcan4x5x_id_table,
.probe = tcan4x5x_can_probe,
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 32286f861a19..721df91cdbfb 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -436,7 +436,7 @@ int softing_startstop(struct net_device *dev, int up)
return ret;
bus_bitmask_start = 0;
- if (dev && up)
+ if (up)
/* prepare to start this bus as well */
bus_bitmask_start |= (1 << priv->index);
/* bring netdevs down */
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index eebf967f4711..1d9057dc44f2 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -837,7 +837,7 @@ static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
return err;
if (trec & MCP251XFD_REG_TREC_TXBO)
- bec->txerr = 256;
+ bec->txerr = CAN_BUS_OFF_THRESHOLD;
else
bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index d1450722cb3c..bd58c636d465 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -100,6 +100,7 @@ config CAN_KVASER_USB
- Scania VCI2 (if you have the Kvaser logo on top)
- Kvaser BlackBird v2
- Kvaser Leaf Pro HS v2
+ - Kvaser Leaf v3
- Kvaser Hybrid CAN/LIN
- Kvaser Hybrid 2xCAN/LIN
- Kvaser Hybrid Pro CAN/LIN
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 95b0fdb602c8..65c962f76898 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -385,7 +385,7 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
static int gs_cmd_reset(struct gs_can *dev)
{
struct gs_device_mode dm = {
- .mode = GS_CAN_MODE_RESET,
+ .mode = cpu_to_le32(GS_CAN_MODE_RESET),
};
return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_MODE,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 71ef4db5c09f..8faf8a462c05 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -88,6 +88,7 @@
#define USB_USBCAN_PRO_4HS_PRODUCT_ID 0x0114
#define USB_HYBRID_CANLIN_PRODUCT_ID 0x0115
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 0x0116
+#define USB_LEAF_V3_PRODUCT_ID 0x0117
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
.quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
@@ -235,6 +236,8 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID),
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_V3_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 98c669ad5141..f7fabba707ea 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -119,7 +119,7 @@ static int vxcan_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(priv->peer);
- iflink = peer ? peer->ifindex : 0;
+ iflink = peer ? READ_ONCE(peer->ifindex) : 0;
rcu_read_unlock();
return iflink;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 3722eaa84234..fae0120473f8 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -31,6 +31,7 @@
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/u64_stats_sync.h>
#define DRIVER_NAME "xilinx_can"
@@ -58,6 +59,13 @@ enum xcan_reg {
*/
XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
+
+ /* only on AXI CAN cores */
+ XCAN_ECC_CFG_OFFSET = 0xC8, /* ECC Configuration */
+ XCAN_TXTLFIFO_ECC_OFFSET = 0xCC, /* TXTL FIFO ECC error counter */
+ XCAN_TXOLFIFO_ECC_OFFSET = 0xD0, /* TXOL FIFO ECC error counter */
+ XCAN_RXFIFO_ECC_OFFSET = 0xD4, /* RX FIFO ECC error counter */
+
XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
@@ -124,6 +132,18 @@ enum xcan_reg {
#define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */
+#define XCAN_IXR_E2BERX_MASK BIT(23) /* RX FIFO two bit ECC error */
+#define XCAN_IXR_E1BERX_MASK BIT(22) /* RX FIFO one bit ECC error */
+#define XCAN_IXR_E2BETXOL_MASK BIT(21) /* TXOL FIFO two bit ECC error */
+#define XCAN_IXR_E1BETXOL_MASK BIT(20) /* TXOL FIFO One bit ECC error */
+#define XCAN_IXR_E2BETXTL_MASK BIT(19) /* TXTL FIFO Two bit ECC error */
+#define XCAN_IXR_E1BETXTL_MASK BIT(18) /* TXTL FIFO One bit ECC error */
+#define XCAN_IXR_ECC_MASK (XCAN_IXR_E2BERX_MASK | \
+ XCAN_IXR_E1BERX_MASK | \
+ XCAN_IXR_E2BETXOL_MASK | \
+ XCAN_IXR_E1BETXOL_MASK | \
+ XCAN_IXR_E2BETXTL_MASK | \
+ XCAN_IXR_E1BETXTL_MASK)
#define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */
@@ -137,6 +157,11 @@ enum xcan_reg {
#define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */
#define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */
#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
+#define XCAN_ECC_CFG_REECRX_MASK BIT(2) /* Reset RX FIFO ECC error counters */
+#define XCAN_ECC_CFG_REECTXOL_MASK BIT(1) /* Reset TXOL FIFO ECC error counters */
+#define XCAN_ECC_CFG_REECTXTL_MASK BIT(0) /* Reset TXTL FIFO ECC error counters */
+#define XCAN_ECC_1BIT_CNT_MASK GENMASK(15, 0) /* FIFO ECC 1bit count mask */
+#define XCAN_ECC_2BIT_CNT_MASK GENMASK(31, 16) /* FIFO ECC 2bit count mask */
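
Each ECC counter register packs two saturating 16-bit counters into one
32-bit word, as the two GENMASK()s above describe. A standalone extraction
example; FIELD_GET() performs the same shift-and-mask in the kernel, and the
register value here is invented.

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x00030002;	/* example raw register value */
	unsigned int one_bit = reg & 0xffff;		/* bits 15:0 */
	unsigned int two_bit = (reg >> 16) & 0xffff;	/* bits 31:16 */

	printf("1-bit errors: %u, 2-bit errors: %u\n", one_bit, two_bit);
	return 0;
}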
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
@@ -202,6 +227,14 @@ struct xcan_devtype_data {
* @devtype: Device type specific constants
* @transceiver: Optional pointer to associated CAN transceiver
* @rstc: Pointer to reset control
+ * @ecc_enable: ECC enable flag
+ * @syncp: synchronization for ECC error stats
+ * @ecc_rx_2_bit_errors: RXFIFO 2bit ECC count
+ * @ecc_rx_1_bit_errors: RXFIFO 1bit ECC count
+ * @ecc_txol_2_bit_errors: TXOLFIFO 2bit ECC count
+ * @ecc_txol_1_bit_errors: TXOLFIFO 1bit ECC count
+ * @ecc_txtl_2_bit_errors: TXTLFIFO 2bit ECC count
+ * @ecc_txtl_1_bit_errors: TXTLFIFO 1bit ECC count
*/
struct xcan_priv {
struct can_priv can;
@@ -221,6 +254,14 @@ struct xcan_priv {
struct xcan_devtype_data devtype;
struct phy *transceiver;
struct reset_control *rstc;
+ bool ecc_enable;
+ struct u64_stats_sync syncp;
+ u64_stats_t ecc_rx_2_bit_errors;
+ u64_stats_t ecc_rx_1_bit_errors;
+ u64_stats_t ecc_txol_2_bit_errors;
+ u64_stats_t ecc_txol_1_bit_errors;
+ u64_stats_t ecc_txtl_2_bit_errors;
+ u64_stats_t ecc_txtl_1_bit_errors;
};
/* CAN Bittiming constants as per Xilinx CAN specs */
@@ -308,6 +349,24 @@ static const struct can_tdc_const xcan_tdc_const_canfd2 = {
.tdcf_max = 0,
};
+enum xcan_stats_type {
+ XCAN_ECC_RX_2_BIT_ERRORS,
+ XCAN_ECC_RX_1_BIT_ERRORS,
+ XCAN_ECC_TXOL_2_BIT_ERRORS,
+ XCAN_ECC_TXOL_1_BIT_ERRORS,
+ XCAN_ECC_TXTL_2_BIT_ERRORS,
+ XCAN_ECC_TXTL_1_BIT_ERRORS,
+};
+
+static const char xcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+ [XCAN_ECC_RX_2_BIT_ERRORS] = "ecc_rx_2_bit_errors",
+ [XCAN_ECC_RX_1_BIT_ERRORS] = "ecc_rx_1_bit_errors",
+ [XCAN_ECC_TXOL_2_BIT_ERRORS] = "ecc_txol_2_bit_errors",
+ [XCAN_ECC_TXOL_1_BIT_ERRORS] = "ecc_txol_1_bit_errors",
+ [XCAN_ECC_TXTL_2_BIT_ERRORS] = "ecc_txtl_2_bit_errors",
+ [XCAN_ECC_TXTL_1_BIT_ERRORS] = "ecc_txtl_1_bit_errors",
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -523,6 +582,9 @@ static int xcan_chip_start(struct net_device *ndev)
XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
+ if (priv->ecc_enable)
+ ier |= XCAN_IXR_ECC_MASK;
+
if (priv->devtype.flags & XCAN_FLAG_RXMNF)
ier |= XCAN_IXR_RXMNF_MASK;
@@ -1127,6 +1189,54 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
priv->can.can_stats.bus_error++;
}
+ if (priv->ecc_enable && isr & XCAN_IXR_ECC_MASK) {
+ u32 reg_rx_ecc, reg_txol_ecc, reg_txtl_ecc;
+
+ reg_rx_ecc = priv->read_reg(priv, XCAN_RXFIFO_ECC_OFFSET);
+ reg_txol_ecc = priv->read_reg(priv, XCAN_TXOLFIFO_ECC_OFFSET);
+ reg_txtl_ecc = priv->read_reg(priv, XCAN_TXTLFIFO_ECC_OFFSET);
+
+ /* The counter reaches its maximum at 0xffff and does not overflow.
+ * Accept the small race window between reading and resetting ECC counters.
+ */
+ priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK |
+ XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK);
+
+ u64_stats_update_begin(&priv->syncp);
+
+ if (isr & XCAN_IXR_E2BERX_MASK) {
+ u64_stats_add(&priv->ecc_rx_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_rx_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BERX_MASK) {
+ u64_stats_add(&priv->ecc_rx_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_rx_ecc));
+ }
+
+ if (isr & XCAN_IXR_E2BETXOL_MASK) {
+ u64_stats_add(&priv->ecc_txol_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txol_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BETXOL_MASK) {
+ u64_stats_add(&priv->ecc_txol_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txol_ecc));
+ }
+
+ if (isr & XCAN_IXR_E2BETXTL_MASK) {
+ u64_stats_add(&priv->ecc_txtl_2_bit_errors,
+ FIELD_GET(XCAN_ECC_2BIT_CNT_MASK, reg_txtl_ecc));
+ }
+
+ if (isr & XCAN_IXR_E1BETXTL_MASK) {
+ u64_stats_add(&priv->ecc_txtl_1_bit_errors,
+ FIELD_GET(XCAN_ECC_1BIT_CNT_MASK, reg_txtl_ecc));
+ }
+
+ u64_stats_update_end(&priv->syncp);
+ }
+
if (cf.can_id) {
struct can_frame *skb_cf;
struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);
@@ -1354,8 +1464,8 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = (struct net_device *)dev_id;
struct xcan_priv *priv = netdev_priv(ndev);
+ u32 isr_errors, mask;
u32 isr, ier;
- u32 isr_errors;
u32 rx_int_mask = xcan_rx_int_mask(priv);
/* Get the interrupt status from Xilinx CAN */
@@ -1374,10 +1484,15 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
if (isr & XCAN_IXR_TXOK_MASK)
xcan_tx_interrupt(ndev, isr);
+ mask = XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
+ XCAN_IXR_RXMNF_MASK;
+
+ if (priv->ecc_enable)
+ mask |= XCAN_IXR_ECC_MASK;
+
/* Check for the type of error interrupt and process it */
- isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
- XCAN_IXR_RXMNF_MASK);
+ isr_errors = isr & mask;
if (isr_errors) {
priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
xcan_err_interrupt(ndev, isr);
@@ -1546,6 +1661,43 @@ static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
return 0;
}
+static void xcan_get_strings(struct net_device *ndev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &xcan_priv_flags_strings,
+ sizeof(xcan_priv_flags_strings));
+ }
+}
+
+static int xcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(xcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void xcan_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+
+ data[XCAN_ECC_RX_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_2_bit_errors);
+ data[XCAN_ECC_RX_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_rx_1_bit_errors);
+ data[XCAN_ECC_TXOL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_2_bit_errors);
+ data[XCAN_ECC_TXOL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txol_1_bit_errors);
+ data[XCAN_ECC_TXTL_2_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_2_bit_errors);
+ data[XCAN_ECC_TXTL_1_BIT_ERRORS] = u64_stats_read(&priv->ecc_txtl_1_bit_errors);
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
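+
The do/while above pairs with the u64_stats_update_begin()/end() calls in the
error interrupt: on 32-bit SMP kernels the sync object is a sequence counter,
and a reader retries its snapshot if a writer ran concurrently. A
single-threaded toy model of the protocol follows; it is not the real
u64_stats implementation, which compiles away entirely on 64-bit.

#include <stdio.h>

static unsigned int seq;
static unsigned long long counter;

static void writer_update(unsigned long long delta)
{
	seq++;			/* odd: update in progress */
	counter += delta;
	seq++;			/* even again: update complete */
}

static unsigned int fetch_begin(void)
{
	while (seq & 1)
		;		/* writer active; a real reader spins here */
	return seq;
}

static int fetch_retry(unsigned int start)
{
	return seq != start;	/* a writer ran: discard and retry */
}

int main(void)
{
	unsigned int start;
	unsigned long long snapshot;

	do {
		start = fetch_begin();
		snapshot = counter;
	} while (fetch_retry(start));

	writer_update(3);
	printf("snapshot=%llu now=%llu\n", snapshot, counter);
	return 0;
}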
+
static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
@@ -1555,6 +1707,9 @@ static const struct net_device_ops xcan_netdev_ops = {
static const struct ethtool_ops xcan_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
+ .get_strings = xcan_get_strings,
+ .get_sset_count = xcan_get_sset_count,
+ .get_ethtool_stats = xcan_get_ethtool_stats,
};
/**
@@ -1793,6 +1948,7 @@ static int xcan_probe(struct platform_device *pdev)
return -ENOMEM;
priv = netdev_priv(ndev);
+ priv->ecc_enable = of_property_read_bool(pdev->dev.of_node, "xlnx,has-ecc");
priv->dev = &pdev->dev;
priv->can.bittiming_const = devtype->bittiming_const;
priv->can.do_set_mode = xcan_do_set_mode;
@@ -1909,6 +2065,11 @@ static int xcan_probe(struct platform_device *pdev)
priv->reg_base, ndev->irq, priv->can.clock.freq,
hw_tx_max, priv->tx_max);
+ if (priv->ecc_enable) {
+ /* Reset FIFO ECC counters */
+ priv->write_reg(priv, XCAN_ECC_CFG_OFFSET, XCAN_ECC_CFG_REECRX_MASK |
+ XCAN_ECC_CFG_REECTXOL_MASK | XCAN_ECC_CFG_REECTXTL_MASK);
+ }
return 0;
err_disableclks:
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index f8c1d73b251d..3092b391031a 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -48,7 +48,7 @@ config NET_DSA_MT7530
config NET_DSA_MT7530_MDIO
tristate "MediaTek MT7530 MDIO interface driver"
depends on NET_DSA_MT7530
- imply MEDIATEK_GE_PHY
+ select MEDIATEK_GE_PHY
select PCS_MTK_LYNXI
help
This enables support for the MediaTek MT7530 and MT7531 switch
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0d628b35fd5c..b2eeff04f4c8 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -559,6 +559,19 @@ static void b53_port_set_learning(struct b53_device *dev, int port,
b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg);
}
+static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+{
+ struct b53_device *dev = ds->priv;
+ u16 reg;
+
+ b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
+ if (enable)
+ reg |= BIT(port);
+ else
+ reg &= ~BIT(port);
+ b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
+}
+
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
@@ -1257,7 +1270,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
+ struct ethtool_keee *p = &dev->ports[port].eee;
u8 rgmii_ctrl = 0, reg = 0, off;
bool tx_pause = false;
bool rx_pause = false;
@@ -2193,21 +2206,6 @@ void b53_mirror_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_mirror_del);
-void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
-{
- struct b53_device *dev = ds->priv;
- u16 reg;
-
- b53_read16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, &reg);
- if (enable)
- reg |= BIT(port);
- else
- reg &= ~BIT(port);
- b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
-}
-EXPORT_SYMBOL(b53_eee_enable_set);
-
-
/* Returns 0 if EEE was not enabled, or 1 otherwise
*/
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
@@ -2224,27 +2222,21 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
}
EXPORT_SYMBOL(b53_eee_init);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
- u16 reg;
if (is5325(dev) || is5365(dev))
return -EOPNOTSUPP;
- b53_read16(dev, B53_EEE_PAGE, B53_EEE_LPI_INDICATE, &reg);
- e->eee_enabled = p->eee_enabled;
- e->eee_active = !!(reg & BIT(port));
-
return 0;
}
EXPORT_SYMBOL(b53_get_mac_eee);
-int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e)
{
struct b53_device *dev = ds->priv;
- struct ethtool_eee *p = &dev->ports[port].eee;
+ struct ethtool_keee *p = &dev->ports[port].eee;
if (is5325(dev) || is5365(dev))
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index fdcfd5081c28..c13a907947f1 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -95,7 +95,7 @@ struct b53_pcs {
struct b53_port {
u16 vlan_ctl_mask;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
struct b53_vlan {
@@ -395,9 +395,8 @@ void b53_mirror_del(struct dsa_switch *ds, int port,
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
void b53_disable_port(struct dsa_switch *ds, int port);
void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
-void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable);
int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy);
-int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
-int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int b53_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
+int b53_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
#endif
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 4a52ccbe393f..bc77ee9e6d0a 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -835,7 +835,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
bool tx_pause, bool rx_pause)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_eee *p = &priv->dev->ports[port].eee;
+ struct ethtool_keee *p = &priv->dev->ports[port].eee;
u32 reg_rgmii_ctrl = 0;
u32 reg, offset;
diff --git a/drivers/net/dsa/dsa_loop_bdinfo.c b/drivers/net/dsa/dsa_loop_bdinfo.c
index 237066d30704..14ca42491512 100644
--- a/drivers/net/dsa/dsa_loop_bdinfo.c
+++ b/drivers/net/dsa/dsa_loop_bdinfo.c
@@ -32,4 +32,5 @@ static int __init dsa_loop_bdinfo_init(void)
}
arch_initcall(dsa_loop_bdinfo_init)
+MODULE_DESCRIPTION("DSA mock-up switch driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 61b71bcfe396..14923535ca7e 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -49,9 +49,9 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
mutex_lock(&dev->alu_mutex);
ctrl_addr = IND_ACC_TABLE(table) | addr;
- ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
if (!ret)
- ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
mutex_unlock(&dev->alu_mutex);
@@ -633,6 +633,57 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
}
/**
+ * ksz879x_get_loopback - KSZ879x specific function to get loopback
+ * configuration status for a specific port
+ * @dev: Pointer to the device structure
+ * @port: Port number to query
+ * @val: Pointer to store the result
+ *
+ * This function reads the SMI registers to determine whether loopback mode
+ * is enabled for a specific port.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz879x_get_loopback(struct ksz_device *dev, u16 port,
+ u16 *val)
+{
+ u8 stat3;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_3, &stat3);
+ if (ret)
+ return ret;
+
+ if (stat3 & PORT_PHY_LOOPBACK)
+ *val |= BMCR_LOOPBACK;
+
+ return 0;
+}
+
+/**
+ * ksz879x_set_loopback - KSZ879x specific function to set loopback mode for
+ * a specific port
+ * @dev: Pointer to the device structure.
+ * @port: Port number to modify.
+ * @val: Value indicating whether to enable or disable loopback mode.
+ *
+ * This function translates loopback bit of the BMCR register into the
+ * corresponding hardware register bit value and writes it to the SMI interface.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz879x_set_loopback(struct ksz_device *dev, u16 port, u16 val)
+{
+ u8 stat3 = 0;
+
+ if (val & BMCR_LOOPBACK)
+ stat3 |= PORT_PHY_LOOPBACK;
+
+ return ksz_prmw8(dev, port, REG_PORT_STATUS_3, PORT_PHY_LOOPBACK,
+ stat3);
+}
+
+/**
* ksz8_r_phy_ctrl - Translates and reads from the SMI interface to a MIIM PHY
* Control register (Reg. 31).
* @dev: The KSZ device instance.
@@ -676,59 +727,122 @@ static int ksz8_r_phy_ctrl(struct ksz_device *dev, int port, u16 *val)
return 0;
}
+/**
+ * ksz8_r_phy_bmcr - Translates and reads from the SMI interface to a MIIM PHY
+ * Basic mode control register (Reg. 0).
+ * @dev: The KSZ device instance.
+ * @port: The port number to be read.
+ * @val: The value read from the SMI interface.
+ *
+ * This function reads the SMI interface and translates the hardware register
+ * bit values into their corresponding control settings for a MIIM PHY Basic
+ * mode control register.
+ *
+ * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873
+ * -------------------------------------------------------------------
+ * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit
+ * ----------------------------+-----------------------------+----------------
+ * Bit 15 - Soft Reset | 0xF/4 | Not supported
+ * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY)
+ * Bit 13 - Force 100 | 0xC/6 = 0xC/6
+ * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7
+ * Bit 11 - Power Down | 0xD/3 = 0xD/3
+ * Bit 10 - PHY Isolate | 0xF/5 | Not supported
+ * Bit 9 - Restart AN | 0xD/5 = 0xD/5
+ * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5
+ * Bit 7 - Collision Test/Res. | Not supported | Not supported
+ * Bit 6 - Reserved | Not supported | Not supported
+ * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7
+ * Bit 4 - Force MDI | 0xD/1 = 0xD/1
+ * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2
+ * Bit 2 - Disable Far-End F. | ???? | 0xD/4
+ * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6
+ * Bit 0 - Disable LED | 0xD/7 = 0xD/7
+ * -------------------------------------------------------------------
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz8_r_phy_bmcr(struct ksz_device *dev, u16 port, u16 *val)
+{
+ const u16 *regs = dev->info->regs;
+ u8 restart, speed, ctrl;
+ int ret;
+
+ *val = 0;
+
+ ret = ksz_pread8(dev, port, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, port, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, port, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
+ if (ctrl & PORT_FORCE_100_MBIT)
+ *val |= BMCR_SPEED100;
+
+ if (ksz_is_ksz88x3(dev)) {
+ if (restart & KSZ8873_PORT_PHY_LOOPBACK)
+ *val |= BMCR_LOOPBACK;
+
+ if ((ctrl & PORT_AUTO_NEG_ENABLE))
+ *val |= BMCR_ANENABLE;
+ } else {
+ ret = ksz879x_get_loopback(dev, port, val);
+ if (ret)
+ return ret;
+
+ if (!(ctrl & PORT_AUTO_NEG_DISABLE))
+ *val |= BMCR_ANENABLE;
+ }
+
+ if (restart & PORT_POWER_DOWN)
+ *val |= BMCR_PDOWN;
+
+ if (restart & PORT_AUTO_NEG_RESTART)
+ *val |= BMCR_ANRESTART;
+
+ if (ctrl & PORT_FORCE_FULL_DUPLEX)
+ *val |= BMCR_FULLDPLX;
+
+ if (speed & PORT_HP_MDIX)
+ *val |= KSZ886X_BMCR_HP_MDIX;
+
+ if (restart & PORT_FORCE_MDIX)
+ *val |= KSZ886X_BMCR_FORCE_MDI;
+
+ if (restart & PORT_AUTO_MDIX_DISABLE)
+ *val |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
+
+ if (restart & PORT_TX_DISABLE)
+ *val |= KSZ886X_BMCR_DISABLE_TRANSMIT;
+
+ if (restart & PORT_LED_OFF)
+ *val |= KSZ886X_BMCR_DISABLE_LED;
+
+ return 0;
+}
+
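+
The helper synthesizes a standard MII BMCR word out of several 8-bit switch
registers per the mapping table above. A standalone decoder for such a word,
using the standard bit values from linux/mii.h (redefined here so the sketch
compiles on its own; the vendor-specific low bits are omitted):

#include <stdio.h>

#define BMCR_FULLDPLX	0x0100
#define BMCR_ANRESTART	0x0200
#define BMCR_PDOWN	0x0800
#define BMCR_ANENABLE	0x1000
#define BMCR_SPEED100	0x2000
#define BMCR_LOOPBACK	0x4000

static void decode_bmcr(unsigned int val)
{
	printf("bmcr 0x%04x:%s%s%s%s%s%s\n", val,
	       val & BMCR_LOOPBACK  ? " loopback"   : "",
	       val & BMCR_SPEED100  ? " 100mbit"    : " 10mbit",
	       val & BMCR_ANENABLE  ? " autoneg"    : " forced",
	       val & BMCR_PDOWN     ? " power-down" : "",
	       val & BMCR_ANRESTART ? " an-restart" : "",
	       val & BMCR_FULLDPLX  ? " full"       : " half");
}

int main(void)
{
	decode_bmcr(BMCR_SPEED100 | BMCR_ANENABLE | BMCR_FULLDPLX);
	decode_bmcr(BMCR_LOOPBACK | BMCR_PDOWN);
	return 0;
}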
int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
- u8 restart, speed, ctrl, link;
+ u8 ctrl, link, val1, val2;
int processed = true;
const u16 *regs;
- u8 val1, val2;
u16 data = 0;
- u8 p = phy;
+ u16 p = phy;
int ret;
regs = dev->info->regs;
switch (reg) {
case MII_BMCR:
- ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
- if (ret)
- return ret;
-
- ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- if (ret)
- return ret;
-
- ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ ret = ksz8_r_phy_bmcr(dev, p, &data);
if (ret)
return ret;
-
- if (restart & PORT_PHY_LOOPBACK)
- data |= BMCR_LOOPBACK;
- if (ctrl & PORT_FORCE_100_MBIT)
- data |= BMCR_SPEED100;
- if (ksz_is_ksz88x3(dev)) {
- if ((ctrl & PORT_AUTO_NEG_ENABLE))
- data |= BMCR_ANENABLE;
- } else {
- if (!(ctrl & PORT_AUTO_NEG_DISABLE))
- data |= BMCR_ANENABLE;
- }
- if (restart & PORT_POWER_DOWN)
- data |= BMCR_PDOWN;
- if (restart & PORT_AUTO_NEG_RESTART)
- data |= BMCR_ANRESTART;
- if (ctrl & PORT_FORCE_FULL_DUPLEX)
- data |= BMCR_FULLDPLX;
- if (speed & PORT_HP_MDIX)
- data |= KSZ886X_BMCR_HP_MDIX;
- if (restart & PORT_FORCE_MDIX)
- data |= KSZ886X_BMCR_FORCE_MDI;
- if (restart & PORT_AUTO_MDIX_DISABLE)
- data |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
- if (restart & PORT_TX_DISABLE)
- data |= KSZ886X_BMCR_DISABLE_TRANSMIT;
- if (restart & PORT_LED_OFF)
- data |= KSZ886X_BMCR_DISABLE_LED;
break;
case MII_BMSR:
ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
@@ -860,113 +974,137 @@ static int ksz8_w_phy_ctrl(struct ksz_device *dev, int port, u16 val)
return ret;
}
-int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+/**
+ * ksz8_w_phy_bmcr - Translates and writes to the SMI interface from a MIIM PHY
+ * Basic mode control register (Reg. 0).
+ * @dev: The KSZ device instance.
+ * @port: The port number to be configured.
+ * @val: The register value to be written.
+ *
+ * This function translates control settings from a MIIM PHY Basic mode control
+ * register into their corresponding hardware register bit values for the SMI
+ * interface.
+ *
+ * MIIM Bit Mapping Comparison between KSZ8794 and KSZ8873
+ * -------------------------------------------------------------------
+ * MIIM Bit | KSZ8794 Reg/Bit | KSZ8873 Reg/Bit
+ * ----------------------------+-----------------------------+----------------
+ * Bit 15 - Soft Reset | 0xF/4 | Not supported
+ * Bit 14 - Loopback | 0xD/0 (MAC), 0xF/7 (PHY) ~ 0xD/0 (PHY)
+ * Bit 13 - Force 100 | 0xC/6 = 0xC/6
+ * Bit 12 - AN Enable | 0xC/7 (reverse logic) ~ 0xC/7
+ * Bit 11 - Power Down | 0xD/3 = 0xD/3
+ * Bit 10 - PHY Isolate | 0xF/5 | Not supported
+ * Bit 9 - Restart AN | 0xD/5 = 0xD/5
+ * Bit 8 - Force Full-Duplex | 0xC/5 = 0xC/5
+ * Bit 7 - Collision Test/Res. | Not supported | Not supported
+ * Bit 6 - Reserved | Not supported | Not supported
+ * Bit 5 - Hp_mdix | 0x9/7 ~ 0xF/7
+ * Bit 4 - Force MDI | 0xD/1 = 0xD/1
+ * Bit 3 - Disable MDIX | 0xD/2 = 0xD/2
+ * Bit 2 - Disable Far-End F. | ???? | 0xD/4
+ * Bit 1 - Disable Transmit | 0xD/6 = 0xD/6
+ * Bit 0 - Disable LED | 0xD/7 = 0xD/7
+ * -------------------------------------------------------------------
+ *
+ * Return: 0 on success, error code on failure.
+ */
+static int ksz8_w_phy_bmcr(struct ksz_device *dev, u16 port, u16 val)
{
- u8 restart, speed, ctrl, data;
- const u16 *regs;
- u8 p = phy;
+ u8 restart, speed, ctrl, restart_mask;
+ const u16 *regs = dev->info->regs;
int ret;
- regs = dev->info->regs;
+ /* Do not support PHY reset function. */
+ if (val & BMCR_RESET)
+ return 0;
- switch (reg) {
- case MII_BMCR:
+ speed = 0;
+ if (val & KSZ886X_BMCR_HP_MDIX)
+ speed |= PORT_HP_MDIX;
- /* Do not support PHY reset function. */
- if (val & BMCR_RESET)
- break;
- ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- if (ret)
- return ret;
+ ret = ksz_prmw8(dev, port, regs[P_SPEED_STATUS], PORT_HP_MDIX, speed);
+ if (ret)
+ return ret;
- data = speed;
- if (val & KSZ886X_BMCR_HP_MDIX)
- data |= PORT_HP_MDIX;
- else
- data &= ~PORT_HP_MDIX;
+ ctrl = 0;
+ if (ksz_is_ksz88x3(dev)) {
+ if ((val & BMCR_ANENABLE))
+ ctrl |= PORT_AUTO_NEG_ENABLE;
+ } else {
+ if (!(val & BMCR_ANENABLE))
+ ctrl |= PORT_AUTO_NEG_DISABLE;
- if (data != speed) {
- ret = ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
- if (ret)
- return ret;
- }
+ /* Fiber port does not support auto-negotiation. */
+ if (dev->ports[port].fiber)
+ ctrl |= PORT_AUTO_NEG_DISABLE;
+ }
- ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
- if (ret)
- return ret;
+ if (val & BMCR_SPEED100)
+ ctrl |= PORT_FORCE_100_MBIT;
- data = ctrl;
- if (ksz_is_ksz88x3(dev)) {
- if ((val & BMCR_ANENABLE))
- data |= PORT_AUTO_NEG_ENABLE;
- else
- data &= ~PORT_AUTO_NEG_ENABLE;
- } else {
- if (!(val & BMCR_ANENABLE))
- data |= PORT_AUTO_NEG_DISABLE;
- else
- data &= ~PORT_AUTO_NEG_DISABLE;
-
- /* Fiber port does not support auto-negotiation. */
- if (dev->ports[p].fiber)
- data |= PORT_AUTO_NEG_DISABLE;
- }
+ if (val & BMCR_FULLDPLX)
+ ctrl |= PORT_FORCE_FULL_DUPLEX;
- if (val & BMCR_SPEED100)
- data |= PORT_FORCE_100_MBIT;
- else
- data &= ~PORT_FORCE_100_MBIT;
- if (val & BMCR_FULLDPLX)
- data |= PORT_FORCE_FULL_DUPLEX;
- else
- data &= ~PORT_FORCE_FULL_DUPLEX;
+ ret = ksz_prmw8(dev, port, regs[P_FORCE_CTRL], PORT_FORCE_100_MBIT |
+ /* PORT_AUTO_NEG_ENABLE and PORT_AUTO_NEG_DISABLE are the same
+ * bits
+ */
+ PORT_FORCE_FULL_DUPLEX | PORT_AUTO_NEG_ENABLE, ctrl);
+ if (ret)
+ return ret;
- if (data != ctrl) {
- ret = ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
- if (ret)
- return ret;
- }
+ restart = 0;
+ restart_mask = PORT_LED_OFF | PORT_TX_DISABLE | PORT_AUTO_NEG_RESTART |
+ PORT_POWER_DOWN | PORT_AUTO_MDIX_DISABLE | PORT_FORCE_MDIX;
+
+ if (val & KSZ886X_BMCR_DISABLE_LED)
+ restart |= PORT_LED_OFF;
+
+ if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
+ restart |= PORT_TX_DISABLE;
+
+ if (val & BMCR_ANRESTART)
+ restart |= PORT_AUTO_NEG_RESTART;
+
+ if (val & BMCR_PDOWN)
+ restart |= PORT_POWER_DOWN;
+
+ if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
+ restart |= PORT_AUTO_MDIX_DISABLE;
+
+ if (val & KSZ886X_BMCR_FORCE_MDI)
+ restart |= PORT_FORCE_MDIX;
- ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ksz_is_ksz88x3(dev)) {
+ restart_mask |= KSZ8873_PORT_PHY_LOOPBACK;
+
+ if (val & BMCR_LOOPBACK)
+ restart |= KSZ8873_PORT_PHY_LOOPBACK;
+ } else {
+ ret = ksz879x_set_loopback(dev, port, val);
if (ret)
return ret;
+ }
- data = restart;
- if (val & KSZ886X_BMCR_DISABLE_LED)
- data |= PORT_LED_OFF;
- else
- data &= ~PORT_LED_OFF;
- if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
- data |= PORT_TX_DISABLE;
- else
- data &= ~PORT_TX_DISABLE;
- if (val & BMCR_ANRESTART)
- data |= PORT_AUTO_NEG_RESTART;
- else
- data &= ~(PORT_AUTO_NEG_RESTART);
- if (val & BMCR_PDOWN)
- data |= PORT_POWER_DOWN;
- else
- data &= ~PORT_POWER_DOWN;
- if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
- data |= PORT_AUTO_MDIX_DISABLE;
- else
- data &= ~PORT_AUTO_MDIX_DISABLE;
- if (val & KSZ886X_BMCR_FORCE_MDI)
- data |= PORT_FORCE_MDIX;
- else
- data &= ~PORT_FORCE_MDIX;
- if (val & BMCR_LOOPBACK)
- data |= PORT_PHY_LOOPBACK;
- else
- data &= ~PORT_PHY_LOOPBACK;
+ return ksz_prmw8(dev, port, regs[P_NEG_RESTART_CTRL], restart_mask,
+ restart);
+}
- if (data != restart) {
- ret = ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL],
- data);
- if (ret)
- return ret;
- }
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+{
+ const u16 *regs;
+ u8 ctrl, data;
+ u16 p = phy;
+ int ret;
+
+ regs = dev->info->regs;
+
+ switch (reg) {
+ case MII_BMCR:
+ ret = ksz8_w_phy_bmcr(dev, p, val);
+ if (ret)
+ return ret;
break;
case MII_ADVERTISE:
ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index beca974e0171..7c9341ef73b0 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -265,6 +265,7 @@
#define PORT_AUTO_MDIX_DISABLE BIT(2)
#define PORT_FORCE_MDIX BIT(1)
#define PORT_MAC_LOOPBACK BIT(0)
+#define KSZ8873_PORT_PHY_LOOPBACK BIT(0)
#define REG_PORT_1_STATUS_2 0x1E
#define REG_PORT_2_STATUS_2 0x2E
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index cac4a607e54a..82bebee4615c 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -104,6 +104,10 @@ static const struct of_device_id ksz9477_dt_ids[] = {
.data = &ksz_switch_chips[KSZ8563]
},
{
+ .compatible = "microchip,ksz8567",
+ .data = &ksz_switch_chips[KSZ8567]
+ },
+ {
.compatible = "microchip,ksz9567",
.data = &ksz_switch_chips[KSZ9567]
},
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 245dfb7a7a31..2b510f150dd8 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1476,6 +1476,39 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.gbit_capable = {true, true, true},
},
+ [KSZ8567] = {
+ .chip_id = KSZ8567_CHIP_ID,
+ .dev_name = "KSZ8567",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total port count */
+ .port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {false, false, false, false, false,
+ true, true},
+ },
+
[KSZ9567] = {
.chip_id = KSZ9567_CHIP_ID,
.dev_name = "KSZ9567",
@@ -1864,6 +1897,29 @@ static void ksz_get_strings(struct dsa_switch *ds, int port,
}
}
+/**
+ * ksz_update_port_member - Adjust port forwarding rules based on STP state and
+ * isolation settings.
+ * @dev: A pointer to the struct ksz_device representing the device.
+ * @port: The port number to adjust.
+ *
+ * This function dynamically adjusts the port membership configuration for a
+ * specified port and other device ports, based on Spanning Tree Protocol (STP)
+ * states and port isolation settings. Each port, including the CPU port, has a
+ * membership register, represented as a bitfield, where each bit corresponds
+ * to a port number. A set bit indicates permission to forward frames to that
+ * port. This function iterates over all ports, updating the membership register
+ * to reflect current forwarding permissions:
+ *
+ * 1. Forwards frames only to ports that are part of the same bridge group and
+ * in the BR_STATE_FORWARDING state.
+ * 2. Takes into account the isolation status of ports; ports in the
+ * BR_STATE_FORWARDING state with BR_ISOLATED configuration will not forward
+ * frames to each other, even if they are in the same bridge group.
+ * 3. Ensures that the CPU port is included in the membership based on its
+ * upstream port configuration, allowing for management and control traffic
+ * to flow as required.
+ */
static void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
@@ -1892,7 +1948,14 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
if (other_p->stp_state != BR_STATE_FORWARDING)
continue;
- if (p->stp_state == BR_STATE_FORWARDING) {
+ /* At this point we know that "port" and "other" port [i] are in
+ * the same bridge group and that "other" port [i] is in the
+ * forwarding STP state. If "port" is also in the forwarding STP
+ * state, we can allow forwarding from port [port] to port [i],
+ * unless both ports are isolated.
+ */
+ if (p->stp_state == BR_STATE_FORWARDING &&
+ !(p->isolated && other_p->isolated)) {
val |= BIT(port);
port_member |= BIT(i);
}
@@ -1911,8 +1974,19 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
third_p = &dev->ports[j];
if (third_p->stp_state != BR_STATE_FORWARDING)
continue;
+
third_dp = dsa_to_port(ds, j);
- if (dsa_port_bridge_same(other_dp, third_dp))
+
+ /* Now we update the relation of the "other" port [i] to the
+ * "third" port [j]. We already know that both "other" port [i]
+ * and "third" port [j] are in the forwarding STP state. Before
+ * allowing forwarding from port [i] to port [j], we still need
+ * to check that they are in the same bridge group and not both
+ * isolated.
+ */
+ if (dsa_port_bridge_same(other_dp, third_dp) &&
+ !(other_p->isolated && third_p->isolated))
val |= BIT(j);
}
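Restating the membership rules above as one illustrative predicate (not
part of the driver; the field names match struct ksz_port): forwarding
between two ports requires the same bridge group, both ports
forwarding, and not both isolated. From userspace the isolation bit is
toggled with iproute2, e.g. "bridge link set dev lan1 isolated on".

	/* Forwarding a -> b is allowed iff both ports sit in the same
	 * bridge group, both are in BR_STATE_FORWARDING, and they are
	 * not both marked isolated.
	 */
	static bool may_forward(const struct ksz_port *a,
				const struct ksz_port *b, bool same_bridge)
	{
		return same_bridge &&
		       a->stp_state == BR_STATE_FORWARDING &&
		       b->stp_state == BR_STATE_FORWARDING &&
		       !(a->isolated && b->isolated);
	}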
@@ -2185,6 +2259,8 @@ static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
return ksz_irq_common_setup(dev, pirq);
}
+static int ksz_parse_drive_strength(struct ksz_device *dev);
+
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
@@ -2206,6 +2282,10 @@ static int ksz_setup(struct dsa_switch *ds)
return ret;
}
+ ret = ksz_parse_drive_strength(dev);
+ if (ret)
+ return ret;
+
/* set broadcast storm protection 10% rate */
regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL],
BROADCAST_STORM_RATE,
@@ -2649,6 +2729,7 @@ static void ksz_port_teardown(struct dsa_switch *ds, int port)
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -2664,7 +2745,7 @@ static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- if (flags.mask & ~BR_LEARNING)
+ if (flags.mask & ~(BR_LEARNING | BR_ISOLATED))
return -EINVAL;
return 0;
@@ -2677,8 +2758,12 @@ static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
- if (flags.mask & BR_LEARNING) {
- p->learning = !!(flags.val & BR_LEARNING);
+ if (flags.mask & (BR_LEARNING | BR_ISOLATED)) {
+ if (flags.mask & BR_LEARNING)
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ if (flags.mask & BR_ISOLATED)
+ p->isolated = !!(flags.val & BR_ISOLATED);
/* Make the change take effect immediately */
ksz_port_stp_state_set(ds, port, p->stp_state);
@@ -2705,7 +2790,8 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
dev->chip_id == KSZ9563_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9893;
- if (dev->chip_id == KSZ9477_CHIP_ID ||
+ if (dev->chip_id == KSZ8567_CHIP_ID ||
+ dev->chip_id == KSZ9477_CHIP_ID ||
dev->chip_id == KSZ9896_CHIP_ID ||
dev->chip_id == KSZ9897_CHIP_ID ||
dev->chip_id == KSZ9567_CHIP_ID)
@@ -2813,6 +2899,7 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
case KSZ8830_CHIP_ID:
return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -2839,6 +2926,7 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port)
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -2852,7 +2940,7 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port)
}
static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
int ret;
@@ -2872,7 +2960,7 @@ static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
}
static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
struct ksz_device *dev = ds->priv;
int ret;
@@ -3183,6 +3271,7 @@ static int ksz_switch_detect(struct ksz_device *dev)
case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
case KSZ9567_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case LAN9370_CHIP_ID:
case LAN9371_CHIP_ID:
case LAN9372_CHIP_ID:
@@ -3220,6 +3309,7 @@ static int ksz_cls_flower_add(struct dsa_switch *ds, int port,
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -3239,6 +3329,7 @@ static int ksz_cls_flower_del(struct dsa_switch *ds, int port,
switch (dev->chip_id) {
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -4142,6 +4233,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev)
case KSZ8794_CHIP_ID:
case KSZ8765_CHIP_ID:
case KSZ8563_CHIP_ID:
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
case KSZ9563_CHIP_ID:
case KSZ9567_CHIP_ID:
@@ -4242,10 +4334,6 @@ int ksz_switch_register(struct ksz_device *dev)
for (port_num = 0; port_num < dev->info->port_cnt; ++port_num)
dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
if (dev->dev->of_node) {
- ret = ksz_parse_drive_strength(dev);
- if (ret)
- return ret;
-
ret = of_get_phy_mode(dev->dev->of_node, &interface);
if (ret == 0)
dev->compat_interface = interface;
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 15612101a155..40c11b0d6b62 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -110,6 +110,7 @@ struct ksz_switch_macaddr {
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
bool learning;
+ bool isolated;
int stp_state;
struct phy_device phydev;
@@ -187,6 +188,7 @@ struct ksz_device {
/* List of supported models */
enum ksz_model {
KSZ8563,
+ KSZ8567,
KSZ8795,
KSZ8794,
KSZ8765,
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 6f6d878e742c..c8166fb440ab 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -165,6 +165,10 @@ static const struct of_device_id ksz_dt_ids[] = {
.data = &ksz_switch_chips[KSZ8563]
},
{
+ .compatible = "microchip,ksz8567",
+ .data = &ksz_switch_chips[KSZ8567]
+ },
+ {
.compatible = "microchip,ksz9567",
.data = &ksz_switch_chips[KSZ9567]
},
@@ -204,6 +208,7 @@ static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz9893" },
{ "ksz9563" },
{ "ksz8563" },
+ { "ksz8567" },
{ "ksz9567" },
{ "lan9370" },
{ "lan9371" },
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
index 088533663b83..fa3ee85a99c1 100644
--- a/drivers/net/dsa/mt7530-mdio.c
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -81,17 +81,14 @@ static const struct regmap_bus mt7530_regmap_bus = {
};
static int
-mt7531_create_sgmii(struct mt7530_priv *priv, bool dual_sgmii)
+mt7531_create_sgmii(struct mt7530_priv *priv)
{
struct regmap_config *mt7531_pcs_config[2] = {};
struct phylink_pcs *pcs;
struct regmap *regmap;
int i, ret = 0;
- /* MT7531AE has two SGMII units for port 5 and port 6
- * MT7531BE has only one SGMII unit for port 6
- */
- for (i = dual_sgmii ? 0 : 1; i < 2; i++) {
+ for (i = priv->p5_sgmii ? 0 : 1; i < 2; i++) {
mt7531_pcs_config[i] = devm_kzalloc(priv->dev,
sizeof(struct regmap_config),
GFP_KERNEL);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 3c1f657593a8..678b51f9cea6 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -414,92 +414,57 @@ mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
}
/* Setup port 6 interface mode and TRGMII TX circuit */
-static int
-mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+static void
+mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- u32 ncpo1, ssc_delta, trgint, xtal;
-
- xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
+ u32 ncpo1, ssc_delta, xtal;
- if (xtal == HWTRAP_XTAL_20MHZ) {
- dev_err(priv->dev,
- "%s: MT7530 with a 20MHz XTAL is not supported!\n",
- __func__);
- return -EINVAL;
- }
+ /* Disable the MT7530 TRGMII clocks */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII:
- trgint = 0;
- break;
- case PHY_INTERFACE_MODE_TRGMII:
- trgint = 1;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ssc_delta = 0x57;
- else
- ssc_delta = 0x87;
- if (priv->id == ID_MT7621) {
- /* PLL frequency: 125MHz: 1.0GBit */
- if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0640;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ncpo1 = 0x0a00;
- } else { /* PLL frequency: 250MHz: 2.0Gbit */
- if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0c80;
- if (xtal == HWTRAP_XTAL_25MHZ)
- ncpo1 = 0x1400;
- }
- break;
- default:
- dev_err(priv->dev, "xMII interface %d not supported\n",
- interface);
- return -EINVAL;
+ if (interface == PHY_INTERFACE_MODE_RGMII) {
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
+ P6_INTF_MODE(0));
+ return;
}
- mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
- P6_INTF_MODE(trgint));
+ mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1));
- if (trgint) {
- /* Disable the MT7530 TRGMII clocks */
- core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
- /* Setup the MT7530 TRGMII Tx Clock */
- core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
- core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
- core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
- core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
- core_write(priv, CORE_PLL_GROUP4,
- RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
- RG_SYSPLL_BIAS_LPF_EN);
- core_write(priv, CORE_PLL_GROUP2,
- RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
- RG_SYSPLL_POSDIV(1));
- core_write(priv, CORE_PLL_GROUP7,
- RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
- RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ssc_delta = 0x57;
+ else
+ ssc_delta = 0x87;
- /* Enable the MT7530 TRGMII clocks */
- core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ if (priv->id == ID_MT7621) {
+ /* PLL frequency: 125MHz: 1.0GBit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+ ncpo1 = 0x0640;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x0a00;
+ } else { /* PLL frequency: 250MHz: 2.0Gbit */
+ if (xtal == HWTRAP_XTAL_40MHZ)
+ ncpo1 = 0x0c80;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ncpo1 = 0x1400;
}
- return 0;
-}
+ /* Setup the MT7530 TRGMII Tx Clock */
+ core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+ core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+ core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP4, RG_SYSPLL_DDSFBK_EN |
+ RG_SYSPLL_BIAS_EN | RG_SYSPLL_BIAS_LPF_EN);
+ core_write(priv, CORE_PLL_GROUP2, RG_SYSPLL_EN_NORMAL |
+ RG_SYSPLL_VODEN | RG_SYSPLL_POSDIV(1));
+ core_write(priv, CORE_PLL_GROUP7, RG_LCDDS_PCW_NCPO_CHG |
+ RG_LCCDS_C(3) | RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
-static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
-{
- u32 val;
-
- val = mt7530_read(priv, MT7531_TOP_SIG_SR);
-
- return (val & PAD_DUAL_SGMII_EN) != 0;
-}
-
-static int
-mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
-{
- return 0;
+ /* Enable the MT7530 TRGMII clocks */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
}
static void
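The NCPO1 values above program the TRGMII TX PLL. As a reading aid, a
hypothetical helper (not in the driver) that mirrors the selection:

	/* The PLL target is 125 MHz on MT7621 (1.0 Gbit TRGMII) and
	 * 250 MHz otherwise (2.0 Gbit); the divider value depends on
	 * the crystal frequency.
	 */
	static u32 trgmii_ncpo1(unsigned int id, u32 xtal)
	{
		if (id == ID_MT7621)
			return xtal == HWTRAP_XTAL_40MHZ ? 0x0640 : 0x0a00;
		return xtal == HWTRAP_XTAL_40MHZ ? 0x0c80 : 0x1400;
	}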
@@ -510,9 +475,6 @@ mt7531_pll_setup(struct mt7530_priv *priv)
u32 xtal;
u32 val;
- if (mt7531_dual_sgmii_supported(priv))
- return;
-
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
hwstrap = mt7530_read(priv, MT7531_HWTRAP);
@@ -920,8 +882,6 @@ static const char *p5_intf_modes(unsigned int p5_interface)
return "PHY P4";
case P5_INTF_SEL_GMAC5:
return "GMAC5";
- case P5_INTF_SEL_GMAC5_SGMII:
- return "GMAC5_SGMII";
default:
return "unknown";
}
@@ -956,13 +916,8 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
/* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
val &= ~MHWTRAP_P5_DIS;
break;
- case P5_DISABLED:
- interface = PHY_INTERFACE_MODE_NA;
- break;
default:
- dev_err(ds->dev, "Unsupported p5_intf_sel %d\n",
- priv->p5_intf_sel);
- goto unlock_exit;
+ break;
}
/* Setup RGMII settings */
@@ -992,9 +947,6 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));
- priv->p5_interface = interface;
-
-unlock_exit:
mutex_unlock(&priv->reg_mutex);
}
@@ -1014,18 +966,10 @@ mt753x_trap_frames(struct mt7530_priv *priv)
MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
}
-static int
+static void
mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- int ret;
-
- /* Setup max capability of CPU port at first */
- if (priv->info->cpu_port_config) {
- ret = priv->info->cpu_port_config(ds, port);
- if (ret)
- return ret;
- }
/* Enable Mediatek header mode on the cpu port */
mt7530_write(priv, MT7530_PVC_P(port),
@@ -1035,10 +979,6 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
UNU_FFP(BIT(port)));
- /* Set CPU port number */
- if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
- mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
-
/* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
* the MT7988 SoC. Trapped frames will be forwarded to the CPU port that
* is affine to the inbound user port.
@@ -1055,8 +995,6 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
/* Set to fallback mode for independent VLAN learning */
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_FALLBACK_MODE);
-
- return 0;
}
static int
@@ -1080,7 +1018,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
priv->ports[port].enable = true;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
priv->ports[port].pm);
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
@@ -1100,7 +1037,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
priv->ports[port].enable = false;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
mutex_unlock(&priv->reg_mutex);
}
@@ -2107,7 +2043,7 @@ mt7530_setup_irq(struct mt7530_priv *priv)
}
/* This register must be set for MT7530 to properly fire interrupts */
- if (priv->id != ID_MT7531)
+ if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL);
ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn,
@@ -2146,24 +2082,40 @@ mt7530_free_irq_common(struct mt7530_priv *priv)
static void
mt7530_free_irq(struct mt7530_priv *priv)
{
- mt7530_free_mdio_irq(priv);
+ struct device_node *mnp, *np = priv->dev->of_node;
+
+ mnp = of_get_child_by_name(np, "mdio");
+ if (!mnp)
+ mt7530_free_mdio_irq(priv);
+ of_node_put(mnp);
+
mt7530_free_irq_common(priv);
}
static int
mt7530_setup_mdio(struct mt7530_priv *priv)
{
+ struct device_node *mnp, *np = priv->dev->of_node;
struct dsa_switch *ds = priv->ds;
struct device *dev = priv->dev;
struct mii_bus *bus;
static int idx;
- int ret;
+ int ret = 0;
+
+ mnp = of_get_child_by_name(np, "mdio");
+
+ if (mnp && !of_device_is_available(mnp))
+ goto out;
bus = devm_mdiobus_alloc(dev);
- if (!bus)
- return -ENOMEM;
+ if (!bus) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!mnp)
+ ds->user_mii_bus = bus;
- ds->user_mii_bus = bus;
bus->priv = priv;
bus->name = KBUILD_MODNAME "-mii";
snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
@@ -2174,16 +2126,18 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
- if (priv->irq)
+ if (priv->irq && !mnp)
mt7530_setup_mdio_irq(priv);
- ret = devm_mdiobus_register(dev, bus);
+ ret = devm_of_mdiobus_register(dev, bus, mnp);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
- if (priv->irq)
+ if (priv->irq && !mnp)
mt7530_free_mdio_irq(priv);
}
+out:
+ of_node_put(mnp);
return ret;
}
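The mt7530_setup_mdio() rework leans on devm_of_mdiobus_register()
accepting a NULL node, in which case it degrades to a plain bus
registration (per of_mdiobus_register()'s documented NULL fallback). A
condensed sketch of the pattern, error handling elided:

	struct device_node *mnp = of_get_child_by_name(np, "mdio");

	/* mnp == NULL: behaves like devm_mdiobus_register() and the bus
	 * is auto-scanned via bus->phy_mask; otherwise PHYs are bound
	 * from the DT child node.
	 */
	ret = devm_of_mdiobus_register(dev, bus, mnp);
	of_node_put(mnp);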
@@ -2238,6 +2192,12 @@ mt7530_setup(struct dsa_switch *ds)
}
}
+ /* Disable LEDs before reset to prevent the MT7530 from sampling a
+ * potentially incorrect HT_XTAL_FSEL value.
+ */
+ mt7530_write(priv, MT7530_LED_EN, 0);
+ usleep_range(1000, 1100);
+
/* Reset whole chip through gpio pin or memory-mapped registers for
* different type of hardware
*/
@@ -2267,6 +2227,12 @@ mt7530_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_20MHZ) {
+ dev_err(priv->dev,
+ "MT7530 with a 20MHz XTAL is not supported!\n");
+ return -EINVAL;
+ }
+
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
@@ -2289,14 +2255,18 @@ mt7530_setup(struct dsa_switch *ds)
val |= MHWTRAP_MANUAL;
mt7530_write(priv, MT7530_MHWTRAP, val);
- priv->p6_interface = PHY_INTERFACE_MODE_NA;
-
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Clear link settings and enable force mode to force link down
+ * on all ports until they're enabled later.
+ */
+ mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
+ PMCR_FORCE_MODE, PMCR_FORCE_MODE);
+
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
@@ -2305,9 +2275,7 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
if (dsa_is_cpu_port(ds, i)) {
- ret = mt753x_cpu_port_enable(ds, i);
- if (ret)
- return ret;
+ mt753x_cpu_port_enable(ds, i);
} else {
mt7530_port_disable(ds, i);
@@ -2326,16 +2294,13 @@ mt7530_setup(struct dsa_switch *ds)
return ret;
/* Setup port 5 */
- priv->p5_intf_sel = P5_DISABLED;
- interface = PHY_INTERFACE_MODE_NA;
-
if (!dsa_is_unused_port(ds, 5)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
- ret = of_get_phy_mode(dsa_to_port(ds, 5)->dn, &interface);
- if (ret && ret != -ENODEV)
- return ret;
} else {
- /* Scan the ethernet nodes. look for GMAC1, lookup used phy */
+ /* Scan the ethernet nodes. Look for GMAC1 and look up the PHY it uses.
+ * Set priv->p5_intf_sel to the appropriate value if PHY muxing
+ * is detected.
+ */
for_each_child_of_node(dn, mac_np) {
if (!of_device_is_compatible(mac_np,
"mediatek,eth-mac"))
@@ -2366,6 +2331,10 @@ mt7530_setup(struct dsa_switch *ds)
of_node_put(phy_node);
break;
}
+
+ if (priv->p5_intf_sel == P5_INTF_SEL_PHY_P0 ||
+ priv->p5_intf_sel == P5_INTF_SEL_PHY_P4)
+ mt7530_setup_port5(ds, interface);
}
#ifdef CONFIG_GPIOLIB
@@ -2376,8 +2345,6 @@ mt7530_setup(struct dsa_switch *ds)
}
#endif /* CONFIG_GPIOLIB */
- mt7530_setup_port5(ds, interface);
-
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -2402,6 +2369,12 @@ mt7531_setup_common(struct dsa_switch *ds)
UNU_FFP_MASK);
for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Clear link settings and enable force mode to force link down
+ * on all ports until they're enabled later.
+ */
+ mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
+ MT7531_FORCE_MODE, MT7531_FORCE_MODE);
+
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
@@ -2412,9 +2385,7 @@ mt7531_setup_common(struct dsa_switch *ds)
mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
if (dsa_is_cpu_port(ds, i)) {
- ret = mt753x_cpu_port_enable(ds, i);
- if (ret)
- return ret;
+ mt753x_cpu_port_enable(ds, i);
} else {
mt7530_port_disable(ds, i);
@@ -2474,38 +2445,35 @@ mt7531_setup(struct dsa_switch *ds)
return -ENODEV;
}
- /* all MACs must be forced link-down before sw reset */
+ /* MT7531AE has two SGMII units, one for port 5 and one for port 6.
+ * MT7531BE has only one SGMII unit, which is for port 6.
+ */
+ val = mt7530_read(priv, MT7531_TOP_SIG_SR);
+ priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
+
+ /* Force link down on all ports before internal reset */
for (i = 0; i < MT7530_NUM_PORTS; i++)
mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
/* Reset the switch through internal reset */
- mt7530_write(priv, MT7530_SYS_CTRL,
- SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
- SYS_CTRL_REG_RST);
-
- mt7531_pll_setup(priv);
-
- if (mt7531_dual_sgmii_supported(priv)) {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
+ mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
+ if (!priv->p5_sgmii) {
+ mt7531_pll_setup(priv);
+ } else {
/* Let ds->user_mii_bus be able to access external phy. */
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
MT7531_EXT_P_MDC_11);
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
MT7531_EXT_P_MDIO_12);
- } else {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
}
- dev_dbg(ds->dev, "P5 support %s interface\n",
- p5_intf_modes(priv->p5_intf_sel));
+
+ if (!dsa_is_unused_port(ds, 5))
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
MT7531_GPIO0_INTERRUPT);
- /* Let phylink decide the interface later. */
- priv->p5_interface = PHY_INTERFACE_MODE_NA;
- priv->p6_interface = PHY_INTERFACE_MODE_NA;
-
/* Enable PHY core PLL, since phy_device has not yet been created
* provided for phy_[read,write]_mmd_indirect is called, we provide
* our own mt7531_ind_mmd_phy_[read,write] to complete this
@@ -2535,12 +2503,14 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
switch (port) {
- case 0 ... 4: /* Internal phy */
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+ /* Port 5 supports rgmii with delays, mii, and gmii. */
+ case 5:
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_MII,
config->supported_interfaces);
@@ -2548,7 +2518,8 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
config->supported_interfaces);
break;
- case 6: /* 1st cpu port */
+ /* Port 6 supports rgmii and trgmii. */
+ case 6:
__set_bit(PHY_INTERFACE_MODE_RGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_TRGMII,
@@ -2557,30 +2528,30 @@ static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
}
}
-static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
-{
- return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
-}
-
static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
switch (port) {
- case 0 ... 4: /* Internal phy */
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 4:
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
break;
- case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
- if (mt7531_is_rgmii_port(priv, port)) {
+ /* Port 5 supports rgmii with delays on MT7531BE, sgmii/802.3z on
+ * MT7531AE.
+ */
+ case 5:
+ if (!priv->p5_sgmii) {
phy_interface_set_rgmii(config->supported_interfaces);
break;
}
fallthrough;
- case 6: /* 1st cpu port supports sgmii/8023z only */
+ /* Port 6 supports sgmii/802.3z. */
+ case 6:
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
@@ -2596,14 +2567,14 @@ static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
- phy_interface_zero(config->supported_interfaces);
-
switch (port) {
- case 0 ... 4: /* Internal phy */
+ /* Ports which are connected to switch PHYs. There is no MII pinout. */
+ case 0 ... 3:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
+ /* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
@@ -2612,41 +2583,24 @@ static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
}
}
-static int
-mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->pad_setup(ds, state->interface);
-}
-
-static int
+static void
mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- /* Only need to setup port5. */
- if (port != 5)
- return 0;
-
- mt7530_setup_port5(priv->ds, interface);
-
- return 0;
+ if (port == 5)
+ mt7530_setup_port5(priv->ds, interface);
+ else if (port == 6)
+ mt7530_setup_port6(priv->ds, interface);
}
-static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
- phy_interface_t interface,
- struct phy_device *phydev)
+static void mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
+ phy_interface_t interface,
+ struct phy_device *phydev)
{
u32 val;
- if (!mt7531_is_rgmii_port(priv, port)) {
- dev_err(priv->dev, "RGMII mode is not available for port %d\n",
- port);
- return -EINVAL;
- }
-
val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
val |= GP_CLK_EN;
val &= ~GP_MODE_MASK;
@@ -2674,31 +2628,14 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
case PHY_INTERFACE_MODE_RGMII_ID:
break;
default:
- return -EINVAL;
+ break;
}
}
- mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
-
- return 0;
-}
-
-static bool mt753x_is_mac_port(u32 port)
-{
- return (port == 5 || port == 6);
-}
-
-static int
-mt7988_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface)
-{
- if (dsa_is_cpu_port(ds, port) &&
- interface == PHY_INTERFACE_MODE_INTERNAL)
- return 0;
- return -EINVAL;
+ mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
}
-static int
+static void
mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
@@ -2706,39 +2643,11 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
struct phy_device *phydev;
struct dsa_port *dp;
- if (!mt753x_is_mac_port(port)) {
- dev_err(priv->dev, "port %d is not a MAC port\n", port);
- return -EINVAL;
- }
-
- switch (interface) {
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (phy_interface_mode_is_rgmii(interface)) {
dp = dsa_to_port(ds, port);
phydev = dp->user->phydev;
- return mt7531_rgmii_setup(priv, port, interface, phydev);
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* handled in SGMII PCS driver */
- return 0;
- default:
- return -EINVAL;
+ mt7531_rgmii_setup(priv, port, interface, phydev);
}
-
- return -EINVAL;
-}
-
-static int
-mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->mac_port_config(ds, port, mode, state->interface);
}
static struct phylink_pcs *
@@ -2764,54 +2673,13 @@ mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct mt7530_priv *priv = ds->priv;
- u32 mcr_cur, mcr_new;
-
- switch (port) {
- case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
- break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (priv->p5_interface == state->interface)
- break;
-
- if (mt753x_mac_config(ds, port, mode, state) < 0)
- goto unsupported;
-
- if (priv->p5_intf_sel != P5_DISABLED)
- priv->p5_interface = state->interface;
- break;
- case 6: /* 1st cpu port */
- if (priv->p6_interface == state->interface)
- break;
-
- mt753x_pad_setup(ds, state);
- if (mt753x_mac_config(ds, port, mode, state) < 0)
- goto unsupported;
-
- priv->p6_interface = state->interface;
- break;
- default:
-unsupported:
- dev_err(ds->dev, "%s: unsupported %s port: %i\n",
- __func__, phy_modes(state->interface), port);
- return;
- }
-
- mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
- mcr_new = mcr_cur;
- mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
- mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
- PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID(priv->id);
+ if ((port == 5 || port == 6) && priv->info->mac_port_config)
+ priv->info->mac_port_config(ds, port, mode, state->interface);
/* Are we connected to external phy */
if (port == 5 && dsa_is_user_port(ds, 5))
- mcr_new |= PMCR_EXT_PHY;
-
- if (mcr_new != mcr_cur)
- mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
+ mt7530_set(priv, MT7530_PMCR_P(port), PMCR_EXT_PHY);
}
static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -2835,17 +2703,10 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
- /* MT753x MAC works in 1G full duplex mode for all up-clocked
- * variants.
- */
- if (interface == PHY_INTERFACE_MODE_TRGMII ||
- (phy_interface_mode_is_8023z(interface))) {
- speed = SPEED_1000;
- duplex = DUPLEX_FULL;
- }
-
switch (speed) {
case SPEED_1000:
+ case SPEED_2500:
+ case SPEED_10000:
mcr |= PMCR_FORCE_SPEED_1000;
break;
case SPEED_100:
@@ -2863,6 +2724,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
switch (speed) {
case SPEED_1000:
+ case SPEED_2500:
mcr |= PMCR_FORCE_EEE1G;
break;
case SPEED_100:
@@ -2874,63 +2736,6 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mt7530_set(priv, MT7530_PMCR_P(port), mcr);
}
-static int
-mt7531_cpu_port_config(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
- phy_interface_t interface;
- int speed;
- int ret;
-
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- interface = PHY_INTERFACE_MODE_RGMII;
- else
- interface = PHY_INTERFACE_MODE_2500BASEX;
-
- priv->p5_interface = interface;
- break;
- case 6:
- interface = PHY_INTERFACE_MODE_2500BASEX;
-
- priv->p6_interface = interface;
- break;
- default:
- return -EINVAL;
- }
-
- if (interface == PHY_INTERFACE_MODE_2500BASEX)
- speed = SPEED_2500;
- else
- speed = SPEED_1000;
-
- ret = mt7531_mac_config(ds, port, MLO_AN_FIXED, interface);
- if (ret)
- return ret;
- mt7530_write(priv, MT7530_PMCR_P(port),
- PMCR_CPU_PORT_SETTING(priv->id));
- mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
- speed, DUPLEX_FULL, true, true);
-
- return 0;
-}
-
-static int
-mt7988_cpu_port_config(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
-
- mt7530_write(priv, MT7530_PMCR_P(port),
- PMCR_CPU_PORT_SETTING(priv->id));
-
- mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED,
- PHY_INTERFACE_MODE_INTERNAL, NULL,
- SPEED_10000, DUPLEX_FULL, true, true);
-
- return 0;
-}
-
static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
@@ -3013,17 +2818,9 @@ static int
mt753x_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
- int i, ret;
-
- /* Initialise the PCS devices */
- for (i = 0; i < priv->ds->num_ports; i++) {
- priv->pcs[i].pcs.ops = priv->info->pcs_ops;
- priv->pcs[i].pcs.neg_mode = true;
- priv->pcs[i].priv = priv;
- priv->pcs[i].port = i;
- }
+ int ret = priv->info->sw_setup(ds);
+ int i;
- ret = priv->info->sw_setup(ds);
if (ret)
return ret;
@@ -3035,8 +2832,16 @@ mt753x_setup(struct dsa_switch *ds)
if (ret && priv->irq)
mt7530_free_irq_common(priv);
+ /* Initialise the PCS devices */
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ priv->pcs[i].pcs.ops = priv->info->pcs_ops;
+ priv->pcs[i].pcs.neg_mode = true;
+ priv->pcs[i].priv = priv;
+ priv->pcs[i].port = i;
+ }
+
if (priv->create_sgmii) {
- ret = priv->create_sgmii(priv, mt7531_dual_sgmii_supported(priv));
+ ret = priv->create_sgmii(priv);
if (ret && priv->irq)
mt7530_free_irq(priv);
}
@@ -3045,7 +2850,7 @@ mt753x_setup(struct dsa_switch *ds)
}
static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
struct mt7530_priv *priv = ds->priv;
u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
@@ -3057,7 +2862,7 @@ static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
}
static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
struct mt7530_priv *priv = ds->priv;
u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN;
@@ -3074,9 +2879,34 @@ static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
return 0;
}
-static int mt7988_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
+static void
+mt753x_conduit_state_change(struct dsa_switch *ds,
+ const struct net_device *conduit,
+ bool operational)
{
- return 0;
+ struct dsa_port *cpu_dp = conduit->dsa_ptr;
+ struct mt7530_priv *priv = ds->priv;
+ int val = 0;
+ u8 mask;
+
+ /* Set the CPU port that frames are trapped to on MT7530. Trapped
+ * frames will be forwarded to the numerically smallest CPU port
+ * whose conduit interface is up.
+ */
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return;
+
+ mask = BIT(cpu_dp->index);
+
+ if (operational)
+ priv->active_cpu_ports |= mask;
+ else
+ priv->active_cpu_ports &= ~mask;
+
+ if (priv->active_cpu_ports)
+ val = CPU_EN | CPU_PORT(__ffs(priv->active_cpu_ports));
+
+ mt7530_rmw(priv, MT7530_MFC, CPU_EN | CPU_PORT_MASK, val);
}
static int mt7988_setup(struct dsa_switch *ds)
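A toy illustration of the bitmask bookkeeping in
mt753x_conduit_state_change() (the values here are hypothetical):

	u8 active_cpu_ports = BIT(5) | BIT(6);	/* both conduits up */

	/* __ffs() returns the index of the least significant set bit,
	 * so the numerically smallest active CPU port, 5, is chosen.
	 */
	u32 val = CPU_EN | CPU_PORT(__ffs(active_cpu_ports));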
@@ -3129,6 +2959,7 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.phylink_mac_link_up = mt753x_phylink_mac_link_up,
.get_mac_eee = mt753x_get_mac_eee,
.set_mac_eee = mt753x_set_mac_eee,
+ .conduit_state_change = mt753x_conduit_state_change,
};
EXPORT_SYMBOL_GPL(mt7530_switch_ops);
@@ -3141,7 +2972,6 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7530_phy_write_c22,
.phy_read_c45 = mt7530_phy_read_c45,
.phy_write_c45 = mt7530_phy_write_c45,
- .pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
@@ -3153,7 +2983,6 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7530_phy_write_c22,
.phy_read_c45 = mt7530_phy_read_c45,
.phy_write_c45 = mt7530_phy_write_c45,
- .pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
@@ -3165,8 +2994,6 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7531_ind_c22_phy_write,
.phy_read_c45 = mt7531_ind_c45_phy_read,
.phy_write_c45 = mt7531_ind_c45_phy_write,
- .pad_setup = mt7531_pad_setup,
- .cpu_port_config = mt7531_cpu_port_config,
.mac_port_get_caps = mt7531_mac_port_get_caps,
.mac_port_config = mt7531_mac_config,
},
@@ -3178,10 +3005,7 @@ const struct mt753x_info mt753x_table[] = {
.phy_write_c22 = mt7531_ind_c22_phy_write,
.phy_read_c45 = mt7531_ind_c45_phy_read,
.phy_write_c45 = mt7531_ind_c45_phy_write,
- .pad_setup = mt7988_pad_setup,
- .cpu_port_config = mt7988_cpu_port_config,
.mac_port_get_caps = mt7988_mac_port_get_caps,
- .mac_port_config = mt7988_mac_config,
},
};
EXPORT_SYMBOL_GPL(mt753x_table);
@@ -3208,10 +3032,8 @@ mt7530_probe_common(struct mt7530_priv *priv)
/* Sanity check if these required device operations are filled
* properly.
*/
- if (!priv->info->sw_setup || !priv->info->pad_setup ||
- !priv->info->phy_read_c22 || !priv->info->phy_write_c22 ||
- !priv->info->mac_port_get_caps ||
- !priv->info->mac_port_config)
+ if (!priv->info->sw_setup || !priv->info->phy_read_c22 ||
+ !priv->info->phy_write_c22 || !priv->info->mac_port_get_caps)
return -EINVAL;
priv->id = priv->info->id;
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 17e42d30fff4..a71166e0a7fc 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -41,8 +41,8 @@ enum mt753x_id {
#define UNU_FFP(x) (((x) & 0xff) << 8)
#define UNU_FFP_MASK UNU_FFP(~0)
#define CPU_EN BIT(7)
-#define CPU_PORT(x) ((x) << 4)
-#define CPU_MASK (0xf << 4)
+#define CPU_PORT_MASK GENMASK(6, 4)
+#define CPU_PORT(x) FIELD_PREP(CPU_PORT_MASK, x)
#define MIRROR_EN BIT(3)
#define MIRROR_PORT(x) ((x) & 0x7)
#define MIRROR_MASK 0x7
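On the CPU_PORT() change above: GENMASK()/FIELD_PREP() derive the shift
from the mask instead of hard-coding it twice. Equivalence check:

	/* CPU_PORT_MASK = GENMASK(6, 4) covers bits 6..4, so
	 * FIELD_PREP(CPU_PORT_MASK, 5) == (5 << 4) == 0x50, matching the
	 * old ((x) << 4) for in-range values; FIELD_PREP() additionally
	 * fails the build if a constant value overflows the field.
	 */
	u32 v = FIELD_PREP(CPU_PORT_MASK, 5);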
@@ -304,20 +304,11 @@ enum mt7530_vlan_port_acc_frm {
MT7531_FORCE_DPX | \
MT7531_FORCE_RX_FC | \
MT7531_FORCE_TX_FC)
-#define PMCR_FORCE_MODE_ID(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_FORCE_MODE : PMCR_FORCE_MODE)
#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
PMCR_FORCE_FDX | PMCR_FORCE_LNK | \
PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100)
-#define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \
- PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
- PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
- PMCR_TX_EN | PMCR_RX_EN | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
- PMCR_FORCE_SPEED_1000 | \
- PMCR_FORCE_FDX | PMCR_FORCE_LNK)
#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100)
#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24)
@@ -683,11 +674,10 @@ struct mt7530_port {
/* Port 5 interface select definitions */
enum p5_interface_select {
- P5_DISABLED = 0,
+ P5_DISABLED,
P5_INTF_SEL_PHY_P0,
P5_INTF_SEL_PHY_P4,
P5_INTF_SEL_GMAC5,
- P5_INTF_SEL_GMAC5_SGMII,
};
struct mt7530_priv;
@@ -705,8 +695,6 @@ struct mt753x_pcs {
* @phy_write_c22: Holding the way writing PHY port using C22
* @phy_read_c45: Holding the way reading PHY port using C45
* @phy_write_c45: Holding the way writing PHY port using C45
- * @pad_setup: Holding the way setting up the bus pad for a certain
- * MAC port
* @phy_mode_supported: Check if the PHY type is being supported on a certain
* port
* @mac_port_validate: Holding the way to set addition validate type for a
@@ -727,16 +715,14 @@ struct mt753x_info {
int regnum);
int (*phy_write_c45)(struct mt7530_priv *priv, int port, int devad,
int regnum, u16 val);
- int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
- int (*cpu_port_config)(struct dsa_switch *ds, int port);
void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
struct phylink_config *config);
void (*mac_port_validate)(struct dsa_switch *ds, int port,
phy_interface_t interface,
unsigned long *supported);
- int (*mac_port_config)(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
+ void (*mac_port_config)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
};
/* struct mt7530_priv - This is the main data structure for holding the state
@@ -754,12 +740,14 @@ struct mt753x_info {
* @ports: Holding the state among ports
* @reg_mutex: The lock for protecting among process accessing
* registers
- * @p6_interface Holding the current port 6 interface
* @p5_intf_sel: Holding the current port 5 interface select
+ * @p5_sgmii: Flag for distinguishing if port 5 of the MT7531 switch
+ * has SGMII
* @irq: IRQ number of the switch
* @irq_domain: IRQ domain of the switch irq_chip
* @irq_enable: IRQ enable bits, synced to SYS_INT_EN
* @create_sgmii: Pointer to function creating SGMII PCS instance(s)
+ * @active_cpu_ports: Holding the active CPU ports
*/
struct mt7530_priv {
struct device *dev;
@@ -773,9 +761,8 @@ struct mt7530_priv {
const struct mt753x_info *info;
unsigned int id;
bool mcm;
- phy_interface_t p6_interface;
- phy_interface_t p5_interface;
- unsigned int p5_intf_sel;
+ enum p5_interface_select p5_intf_sel;
+ bool p5_sgmii;
u8 mirror_rx;
u8 mirror_tx;
struct mt7530_port ports[MT7530_NUM_PORTS];
@@ -785,7 +772,8 @@ struct mt7530_priv {
int irq;
struct irq_domain *irq_domain;
u32 irq_enable;
- int (*create_sgmii)(struct mt7530_priv *priv, bool dual_sgmii);
+ int (*create_sgmii)(struct mt7530_priv *priv);
+ u8 active_cpu_ports;
};
struct mt7530_hw_vlan_entry {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 614cabb5c1b0..9ed1821184ec 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1451,14 +1451,14 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
}
static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
@@ -3659,7 +3659,7 @@ static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad,
int err;
if (!chip->info->ops->phy_read_c45)
- return 0xffff;
+ return -ENODEV;
mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);
@@ -3712,7 +3712,10 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
if (external) {
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true);
+ if (chip->info->family == MV88E6XXX_FAMILY_6393)
+ err = mv88e6393x_g2_scratch_gpio_set_smi(chip, true);
+ else
+ err = mv88e6390_g2_scratch_gpio_set_smi(chip, true);
mv88e6xxx_reg_unlock(chip);
if (err)
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index d9434f7cae53..82f9b410de0b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -378,8 +378,10 @@ extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops;
extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
-int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+int mv88e6390_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6393x_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external);
int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index a9d6e40321a2..61ab6cc4fbfc 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -240,7 +240,7 @@ const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
};
/**
- * mv88e6xxx_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * mv88e6390_g2_scratch_gpio_set_smi - set gpio muxing for external smi
* @chip: chip private data
* @external: set mux for external smi, or free for gpio usage
*
@@ -248,7 +248,7 @@ const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
* an external SMI interface, or they may be made free for other
* GPIO uses.
*/
-int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+int mv88e6390_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external)
{
int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
@@ -291,6 +291,37 @@ int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
}
/**
+ * mv88e6393x_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * @chip: chip private data
+ * @external: set mux for external smi, or free for gpio usage
+ *
+ * MV88E6191X/6193X/6393X GPIO pins 9 and 10 can be configured as an
+ * external SMI interface or as regular GPIOs.
+ *
+ * However, they use a different register layout than the chips
+ * handled by the existing function.
+ */
+int mv88e6393x_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external)
+{
+ int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
+ int err;
+ u8 val;
+
+ err = mv88e6xxx_g2_scratch_read(chip, misc_cfg, &val);
+ if (err)
+ return err;
+
+ if (external)
+ val &= ~MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+ else
+ val |= MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+
+ return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
+}
+
+/**
* mv88e6352_g2_scratch_port_has_serdes - indicate if a port can have a serdes
* @chip: chip private data
* @port: port number to check for serdes
diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
index 4d677f836807..5a27d047a38e 100644
--- a/drivers/net/dsa/mv88e6xxx/pcs-6185.c
+++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c
@@ -95,7 +95,7 @@ static void mv88e6185_pcs_get_state(struct phylink_pcs *pcs,
}
}
-static int mv88e6185_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+static int mv88e6185_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
@@ -137,6 +137,7 @@ static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port)
mpcs->chip = chip;
mpcs->port = port;
mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops;
+ mpcs->phylink_pcs.neg_mode = true;
irq = mv88e6xxx_serdes_irq_mapping(chip, port);
if (irq) {
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 7a864329cb72..dab66c0c6f64 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -950,15 +950,15 @@ qca8k_mdio_register(struct qca8k_priv *priv)
struct device *dev = ds->dev;
struct device_node *mdio;
struct mii_bus *bus;
- int err = 0;
+ int ret = 0;
mdio = of_get_child_by_name(dev->of_node, "mdio");
if (mdio && !of_device_is_available(mdio))
- goto out;
+ goto out_put_node;
bus = devm_mdiobus_alloc(dev);
if (!bus) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out_put_node;
}
@@ -984,12 +984,11 @@ qca8k_mdio_register(struct qca8k_priv *priv)
bus->write = qca8k_legacy_mdio_write;
}
- err = devm_of_mdiobus_register(dev, bus, mdio);
+ ret = devm_of_mdiobus_register(dev, bus, mdio);
out_put_node:
of_node_put(mdio);
-out:
- return err;
+ return ret;
}
static int
@@ -998,7 +997,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
struct device_node *ports, *port;
phy_interface_t mode;
- int err;
+ int ret;
ports = of_get_child_by_name(priv->dev->of_node, "ports");
if (!ports)
@@ -1008,11 +1007,11 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
return -EINVAL;
for_each_available_child_of_node(ports, port) {
- err = of_property_read_u32(port, "reg", &reg);
- if (err) {
+ ret = of_property_read_u32(port, "reg", &reg);
+ if (ret) {
of_node_put(port);
of_node_put(ports);
- return err;
+ return ret;
}
if (!dsa_is_user_port(priv->ds, reg))
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 2358cd399c7e..7f80035c5441 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -534,7 +534,7 @@ int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
}
int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
struct qca8k_priv *priv = ds->priv;
@@ -558,7 +558,7 @@ exit:
}
int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
- struct ethtool_eee *e)
+ struct ethtool_keee *e)
{
/* Nothing to do on the port's MAC */
return 0;
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index c8785c36c54e..2184d8d2d5a9 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -518,8 +518,8 @@ void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
/* Common eee function */
-int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee);
-int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *eee);
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e);
/* Common bridge function */
void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
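A note on the recurring ethtool_eee -> ethtool_keee conversions in this
series: struct ethtool_keee is the kernel-internal EEE state, replacing
the raw u32 link-mode words of the uapi struct with linkmode bitmaps
while keeping the LPI knobs. A hedged sketch of a driver-side check
(field names per our reading of include/linux/ethtool.h; treat them as
assumptions, and the 1000 us limit is purely illustrative):

	static int example_set_mac_eee(struct dsa_switch *ds, int port,
				       struct ethtool_keee *e)
	{
		/* eee_enabled/tx_lpi_enabled/tx_lpi_timer carry over
		 * from the legacy struct; the advertisement fields are
		 * now linkmode bitmaps rather than u32 masks.
		 */
		if (e->eee_enabled && e->tx_lpi_enabled &&
		    e->tx_lpi_timer > 1000)
			return -EOPNOTSUPP;
		return 0;
	}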
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 060165a85fb7..6989972eebc3 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -16,37 +16,29 @@ menuconfig NET_DSA_REALTEK
if NET_DSA_REALTEK
config NET_DSA_REALTEK_MDIO
- tristate "Realtek MDIO interface driver"
+ bool "Realtek MDIO interface support"
depends on OF
- depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
- depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
- depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches configured
through MDIO.
config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI interface driver"
+ bool "Realtek SMI interface support"
depends on OF
- depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
- depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
- depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for registering switches connected
through SMI.
config NET_DSA_REALTEK_RTL8365MB
- tristate "Realtek RTL8365MB switch subdriver"
- imply NET_DSA_REALTEK_SMI
- imply NET_DSA_REALTEK_MDIO
+ tristate "Realtek RTL8365MB switch driver"
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL8_4
help
Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
config NET_DSA_REALTEK_RTL8366RB
- tristate "Realtek RTL8366RB switch subdriver"
- imply NET_DSA_REALTEK_SMI
- imply NET_DSA_REALTEK_MDIO
+ tristate "Realtek RTL8366RB switch driver"
+ depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
select NET_DSA_TAG_RTL4_A
help
Select to enable support for Realtek RTL8366RB.
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
index 0aab57252a7c..35491dc20d6d 100644
--- a/drivers/net/dsa/realtek/Makefile
+++ b/drivers/net/dsa/realtek/Makefile
@@ -1,6 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_NET_DSA_REALTEK_MDIO) += realtek-mdio.o
-obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
+obj-$(CONFIG_NET_DSA_REALTEK) += realtek_dsa.o
+realtek_dsa-objs := rtl83xx.o
+
+ifdef CONFIG_NET_DSA_REALTEK_MDIO
+realtek_dsa-objs += realtek-mdio.o
+endif
+
+ifdef CONFIG_NET_DSA_REALTEK_SMI
+realtek_dsa-objs += realtek-smi.o
+endif
+
obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
rtl8366-objs := rtl8366-core.o rtl8366rb.o
obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index 292e6d087e8b..04b758e5a680 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -25,6 +25,8 @@
#include <linux/regmap.h>
#include "realtek.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
/* Read/write via mdiobus */
#define REALTEK_MDIO_CTRL0_REG 31
@@ -99,192 +101,87 @@ out_unlock:
return ret;
}
-static void realtek_mdio_lock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_lock(&priv->map_lock);
-}
-
-static void realtek_mdio_unlock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_unlock(&priv->map_lock);
-}
-
-static const struct regmap_config realtek_mdio_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
+static const struct realtek_interface_info realtek_mdio_info = {
.reg_read = realtek_mdio_read,
.reg_write = realtek_mdio_write,
- .cache_type = REGCACHE_NONE,
- .lock = realtek_mdio_lock,
- .unlock = realtek_mdio_unlock,
};
-static const struct regmap_config realtek_mdio_nolock_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_mdio_read,
- .reg_write = realtek_mdio_write,
- .cache_type = REGCACHE_NONE,
- .disable_locking = true,
-};
-
-static int realtek_mdio_probe(struct mdio_device *mdiodev)
+/**
+ * realtek_mdio_probe() - Probe an MDIO-connected switch
+ * @mdiodev: mdio_device to probe on.
+ *
+ * This function should be used as the .probe in an mdio_driver. After
+ * calling the common probe function for both interfaces, it initializes the
+ * values specific to MDIO-connected devices. Finally, it calls a common
+ * function to register the DSA switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: Returns 0 on success, a negative error on failure.
+ */
+int realtek_mdio_probe(struct mdio_device *mdiodev)
{
- struct realtek_priv *priv;
struct device *dev = &mdiodev->dev;
- const struct realtek_variant *var;
- struct regmap_config rc;
- struct device_node *np;
+ struct realtek_priv *priv;
int ret;
- var = of_device_get_match_data(dev);
- if (!var)
- return -EINVAL;
-
- priv = devm_kzalloc(&mdiodev->dev,
- size_add(sizeof(*priv), var->chip_data_sz),
- GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- mutex_init(&priv->map_lock);
+ priv = rtl83xx_probe(dev, &realtek_mdio_info);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
- rc = realtek_mdio_regmap_config;
- rc.lock_arg = priv;
- priv->map = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map)) {
- ret = PTR_ERR(priv->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- rc = realtek_mdio_nolock_regmap_config;
- priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map_nolock)) {
- ret = PTR_ERR(priv->map_nolock);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- priv->mdio_addr = mdiodev->addr;
priv->bus = mdiodev->bus;
- priv->dev = &mdiodev->dev;
- priv->chip_data = (void *)priv + sizeof(*priv);
-
- priv->clk_delay = var->clk_delay;
- priv->cmd_read = var->cmd_read;
- priv->cmd_write = var->cmd_write;
- priv->ops = var->ops;
-
+ priv->mdio_addr = mdiodev->addr;
priv->write_reg_noack = realtek_mdio_write;
- np = dev->of_node;
-
- dev_set_drvdata(dev, priv);
-
- /* TODO: if power is software controlled, set up any regulators here */
- priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(priv->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(priv->reset);
- }
-
- if (priv->reset) {
- gpiod_set_value(priv->reset, 1);
- dev_dbg(dev, "asserted RESET\n");
- msleep(REALTEK_HW_STOP_DELAY);
- gpiod_set_value(priv->reset, 0);
- msleep(REALTEK_HW_START_DELAY);
- dev_dbg(dev, "deasserted RESET\n");
- }
-
- ret = priv->ops->detect(priv);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
- }
-
- priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = dev;
- priv->ds->num_ports = priv->num_ports;
- priv->ds->priv = priv;
- priv->ds->ops = var->ds_ops_mdio;
-
- ret = dsa_register_switch(priv->ds);
+ ret = rtl83xx_register_switch(priv);
if (ret) {
- dev_err(priv->dev, "unable to register switch ret = %d\n", ret);
+ rtl83xx_remove(priv);
return ret;
}
return 0;
}
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_probe, REALTEK_DSA);
-static void realtek_mdio_remove(struct mdio_device *mdiodev)
+/**
+ * realtek_mdio_remove() - Remove the driver of an MDIO-connected switch
+ * @mdiodev: mdio_device to be removed.
+ *
+ * This function should be used as the .remove in an mdio_driver. First
+ * it unregisters the DSA switch and then it calls the common remove function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_mdio_remove(struct mdio_device *mdiodev)
{
struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
- dsa_unregister_switch(priv->ds);
+ rtl83xx_unregister_switch(priv);
- /* leave the device reset asserted */
- if (priv->reset)
- gpiod_set_value(priv->reset, 1);
+ rtl83xx_remove(priv);
}
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_remove, REALTEK_DSA);
-static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+/**
+ * realtek_mdio_shutdown() - Shutdown the driver of an MDIO-connected switch
+ * @mdiodev: mdio_device shutting down.
+ *
+ * This function should be used as the .shutdown in an mdio_driver. It calls
+ * the common shutdown function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_mdio_shutdown(struct mdio_device *mdiodev)
{
struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
if (!priv)
return;
- dsa_switch_shutdown(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
+ rtl83xx_shutdown(priv);
}
-
-static const struct of_device_id realtek_mdio_of_match[] = {
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
- { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
- { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
-#endif
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_mdio_of_match);
-
-static struct mdio_driver realtek_mdio_driver = {
- .mdiodrv.driver = {
- .name = "realtek-mdio",
- .of_match_table = realtek_mdio_of_match,
- },
- .probe = realtek_mdio_probe,
- .remove = realtek_mdio_remove,
- .shutdown = realtek_mdio_shutdown,
-};
-
-mdio_module_driver(realtek_mdio_driver);
-
-MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
-MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via MDIO interface");
-MODULE_LICENSE("GPL");
+EXPORT_SYMBOL_NS_GPL(realtek_mdio_shutdown, REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/realtek-mdio.h b/drivers/net/dsa/realtek/realtek-mdio.h
new file mode 100644
index 000000000000..ee70f6a5b8ff
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-mdio.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _REALTEK_MDIO_H
+#define _REALTEK_MDIO_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO)
+
+static inline int realtek_mdio_driver_register(struct mdio_driver *drv)
+{
+ return mdio_driver_register(drv);
+}
+
+static inline void realtek_mdio_driver_unregister(struct mdio_driver *drv)
+{
+ mdio_driver_unregister(drv);
+}
+
+int realtek_mdio_probe(struct mdio_device *mdiodev);
+void realtek_mdio_remove(struct mdio_device *mdiodev);
+void realtek_mdio_shutdown(struct mdio_device *mdiodev);
+
+#else /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO) */
+
+static inline int realtek_mdio_driver_register(struct mdio_driver *drv)
+{
+ return 0;
+}
+
+static inline void realtek_mdio_driver_unregister(struct mdio_driver *drv)
+{
+}
+
+static inline int realtek_mdio_probe(struct mdio_device *mdiodev)
+{
+ return -ENOENT;
+}
+
+static inline void realtek_mdio_remove(struct mdio_device *mdiodev)
+{
+}
+
+static inline void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_MDIO) */
+
+#endif /* _REALTEK_MDIO_H */
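The stub pattern above lets each chip module register its MDIO driver unconditionally: with CONFIG_NET_DSA_REALTEK_MDIO disabled, realtek_mdio_driver_register() collapses to a no-op that returns 0, so the call sites need no #ifdef. A minimal sketch of the intended usage, which the rtl8365mb and rtl8366rb updates below follow (the "example" identifiers and the compatible string are illustrative only; realtek-smi.h below provides the matching wrappers for the platform/SMI side):

static const struct of_device_id example_of_match[] = {
	{ .compatible = "realtek,example" },	/* invented compatible */
	{ /* sentinel */ },
};

static struct mdio_driver example_mdio_driver = {
	.mdiodrv.driver = {
		.name = "example-mdio",
		.of_match_table = example_of_match,
	},
	.probe = realtek_mdio_probe,
	.remove = realtek_mdio_remove,
	.shutdown = realtek_mdio_shutdown,
};

static int __init example_init(void)
{
	/* A no-op returning 0 when the MDIO interface is disabled */
	return realtek_mdio_driver_register(&example_mdio_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	realtek_mdio_driver_unregister(&example_mdio_driver);
}
module_exit(example_exit);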
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index 755546ed8db6..88590ae95a75 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -31,7 +31,6 @@
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/of.h>
-#include <linux/of_mdio.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
@@ -40,12 +39,14 @@
#include <linux/if_bridge.h>
#include "realtek.h"
+#include "realtek-smi.h"
+#include "rtl83xx.h"
#define REALTEK_SMI_ACK_RETRY_COUNT 5
static inline void realtek_smi_clk_delay(struct realtek_priv *priv)
{
- ndelay(priv->clk_delay);
+ ndelay(priv->variant->clk_delay);
}
static void realtek_smi_start(struct realtek_priv *priv)
@@ -208,7 +209,7 @@ static int realtek_smi_read_reg(struct realtek_priv *priv, u32 addr, u32 *data)
realtek_smi_start(priv);
/* Send READ command */
- ret = realtek_smi_write_byte(priv, priv->cmd_read);
+ ret = realtek_smi_write_byte(priv, priv->variant->cmd_read);
if (ret)
goto out;
@@ -249,7 +250,7 @@ static int realtek_smi_write_reg(struct realtek_priv *priv,
realtek_smi_start(priv);
/* Send WRITE command */
- ret = realtek_smi_write_byte(priv, priv->cmd_write);
+ ret = realtek_smi_write_byte(priv, priv->variant->cmd_write);
if (ret)
goto out;
@@ -310,258 +311,98 @@ static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
return realtek_smi_read_reg(priv, reg, val);
}
-static void realtek_smi_lock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_lock(&priv->map_lock);
-}
-
-static void realtek_smi_unlock(void *ctx)
-{
- struct realtek_priv *priv = ctx;
-
- mutex_unlock(&priv->map_lock);
-}
-
-static const struct regmap_config realtek_smi_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
+static const struct realtek_interface_info realtek_smi_info = {
.reg_read = realtek_smi_read,
.reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
- .lock = realtek_smi_lock,
- .unlock = realtek_smi_unlock,
};
-static const struct regmap_config realtek_smi_nolock_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_smi_read,
- .reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
- .disable_locking = true,
-};
-
-static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- struct realtek_priv *priv = bus->priv;
-
- return priv->ops->phy_read(priv, addr, regnum);
-}
-
-static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
-{
- struct realtek_priv *priv = bus->priv;
-
- return priv->ops->phy_write(priv, addr, regnum, val);
-}
-
-static int realtek_smi_setup_mdio(struct dsa_switch *ds)
-{
- struct realtek_priv *priv = ds->priv;
- struct device_node *mdio_np;
- int ret;
-
- mdio_np = of_get_compatible_child(priv->dev->of_node, "realtek,smi-mdio");
- if (!mdio_np) {
- dev_err(priv->dev, "no MDIO bus node\n");
- return -ENODEV;
- }
-
- priv->user_mii_bus = devm_mdiobus_alloc(priv->dev);
- if (!priv->user_mii_bus) {
- ret = -ENOMEM;
- goto err_put_node;
- }
- priv->user_mii_bus->priv = priv;
- priv->user_mii_bus->name = "SMI user MII";
- priv->user_mii_bus->read = realtek_smi_mdio_read;
- priv->user_mii_bus->write = realtek_smi_mdio_write;
- snprintf(priv->user_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
- ds->index);
- priv->user_mii_bus->dev.of_node = mdio_np;
- priv->user_mii_bus->parent = priv->dev;
- ds->user_mii_bus = priv->user_mii_bus;
-
- ret = devm_of_mdiobus_register(priv->dev, priv->user_mii_bus, mdio_np);
- if (ret) {
- dev_err(priv->dev, "unable to register MDIO bus %s\n",
- priv->user_mii_bus->id);
- goto err_put_node;
- }
-
- return 0;
-
-err_put_node:
- of_node_put(mdio_np);
-
- return ret;
-}
-
-static int realtek_smi_probe(struct platform_device *pdev)
+/**
+ * realtek_smi_probe() - Probe a platform device for an SMI-connected switch
+ * @pdev: platform_device to probe on.
+ *
+ * This function should be used as the .probe in a platform_driver. After
+ * calling the common probe function for both interfaces, it initializes the
+ * values specific to SMI-connected devices. Finally, it calls a common
+ * function to register the DSA switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: 0 on success, a negative error on failure.
+ */
+int realtek_smi_probe(struct platform_device *pdev)
{
- const struct realtek_variant *var;
struct device *dev = &pdev->dev;
struct realtek_priv *priv;
- struct regmap_config rc;
- struct device_node *np;
int ret;
- var = of_device_get_match_data(dev);
- np = dev->of_node;
-
- priv = devm_kzalloc(dev, sizeof(*priv) + var->chip_data_sz, GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- priv->chip_data = (void *)priv + sizeof(*priv);
-
- mutex_init(&priv->map_lock);
-
- rc = realtek_smi_regmap_config;
- rc.lock_arg = priv;
- priv->map = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map)) {
- ret = PTR_ERR(priv->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- rc = realtek_smi_nolock_regmap_config;
- priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
- if (IS_ERR(priv->map_nolock)) {
- ret = PTR_ERR(priv->map_nolock);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- /* Link forward and backward */
- priv->dev = dev;
- priv->clk_delay = var->clk_delay;
- priv->cmd_read = var->cmd_read;
- priv->cmd_write = var->cmd_write;
- priv->ops = var->ops;
-
- priv->setup_interface = realtek_smi_setup_mdio;
- priv->write_reg_noack = realtek_smi_write_reg_noack;
-
- dev_set_drvdata(dev, priv);
- spin_lock_init(&priv->lock);
-
- /* TODO: if power is software controlled, set up any regulators here */
-
- priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(priv->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(priv->reset);
- }
- if (priv->reset) {
- gpiod_set_value(priv->reset, 1);
- dev_dbg(dev, "asserted RESET\n");
- msleep(REALTEK_HW_STOP_DELAY);
- gpiod_set_value(priv->reset, 0);
- msleep(REALTEK_HW_START_DELAY);
- dev_dbg(dev, "deasserted RESET\n");
- }
+ priv = rtl83xx_probe(dev, &realtek_smi_info);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
/* Fetch MDIO pins */
priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
- if (IS_ERR(priv->mdc))
+ if (IS_ERR(priv->mdc)) {
+ rtl83xx_remove(priv);
return PTR_ERR(priv->mdc);
+ }
+
priv->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
- if (IS_ERR(priv->mdio))
+ if (IS_ERR(priv->mdio)) {
+ rtl83xx_remove(priv);
return PTR_ERR(priv->mdio);
-
- priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- ret = priv->ops->detect(priv);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
}
- priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
- if (!priv->ds)
- return -ENOMEM;
-
- priv->ds->dev = dev;
- priv->ds->num_ports = priv->num_ports;
- priv->ds->priv = priv;
+ priv->write_reg_noack = realtek_smi_write_reg_noack;
- priv->ds->ops = var->ds_ops_smi;
- ret = dsa_register_switch(priv->ds);
+ ret = rtl83xx_register_switch(priv);
if (ret) {
- dev_err_probe(dev, ret, "unable to register switch\n");
+ rtl83xx_remove(priv);
return ret;
}
+
return 0;
}
+EXPORT_SYMBOL_NS_GPL(realtek_smi_probe, REALTEK_DSA);
-static void realtek_smi_remove(struct platform_device *pdev)
+/**
+ * realtek_smi_remove() - Remove the driver of an SMI-connected switch
+ * @pdev: platform_device to be removed.
+ *
+ * This function should be used as the .remove_new in a platform_driver. First
+ * it unregisters the DSA switch and then it calls the common remove function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_smi_remove(struct platform_device *pdev)
{
struct realtek_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return;
- dsa_unregister_switch(priv->ds);
- if (priv->user_mii_bus)
- of_node_put(priv->user_mii_bus->dev.of_node);
+ rtl83xx_unregister_switch(priv);
- /* leave the device reset asserted */
- if (priv->reset)
- gpiod_set_value(priv->reset, 1);
+ rtl83xx_remove(priv);
}
+EXPORT_SYMBOL_NS_GPL(realtek_smi_remove, REALTEK_DSA);
-static void realtek_smi_shutdown(struct platform_device *pdev)
+/**
+ * realtek_smi_shutdown() - Shutdown the driver of an SMI-connected switch
+ * @pdev: platform_device shutting down.
+ *
+ * This function should be used as the .shutdown in a platform_driver. It calls
+ * the common shutdown function.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void realtek_smi_shutdown(struct platform_device *pdev)
{
struct realtek_priv *priv = platform_get_drvdata(pdev);
if (!priv)
return;
- dsa_switch_shutdown(priv->ds);
-
- platform_set_drvdata(pdev, NULL);
+ rtl83xx_shutdown(priv);
}
-
-static const struct of_device_id realtek_smi_of_match[] = {
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
- {
- .compatible = "realtek,rtl8366rb",
- .data = &rtl8366rb_variant,
- },
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
- {
- .compatible = "realtek,rtl8365mb",
- .data = &rtl8365mb_variant,
- },
-#endif
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
-
-static struct platform_driver realtek_smi_driver = {
- .driver = {
- .name = "realtek-smi",
- .of_match_table = realtek_smi_of_match,
- },
- .probe = realtek_smi_probe,
- .remove_new = realtek_smi_remove,
- .shutdown = realtek_smi_shutdown,
-};
-module_platform_driver(realtek_smi_driver);
-
-MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
-MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via SMI interface");
-MODULE_LICENSE("GPL");
+EXPORT_SYMBOL_NS_GPL(realtek_smi_shutdown, REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/realtek-smi.h b/drivers/net/dsa/realtek/realtek-smi.h
new file mode 100644
index 000000000000..ea49a2edd3c8
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-smi.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _REALTEK_SMI_H
+#define _REALTEK_SMI_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI)
+
+static inline int realtek_smi_driver_register(struct platform_driver *drv)
+{
+ return platform_driver_register(drv);
+}
+
+static inline void realtek_smi_driver_unregister(struct platform_driver *drv)
+{
+ platform_driver_unregister(drv);
+}
+
+int realtek_smi_probe(struct platform_device *pdev);
+void realtek_smi_remove(struct platform_device *pdev);
+void realtek_smi_shutdown(struct platform_device *pdev);
+
+#else /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI) */
+
+static inline int realtek_smi_driver_register(struct platform_driver *drv)
+{
+ return 0;
+}
+
+static inline void realtek_smi_driver_unregister(struct platform_driver *drv)
+{
+}
+
+static inline int realtek_smi_probe(struct platform_device *pdev)
+{
+ return -ENOENT;
+}
+
+static inline void realtek_smi_remove(struct platform_device *pdev)
+{
+}
+
+static inline void realtek_smi_shutdown(struct platform_device *pdev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_SMI) */
+
+#endif /* _REALTEK_SMI_H */
diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h
index 790488e9c667..e0b1aa01337b 100644
--- a/drivers/net/dsa/realtek/realtek.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
+#include <linux/reset.h>
#define REALTEK_HW_STOP_DELAY 25 /* msecs */
#define REALTEK_HW_START_DELAY 100 /* msecs */
@@ -48,6 +49,7 @@ struct rtl8366_vlan_4k {
struct realtek_priv {
struct device *dev;
+ struct reset_control *reset_ctl;
struct gpio_desc *reset;
struct gpio_desc *mdc;
struct gpio_desc *mdio;
@@ -58,11 +60,10 @@ struct realtek_priv {
struct mii_bus *bus;
int mdio_addr;
- unsigned int clk_delay;
- u8 cmd_read;
- u8 cmd_write;
+ const struct realtek_variant *variant;
+
spinlock_t lock; /* Locks around command writes */
- struct dsa_switch *ds;
+ struct dsa_switch ds;
struct irq_domain *irqdomain;
bool leds_disabled;
@@ -73,7 +74,6 @@ struct realtek_priv {
struct rtl8366_mib_counter *mib_counters;
const struct realtek_ops *ops;
- int (*setup_interface)(struct dsa_switch *ds);
int (*write_reg_noack)(void *ctx, u32 addr, u32 data);
int vlan_enabled;
@@ -91,7 +91,6 @@ struct realtek_ops {
int (*detect)(struct realtek_priv *priv);
int (*reset_chip)(struct realtek_priv *priv);
int (*setup)(struct realtek_priv *priv);
- void (*cleanup)(struct realtek_priv *priv);
int (*get_mib_counter)(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
@@ -116,8 +115,7 @@ struct realtek_ops {
};
struct realtek_variant {
- const struct dsa_switch_ops *ds_ops_smi;
- const struct dsa_switch_ops *ds_ops_mdio;
+ const struct dsa_switch_ops *ds_ops;
const struct realtek_ops *ops;
unsigned int clk_delay;
u8 cmd_read;
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index b072045eb154..12665a8a3412 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -101,6 +101,9 @@
#include <linux/if_vlan.h>
#include "realtek.h"
+#include "realtek-smi.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
/* Family-specific data and limits */
#define RTL8365MB_PHYADDRMAX 7
@@ -206,10 +209,10 @@
#define RTL8365MB_EXT_PORT_MODE_100FX 13
/* External interface mode configuration registers 0~1 */
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT0,EXT1 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 /* EXT2 */
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extint) \
- ((_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
+ ((_extint) <= 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
(_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 : \
0x0)
#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extint) \
@@ -689,7 +692,7 @@ static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
u32 val;
int ret;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
@@ -722,7 +725,7 @@ static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
*data = val & 0xFFFF;
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return ret;
}
@@ -733,7 +736,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
u32 val;
int ret;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
@@ -764,7 +767,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
goto out;
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return 0;
}
@@ -825,17 +828,6 @@ static int rtl8365mb_phy_write(struct realtek_priv *priv, int phy, int regnum,
return 0;
}
-static int rtl8365mb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
- return rtl8365mb_phy_read(ds->priv, phy, regnum);
-}
-
-static int rtl8365mb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
- u16 val)
-{
- return rtl8365mb_phy_write(ds->priv, phy, regnum, val);
-}
-
static const struct rtl8365mb_extint *
rtl8365mb_get_port_extint(struct realtek_priv *priv, int port)
{
@@ -878,6 +870,7 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
{
const struct rtl8365mb_extint *extint =
rtl8365mb_get_port_extint(priv, port);
+ struct dsa_switch *ds = &priv->ds;
struct device_node *dn;
struct dsa_port *dp;
int tx_delay = 0;
@@ -888,7 +881,7 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
if (!extint)
return -ENODEV;
- dp = dsa_to_port(priv->ds, port);
+ dp = dsa_to_port(ds, port);
dn = dp->dn;
/* Set the RGMII TX/RX delay
@@ -1541,6 +1534,7 @@ static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
static void rtl8365mb_stats_setup(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
+ struct dsa_switch *ds = &priv->ds;
int i;
/* Per-chip global mutex to protect MIB counter access, since doing
@@ -1551,7 +1545,7 @@ static void rtl8365mb_stats_setup(struct realtek_priv *priv)
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(priv->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
/* Per-port spinlock to protect the stats64 data */
@@ -1567,12 +1561,13 @@ static void rtl8365mb_stats_setup(struct realtek_priv *priv)
static void rtl8365mb_stats_teardown(struct realtek_priv *priv)
{
struct rtl8365mb *mb = priv->chip_data;
+ struct dsa_switch *ds = &priv->ds;
int i;
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(priv->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
cancel_delayed_work_sync(&p->mib_work);
@@ -1971,7 +1966,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
dev_info(priv->dev, "no interrupt support\n");
/* Configure CPU tagging */
- dsa_switch_for_each_cpu_port(cpu_dp, priv->ds) {
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
cpu->mask |= BIT(cpu_dp->index);
if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS)
@@ -1986,7 +1981,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(priv->ds, i))
+ if (dsa_is_unused_port(ds, i))
continue;
/* Forward only to the CPU */
@@ -2003,7 +1998,7 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
* ports will still forward frames to the CPU despite being
* administratively down by default.
*/
- rtl8365mb_port_stp_state_set(priv->ds, i, BR_STATE_DISABLED);
+ rtl8365mb_port_stp_state_set(ds, i, BR_STATE_DISABLED);
/* Set up per-port private data */
p->priv = priv;
@@ -2014,12 +2009,10 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
if (ret)
goto out_teardown_irq;
- if (priv->setup_interface) {
- ret = priv->setup_interface(ds);
- if (ret) {
- dev_err(priv->dev, "could not set up MDIO bus\n");
- goto out_teardown_irq;
- }
+ ret = rtl83xx_setup_user_mdio(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
}
/* Start statistics counter polling */
@@ -2113,7 +2106,7 @@ static int rtl8365mb_detect(struct realtek_priv *priv)
return 0;
}
-static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
+static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
.change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
@@ -2134,29 +2127,6 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
.port_max_mtu = rtl8365mb_port_max_mtu,
};
-static const struct dsa_switch_ops rtl8365mb_switch_ops_mdio = {
- .get_tag_protocol = rtl8365mb_get_tag_protocol,
- .change_tag_protocol = rtl8365mb_change_tag_protocol,
- .setup = rtl8365mb_setup,
- .teardown = rtl8365mb_teardown,
- .phylink_get_caps = rtl8365mb_phylink_get_caps,
- .phylink_mac_config = rtl8365mb_phylink_mac_config,
- .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
- .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
- .phy_read = rtl8365mb_dsa_phy_read,
- .phy_write = rtl8365mb_dsa_phy_write,
- .port_stp_state_set = rtl8365mb_port_stp_state_set,
- .get_strings = rtl8365mb_get_strings,
- .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
- .get_sset_count = rtl8365mb_get_sset_count,
- .get_eth_phy_stats = rtl8365mb_get_phy_stats,
- .get_eth_mac_stats = rtl8365mb_get_mac_stats,
- .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
- .get_stats64 = rtl8365mb_get_stats64,
- .port_change_mtu = rtl8365mb_port_change_mtu,
- .port_max_mtu = rtl8365mb_port_max_mtu,
-};
-
static const struct realtek_ops rtl8365mb_ops = {
.detect = rtl8365mb_detect,
.phy_read = rtl8365mb_phy_read,
@@ -2164,16 +2134,66 @@ static const struct realtek_ops rtl8365mb_ops = {
};
const struct realtek_variant rtl8365mb_variant = {
- .ds_ops_smi = &rtl8365mb_switch_ops_smi,
- .ds_ops_mdio = &rtl8365mb_switch_ops_mdio,
+ .ds_ops = &rtl8365mb_switch_ops,
.ops = &rtl8365mb_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
.chip_data_sz = sizeof(struct rtl8365mb),
};
-EXPORT_SYMBOL_GPL(rtl8365mb_variant);
+
+static const struct of_device_id rtl8365mb_of_match[] = {
+ { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rtl8365mb_of_match);
+
+static struct platform_driver rtl8365mb_smi_driver = {
+ .driver = {
+ .name = "rtl8365mb-smi",
+ .of_match_table = rtl8365mb_of_match,
+ },
+ .probe = realtek_smi_probe,
+ .remove_new = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+
+static struct mdio_driver rtl8365mb_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "rtl8365mb-mdio",
+ .of_match_table = rtl8365mb_of_match,
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+static int rtl8365mb_init(void)
+{
+ int ret;
+
+ ret = realtek_mdio_driver_register(&rtl8365mb_mdio_driver);
+ if (ret)
+ return ret;
+
+ ret = realtek_smi_driver_register(&rtl8365mb_smi_driver);
+ if (ret) {
+ realtek_mdio_driver_unregister(&rtl8365mb_mdio_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(rtl8365mb_init);
+
+static void __exit rtl8365mb_exit(void)
+{
+ realtek_smi_driver_unregister(&rtl8365mb_smi_driver);
+ realtek_mdio_driver_unregister(&rtl8365mb_mdio_driver);
+}
+module_exit(rtl8365mb_exit);
MODULE_AUTHOR("Alvin Å ipraga <alsi@bang-olufsen.dk>");
MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/rtl8366-core.c b/drivers/net/dsa/realtek/rtl8366-core.c
index 59f98d2c8769..7c6520ba3a26 100644
--- a/drivers/net/dsa/realtek/rtl8366-core.c
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -34,7 +34,7 @@ int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+EXPORT_SYMBOL_NS_GPL(rtl8366_mc_is_used, REALTEK_DSA);
/**
* rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
@@ -187,7 +187,7 @@ int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
return ret;
}
-EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_vlan, REALTEK_DSA);
int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid)
@@ -217,7 +217,7 @@ int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
+EXPORT_SYMBOL_NS_GPL(rtl8366_set_pvid, REALTEK_DSA);
int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
@@ -243,7 +243,7 @@ int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
priv->vlan4k_enabled = enable;
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan4k, REALTEK_DSA);
int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
{
@@ -265,7 +265,7 @@ int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
return ret;
}
-EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_enable_vlan, REALTEK_DSA);
int rtl8366_reset_vlan(struct realtek_priv *priv)
{
@@ -290,7 +290,7 @@ int rtl8366_reset_vlan(struct realtek_priv *priv)
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
+EXPORT_SYMBOL_NS_GPL(rtl8366_reset_vlan, REALTEK_DSA);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
@@ -345,7 +345,7 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_add, REALTEK_DSA);
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
@@ -389,7 +389,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
+EXPORT_SYMBOL_NS_GPL(rtl8366_vlan_del, REALTEK_DSA);
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
@@ -403,7 +403,7 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
for (i = 0; i < priv->num_mib_counters; i++)
ethtool_puts(&data, priv->mib_counters[i].name);
}
-EXPORT_SYMBOL_GPL(rtl8366_get_strings);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_strings, REALTEK_DSA);
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
@@ -417,7 +417,7 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
return priv->num_mib_counters;
}
-EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_sset_count, REALTEK_DSA);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
@@ -441,4 +441,4 @@ void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
data[i] = mibvalue;
}
}
-EXPORT_SYMBOL_GPL(rtl8366_get_ethtool_stats);
+EXPORT_SYMBOL_NS_GPL(rtl8366_get_ethtool_stats, REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index e3b6a470ca67..e10ae94cf771 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -23,6 +23,9 @@
#include <linux/regmap.h>
#include "realtek.h"
+#include "realtek-smi.h"
+#include "realtek-mdio.h"
+#include "rtl83xx.h"
#define RTL8366RB_PORT_NUM_CPU 5
#define RTL8366RB_NUM_PORTS 6
@@ -1030,12 +1033,10 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
dev_info(priv->dev, "no interrupt support\n");
- if (priv->setup_interface) {
- ret = priv->setup_interface(ds);
- if (ret) {
- dev_err(priv->dev, "could not set up MDIO bus\n");
- return -ENODEV;
- }
+ ret = rtl83xx_setup_user_mdio(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ return -ENODEV;
}
return 0;
@@ -1650,6 +1651,7 @@ static int rtl8366rb_get_mc_index(struct realtek_priv *priv, int port, int *val)
static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index)
{
+ struct dsa_switch *ds = &priv->ds;
struct rtl8366rb *rb;
bool pvid_enabled;
int ret;
@@ -1674,7 +1676,7 @@ static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index
* not drop any untagged or C-tagged frames. Make sure to update the
* filtering setting.
*/
- if (dsa_port_is_vlan_filtering(dsa_to_port(priv->ds, port)))
+ if (dsa_port_is_vlan_filtering(dsa_to_port(ds, port)))
ret = rtl8366rb_drop_untagged(priv, port, !pvid_enabled);
return ret;
@@ -1718,7 +1720,7 @@ static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_READ);
@@ -1746,7 +1748,7 @@ static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
phy, regnum, reg, val);
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return ret;
}
@@ -1760,7 +1762,7 @@ static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- mutex_lock(&priv->map_lock);
+ rtl83xx_lock(priv);
ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_WRITE);
@@ -1777,22 +1779,11 @@ static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
goto out;
out:
- mutex_unlock(&priv->map_lock);
+ rtl83xx_unlock(priv);
return ret;
}
-static int rtl8366rb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
- return rtl8366rb_phy_read(ds->priv, phy, regnum);
-}
-
-static int rtl8366rb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
- u16 val)
-{
- return rtl8366rb_phy_write(ds->priv, phy, regnum, val);
-}
-
static int rtl8366rb_reset_chip(struct realtek_priv *priv)
{
int timeout = 10;
@@ -1858,7 +1849,7 @@ static int rtl8366rb_detect(struct realtek_priv *priv)
return 0;
}
-static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
+static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
.phylink_get_caps = rtl8366rb_phylink_get_caps,
@@ -1882,32 +1873,6 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
.port_max_mtu = rtl8366rb_max_mtu,
};
-static const struct dsa_switch_ops rtl8366rb_switch_ops_mdio = {
- .get_tag_protocol = rtl8366_get_tag_protocol,
- .setup = rtl8366rb_setup,
- .phy_read = rtl8366rb_dsa_phy_read,
- .phy_write = rtl8366rb_dsa_phy_write,
- .phylink_get_caps = rtl8366rb_phylink_get_caps,
- .phylink_mac_link_up = rtl8366rb_mac_link_up,
- .phylink_mac_link_down = rtl8366rb_mac_link_down,
- .get_strings = rtl8366_get_strings,
- .get_ethtool_stats = rtl8366_get_ethtool_stats,
- .get_sset_count = rtl8366_get_sset_count,
- .port_bridge_join = rtl8366rb_port_bridge_join,
- .port_bridge_leave = rtl8366rb_port_bridge_leave,
- .port_vlan_filtering = rtl8366rb_vlan_filtering,
- .port_vlan_add = rtl8366_vlan_add,
- .port_vlan_del = rtl8366_vlan_del,
- .port_enable = rtl8366rb_port_enable,
- .port_disable = rtl8366rb_port_disable,
- .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
- .port_bridge_flags = rtl8366rb_port_bridge_flags,
- .port_stp_state_set = rtl8366rb_port_stp_state_set,
- .port_fast_age = rtl8366rb_port_fast_age,
- .port_change_mtu = rtl8366rb_change_mtu,
- .port_max_mtu = rtl8366rb_max_mtu,
-};
-
static const struct realtek_ops rtl8366rb_ops = {
.detect = rtl8366rb_detect,
.get_vlan_mc = rtl8366rb_get_vlan_mc,
@@ -1925,16 +1890,66 @@ static const struct realtek_ops rtl8366rb_ops = {
};
const struct realtek_variant rtl8366rb_variant = {
- .ds_ops_smi = &rtl8366rb_switch_ops_smi,
- .ds_ops_mdio = &rtl8366rb_switch_ops_mdio,
+ .ds_ops = &rtl8366rb_switch_ops,
.ops = &rtl8366rb_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
.chip_data_sz = sizeof(struct rtl8366rb),
};
-EXPORT_SYMBOL_GPL(rtl8366rb_variant);
+
+static const struct of_device_id rtl8366rb_of_match[] = {
+ { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rtl8366rb_of_match);
+
+static struct platform_driver rtl8366rb_smi_driver = {
+ .driver = {
+ .name = "rtl8366rb-smi",
+ .of_match_table = rtl8366rb_of_match,
+ },
+ .probe = realtek_smi_probe,
+ .remove_new = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+
+static struct mdio_driver rtl8366rb_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "rtl8366rb-mdio",
+ .of_match_table = rtl8366rb_of_match,
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+static int rtl8366rb_init(void)
+{
+ int ret;
+
+ ret = realtek_mdio_driver_register(&rtl8366rb_mdio_driver);
+ if (ret)
+ return ret;
+
+ ret = realtek_smi_driver_register(&rtl8366rb_smi_driver);
+ if (ret) {
+ realtek_mdio_driver_unregister(&rtl8366rb_mdio_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(rtl8366rb_init);
+
+static void __exit rtl8366rb_exit(void)
+{
+ realtek_smi_driver_unregister(&rtl8366rb_smi_driver);
+ realtek_mdio_driver_unregister(&rtl8366rb_mdio_driver);
+}
+module_exit(rtl8366rb_exit);
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(REALTEK_DSA);
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
new file mode 100644
index 000000000000..d2e876805393
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/of_mdio.h>
+
+#include "realtek.h"
+#include "rtl83xx.h"
+
+/**
+ * rtl83xx_lock() - Locks the mutex used by regmaps
+ * @ctx: realtek_priv pointer
+ *
+ * This function is passed to regmap as its lock callback. It is also
+ * called directly to block the regmap before a sequence of operations
+ * that must not be interleaved (those operations then use
+ * realtek_priv.map_nolock instead).
+ *
+ * Context: Can sleep. Acquires priv->map_lock.
+ * Return: Nothing.
+ */
+void rtl83xx_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_lock, REALTEK_DSA);
+
+/**
+ * rtl83xx_unlock() - Unlocks the mutex used by regmaps
+ * @ctx: realtek_priv pointer
+ *
+ * This function releases the mutex acquired by rtl83xx_lock().
+ *
+ * Context: Releases priv->map_lock.
+ * Return: Nothing.
+ */
+void rtl83xx_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unlock, REALTEK_DSA);
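Outside of regmap itself, the intended pattern is the one the PHY access paths in rtl8365mb and rtl8366rb above use: take the mutex once, run the multi-register sequence through priv->map_nolock, and release it at the end. A minimal sketch under that assumption; the register addresses are invented, not a real Realtek address map:

static int example_indirect_read(struct realtek_priv *priv, u32 *val)
{
	int ret;

	rtl83xx_lock(priv);

	/* Two accesses that must not interleave with other register users;
	 * 0x1f00/0x1f04 are illustrative addresses only
	 */
	ret = regmap_write(priv->map_nolock, 0x1f00, 0x1);
	if (ret)
		goto out;

	ret = regmap_read(priv->map_nolock, 0x1f04, val);

out:
	rtl83xx_unlock(priv);

	return ret;
}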
+
+static int rtl83xx_user_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_read(priv, addr, regnum);
+}
+
+static int rtl83xx_user_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_write(priv, addr, regnum, val);
+}
+
+/**
+ * rtl83xx_setup_user_mdio() - register the user MII bus
+ * @ds: DSA switch associated with this user_mii_bus
+ *
+ * Registers the MDIO bus for built-in Ethernet PHYs, and associates it with
+ * the mandatory 'mdio' child OF node of the switch.
+ *
+ * Context: Can sleep.
+ * Return: 0 on success, negative value for failure.
+ */
+int rtl83xx_setup_user_mdio(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret = 0;
+
+ mdio_np = of_get_child_by_name(priv->dev->of_node, "mdio");
+ if (!mdio_np) {
+ dev_err(priv->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ bus = devm_mdiobus_alloc(priv->dev);
+ if (!bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+
+ bus->priv = priv;
+ bus->name = "Realtek user MII";
+ bus->read = rtl83xx_user_mdio_read;
+ bus->write = rtl83xx_user_mdio_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s:user_mii", dev_name(priv->dev));
+ bus->parent = priv->dev;
+
+ ret = devm_of_mdiobus_register(priv->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(priv->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ goto err_put_node;
+ }
+
+ priv->user_mii_bus = bus;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_setup_user_mdio, REALTEK_DSA);
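The bus->read and bus->write trampolines above delegate to the variant's realtek_ops, so each chip driver only needs to provide phy_read() and phy_write() for its own register layout. A hedged sketch of that contract; the function name and the register window are invented for illustration:

static int example_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
	u32 val;
	int ret;

	/* 0x2000 + phy * 0x20 is an invented per-PHY register window */
	ret = regmap_read(priv->map, 0x2000 + phy * 0x20 + regnum, &val);
	if (ret)
		return ret;

	return val & 0xffff;
}

static const struct realtek_ops example_ops = {
	.phy_read = example_phy_read,
	/* .phy_write, .detect, ... filled in as the variant requires */
};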
+
+/**
+ * rtl83xx_probe() - probe a Realtek switch
+ * @dev: the device being probed
+ * @interface_info: specific management interface info.
+ *
+ * This function initializes realtek_priv and reads data from the device tree
+ * node. The switch is hard reset if a reset method is provided.
+ *
+ * Context: Can sleep.
+ * Return: Pointer to the realtek_priv or ERR_PTR() in case of failure.
+ *
+ * The realtek_priv pointer does not need to be freed as it is controlled by
+ * devres.
+ */
+struct realtek_priv *
+rtl83xx_probe(struct device *dev,
+ const struct realtek_interface_info *interface_info)
+{
+ const struct realtek_variant *var;
+ struct realtek_priv *priv;
+ struct regmap_config rc = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = interface_info->reg_read,
+ .reg_write = interface_info->reg_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = rtl83xx_lock,
+ .unlock = rtl83xx_unlock,
+ };
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ if (!var)
+ return ERR_PTR(-EINVAL);
+
+ priv = devm_kzalloc(dev, size_add(sizeof(*priv), var->chip_data_sz),
+ GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&priv->map_lock);
+
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ rc.disable_locking = true;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* Link forward and backward */
+ priv->dev = dev;
+ priv->variant = var;
+ priv->ops = var->ops;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ spin_lock_init(&priv->lock);
+
+ priv->leds_disabled = of_property_read_bool(dev->of_node,
+ "realtek,disable-leds");
+
+ /* TODO: if power is software controlled, set up any regulators here */
+ priv->reset_ctl = devm_reset_control_get_optional(dev, NULL);
+ if (IS_ERR(priv->reset_ctl)) {
+ ret = PTR_ERR(priv->reset_ctl);
+ dev_err_probe(dev, ret, "failed to get reset control\n");
+ return ERR_CAST(priv->reset_ctl);
+ }
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return ERR_CAST(priv->reset);
+ }
+
+ dev_set_drvdata(dev, priv);
+
+ if (priv->reset_ctl || priv->reset) {
+ rtl83xx_reset_assert(priv);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ rtl83xx_reset_deassert(priv);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ return priv;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_probe, REALTEK_DSA);
+
+/**
+ * rtl83xx_register_switch() - detect and register a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function first checks the switch chip ID and then registers a
+ * DSA switch.
+ *
+ * Context: Can sleep. Takes and releases priv->map_lock.
+ * Return: 0 on success, negative value for failure.
+ */
+int rtl83xx_register_switch(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ int ret;
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err_probe(priv->dev, ret, "unable to detect switch\n");
+ return ret;
+ }
+
+ ds->priv = priv;
+ ds->dev = priv->dev;
+ ds->ops = priv->variant->ds_ops;
+ ds->num_ports = priv->num_ports;
+
+ ret = dsa_register_switch(ds);
+ if (ret) {
+ dev_err_probe(priv->dev, ret, "unable to register switch\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_register_switch, REALTEK_DSA);
+
+/**
+ * rtl83xx_unregister_switch() - unregister a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function unregisters the DSA switch.
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void rtl83xx_unregister_switch(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+
+ dsa_unregister_switch(ds);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_unregister_switch, REALTEK_DSA);
+
+/**
+ * rtl83xx_shutdown() - shutdown a switch
+ * @priv: realtek_priv pointer
+ *
+ * This function shuts down the DSA switch and clears the driver data,
+ * to prevent realtek_{smi,mdio}_remove() from running afterwards, which
+ * can happen if the parent bus implements its .shutdown() by calling .remove().
+ *
+ * Context: Can sleep.
+ * Return: Nothing.
+ */
+void rtl83xx_shutdown(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(priv->dev, NULL);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA);
+
+/**
+ * rtl83xx_remove() - Cleanup a realtek switch driver
+ * @priv: realtek_priv pointer
+ *
+ * If a method is provided, this function asserts the hard reset of the switch
+ * in order to avoid leaking traffic when the driver is gone.
+ *
+ * Context: Might sleep if the reset controller or reset GPIO can sleep.
+ * Return: Nothing.
+ */
+void rtl83xx_remove(struct realtek_priv *priv)
+{
+ /* leave the device reset asserted */
+ rtl83xx_reset_assert(priv);
+}
+EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, REALTEK_DSA);
+
+void rtl83xx_reset_assert(struct realtek_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_assert(priv->reset_ctl);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to assert the switch reset control: %pe\n",
+ ERR_PTR(ret));
+
+ gpiod_set_value(priv->reset, true);
+}
+
+void rtl83xx_reset_deassert(struct realtek_priv *priv)
+{
+ int ret;
+
+ ret = reset_control_deassert(priv->reset_ctl);
+ if (ret)
+ dev_warn(priv->dev,
+ "Failed to deassert the switch reset control: %pe\n",
+ ERR_PTR(ret));
+
+ gpiod_set_value(priv->reset, false);
+}
+
+MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Realtek DSA switches common module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/rtl83xx.h b/drivers/net/dsa/realtek/rtl83xx.h
new file mode 100644
index 000000000000..c8a0ff8fd75e
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl83xx.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RTL83XX_H
+#define _RTL83XX_H
+
+struct realtek_interface_info {
+ int (*reg_read)(void *ctx, u32 reg, u32 *val);
+ int (*reg_write)(void *ctx, u32 reg, u32 val);
+};
+
+void rtl83xx_lock(void *ctx);
+void rtl83xx_unlock(void *ctx);
+int rtl83xx_setup_user_mdio(struct dsa_switch *ds);
+struct realtek_priv *
+rtl83xx_probe(struct device *dev,
+ const struct realtek_interface_info *interface_info);
+int rtl83xx_register_switch(struct realtek_priv *priv);
+void rtl83xx_unregister_switch(struct realtek_priv *priv);
+void rtl83xx_shutdown(struct realtek_priv *priv);
+void rtl83xx_remove(struct realtek_priv *priv);
+void rtl83xx_reset_assert(struct realtek_priv *priv);
+void rtl83xx_reset_deassert(struct realtek_priv *priv);
+
+#endif /* _RTL83XX_H */
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 768454aa36d6..d29b5d7af0d7 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -67,18 +67,12 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
+ netdev_lockdep_set_classes(dev);
return 0;
}
-static void dummy_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
{
if (new_carrier)
@@ -90,7 +84,6 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
- .ndo_uninit = dummy_dev_uninit,
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = set_multicast_list,
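For context on the dummy change: setting dev->pcpu_stat_type to NETDEV_PCPU_STAT_LSTATS tells the core to allocate and free dev->lstats itself, which is why ndo_uninit disappears along with the manual netdev_alloc_pcpu_stats()/free_percpu() pair. The transmit path keeps doing its own accounting; a minimal sketch of that side, mirroring what dummy_xmit already does:

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* dev->lstats is core-allocated once pcpu_stat_type is set */
	dev_lstats_add(dev, skb->len);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}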
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 5a274b99f299..6a19b5393ed1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -15,9 +15,6 @@ if ETHERNET
config MDIO
tristate
-config SUNGEM_PHY
- tristate
-
source "drivers/net/ethernet/3com/Kconfig"
source "drivers/net/ethernet/actions/Kconfig"
source "drivers/net/ethernet/adaptec/Kconfig"
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index da3bdd302502..760a9a60bc15 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -21,6 +21,7 @@ config ADIN1110
tristate "Analog Devices ADIN1110 MAC-PHY"
depends on SPI && NET_SWITCHDEV
select CRC8
+ select PHYLIB
help
Say yes here to build support for Analog Devices ADIN1110
Low Power 10BASE-T1L Ethernet MAC-PHY.
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index d7c274af6d4d..8b4ef5121308 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -464,8 +464,9 @@ static int adin1110_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* bitfield of ADIN1110_MDIOACC register will contain
* the requested register value.
*/
- ret = readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
- (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ ret = readx_poll_timeout_atomic(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE),
+ 100, 30000);
if (ret < 0)
return ret;
@@ -495,8 +496,9 @@ static int adin1110_mdio_write(struct mii_bus *bus, int phy_id,
if (ret < 0)
return ret;
- return readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
- (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ return readx_poll_timeout_atomic(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE),
+ 100, 30000);
}
/* ADIN1110 MAC-PHY contains an ADIN1100 PHY.
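The switch from readx_poll_timeout() to readx_poll_timeout_atomic() matters beyond the tighter numbers: the atomic variant busy-waits with udelay() between reads instead of sleeping, so it can run where sleeping is not allowed and supports the new 100 us poll interval (the non-atomic variant sleeps via usleep_range()). A sketch of the call shape from <linux/iopoll.h>, with an invented MMIO ready-bit poll:

#include <linux/io.h>
#include <linux/iopoll.h>

static int example_wait_ready(void __iomem *regs)
{
	u32 val;

	/* Re-evaluates readl(regs) until bit 0 is set: poll every 100 us
	 * with udelay(), give up with -ETIMEDOUT after 30 ms. The register
	 * and ready bit are illustrative only.
	 */
	return readx_poll_timeout_atomic(readl, regs, val, val & BIT(0),
					 100, 30000);
}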
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 633b321d7fdd..9e9e4a03f1a8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -90,8 +90,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
struct ena_com_admin_sq *sq = &admin_queue->sq;
u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
- sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
- &sq->dma_addr, GFP_KERNEL);
+ sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
if (!sq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -113,8 +112,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
struct ena_com_admin_cq *cq = &admin_queue->cq;
u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
- cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
- &cq->dma_addr, GFP_KERNEL);
+ cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
if (!cq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -136,8 +134,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
- &aenq->dma_addr, GFP_KERNEL);
+ aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
if (!aenq->entries) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -155,14 +152,13 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
aenq_caps = 0;
aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
- aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
- << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
- ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ aenq_caps |=
+ (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
if (unlikely(!aenq_handlers)) {
- netdev_err(ena_dev->net_device,
- "AENQ handlers pointer is NULL\n");
+ netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
return -EINVAL;
}
@@ -189,14 +185,12 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
}
if (unlikely(!admin_queue->comp_ctx)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Completion context is NULL\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
return NULL;
}
if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Completion context is occupied\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
return NULL;
}
@@ -226,8 +220,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- netdev_dbg(admin_queue->ena_dev->net_device,
- "Admin queue is full.\n");
+ netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(-ENOSPC);
}
@@ -274,8 +267,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
struct ena_comp_ctx *comp_ctx;
u16 i;
- admin_queue->comp_ctx =
- devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+ admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
if (unlikely(!admin_queue->comp_ctx)) {
netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
@@ -336,20 +328,17 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size,
- &io_sq->desc_addr.phys_addr,
- GFP_KERNEL);
+ &io_sq->desc_addr.phys_addr, GFP_KERNEL);
}
if (!io_sq->desc_addr.virt_addr) {
- netdev_err(ena_dev->net_device,
- "Memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
return -ENOMEM;
}
}
@@ -367,16 +356,14 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
dev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
- io_sq->bounce_buf_ctrl.base_buffer =
- devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->bounce_buf_ctrl.base_buffer)
io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- netdev_err(ena_dev->net_device,
- "Bounce buffer memory allocation failed\n");
+ netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
return -ENOMEM;
}
@@ -425,13 +412,11 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
prev_node = dev_to_node(ena_dev->dmadev);
set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
- dma_alloc_coherent(ena_dev->dmadev, size,
- &io_cq->cdesc_addr.phys_addr,
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
GFP_KERNEL);
}
@@ -514,8 +499,8 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
u8 comp_status)
{
if (unlikely(comp_status != 0))
- netdev_err(admin_queue->ena_dev->net_device,
- "Admin command failed[%u]\n", comp_status);
+ netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
+ comp_status);
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
@@ -580,8 +565,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
- netdev_err(admin_queue->ena_dev->net_device,
- "Command was aborted\n");
+ netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
spin_lock_irqsave(&admin_queue->q_lock, flags);
admin_queue->stats.aborted_cmd++;
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -589,8 +573,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
goto err;
}
- WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
- comp_ctx->status);
+ WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
@@ -634,8 +617,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set LLQ configurations: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
return ret;
}
@@ -658,8 +640,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_default_cfg->llq_header_location;
} else {
netdev_err(ena_dev->net_device,
- "Invalid header location control, supported: 0x%x\n",
- supported_feat);
+ "Invalid header location control, supported: 0x%x\n", supported_feat);
return -EINVAL;
}
@@ -681,8 +662,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
netdev_err(ena_dev->net_device,
"Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_stride_ctrl,
- supported_feat, llq_info->desc_stride_ctrl);
+ llq_default_cfg->llq_stride_ctrl, supported_feat,
+ llq_info->desc_stride_ctrl);
}
} else {
llq_info->desc_stride_ctrl = 0;
@@ -704,8 +685,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
llq_info->desc_list_entry_size = 256;
} else {
netdev_err(ena_dev->net_device,
- "Invalid entry_size_ctrl, supported: 0x%x\n",
- supported_feat);
+ "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
return -EINVAL;
}
@@ -750,8 +730,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
netdev_err(ena_dev->net_device,
"Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
- llq_default_cfg->llq_num_decs_before_header,
- supported_feat, llq_info->descs_num_before_header);
+ llq_default_cfg->llq_num_decs_before_header, supported_feat,
+ llq_info->descs_num_before_header);
}
/* Check for accelerated queue supported */
llq_accel_mode_get = llq_features->accel_mode.u.get;
@@ -767,8 +747,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
rc = ena_com_set_llq(ena_dev);
if (rc)
- netdev_err(ena_dev->net_device,
- "Cannot set LLQ configuration: %d\n", rc);
+ netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
return rc;
}
@@ -780,8 +759,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
int ret;
wait_for_completion_timeout(&comp_ctx->wait_event,
- usecs_to_jiffies(
- admin_queue->completion_timeout));
+ usecs_to_jiffies(admin_queue->completion_timeout));
/* In case the command wasn't completed find out the root cause.
* There might be 2 kinds of errors
@@ -797,8 +775,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
if (comp_ctx->status == ENA_CMD_COMPLETED) {
netdev_err(admin_queue->ena_dev->net_device,
"The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode,
- admin_queue->auto_polling ? "ON" : "OFF");
+ comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
/* Check if fallback to polling is enabled */
if (admin_queue->auto_polling)
admin_queue->polling = true;
@@ -867,15 +844,13 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
if (unlikely(i == timeout)) {
netdev_err(ena_dev->net_device,
"Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
- mmio_read->seq_num, offset, read_resp->req_id,
- read_resp->reg_off);
+ mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
ret = ENA_MMIO_READ_TIMEOUT;
goto err;
}
if (read_resp->reg_off != offset) {
- netdev_err(ena_dev->net_device,
- "Read failure: wrong offset provided\n");
+ netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@@ -934,8 +909,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- netdev_err(ena_dev->net_device,
- "Failed to destroy io sq error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
return ret;
}
@@ -949,8 +923,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_cq->cdesc_addr.virt_addr) {
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- dma_free_coherent(ena_dev->dmadev, size,
- io_cq->cdesc_addr.virt_addr,
+ dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
io_cq->cdesc_addr.phys_addr);
io_cq->cdesc_addr.virt_addr = NULL;
@@ -959,8 +932,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
if (io_sq->desc_addr.virt_addr) {
size = io_sq->desc_entry_size * io_sq->q_depth;
- dma_free_coherent(ena_dev->dmadev, size,
- io_sq->desc_addr.virt_addr,
+ dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr);
io_sq->desc_addr.virt_addr = NULL;
@@ -985,8 +957,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
- netdev_err(ena_dev->net_device,
- "Reg read timeout occurred\n");
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
@@ -1026,8 +997,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
- feature_id);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
return -EOPNOTSUPP;
}
@@ -1064,8 +1034,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
if (unlikely(ret))
netdev_err(ena_dev->net_device,
- "Failed to submit get_feature command %d error: %d\n",
- feature_id, ret);
+ "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
return ret;
}
@@ -1104,13 +1073,11 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_FUNCTION))
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
return -EOPNOTSUPP;
- rss->hash_key =
- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- &rss->hash_key_dma_addr, GFP_KERNEL);
+ rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+ &rss->hash_key_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_key))
return -ENOMEM;
@@ -1123,8 +1090,8 @@ static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
if (rss->hash_key)
- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
- rss->hash_key, rss->hash_key_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
+ rss->hash_key_dma_addr);
rss->hash_key = NULL;
}
@@ -1132,9 +1099,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
- rss->hash_ctrl =
- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+ rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+ &rss->hash_ctrl_dma_addr, GFP_KERNEL);
if (unlikely(!rss->hash_ctrl))
return -ENOMEM;
@@ -1147,8 +1113,8 @@ static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
if (rss->hash_ctrl)
- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
- rss->hash_ctrl, rss->hash_ctrl_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
+ rss->hash_ctrl_dma_addr);
rss->hash_ctrl = NULL;
}
@@ -1177,15 +1143,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
tbl_size = (1ULL << log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);
- rss->rss_ind_tbl =
- dma_alloc_coherent(ena_dev->dmadev, tbl_size,
- &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+ rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
+ GFP_KERNEL);
if (unlikely(!rss->rss_ind_tbl))
goto mem_err1;
tbl_size = (1ULL << log_size) * sizeof(u16);
- rss->host_rss_ind_tbl =
- devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+ rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
if (unlikely(!rss->host_rss_ind_tbl))
goto mem_err2;
@@ -1197,8 +1161,7 @@ mem_err2:
tbl_size = (1ULL << log_size) *
sizeof(struct ena_admin_rss_ind_table_entry);
- dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
- rss->rss_ind_tbl_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
rss->rss_ind_tbl = NULL;
mem_err1:
rss->tbl_log_size = 0;
@@ -1261,8 +1224,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
&create_cmd.sq_ba,
io_sq->desc_addr.phys_addr);
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Memory address set failed\n");
+ netdev_err(ena_dev->net_device, "Memory address set failed\n");
return ret;
}
}
@@ -1273,8 +1235,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to create IO SQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
return ret;
}
@@ -1284,16 +1245,12 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
(uintptr_t)cmd_completion.sq_doorbell_offset);
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
- + cmd_completion.llq_headers_offset);
-
io_sq->desc_addr.pbuf_dev_addr =
(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
cmd_completion.llq_descriptors_offset);
}
- netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
- io_sq->idx, io_sq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
return ret;
}
@@ -1420,8 +1377,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(struct ena_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to create IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
return ret;
}
@@ -1430,18 +1386,12 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.cq_interrupt_unmask_register_offset);
- if (cmd_completion.cq_head_db_register_offset)
- io_cq->cq_head_db_reg =
- (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
- cmd_completion.cq_head_db_register_offset);
-
if (cmd_completion.numa_node_register_offset)
io_cq->numa_node_cfg_reg =
(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.numa_node_register_offset);
- netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
- io_cq->idx, io_cq->q_depth);
+ netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
return ret;
}
@@ -1451,8 +1401,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
struct ena_com_io_cq **io_cq)
{
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Invalid queue number %d but the max is %d\n", qid,
+ netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1492,8 +1441,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
spin_lock_irqsave(&admin_queue->q_lock, flags);
while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
- ena_delay_exponential_backoff_us(exp++,
- ena_dev->ena_min_poll_delay_us);
+ ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
spin_lock_irqsave(&admin_queue->q_lock, flags);
}
spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -1519,8 +1467,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- netdev_err(ena_dev->net_device,
- "Failed to destroy IO CQ. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
return ret;
}
@@ -1588,8 +1535,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to config AENQ ret: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
return ret;
}
@@ -1610,8 +1556,7 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
- netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
- width);
+ netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
return -EINVAL;
}
@@ -1633,19 +1578,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
ctrl_ver = ena_com_reg_bar_read32(ena_dev,
ENA_REGS_CONTROLLER_VERSION_OFF);
- if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
- (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
return -ETIME;
}
dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
- (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- dev_info(ena_dev->dmadev,
- "ENA controller version: %d.%d.%d implementation version %d\n",
+ dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
@@ -1694,20 +1636,17 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
if (sq->entries)
- dma_free_coherent(ena_dev->dmadev, size, sq->entries,
- sq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
sq->entries = NULL;
size = ADMIN_CQ_SIZE(admin_queue->q_depth);
if (cq->entries)
- dma_free_coherent(ena_dev->dmadev, size, cq->entries,
- cq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
cq->entries = NULL;
size = ADMIN_AENQ_SIZE(aenq->q_depth);
if (ena_dev->aenq.entries)
- dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
- aenq->dma_addr);
+ dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
aenq->entries = NULL;
}
@@ -1733,10 +1672,8 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
spin_lock_init(&mmio_read->lock);
- mmio_read->read_resp =
- dma_alloc_coherent(ena_dev->dmadev,
- sizeof(*mmio_read->read_resp),
- &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
if (unlikely(!mmio_read->read_resp))
goto err;
@@ -1767,8 +1704,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
- dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
- mmio_read->read_resp, mmio_read->read_resp_dma_addr);
+ dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
+ mmio_read->read_resp_dma_addr);
mmio_read->read_resp = NULL;
}
@@ -1800,8 +1737,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
}
if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
- netdev_err(ena_dev->net_device,
- "Device isn't ready, abort com init\n");
+ netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
return -ENODEV;
}
@@ -1878,8 +1814,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Qid (%d) is bigger than max num of queues (%d)\n",
+ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
ctx->qid, ENA_TOTAL_NUM_QUEUES);
return -EINVAL;
}
@@ -1905,8 +1840,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
/* header length is limited to 8 bits */
- io_sq->tx_max_header_size =
- min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+ io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
if (ret)
@@ -1938,8 +1872,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
struct ena_com_io_cq *io_cq;
if (qid >= ENA_TOTAL_NUM_QUEUES) {
- netdev_err(ena_dev->net_device,
- "Qid (%d) is bigger than max num of queues (%d)\n",
+ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
qid, ENA_TOTAL_NUM_QUEUES);
return;
}
@@ -1983,8 +1916,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
if (rc)
return rc;
- if (get_resp.u.max_queue_ext.version !=
- ENA_FEATURE_MAX_QUEUE_EXT_VER)
+ if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
return -EINVAL;
memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
@@ -2025,18 +1957,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
if (!rc)
- memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
- sizeof(get_resp.u.hw_hints));
+ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
else if (rc == -EOPNOTSUPP)
- memset(&get_feat_ctx->hw_hints, 0x0,
- sizeof(get_feat_ctx->hw_hints));
+ memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
else
return rc;
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
if (!rc)
- memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
- sizeof(get_resp.u.llq));
+ memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
else if (rc == -EOPNOTSUPP)
memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
else
@@ -2084,8 +2013,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
aenq_common = &aenq_e->aenq_common_desc;
/* Go over all the events */
- while ((READ_ONCE(aenq_common->flags) &
- ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+ while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Make sure the phase bit (ownership) is as expected before
* reading the rest of the descriptor.
*/
@@ -2094,8 +2022,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
timestamp = (u64)aenq_common->timestamp_low |
((u64)aenq_common->timestamp_high << 32);
- netdev_dbg(ena_dev->net_device,
- "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
aenq_common->group, aenq_common->syndrome, timestamp);
/* Handle specific event */
@@ -2124,8 +2051,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- writel_relaxed((u32)aenq->head,
- ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2137,15 +2063,13 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
- if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
- (cap == ENA_MMIO_READ_TIMEOUT))) {
+ if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
return -ETIME;
}
if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
- netdev_err(ena_dev->net_device,
- "Device isn't ready, can't reset device\n");
+ netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
return -EINVAL;
}
@@ -2168,8 +2092,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
rc = wait_for_reset_state(ena_dev, timeout,
ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
if (rc != 0) {
- netdev_err(ena_dev->net_device,
- "Reset indication didn't turn on\n");
+ netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
return rc;
}
@@ -2177,8 +2100,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
- netdev_err(ena_dev->net_device,
- "Reset indication didn't turn off\n");
+ netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
return rc;
}
@@ -2215,8 +2137,7 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
sizeof(*get_resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to get stats. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
return ret;
}
@@ -2228,8 +2149,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
int ret;
if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
- netdev_err(ena_dev->net_device,
- "Capability %d isn't supported\n",
+ netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
ENA_ADMIN_ENI_STATS);
return -EOPNOTSUPP;
}
@@ -2266,8 +2186,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
- ENA_ADMIN_MTU);
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
return -EOPNOTSUPP;
}
@@ -2286,8 +2205,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set mtu %d. error: %d\n", mtu, ret);
+ netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
return ret;
}
@@ -2301,8 +2219,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
ret = ena_com_get_feature(ena_dev, &resp,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to get offload capabilities %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
return ret;
}
@@ -2320,8 +2237,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
struct ena_admin_get_feat_resp get_resp;
int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION);
return -EOPNOTSUPP;
@@ -2334,8 +2250,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
return ret;
if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
- netdev_err(ena_dev->net_device,
- "Func hash %d isn't supported by device, abort\n",
+ netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
rss->hash_func);
return -EOPNOTSUPP;
}
@@ -2365,8 +2280,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret)) {
- netdev_err(ena_dev->net_device,
- "Failed to set hash function %d. error: %d\n",
+ netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
rss->hash_func, ret);
return -EINVAL;
}
@@ -2398,16 +2312,15 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return rc;
if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
- netdev_err(ena_dev->net_device,
- "Flow hash function %d isn't supported\n", func);
+ netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
return -EOPNOTSUPP;
}
if ((func == ENA_ADMIN_TOEPLITZ) && key) {
if (key_len != sizeof(hash_key->key)) {
netdev_err(ena_dev->net_device,
- "key len (%u) doesn't equal the supported size (%zu)\n",
- key_len, sizeof(hash_key->key));
+ "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
+ sizeof(hash_key->key));
return -EINVAL;
}
memcpy(hash_key->key, key, key_len);
@@ -2495,8 +2408,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
struct ena_admin_set_feat_resp resp;
int ret;
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_RSS_HASH_INPUT)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT);
return -EOPNOTSUPP;
@@ -2527,8 +2439,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set hash input. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
return ret;
}
@@ -2605,8 +2516,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
int rc;
if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
- netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
- proto);
+ netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
return -EINVAL;
}
@@ -2658,8 +2568,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
struct ena_admin_set_feat_resp resp;
int ret;
- if (!ena_com_check_supported_feature_id(
- ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
return -EOPNOTSUPP;
@@ -2699,8 +2608,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set indirect table. error: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
return ret;
}
@@ -2779,9 +2687,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- host_attr->host_info =
- dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
- &host_attr->host_info_dma_addr, GFP_KERNEL);
+ host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+ &host_attr->host_info_dma_addr, GFP_KERNEL);
if (unlikely(!host_attr->host_info))
return -ENOMEM;
@@ -2827,8 +2734,7 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
if (host_attr->debug_area_virt_addr) {
dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
- host_attr->debug_area_virt_addr,
- host_attr->debug_area_dma_addr);
+ host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
host_attr->debug_area_virt_addr = NULL;
}
}
@@ -2877,8 +2783,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret))
- netdev_err(ena_dev->net_device,
- "Failed to set host attributes: %d\n", ret);
+ netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
return ret;
}
@@ -2896,8 +2801,7 @@ static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *en
u32 *intr_moder_interval)
{
if (!intr_delay_resolution) {
- netdev_err(ena_dev->net_device,
- "Illegal interrupt delay granularity value\n");
+ netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
return -EFAULT;
}
@@ -2935,14 +2839,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
if (rc) {
if (rc == -EOPNOTSUPP) {
- netdev_dbg(ena_dev->net_device,
- "Feature %d isn't supported\n",
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
netdev_err(ena_dev->net_device,
- "Failed to get interrupt moderation admin cmd. rc: %d\n",
- rc);
+ "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
}
/* no moderation supported, disable adaptive support */
@@ -2990,8 +2892,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
if (unlikely(ena_dev->tx_max_header_size == 0)) {
- netdev_err(ena_dev->net_device,
- "The size of the LLQ entry is smaller than needed\n");
+ netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
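
For context on the AENQ handler hunks above: the queue is consumed by polling a phase (ownership) bit in each descriptor rather than a hardware head pointer. Below is a minimal sketch of that pattern, assuming illustrative types (aenq_state, aenq_desc, the process() hook) standing in for the driver's struct ena_com_aenq; only the READ_ONCE()/dma_rmb() ordering mirrors the code above.

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct aenq_desc {
	u8 flags;			/* bit 0: phase (ownership) */
};

struct aenq_state {
	struct aenq_desc *entries;
	u16 head;
	u16 depth;			/* power of two */
	u8 phase;
};

#define AENQ_PHASE_MASK 0x1

static void consume_aenq(struct aenq_state *q,
			 void (*process)(struct aenq_desc *))
{
	struct aenq_desc *d = &q->entries[q->head & (q->depth - 1)];

	/* A descriptor is ours while its phase bit matches the phase
	 * we expect; the device flips the bit as it publishes entries.
	 */
	while ((READ_ONCE(d->flags) & AENQ_PHASE_MASK) == q->phase) {
		/* Read the phase bit before the descriptor body, which
		 * the device may still be writing.
		 */
		dma_rmb();

		process(d);

		q->head++;
		if ((q->head & (q->depth - 1)) == 0)
			q->phase ^= 1;	/* expected ownership flips per wrap */
		d = &q->entries[q->head & (q->depth - 1)];
	}
	/* The real handler then writes q->head to the AENQ doorbell
	 * after an mb(), as seen above.
	 */
}
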
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 3c5081d9d25d..fea57eb8e58b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -109,16 +109,13 @@ struct ena_com_io_cq {
/* Interrupt unmask register */
u32 __iomem *unmask_reg;
- /* The completion queue head doorbell register */
- u32 __iomem *cq_head_db_reg;
-
/* numa configuration register (for TPH) */
u32 __iomem *numa_node_cfg_reg;
/* The value to write to the above register to unmask
* the interrupt of this queue
*/
- u32 msix_vector;
+ u32 msix_vector ____cacheline_aligned;
enum queue_direction direction;
@@ -134,7 +131,6 @@ struct ena_com_io_cq {
/* Device queue index */
u16 idx;
u16 head;
- u16 last_head_update;
u8 phase;
u8 cdesc_entry_size_in_bytes;
@@ -158,7 +154,6 @@ struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
u32 __iomem *db_addr;
- u8 __iomem *header_addr;
enum queue_direction direction;
enum ena_admin_placement_policy_type mem_queue_type;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index f9f886289b97..933e619b3a31 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -18,8 +18,7 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = (READ_ONCE(cdesc->status) &
- ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
if (desc_phase != expected_phase)
@@ -65,8 +64,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
io_sq->entries_in_tx_burst_left--;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
- io_sq->qid, io_sq->entries_in_tx_burst_left);
+ "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
+ io_sq->entries_in_tx_burst_left);
}
/* Make sure everything was written into the bounce buffer before
@@ -75,8 +74,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
wmb();
/* The line is completed. Copy it to dev */
- __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
- bounce_buffer, (llq_info->desc_list_entry_size) / 8);
+ __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
+ (llq_info->desc_list_entry_size) / 8);
io_sq->tail++;
@@ -102,16 +101,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
header_offset =
llq_info->descs_num_before_header * io_sq->desc_entry_size;
- if (unlikely((header_offset + header_len) >
- llq_info->desc_list_entry_size)) {
+ if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
"Trying to write header larger than llq entry can accommodate\n");
return -EFAULT;
}
if (unlikely(!bounce_buffer)) {
- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
return -EFAULT;
}
@@ -129,8 +126,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
bounce_buffer = pkt_ctrl->curr_bounce_buf;
if (unlikely(!bounce_buffer)) {
- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Bounce buffer is NULL\n");
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
return NULL;
}
@@ -247,8 +243,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_cq_inc_head(io_cq);
count++;
- last = (READ_ONCE(cdesc->status) &
- ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
@@ -369,9 +364,8 @@ static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
"l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
- ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
- ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
- ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+ ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
+ ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}
/*****************************************************************************/
@@ -403,13 +397,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (unlikely(header_len > io_sq->tx_max_header_size)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Header size is too large %d max header: %d\n",
- header_len, io_sq->tx_max_header_size);
+ "Header size is too large %d max header: %d\n", header_len,
+ io_sq->tx_max_header_size);
return -EINVAL;
}
- if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
- !buffer_to_push)) {
+ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
"Push header wasn't provided in LLQ mode\n");
return -EINVAL;
@@ -556,13 +549,11 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
}
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
- nb_hw_desc);
+ "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);
if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
- ena_rx_ctx->max_bufs);
+ "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
return -ENOSPC;
}
@@ -586,8 +577,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
io_sq->next_to_comp += nb_hw_desc;
netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
- io_sq->qid, io_sq->next_to_comp);
+ "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
+ io_sq->next_to_comp);
/* Get rx flags from the last pkt */
ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
@@ -624,8 +615,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->req_id = req_id;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
- __func__, io_sq->qid, req_id);
+ "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
+ req_id);
desc->buff_addr_lo = (u32)ena_buf->paddr;
desc->buff_addr_hi =
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 372b259279ec..72b019758caa 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -8,8 +8,6 @@
#include "ena_com.h"
-/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
-#define ENA_COMP_HEAD_THRESH 4
/* we allow 2 DMA descriptors per LLQ entry */
#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE (2 * sizeof(struct ena_eth_io_tx_desc))
#define ENA_LLQ_HEADER (128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
@@ -145,8 +143,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
}
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Queue: %d num_descs: %d num_entries_needed: %d\n",
- io_sq->qid, num_descs, num_entries_needed);
+ "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
+ num_entries_needed);
return num_entries_needed > io_sq->entries_in_tx_burst_left;
}
@@ -157,43 +155,20 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
u16 tail = io_sq->tail;
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Write submission queue doorbell for queue: %d tail: %d\n",
- io_sq->qid, tail);
+ "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);
writel(tail, io_sq->db_addr);
if (is_llq_max_tx_burst_exists(io_sq)) {
netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
- "Reset available entries in tx burst for queue %d to %d\n",
- io_sq->qid, max_entries_in_tx_burst);
+ "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
+ max_entries_in_tx_burst);
io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
}
return 0;
}
-static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
-{
- u16 unreported_comp, head;
- bool need_update;
-
- if (unlikely(io_cq->cq_head_db_reg)) {
- head = io_cq->head;
- unreported_comp = head - io_cq->last_head_update;
- need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
-
- if (unlikely(need_update)) {
- netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Write completion queue doorbell for queue %d: head: %d\n",
- io_cq->qid, head);
- writel(head, io_cq->cq_head_db_reg);
- io_cq->last_head_update = head;
- }
- }
-
- return 0;
-}
-
static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
u8 numa_node)
{
@@ -248,8 +223,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
*req_id = READ_ONCE(cdesc->req_id);
if (unlikely(*req_id >= io_cq->q_depth)) {
- netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
- "Invalid req id %d\n", cdesc->req_id);
+ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n",
+ cdesc->req_id);
return -EINVAL;
}
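
One design note on ena_com_write_sq_doorbell() above: for LLQ devices each doorbell opens a new device burst, so writing the tail also restores the budget of entries that may be pushed before the next doorbell (which ena_com_is_doorbell_needed() checks against). A condensed sketch under assumed field names; sq_state is illustrative, while writel() and the reset-on-doorbell behaviour come from the code above.

#include <linux/io.h>
#include <linux/types.h>

struct sq_state {
	u32 __iomem *db_addr;		/* submission queue doorbell */
	u16 tail;
	u16 entries_in_tx_burst_left;
	u16 max_entries_in_tx_burst;	/* 0 when the device has no burst limit */
};

static void sq_ring_doorbell(struct sq_state *sq)
{
	/* Publish the new tail to the device. */
	writel(sq->tail, sq->db_addr);

	/* A doorbell starts a fresh burst, so the count of entries we
	 * may still push before the next doorbell is reset in full.
	 */
	if (sq->max_entries_in_tx_burst)
		sq->entries_in_tx_burst_left = sq->max_entries_in_tx_burst;
}
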
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 1c0a7828d397..09e7da1a69c9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -32,7 +32,7 @@ MODULE_LICENSE("GPL");
#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
- NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
+ NETIF_MSG_IFDOWN | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static struct ena_aenq_handlers aenq_handlers;
@@ -47,19 +47,44 @@ static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
struct ena_adapter *adapter = netdev_priv(dev);
+ unsigned int time_since_last_napi, threshold;
+ struct ena_ring *tx_ring;
+ int napi_scheduled;
+
+ if (txqueue >= adapter->num_io_queues) {
+ netdev_err(dev, "TX timeout on invalid queue %u\n", txqueue);
+ goto schedule_reset;
+ }
+
+ threshold = jiffies_to_usecs(dev->watchdog_timeo);
+ tx_ring = &adapter->tx_ring[txqueue];
+ time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
+ napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED);
+
+ netdev_err(dev,
+ "TX q %d is paused for too long (threshold %u). Time since last napi %u usec. napi scheduled: %d\n",
+ txqueue,
+ threshold,
+ time_since_last_napi,
+ napi_scheduled);
+
+ if (threshold < time_since_last_napi && napi_scheduled) {
+ netdev_err(dev,
+ "napi handler hasn't been called for a long time but is scheduled\n");
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
+ }
+schedule_reset:
/* Change the state of the device to trigger reset
* Check that we are not in the middle of a reset, or that a reset was already triggered
*/
-
if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
- ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD);
+ ena_reset_device(adapter, reset_reason);
ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
-
- netif_err(adapter, tx_err, dev, "Transmit time out\n");
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
@@ -116,11 +141,9 @@ int ena_xmit_common(struct ena_adapter *adapter,
if (unlikely(rc)) {
netif_err(adapter, tx_queued, adapter->netdev,
"Failed to prepare tx bufs\n");
- ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
- &ring->syncp);
+ ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp);
if (rc != -ENOMEM)
- ena_reset_device(adapter,
- ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE);
return rc;
}
@@ -485,8 +508,7 @@ static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
*/
page = dev_alloc_page();
if (!page) {
- ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
- &rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp);
return ERR_PTR(-ENOSPC);
}
@@ -523,7 +545,7 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
/* We handle DMA here */
page = ena_alloc_map_page(rx_ring, &dma);
- if (unlikely(IS_ERR(page)))
+ if (IS_ERR(page))
return PTR_ERR(page);
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
@@ -545,8 +567,8 @@ static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
struct ena_rx_buffer *rx_info,
unsigned long attrs)
{
- dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE,
- DMA_BIDIRECTIONAL, attrs);
+ dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
+ attrs);
}
static void ena_free_rx_page(struct ena_ring *rx_ring,
@@ -819,8 +841,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
&req_id);
if (rc) {
if (unlikely(rc == -EINVAL))
- handle_invalid_req_id(tx_ring, req_id, NULL,
- false);
+ handle_invalid_req_id(tx_ring, req_id, NULL, false);
break;
}
@@ -856,7 +877,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
tx_ring->next_to_clean = next_to_clean;
ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
@@ -1046,8 +1066,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
DMA_FROM_DEVICE);
if (!reuse_rx_buf_page)
- ena_unmap_rx_buff_attrs(rx_ring, rx_info,
- DMA_ATTR_SKIP_CPU_SYNC);
+ ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
page_offset + buf_offset, len, buf_len);
@@ -1303,10 +1322,8 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ENA_RX_REFILL_THRESH_PACKET);
/* Optimization, try to batch new rx buffers */
- if (refill_required > refill_threshold) {
- ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
+ if (refill_required > refill_threshold)
ena_refill_rx_bufs(rx_ring, refill_required);
- }
if (xdp_flags & ENA_XDP_REDIRECT)
xdp_do_flush();
@@ -1320,8 +1337,7 @@ error:
adapter = netdev_priv(rx_ring->netdev);
if (rc == -ENOSPC) {
- ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
- &rx_ring->syncp);
+ ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
} else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
@@ -1811,8 +1827,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
if (!ena_dev->rss.tbl_log_size) {
rc = ena_rss_init_default(adapter);
if (rc && (rc != -EOPNOTSUPP)) {
- netif_err(adapter, ifup, adapter->netdev,
- "Failed to init RSS rc: %d\n", rc);
+ netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc);
return rc;
}
}
@@ -2134,6 +2149,12 @@ int ena_up(struct ena_adapter *adapter)
*/
ena_init_napi_in_range(adapter, 0, io_queue_count);
+ /* Enabling DIM needs to happen before enabling IRQs since DIM
+ * is run from the napi routine
+ */
+ if (ena_com_interrupt_moderation_supported(adapter->ena_dev))
+ ena_com_enable_adaptive_moderation(adapter->ena_dev);
+
rc = ena_request_io_irq(adapter);
if (rc)
goto err_req_irq;
@@ -2184,7 +2205,7 @@ void ena_down(struct ena_adapter *adapter)
{
int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
- netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
+ netif_dbg(adapter, ifdown, adapter->netdev, "%s\n", __func__);
clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
@@ -2197,8 +2218,6 @@ void ena_down(struct ena_adapter *adapter)
/* After this point the napi handler won't enable the tx queue */
ena_napi_disable_in_range(adapter, 0, io_queue_count);
- /* After destroy the queue there won't be any new interrupts */
-
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
int rc;
@@ -2588,8 +2607,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(rc))
goto error_drop_packet;
- skb_tx_timestamp(skb);
-
next_to_use = tx_ring->next_to_use;
req_id = tx_ring->free_ids[next_to_use];
tx_info = &tx_ring->tx_buffer_info[req_id];
@@ -2653,6 +2670,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+ skb_tx_timestamp(skb);
+
if (netif_xmit_stopped(txq) || !netdev_xmit_more())
/* trigger the dma engine. ena_ring_tx_doorbell()
* calls a memory barrier inside it.
@@ -2670,22 +2689,6 @@ error_drop_packet:
return NETDEV_TX_OK;
}
-static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- u16 qid;
- /* we suspect that this is good for in--kernel network services that
- * want to loop incoming skb rx to tx in normal user generated traffic,
- * most probably we will not get to this
- */
- if (skb_rx_queue_recorded(skb))
- qid = skb_get_rx_queue(skb);
- else
- qid = netdev_pick_tx(dev, skb, NULL);
-
- return qid;
-}
-
static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
@@ -2764,8 +2767,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_set_host_attributes(adapter->ena_dev);
if (rc) {
if (rc == -EOPNOTSUPP)
- netif_warn(adapter, drv, adapter->netdev,
- "Cannot set host attributes\n");
+ netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n");
else
netif_err(adapter, drv, adapter->netdev,
"Cannot set host attributes\n");
@@ -2863,18 +2865,16 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_open = ena_open,
.ndo_stop = ena_close,
.ndo_start_xmit = ena_start_xmit,
- .ndo_select_queue = ena_select_queue,
.ndo_get_stats64 = ena_get_stats64,
.ndo_tx_timeout = ena_tx_timeout,
.ndo_change_mtu = ena_change_mtu,
- .ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
.ndo_bpf = ena_xdp,
.ndo_xdp_xmit = ena_xdp_xmit,
};
-static void ena_calc_io_queue_size(struct ena_adapter *adapter,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_calc_io_queue_size(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
struct ena_com_dev *ena_dev = adapter->ena_dev;
@@ -2933,6 +2933,18 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+ if (max_tx_queue_size < ENA_MIN_RING_SIZE) {
+ netdev_err(adapter->netdev, "Device max TX queue size: %d < minimum: %d\n",
+ max_tx_queue_size, ENA_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
+ if (max_rx_queue_size < ENA_MIN_RING_SIZE) {
+ netdev_err(adapter->netdev, "Device max RX queue size: %d < minimum: %d\n",
+ max_rx_queue_size, ENA_MIN_RING_SIZE);
+ return -EINVAL;
+ }
+
/* When forcing large headers, we multiply the entry size by 2, and therefore divide
* the queue size by 2, leaving the amount of memory used by the queues unchanged.
*/
@@ -2963,6 +2975,8 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
adapter->max_rx_ring_size = max_rx_queue_size;
adapter->requested_tx_ring_size = tx_queue_size;
adapter->requested_rx_ring_size = rx_queue_size;
+
+ return 0;
}
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -3070,6 +3084,7 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
bool *wd_state)
{
struct ena_com_dev *ena_dev = adapter->ena_dev;
+ struct net_device *netdev = adapter->netdev;
struct ena_llq_configurations llq_config;
struct device *dev = &pdev->dev;
bool readless_supported;
@@ -3159,15 +3174,19 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
&llq_config);
if (rc) {
- dev_err(dev, "ENA device init failed\n");
+ netdev_err(netdev, "Cannot set queues placement policy rc= %d\n", rc);
goto err_admin_init;
}
- ena_calc_io_queue_size(adapter, get_feat_ctx);
+ rc = ena_calc_io_queue_size(adapter, get_feat_ctx);
+ if (unlikely(rc))
+ goto err_admin_init;
return 0;
err_admin_init:
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
ena_com_delete_host_info(ena_dev);
ena_com_admin_destroy(ena_dev);
err_mmio_read_less:
@@ -3226,7 +3245,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
if (!graceful)
ena_com_set_admin_running_state(ena_dev, false);
- if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ if (dev_up)
ena_down(adapter);
/* Stop the device from sending AENQ events (in case reset flag is set
@@ -3372,14 +3391,18 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
struct ena_ring *tx_ring)
{
struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
unsigned int time_since_last_napi;
unsigned int missing_tx_comp_to;
bool is_tx_comp_time_expired;
struct ena_tx_buffer *tx_buf;
unsigned long last_jiffies;
+ int napi_scheduled;
u32 missed_tx = 0;
int i, rc = 0;
+ missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
+
for (i = 0; i < tx_ring->ring_size; i++) {
tx_buf = &tx_ring->tx_buffer_info[i];
last_jiffies = tx_buf->last_jiffies;
@@ -3406,25 +3429,45 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
adapter->missing_tx_completion_to);
if (unlikely(is_tx_comp_time_expired)) {
- if (!tx_buf->print_once) {
- time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
- missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
- netif_notice(adapter, tx_err, adapter->netdev,
- "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n",
- tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to);
+ time_since_last_napi =
+ jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
+ napi_scheduled = !!(ena_napi->napi.state & NAPIF_STATE_SCHED);
+
+ if (missing_tx_comp_to < time_since_last_napi && napi_scheduled) {
+ /* We suspect napi isn't called because the
+ * bottom half is not run. Require a bigger
+ * timeout for these cases
+ */
+ if (!time_is_before_jiffies(last_jiffies +
+ 2 * adapter->missing_tx_completion_to))
+ continue;
+
+ reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION;
}
- tx_buf->print_once = 1;
missed_tx++;
+
+ if (tx_buf->print_once)
+ continue;
+
+ netif_notice(adapter, tx_err, adapter->netdev,
+ "TX hasn't completed, qid %d, index %d. %u usecs from last napi execution, napi scheduled: %d\n",
+ tx_ring->qid, i, time_since_last_napi, napi_scheduled);
+
+ tx_buf->print_once = 1;
}
}
if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
netif_err(adapter, tx_err, adapter->netdev,
- "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
+ "Lost TX completions are above the threshold (%d > %d). Completion transmission timeout: %u.\n",
missed_tx,
- adapter->missing_tx_completion_threshold);
- ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
+ adapter->missing_tx_completion_threshold,
+ missing_tx_comp_to);
+ netif_err(adapter, tx_err, adapter->netdev,
+ "Resetting the device\n");
+
+ ena_reset_device(adapter, reset_reason);
rc = -EIO;
}
@@ -3762,8 +3805,8 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
}
}
- rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
- ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
+ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE,
+ 0xFFFFFFFF);
if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash function\n");
goto err_fill_indir;
@@ -4040,8 +4083,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
free_irq_cpu_rmap(netdev->rx_cpu_rmap);
netdev->rx_cpu_rmap = NULL;
}
-#endif /* CONFIG_RFS_ACCEL */
+#endif /* CONFIG_RFS_ACCEL */
/* Make sure timer and reset routine won't be called after
* freeing device resources.
*/
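
The watchdog changes above (ena_tx_timeout() and check_missing_comp_in_tx_queue()) share one heuristic: a stall is attributed to poll starvation, ENA_REGS_RESET_SUSPECTED_POLL_STARVATION, only when NAPI is still scheduled yet hasn't run within the timeout, i.e. the bottom half rather than the device is suspected. A condensed sketch, with txq_state as an illustrative stand-in for the driver's ring/adapter fields:

#include <linux/jiffies.h>
#include <linux/netdevice.h>

struct txq_state {
	struct napi_struct *napi;
	unsigned long last_napi_jiffies;	/* stamped each napi poll */
};

static bool suspect_poll_starvation(struct txq_state *tq,
				    unsigned int timeout_us)
{
	unsigned int since_napi_us =
		jiffies_to_usecs(jiffies - tq->last_napi_jiffies);

	/* NAPI is scheduled but hasn't run within the timeout: the
	 * poller is likely being starved, not the device misbehaving.
	 */
	return timeout_us < since_napi_us &&
	       !!(tq->napi->state & NAPIF_STATE_SCHED);
}
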
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 1e007a41a525..2c3d6a77ea79 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -21,6 +21,7 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_USER_TRIGGER = 12,
ENA_REGS_RESET_GENERIC = 13,
ENA_REGS_RESET_MISS_INTERRUPT = 14,
+ ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15,
};
/* ena_registers offsets */
diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c
index fc1c4ef73ba3..337c435d3ce9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_xdp.c
+++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c
@@ -412,7 +412,6 @@ static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
tx_ring->next_to_clean = next_to_clean;
ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
- ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
"tx_poll: q %d done. total pkts: %d\n",
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
index ea773cfa0af6..c83a0a80d533 100644
--- a/drivers/net/ethernet/amd/pds_core/adminq.c
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -82,7 +82,6 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
unsigned long irqflags;
int nq_work = 0;
int aq_work = 0;
- int credits;
/* Don't process AdminQ when it's not up */
if (!pdsc_adminq_inc_if_up(pdsc)) {
@@ -128,11 +127,9 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq)
credits:
/* Return the interrupt credits, one for each completion */
- credits = nq_work + aq_work;
- if (credits)
- pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
- credits,
- PDS_CORE_INTR_CRED_REARM);
+ pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
+ nq_work + aq_work,
+ PDS_CORE_INTR_CRED_REARM);
refcount_dec(&pdsc->adminq_refcnt);
}
@@ -157,7 +154,6 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
qcq = &pdsc->adminqcq;
queue_work(pdsc->wq, &qcq->work);
- pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
refcount_dec(&pdsc->adminq_refcnt);
return IRQ_HANDLED;
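
Reading the two pds_core/adminq.c hunks together: the ISR stops clearing the interrupt mask, and the work handler now returns credits unconditionally instead of only when work was done. The apparent intent, inferred from the diff rather than stated in it, is that the PDS_CORE_INTR_CRED_REARM flag on the credits return is what re-enables the interrupt, so that path must run even for zero completions. Sketched with assumed types; the real pds_core_intr_credits() signature and flag value may differ.

#include <linux/bits.h>
#include <linux/types.h>

struct intr_ctrl;			/* opaque device interrupt block */

/* Assumed prototype matching the call in the diff: return 'credits'
 * completions and, when the rearm flag is set, re-enable the interrupt.
 */
void pds_core_intr_credits(struct intr_ctrl *ctrl, u32 credits, u32 flags);

#define INTR_CRED_REARM BIT(0)		/* illustrative flag value */

static void adminq_work_done(struct intr_ctrl *ctrl, int nq_work, int aq_work)
{
	/* Runs even for zero work: with the ISR's explicit unmask gone,
	 * this rearm is the only thing re-enabling the interrupt.
	 */
	pds_core_intr_credits(ctrl, nq_work + aq_work, INTR_CRED_REARM);
}
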
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 11c23a7f3172..2babea110991 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -160,23 +160,19 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
if (err < 0) {
dev_warn(cf->dev, "auxiliary_device_init of %s failed: %pe\n",
name, ERR_PTR(err));
- goto err_out;
+ kfree(padev);
+ return ERR_PTR(err);
}
err = auxiliary_device_add(aux_dev);
if (err) {
dev_warn(cf->dev, "auxiliary_device_add of %s failed: %pe\n",
name, ERR_PTR(err));
- goto err_out_uninit;
+ auxiliary_device_uninit(aux_dev);
+ return ERR_PTR(err);
}
return padev;
-
-err_out_uninit:
- auxiliary_device_uninit(aux_dev);
-err_out:
- kfree(padev);
- return ERR_PTR(err);
}
int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
@@ -184,6 +180,9 @@ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
struct pds_auxiliary_dev *padev;
int err = 0;
+ if (!cf)
+ return -ENODEV;
+
mutex_lock(&pf->config_lock);
padev = pf->vfs[cf->vf_id].padev;
@@ -202,14 +201,27 @@ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
{
struct pds_auxiliary_dev *padev;
- enum pds_core_vif_types vt;
char devname[PDS_DEVNAME_LEN];
+ enum pds_core_vif_types vt;
+ unsigned long mask;
u16 vt_support;
int client_id;
int err = 0;
+ if (!cf)
+ return -ENODEV;
+
mutex_lock(&pf->config_lock);
+ mask = BIT_ULL(PDSC_S_FW_DEAD) |
+ BIT_ULL(PDSC_S_STOPPING_DRIVER);
+ if (cf->state & mask) {
+ dev_err(pf->dev, "%s: can't add dev, VF client in bad state %#lx\n",
+ __func__, cf->state);
+ err = -ENXIO;
+ goto out_unlock;
+ }
+
/* We only support vDPA so far, so it is the only one to
* be verified that it is available in the Core device and
* enabled in the devlink param. In the future this might
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 7658a7286767..9662ee72814c 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -129,6 +129,7 @@ static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
if (index < 0)
return index;
qcq->intx = index;
+ qcq->cq.bound_intr = &pdsc->intr_info[index];
return 0;
}
@@ -222,7 +223,6 @@ int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
goto err_out_free_irq;
}
- qcq->cq.bound_intr = &pdsc->intr_info[qcq->intx];
qcq->cq.num_descs = num_descs;
qcq->cq.desc_size = cq_desc_size;
qcq->cq.tail_idx = 0;
@@ -300,6 +300,17 @@ err_out:
return err;
}
+static void pdsc_core_uninit(struct pdsc *pdsc)
+{
+ pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
+ pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+
+ if (pdsc->kern_dbpage) {
+ iounmap(pdsc->kern_dbpage);
+ pdsc->kern_dbpage = NULL;
+ }
+}
+
static int pdsc_core_init(struct pdsc *pdsc)
{
union pds_core_dev_comp comp = {};
@@ -310,9 +321,32 @@ static int pdsc_core_init(struct pdsc *pdsc)
struct pds_core_dev_init_data_in cidi;
u32 dbid_count;
u32 dbpage_num;
+ int numdescs;
size_t sz;
int err;
+ /* Scale the descriptor ring length based on number of CPUs and VFs */
+ numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
+ numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
+ numdescs = roundup_pow_of_two(numdescs);
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
+ PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
+ numdescs,
+ sizeof(union pds_core_adminq_cmd),
+ sizeof(union pds_core_adminq_comp),
+ 0, &pdsc->adminqcq);
+ if (err)
+ return err;
+
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
+ PDS_CORE_QCQ_F_NOTIFYQ,
+ PDSC_NOTIFYQ_LENGTH,
+ sizeof(struct pds_core_notifyq_cmd),
+ sizeof(union pds_core_notifyq_comp),
+ 0, &pdsc->notifyqcq);
+ if (err)
+ goto err_out_uninit;
+
cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
@@ -336,7 +370,7 @@ static int pdsc_core_init(struct pdsc *pdsc)
if (err) {
dev_err(pdsc->dev, "Device init command failed: %pe\n",
ERR_PTR(err));
- return err;
+ goto err_out_uninit;
}
pdsc->hw_index = le32_to_cpu(cido.core_hw_index);
@@ -346,7 +380,8 @@ static int pdsc_core_init(struct pdsc *pdsc)
pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
if (!pdsc->kern_dbpage) {
dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_out_uninit;
}
pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
@@ -359,6 +394,10 @@ static int pdsc_core_init(struct pdsc *pdsc)
pdsc->last_eid = 0;
+ return 0;
+
+err_out_uninit:
+ pdsc_core_uninit(pdsc);
return err;
}
@@ -401,38 +440,12 @@ static int pdsc_viftypes_init(struct pdsc *pdsc)
int pdsc_setup(struct pdsc *pdsc, bool init)
{
- int numdescs;
int err;
err = pdsc_dev_init(pdsc);
if (err)
return err;
- /* Scale the descriptor ring length based on number of CPUs and VFs */
- numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
- numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
- numdescs = roundup_pow_of_two(numdescs);
- err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
- PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
- numdescs,
- sizeof(union pds_core_adminq_cmd),
- sizeof(union pds_core_adminq_comp),
- 0, &pdsc->adminqcq);
- if (err)
- goto err_out_teardown;
-
- err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
- PDS_CORE_QCQ_F_NOTIFYQ,
- PDSC_NOTIFYQ_LENGTH,
- sizeof(struct pds_core_notifyq_cmd),
- sizeof(union pds_core_notifyq_comp),
- 0, &pdsc->notifyqcq);
- if (err)
- goto err_out_teardown;
-
- /* NotifyQ rides on the AdminQ interrupt */
- pdsc->notifyqcq.intx = pdsc->adminqcq.intx;
-
/* Set up the Core with the AdminQ and NotifyQ info */
err = pdsc_core_init(pdsc);
if (err)
@@ -458,35 +471,20 @@ err_out_teardown:
void pdsc_teardown(struct pdsc *pdsc, bool removing)
{
- int i;
-
if (!pdsc->pdev->is_virtfn)
pdsc_devcmd_reset(pdsc);
if (pdsc->adminqcq.work.func)
cancel_work_sync(&pdsc->adminqcq.work);
- pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
- pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+
+ pdsc_core_uninit(pdsc);
if (removing) {
kfree(pdsc->viftype_status);
pdsc->viftype_status = NULL;
}
- if (pdsc->intr_info) {
- for (i = 0; i < pdsc->nintrs; i++)
- pdsc_intr_free(pdsc, i);
-
- kfree(pdsc->intr_info);
- pdsc->intr_info = NULL;
- pdsc->nintrs = 0;
- }
-
- if (pdsc->kern_dbpage) {
- iounmap(pdsc->kern_dbpage);
- pdsc->kern_dbpage = NULL;
- }
+ pdsc_dev_uninit(pdsc);
- pci_free_irq_vectors(pdsc->pdev);
set_bit(PDSC_S_FW_DEAD, &pdsc->state);
}
@@ -609,8 +607,7 @@ static void pdsc_check_pci_health(struct pdsc *pdsc)
if (fw_status != PDS_RC_BAD_PCI)
return;
- pdsc_reset_prepare(pdsc->pdev);
- pdsc_reset_done(pdsc->pdev);
+ pci_reset_function(pdsc->pdev);
}
void pdsc_health_thread(struct work_struct *work)
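
pdsc_core_init() now owns the adminq/notifyq allocations and unwinds through the new pdsc_core_uninit() on any failure, and pdsc_teardown() reuses the same helper. A sketch of that symmetric init/uninit shape with illustrative names; it assumes the free helper tolerates queues that were never allocated:

#include <linux/errno.h>
#include <linux/io.h>

struct demo_queue { void *base; };

struct demo {
        struct demo_queue adminq;
        struct demo_queue notifyq;
        void __iomem *dbpage;
};

static int demo_alloc_queue(struct demo *d, struct demo_queue *q) { return 0; } /* stub */
static void demo_free_queue(struct demo *d, struct demo_queue *q) { }           /* stub, NULL-safe */
static void __iomem *demo_map_dbpage(struct demo *d) { return NULL; }           /* stub */

static void demo_core_uninit(struct demo *d)
{
        demo_free_queue(d, &d->notifyq);
        demo_free_queue(d, &d->adminq);

        if (d->dbpage) {
                iounmap(d->dbpage);
                d->dbpage = NULL;
        }
}

static int demo_core_init(struct demo *d)
{
        int err;

        err = demo_alloc_queue(d, &d->adminq);
        if (err)
                return err;

        err = demo_alloc_queue(d, &d->notifyq);
        if (err)
                goto err_out_uninit;

        d->dbpage = demo_map_dbpage(d);
        if (!d->dbpage) {
                err = -ENOMEM;
                goto err_out_uninit;
        }

        return 0;

err_out_uninit:
        demo_core_uninit(d);
        return err;
}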
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index 110c4b826b22..92d7657dd614 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -282,9 +282,7 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
int pdsc_devcmd_init(struct pdsc *pdsc);
int pdsc_devcmd_reset(struct pdsc *pdsc);
int pdsc_dev_init(struct pdsc *pdsc);
-
-void pdsc_reset_prepare(struct pci_dev *pdev);
-void pdsc_reset_done(struct pci_dev *pdev);
+void pdsc_dev_uninit(struct pdsc *pdsc);
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
irq_handler_t handler, void *data);
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
index 4e8579ca1c8c..6bdd02b7aa6d 100644
--- a/drivers/net/ethernet/amd/pds_core/debugfs.c
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -32,8 +32,8 @@ void pdsc_debugfs_del_dev(struct pdsc *pdsc)
static int identity_show(struct seq_file *seq, void *v)
{
- struct pdsc *pdsc = seq->private;
struct pds_core_dev_identity *ident;
+ struct pdsc *pdsc = seq->private;
int vt;
ident = &pdsc->dev_ident;
@@ -106,10 +106,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
- struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
- struct dentry *intr_dentry;
+ struct dentry *qcq_dentry, *q_dentry, *cq_dentry, *intr_dentry;
struct debugfs_regset32 *intr_ctrl_regset;
- struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
struct pdsc_queue *q = &qcq->q;
struct pdsc_cq *cq = &qcq->cq;
@@ -147,6 +145,8 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
debugfs_create_u16("tail", 0400, cq_dentry, &cq->tail_idx);
if (qcq->flags & PDS_CORE_QCQ_F_INTR) {
+ struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
+
intr_dentry = debugfs_create_dir("intr", qcq->dentry);
if (IS_ERR_OR_NULL(intr_dentry))
return;
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index e65a1632df50..e494e1298dc9 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -316,6 +316,22 @@ static int pdsc_identify(struct pdsc *pdsc)
return 0;
}
+void pdsc_dev_uninit(struct pdsc *pdsc)
+{
+ if (pdsc->intr_info) {
+ int i;
+
+ for (i = 0; i < pdsc->nintrs; i++)
+ pdsc_intr_free(pdsc, i);
+
+ kfree(pdsc->intr_info);
+ pdsc->intr_info = NULL;
+ pdsc->nintrs = 0;
+ }
+
+ pci_free_irq_vectors(pdsc->pdev);
+}
+
int pdsc_dev_init(struct pdsc *pdsc)
{
unsigned int nintrs;
@@ -341,10 +357,8 @@ int pdsc_dev_init(struct pdsc *pdsc)
/* Get intr_info struct array for tracking */
pdsc->intr_info = kcalloc(nintrs, sizeof(*pdsc->intr_info), GFP_KERNEL);
- if (!pdsc->intr_info) {
- err = -ENOMEM;
- goto err_out;
- }
+ if (!pdsc->intr_info)
+ return -ENOMEM;
err = pci_alloc_irq_vectors(pdsc->pdev, nintrs, nintrs, PCI_IRQ_MSIX);
if (err != nintrs) {
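
The surviving check works because of the pci_alloc_irq_vectors() contract: it returns the number of vectors allocated, between min_vecs and max_vecs, on success, or a negative errno. With min == max == nintrs, any successful return equals nintrs, so "err != nintrs" is a complete failure test. A short sketch of the same contract:

#include <linux/pci.h>

static int demo_alloc_msix(struct pci_dev *pdev, unsigned int nintrs)
{
        int nvec;

        nvec = pci_alloc_irq_vectors(pdev, nintrs, nintrs, PCI_IRQ_MSIX);
        if (nvec < 0)
                return nvec;    /* could not get nintrs vectors */

        return 0;               /* here nvec == nintrs is guaranteed */
}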
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index cdbf053b5376..ab6133e7db42 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -45,6 +45,7 @@ static void pdsc_unmap_bars(struct pdsc *pdsc)
for (i = 0; i < PDS_CORE_BARS_MAX; i++) {
if (bars[i].vaddr)
pci_iounmap(pdsc->pdev, bars[i].vaddr);
+ bars[i].vaddr = NULL;
}
}
@@ -451,6 +452,9 @@ static void pdsc_remove(struct pci_dev *pdev)
static void pdsc_stop_health_thread(struct pdsc *pdsc)
{
+ if (pdsc->pdev->is_virtfn)
+ return;
+
timer_shutdown_sync(&pdsc->wdtimer);
if (pdsc->health_work.func)
cancel_work_sync(&pdsc->health_work);
@@ -458,23 +462,35 @@ static void pdsc_stop_health_thread(struct pdsc *pdsc)
static void pdsc_restart_health_thread(struct pdsc *pdsc)
{
+ if (pdsc->pdev->is_virtfn)
+ return;
+
timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
mod_timer(&pdsc->wdtimer, jiffies + 1);
}
-void pdsc_reset_prepare(struct pci_dev *pdev)
+static void pdsc_reset_prepare(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
pdsc_stop_health_thread(pdsc);
pdsc_fw_down(pdsc);
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf))
+ pdsc_auxbus_dev_del(pdsc, pf);
+ }
+
pdsc_unmap_bars(pdsc);
pci_release_regions(pdev);
- pci_disable_device(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
}
-void pdsc_reset_done(struct pci_dev *pdev)
+static void pdsc_reset_done(struct pci_dev *pdev)
{
struct pdsc *pdsc = pci_get_drvdata(pdev);
struct device *dev = pdsc->dev;
@@ -504,12 +520,43 @@ void pdsc_reset_done(struct pci_dev *pdev)
pdsc_fw_up(pdsc);
pdsc_restart_health_thread(pdsc);
+
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf))
+ pdsc_auxbus_dev_add(pdsc, pf);
+ }
+}
+
+static pci_ers_result_t pdsc_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t error)
+{
+ if (error == pci_channel_io_frozen) {
+ pdsc_reset_prepare(pdev);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_NONE;
+}
+
+static void pdsc_pci_error_resume(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+
+ if (test_bit(PDSC_S_FW_DEAD, &pdsc->state))
+ pci_reset_function_locked(pdev);
}
static const struct pci_error_handlers pdsc_err_handler = {
/* FLR handling */
.reset_prepare = pdsc_reset_prepare,
.reset_done = pdsc_reset_done,
+
+ /* AER handling */
+ .error_detected = pdsc_pci_error_detected,
+ .resume = pdsc_pci_error_resume,
};
static struct pci_driver pdsc_driver = {
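
The new handler entries hook AER recovery: the PCI core calls error_detected() with the channel state, and the driver above asks for a reset only on the frozen state, letting other states fall through as PCI_ERS_RESULT_NONE. A sketch of the general callback contract (see Documentation/PCI/pci-error-recovery.rst):

#include <linux/pci.h>

static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        switch (state) {
        case pci_channel_io_normal:
                return PCI_ERS_RESULT_CAN_RECOVER;      /* no reset needed */
        case pci_channel_io_frozen:
                return PCI_ERS_RESULT_NEED_RESET;       /* quiesce, then reset */
        case pci_channel_io_perm_failure:
        default:
                return PCI_ERS_RESULT_DISCONNECT;       /* device is lost */
        }
}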
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 18a6c8d99fa0..a2606ee3b0a5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -15,6 +15,7 @@
#include "aq_macsec.h"
#include "aq_main.h"
+#include <linux/linkmode.h>
#include <linux/ptp_clock_kernel.h>
static void aq_ethtool_get_regs(struct net_device *ndev,
@@ -681,23 +682,19 @@ static int aq_ethtool_get_ts_info(struct net_device *ndev,
return 0;
}
-static u32 eee_mask_to_ethtool_mask(u32 speed)
+static void eee_mask_to_ethtool_mask(unsigned long *mode, u32 speed)
{
- u32 rate = 0;
-
if (speed & AQ_NIC_RATE_EEE_10G)
- rate |= SUPPORTED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
if (speed & AQ_NIC_RATE_EEE_1G)
- rate |= SUPPORTED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (speed & AQ_NIC_RATE_EEE_100M)
- rate |= SUPPORTED_100baseT_Full;
-
- return rate;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
}
-static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
+static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_keee *eee)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 rate, supported_rates;
@@ -713,14 +710,14 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
if (err < 0)
return err;
- eee->supported = eee_mask_to_ethtool_mask(supported_rates);
+ eee_mask_to_ethtool_mask(eee->supported, supported_rates);
if (aq_nic->aq_nic_cfg.eee_speeds)
- eee->advertised = eee->supported;
+ linkmode_copy(eee->advertised, eee->supported);
- eee->lp_advertised = eee_mask_to_ethtool_mask(rate);
+ eee_mask_to_ethtool_mask(eee->lp_advertised, rate);
- eee->eee_enabled = !!eee->advertised;
+ eee->eee_enabled = !linkmode_empty(eee->advertised);
eee->tx_lpi_enabled = eee->eee_enabled;
if ((supported_rates & rate) & AQ_NIC_RATE_EEE_MSK)
@@ -729,7 +726,7 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
return 0;
}
-static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
+static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_keee *eee)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
u32 rate, supported_rates;
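
This is the recurring ethtool_eee to ethtool_keee conversion in this series: the legacy u32 SUPPORTED_x/ADVERTISED_x words become link-mode bitmaps handled with the <linux/linkmode.h> helpers. A small illustrative sketch:

#include <linux/ethtool.h>
#include <linux/linkmode.h>

static void demo_rates_to_linkmode(unsigned long *mode, bool r10g, bool r1g)
{
        linkmode_zero(mode);
        if (r10g)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
        if (r1g)
                linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
}

static bool demo_any_mode(const unsigned long *mode)
{
        return !linkmode_empty(mode);   /* replaces "!!advertised" on a u32 */
}

/*
 * Usage:
 *      __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
 *      demo_rates_to_linkmode(modes, true, false);
 */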
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index abd4832e4ed2..5acb3e16b567 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -993,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
return 0;
err_exit_hwts_rx:
- aq_ring_free(&aq_ptp->hwts_rx);
+ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
err_exit_ptp_rx:
aq_ring_free(&aq_ptp->ptp_rx);
err_exit_ptp_tx:
@@ -1011,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic)
aq_ring_free(&aq_ptp->ptp_tx);
aq_ring_free(&aq_ptp->ptp_rx);
- aq_ring_free(&aq_ptp->hwts_rx);
+ aq_ring_hwts_rx_free(&aq_ptp->hwts_rx);
aq_ptp_skb_ring_release(&aq_ptp->skb_ring);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index cda8597b4e14..f7433abd6591 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -919,6 +919,19 @@ void aq_ring_free(struct aq_ring_s *self)
}
}
+void aq_ring_hwts_rx_free(struct aq_ring_s *self)
+{
+ if (!self)
+ return;
+
+ if (self->dx_ring) {
+ dma_free_coherent(aq_nic_get_dev(self->aq_nic),
+ self->size * self->dx_size + AQ_CFG_RXDS_DEF,
+ self->dx_ring, self->dx_ring_pa);
+ self->dx_ring = NULL;
+ }
+}
+
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
{
unsigned int count;
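
aq_ring_hwts_rx_free() exists because the hwts ring is allocated with an extra AQ_CFG_RXDS_DEF tail, so the generic aq_ring_free() would release it without the tail bytes. Coherent DMA memory must be freed with exactly the size it was allocated with; a sketch of the symmetry being restored, with DEMO_TAIL_BYTES as an illustrative stand-in:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#define DEMO_TAIL_BYTES 8       /* illustrative extra tail, like AQ_CFG_RXDS_DEF */

static void *demo_hwts_ring_alloc(struct device *dev, size_t ndesc,
                                  size_t dsize, dma_addr_t *pa)
{
        return dma_alloc_coherent(dev, ndesc * dsize + DEMO_TAIL_BYTES,
                                  pa, GFP_KERNEL);
}

static void demo_hwts_ring_free(struct device *dev, void *ring, size_t ndesc,
                                size_t dsize, dma_addr_t pa)
{
        /* must mirror the alloc expression, tail bytes included */
        dma_free_coherent(dev, ndesc * dsize + DEMO_TAIL_BYTES, ring, pa);
}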
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 52847310740a..d627ace850ff 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -210,6 +210,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self);
int aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic, unsigned int idx,
unsigned int size, unsigned int dx_size);
+void aq_ring_hwts_rx_free(struct aq_ring_s *self);
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 29b04a274d07..a806dadc4196 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -31,6 +31,20 @@ static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask)
priv->irq_mask |= mask;
}
+void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ /* Only supported with internal phys */
+ if (!intf->internal_phy)
+ return;
+
+ if (en)
+ _intr2_mask_clear(priv, ASP_INTR2_PHY_EVENT(intf->channel));
+ else
+ _intr2_mask_set(priv, ASP_INTR2_PHY_EVENT(intf->channel));
+}
+
void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en)
{
struct bcmasp_priv *priv = intf->parent;
@@ -79,6 +93,9 @@ static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status)
__napi_schedule_irqoff(&intf->tx_napi);
}
}
+
+ if (status & ASP_INTR2_PHY_EVENT(intf->channel))
+ phy_mac_interrupt(intf->ndev->phydev);
}
static irqreturn_t bcmasp_isr(int irq, void *data)
@@ -535,9 +552,6 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
int j = 0, i;
for (i = 0; i < NUM_NET_FILTERS; i++) {
- if (j == *rule_cnt)
- return -EMSGSIZE;
-
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -547,6 +561,9 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
priv->net_filters[i - 1].wake_filter)
continue;
+ if (j == *rule_cnt)
+ return -EMSGSIZE;
+
rule_locs[j++] = priv->net_filters[i].fs.location;
}
@@ -972,7 +989,26 @@ static void bcmasp_core_init(struct bcmasp_priv *priv)
ASP_INTR2_CLEAR);
}
-static void bcmasp_core_clock_select(struct bcmasp_priv *priv, bool slow)
+static void bcmasp_core_clock_select_many(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CPU_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CPU_CLOCK_SELECT);
+}
+
+static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow)
{
u32 reg;
@@ -1166,6 +1202,24 @@ static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
}
}
+static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
+{
+ u32 reg, phy_lpi_overwrite;
+
+ reg = rx_edpkt_core_rl(intf->parent, ASP_EDPKT_SPARE_REG);
+ phy_lpi_overwrite = intf->internal_phy ? ASP_EDPKT_SPARE_REG_EPHY_LPI :
+ ASP_EDPKT_SPARE_REG_GPHY_LPI;
+
+ if (en)
+ reg |= phy_lpi_overwrite;
+ else
+ reg &= ~phy_lpi_overwrite;
+
+ rx_edpkt_core_wl(intf->parent, reg, ASP_EDPKT_SPARE_REG);
+
+ usleep_range(50, 100);
+}
+
static struct bcmasp_hw_info v20_hw_info = {
.rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
.umac2fb = UMAC2FB_OFFSET,
@@ -1178,6 +1232,7 @@ static const struct bcmasp_plat_data v20_plat_data = {
.init_wol = bcmasp_init_wol_per_intf,
.enable_wol = bcmasp_enable_wol_per_intf,
.destroy_wol = bcmasp_wol_irq_destroy_per_intf,
+ .core_clock_select = bcmasp_core_clock_select_one,
.hw_info = &v20_hw_info,
};
@@ -1194,17 +1249,39 @@ static const struct bcmasp_plat_data v21_plat_data = {
.init_wol = bcmasp_init_wol_shared,
.enable_wol = bcmasp_enable_wol_shared,
.destroy_wol = bcmasp_wol_irq_destroy_shared,
+ .core_clock_select = bcmasp_core_clock_select_one,
+ .hw_info = &v21_hw_info,
+};
+
+static const struct bcmasp_plat_data v22_plat_data = {
+ .init_wol = bcmasp_init_wol_shared,
+ .enable_wol = bcmasp_enable_wol_shared,
+ .destroy_wol = bcmasp_wol_irq_destroy_shared,
+ .core_clock_select = bcmasp_core_clock_select_many,
.hw_info = &v21_hw_info,
+ .eee_fixup = bcmasp_eee_fixup,
};
+static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata)
+{
+ priv->init_wol = pdata->init_wol;
+ priv->enable_wol = pdata->enable_wol;
+ priv->destroy_wol = pdata->destroy_wol;
+ priv->core_clock_select = pdata->core_clock_select;
+ priv->eee_fixup = pdata->eee_fixup;
+ priv->hw_info = pdata->hw_info;
+}
+
static const struct of_device_id bcmasp_of_match[] = {
{ .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
+ { .compatible = "brcm,asp-v2.2", .data = &v22_plat_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_of_match);
static const struct of_device_id bcmasp_mdio_of_match[] = {
+ { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
{ /* sentinel */ },
@@ -1265,16 +1342,13 @@ static int bcmasp_probe(struct platform_device *pdev)
if (!pdata)
return dev_err_probe(dev, -EINVAL, "unable to find platform data\n");
- priv->init_wol = pdata->init_wol;
- priv->enable_wol = pdata->enable_wol;
- priv->destroy_wol = pdata->destroy_wol;
- priv->hw_info = pdata->hw_info;
+ bcmasp_set_pdata(priv, pdata);
/* Enable all clocks to ensure successful probing */
bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
/* Switch to the main clock */
- bcmasp_core_clock_select(priv, false);
+ priv->core_clock_select(priv, false);
bcmasp_intr2_mask_set_all(priv);
bcmasp_intr2_clear_all(priv);
@@ -1381,7 +1455,7 @@ static int __maybe_unused bcmasp_suspend(struct device *d)
*/
bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);
- bcmasp_core_clock_select(priv, true);
+ priv->core_clock_select(priv, true);
clk_disable_unprepare(priv->clk);
@@ -1399,7 +1473,7 @@ static int __maybe_unused bcmasp_resume(struct device *d)
return ret;
/* Switch to the main clock domain */
- bcmasp_core_clock_select(priv, false);
+ priv->core_clock_select(priv, false);
/* Re-enable all clocks for re-initialization */
bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
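
The probe path above stops calling a fixed bcmasp_core_clock_select() and instead dispatches through ops copied from the per-compatible plat_data, which is what lets v2.2 plug in the _many clock variant and an optional EEE fixup. A sketch of the pattern with hypothetical names:

#include <linux/mod_devicetable.h>
#include <linux/property.h>

struct demo_priv;

struct demo_plat_data {
        void (*core_clock_select)(struct demo_priv *priv, bool slow);
        void (*eee_fixup)(struct demo_priv *priv, bool en);     /* may be NULL */
};

struct demo_priv {
        void (*core_clock_select)(struct demo_priv *priv, bool slow);
        void (*eee_fixup)(struct demo_priv *priv, bool en);
};

static void demo_clock_select_many(struct demo_priv *priv, bool slow) { }       /* stub */

static const struct demo_plat_data v22_data = {
        .core_clock_select = demo_clock_select_many,
        /* .eee_fixup left NULL: callers must check before invoking */
};

static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-v2.2", .data = &v22_data },
        { /* sentinel */ },
};

static void demo_set_pdata(struct demo_priv *priv,
                           const struct demo_plat_data *pdata)
{
        priv->core_clock_select = pdata->core_clock_select;
        priv->eee_fixup = pdata->eee_fixup;
}

/* in probe: demo_set_pdata(priv, device_get_match_data(&pdev->dev)); */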
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index ec90add6b03e..f93cb3da44b0 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -19,6 +19,8 @@
#define ASP_INTR2_TX_DESC(intr) BIT((intr) + 14)
#define ASP_INTR2_UMC0_WAKE BIT(22)
#define ASP_INTR2_UMC1_WAKE BIT(28)
+#define ASP_INTR2_PHY_EVENT(intr) ((intr) ? BIT(30) | BIT(31) : \
+ BIT(24) | BIT(25))
#define ASP_WAKEUP_INTR2_OFFSET 0x1200
#define ASP_WAKEUP_INTR2_STATUS 0x0
@@ -33,6 +35,12 @@
#define ASP_WAKEUP_INTR2_FILT_1 BIT(3)
#define ASP_WAKEUP_INTR2_FW BIT(4)
+#define ASP_CTRL2_OFFSET 0x2000
+#define ASP_CTRL2_CORE_CLOCK_SELECT 0x0
+#define ASP_CTRL2_CORE_CLOCK_SELECT_MAIN BIT(0)
+#define ASP_CTRL2_CPU_CLOCK_SELECT 0x4
+#define ASP_CTRL2_CPU_CLOCK_SELECT_MAIN BIT(0)
+
#define ASP_TX_ANALYTICS_OFFSET 0x4c000
#define ASP_TX_ANALYTICS_CTRL 0x0
@@ -134,8 +142,11 @@ enum asp_rx_net_filter_block {
#define ASP_EDPKT_RX_PKT_CNT 0x138
#define ASP_EDPKT_HDR_EXTR_CNT 0x13c
#define ASP_EDPKT_HDR_OUT_CNT 0x140
+#define ASP_EDPKT_SPARE_REG 0x174
+#define ASP_EDPKT_SPARE_REG_EPHY_LPI BIT(4)
+#define ASP_EDPKT_SPARE_REG_GPHY_LPI BIT(3)
-#define ASP_CTRL 0x101000
+#define ASP_CTRL_OFFSET 0x101000
#define ASP_CTRL_ASP_SW_INIT 0x04
#define ASP_CTRL_ASP_SW_INIT_ACPUSS_CORE BIT(0)
#define ASP_CTRL_ASP_SW_INIT_ASP_TX BIT(1)
@@ -306,6 +317,7 @@ struct bcmasp_intf {
struct bcmasp_desc *rx_edpkt_cpu;
dma_addr_t rx_edpkt_dma_addr;
dma_addr_t rx_edpkt_dma_read;
+ dma_addr_t rx_edpkt_dma_valid;
/* RX buffer prefetcher ring */
void *rx_ring_cpu;
@@ -337,7 +349,7 @@ struct bcmasp_intf {
int wol_irq;
unsigned int wol_irq_enabled:1;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define NUM_NET_FILTERS 32
@@ -372,6 +384,8 @@ struct bcmasp_plat_data {
void (*init_wol)(struct bcmasp_priv *priv);
void (*enable_wol)(struct bcmasp_intf *intf, bool en);
void (*destroy_wol)(struct bcmasp_priv *priv);
+ void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
+ void (*eee_fixup)(struct bcmasp_intf *priv, bool en);
struct bcmasp_hw_info *hw_info;
};
@@ -390,6 +404,8 @@ struct bcmasp_priv {
void (*init_wol)(struct bcmasp_priv *priv);
void (*enable_wol)(struct bcmasp_intf *intf, bool en);
void (*destroy_wol)(struct bcmasp_priv *priv);
+ void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
+ void (*eee_fixup)(struct bcmasp_intf *intf, bool en);
void __iomem *base;
struct bcmasp_hw_info *hw_info;
@@ -530,7 +546,8 @@ BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
-BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL);
+BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET);
+BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET);
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i);
@@ -541,6 +558,8 @@ void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en);
void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en);
+void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en);
+
void bcmasp_flush_rx_port(struct bcmasp_intf *intf);
extern const struct ethtool_ops bcmasp_ethtool_ops;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index ce6a3d56fb23..484fc2b5626f 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -360,29 +360,26 @@ void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
umac_wl(intf, reg, UMC_EEE_CTRL);
intf->eee.eee_enabled = enable;
- intf->eee.eee_active = enable;
}
-static int bcmasp_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_eee *p = &intf->eee;
+ struct ethtool_keee *p = &intf->eee;
if (!dev->phydev)
return -ENODEV;
- e->eee_enabled = p->eee_enabled;
- e->eee_active = p->eee_active;
e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
}
-static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_eee *p = &intf->eee;
+ struct ethtool_keee *p = &intf->eee;
int ret;
if (!dev->phydev)
@@ -399,7 +396,6 @@ static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e)
}
umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
- intf->eee.eee_active = ret >= 0;
intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
bcmasp_eee_enable_set(intf, true);
}
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 53e542881255..dd06b68b33ed 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -382,6 +382,7 @@ static void bcmasp_netif_start(struct net_device *dev)
bcmasp_enable_rx_irq(intf, 1);
bcmasp_enable_tx_irq(intf, 1);
+ bcmasp_enable_phy_irq(intf, 1);
phy_start(dev->phydev);
}
@@ -607,6 +608,7 @@ static void bcmasp_adj_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
u32 cmd_bits = 0, reg;
int changed = 0;
+ bool active;
if (intf->old_link != phydev->link) {
changed = 1;
@@ -658,8 +660,8 @@ static void bcmasp_adj_link(struct net_device *dev)
reg |= cmd_bits;
umac_wl(intf, reg, UMC_CMD);
- intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
- bcmasp_eee_enable_set(intf, intf->eee.eee_active);
+ active = phy_init_eee(phydev, 0) >= 0;
+ bcmasp_eee_enable_set(intf, active);
}
reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
@@ -673,38 +675,78 @@ static void bcmasp_adj_link(struct net_device *dev)
phy_print_status(phydev);
}
-static int bcmasp_init_rx(struct bcmasp_intf *intf)
+static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
{
struct device *kdev = &intf->parent->pdev->dev;
struct page *buffer_pg;
- dma_addr_t dma;
- void *p;
- u32 reg;
- int ret;
+ /* Alloc RX */
intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
-
- dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(kdev, dma)) {
- __free_pages(buffer_pg, intf->rx_buf_order);
+ if (!buffer_pg)
return -ENOMEM;
- }
+
intf->rx_ring_cpu = page_to_virt(buffer_pg);
- intf->rx_ring_dma = dma;
- intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+ intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(kdev, intf->rx_ring_dma))
+ goto free_rx_buffer;
+
+ intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+ &intf->rx_edpkt_dma_addr, GFP_KERNEL);
+ if (!intf->rx_edpkt_cpu)
+ goto free_rx_buffer_dma;
+
+ /* Alloc TX */
+ intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+ &intf->tx_spb_dma_addr, GFP_KERNEL);
+ if (!intf->tx_spb_cpu)
+ goto free_rx_edpkt_dma;
- p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr,
+ intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
GFP_KERNEL);
- if (!p) {
- ret = -ENOMEM;
- goto free_rx_ring;
- }
- intf->rx_edpkt_cpu = p;
+ if (!intf->tx_cbs)
+ goto free_tx_spb_dma;
- netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
+ return 0;
+
+free_tx_spb_dma:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+free_rx_edpkt_dma:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+free_rx_buffer_dma:
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+free_rx_buffer:
+ __free_pages(buffer_pg, intf->rx_buf_order);
+
+ return -ENOMEM;
+}
+
+static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+
+ /* RX buffers */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+
+ /* TX buffers */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+ kfree(intf->tx_cbs);
+}
+
+static void bcmasp_init_rx(struct bcmasp_intf *intf)
+{
+ /* Restart from index 0 */
+ intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+ intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
intf->rx_edpkt_index = 0;
@@ -730,64 +772,23 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf)
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
- rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
- RX_EDPKT_DMA_END);
- rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
- RX_EDPKT_DMA_VALID);
-
- reg = UMAC2FB_CFG_DEFAULT_EN |
- ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT);
- reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT);
- umac2fb_wl(intf, reg, UMAC2FB_CFG);
-
- return 0;
-
-free_rx_ring:
- dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);
- return ret;
+ umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
+ UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
+ UMAC2FB_CFG);
}
-static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf)
-{
- struct device *kdev = &intf->parent->pdev->dev;
-
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
- intf->rx_edpkt_dma_addr);
- dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
-}
-static int bcmasp_init_tx(struct bcmasp_intf *intf)
+static void bcmasp_init_tx(struct bcmasp_intf *intf)
{
- struct device *kdev = &intf->parent->pdev->dev;
- void *p;
- int ret;
-
- p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr,
- GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- intf->tx_spb_cpu = p;
+ /* Restart from index 0 */
intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
-
- intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
- GFP_KERNEL);
- if (!intf->tx_cbs) {
- ret = -ENOMEM;
- goto free_tx_spb;
- }
-
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
- netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
-
/* Make sure channels are disabled */
tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
@@ -803,26 +804,6 @@ static int bcmasp_init_tx(struct bcmasp_intf *intf)
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
-
- return 0;
-
-free_tx_spb:
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
- intf->tx_spb_dma_addr);
-
- return ret;
-}
-
-static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf)
-{
- struct device *kdev = &intf->parent->pdev->dev;
-
- /* Free descriptors */
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
- intf->tx_spb_dma_addr);
-
- /* Free cbs */
- kfree(intf->tx_cbs);
}
static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
@@ -910,12 +891,10 @@ static void bcmasp_netif_deinit(struct net_device *dev)
/* Disable interrupts */
bcmasp_enable_tx_irq(intf, 0);
bcmasp_enable_rx_irq(intf, 0);
+ bcmasp_enable_phy_irq(intf, 0);
netif_napi_del(&intf->tx_napi);
- bcmasp_reclaim_free_all_tx(intf);
-
netif_napi_del(&intf->rx_napi);
- bcmasp_reclaim_free_all_rx(intf);
}
static int bcmasp_stop(struct net_device *dev)
@@ -929,6 +908,8 @@ static int bcmasp_stop(struct net_device *dev)
bcmasp_netif_deinit(dev);
+ bcmasp_reclaim_free_buffers(intf);
+
phy_disconnect(dev->phydev);
/* Disable internal EPHY or external PHY */
@@ -1048,6 +1029,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
netdev_err(dev, "could not attach to PHY\n");
goto err_phy_disable;
}
+
+ if (intf->internal_phy)
+ dev->phydev->irq = PHY_MAC_INTERRUPT;
+
+ /* Indicate that the MAC is responsible for PHY PM */
+ phydev->mac_managed_pm = true;
} else if (!intf->wolopts) {
ret = phy_resume(dev->phydev);
if (ret)
@@ -1067,17 +1054,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
intf->old_link = -1;
intf->old_pause = -1;
- ret = bcmasp_init_tx(intf);
- if (ret)
- goto err_phy_disconnect;
-
- /* Turn on asp */
+ bcmasp_init_tx(intf);
+ netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
bcmasp_enable_tx(intf, 1);
- ret = bcmasp_init_rx(intf);
- if (ret)
- goto err_reclaim_tx;
-
+ bcmasp_init_rx(intf);
+ netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
bcmasp_enable_rx(intf, 1);
/* Turn on UniMAC TX/RX */
@@ -1091,11 +1073,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
return 0;
-err_reclaim_tx:
- bcmasp_reclaim_free_all_tx(intf);
-err_phy_disconnect:
- if (phydev)
- phy_disconnect(phydev);
err_phy_disable:
if (intf->internal_phy)
bcmasp_ephy_enable_set(intf, false);
@@ -1111,13 +1088,24 @@ static int bcmasp_open(struct net_device *dev)
netif_dbg(intf, ifup, dev, "bcmasp open\n");
- ret = clk_prepare_enable(intf->parent->clk);
+ ret = bcmasp_alloc_buffers(intf);
if (ret)
return ret;
- ret = bcmasp_netif_init(dev, true);
+ ret = clk_prepare_enable(intf->parent->clk);
if (ret)
+ goto err_free_mem;
+
+ ret = bcmasp_netif_init(dev, true);
+ if (ret) {
clk_disable_unprepare(intf->parent->clk);
+ goto err_free_mem;
+ }
+
+ return ret;
+
+err_free_mem:
+ bcmasp_reclaim_free_buffers(intf);
return ret;
}
@@ -1326,6 +1314,9 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
ASP_WAKEUP_INTR2_MASK_CLEAR);
}
+ if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ intf->parent->eee_fixup(intf, true);
+
netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}
@@ -1374,6 +1365,9 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
u32 reg;
+ if (intf->eee.eee_enabled && intf->parent->eee_fixup)
+ intf->parent->eee_fixup(intf, false);
+
reg = umac_rl(intf, UMC_MPD_CTRL);
reg &= ~UMC_MPD_CTRL_MPD_EN;
umac_wl(intf, reg, UMC_MPD_CTRL);
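
The intf refactor splits one-time buffer allocation, now done in bcmasp_open() before the clock is enabled, from register-level ring init, so the rings can be re-initialized without reallocating. A sketch of the resulting open() ordering with illustrative names, unwinding in reverse on failure:

#include <linux/clk.h>
#include <linux/netdevice.h>

struct demo_intf {
        struct clk *clk;
};

static int demo_alloc_buffers(struct demo_intf *intf) { return 0; }     /* stub */
static void demo_free_buffers(struct demo_intf *intf) { }               /* stub */
static int demo_netif_init(struct net_device *dev) { return 0; }        /* stub */

static int demo_open(struct net_device *dev)
{
        struct demo_intf *intf = netdev_priv(dev);
        int ret;

        ret = demo_alloc_buffers(intf);
        if (ret)
                return ret;

        ret = clk_prepare_enable(intf->clk);
        if (ret)
                goto err_free_mem;

        ret = demo_netif_init(dev);
        if (ret)
                goto err_disable_clk;

        return 0;

err_disable_clk:
        clk_disable_unprepare(intf->clk);
err_free_mem:
        demo_free_buffers(intf);
        return ret;
}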
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e9c1e1bb5580..c9b6acd8c892 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -147,10 +147,11 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strscpy(buf, bp->fw_ver, buf_len);
- snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
+ phy_fw_ver, sizeof(phy_fw_ver));
+ /* This may become truncated. */
+ scnprintf(buf, buf_len,
+ "%sbc %d.%d.%d%s%s",
+ bp->fw_ver,
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
@@ -3537,7 +3538,7 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ((skb_inner_transport_offset(skb) >> 1) <<
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
@@ -3569,7 +3570,7 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ((skb_transport_offset(skb) >> 1) <<
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
@@ -3612,7 +3613,7 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd,
u32 xmit_type)
{
- u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
+ u8 hlen = skb_network_offset(skb) >> 1;
/* for now NS flag is not used in Linux */
pbd->global_data =
@@ -3620,8 +3621,7 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
- pbd->ip_hlen_w = (skb_transport_header(skb) -
- skb_network_header(skb)) >> 1;
+ pbd->ip_hlen_w = skb_network_header_len(skb) >> 1;
hlen += pbd->ip_hlen_w;
@@ -3666,8 +3666,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
u8 outerip_off, outerip_len = 0;
/* from outer IP to transport */
- hlen_w = (skb_inner_transport_header(skb) -
- skb_network_header(skb)) >> 1;
+ hlen_w = skb_inner_transport_offset(skb) >> 1;
/* transport len */
hlen_w += inner_tcp_hdrlen(skb) >> 1;
@@ -3713,7 +3712,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
0, IPPROTO_TCP, 0));
}
- outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+ outerip_off = (skb_network_offset(skb)) >> 1;
*global_data |=
outerip_off |
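
The bnx2x_fill_fw_str() change above also swaps strscpy()-plus-snprintf() arithmetic for a single scnprintf(): snprintf() returns the length that would have been written, so chaining on its return value can index past the buffer, while scnprintf() returns the bytes actually stored and clamps safely. A short sketch:

#include <linux/kernel.h>

static void demo_fill_fw_str(char *buf, size_t buf_len, const char *fw_ver,
                             int maj, int min, int rev)
{
        int n;

        n = scnprintf(buf, buf_len, "%s", fw_ver);
        /* may truncate, but never writes past buf_len */
        scnprintf(buf + n, buf_len - n, "bc %d.%d.%d", maj, min, rev);
}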
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 81d232e6d05f..58956ed8f531 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1132,7 +1132,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
}
memset(version, 0, sizeof(version));
- bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ bnx2x_fill_fw_str(bp, version, sizeof(version));
strlcat(info->fw_version, version, sizeof(info->fw_version));
strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
@@ -2081,34 +2081,31 @@ static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Storage only interface"
};
-static u32 bnx2x_eee_to_adv(u32 eee_adv)
+static void bnx2x_eee_to_linkmode(unsigned long *mode, u32 eee_adv)
{
- u32 modes = 0;
-
if (eee_adv & SHMEM_EEE_100M_ADV)
- modes |= ADVERTISED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
if (eee_adv & SHMEM_EEE_1G_ADV)
- modes |= ADVERTISED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (eee_adv & SHMEM_EEE_10G_ADV)
- modes |= ADVERTISED_10000baseT_Full;
-
- return modes;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
}
-static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+static u32 bnx2x_linkmode_to_eee(const unsigned long *mode, u32 shift)
{
u32 eee_adv = 0;
- if (modes & ADVERTISED_100baseT_Full)
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_100M_ADV;
- if (modes & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_1G_ADV;
- if (modes & ADVERTISED_10000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_10G_ADV;
return eee_adv << shift;
}
-static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnx2x *bp = netdev_priv(dev);
u32 eee_cfg;
@@ -2120,16 +2117,17 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
eee_cfg = bp->link_vars.eee_status;
- edata->supported =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
- SHMEM_EEE_SUPPORTED_SHIFT);
+ bnx2x_eee_to_linkmode(edata->supported,
+ (eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ bnx2x_eee_to_linkmode(edata->advertised,
+ (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+ SHMEM_EEE_ADV_STATUS_SHIFT);
- edata->advertised =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
- SHMEM_EEE_ADV_STATUS_SHIFT);
- edata->lp_advertised =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
- SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+ bnx2x_eee_to_linkmode(edata->lp_advertised,
+ (eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
/* SHMEM value is in 16u units --> Convert to 1u units. */
edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
@@ -2141,7 +2139,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnx2x *bp = netdev_priv(dev);
u32 eee_cfg;
@@ -2162,8 +2160,8 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- advertised = bnx2x_adv_to_eee(edata->advertised,
- SHMEM_EEE_ADV_STATUS_SHIFT);
+ advertised = bnx2x_linkmode_to_eee(edata->advertised,
+ SHMEM_EEE_ADV_STATUS_SHIFT);
if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
DP(BNX2X_MSG_ETHTOOL,
"Direct manipulation of EEE advertisement is not supported\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 02808513ffe4..ea310057fe3a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6163,8 +6163,8 @@ static void bnx2x_link_int_ack(struct link_params *params,
static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
- str[0] = '\0';
- (*len)--;
+ if (*len)
+ str[0] = '\0';
return 0;
}
@@ -6173,7 +6173,7 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
u16 ret;
if (*len < 10) {
- /* Need more than 10chars for this format */
+ /* Need more than 10 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6188,8 +6188,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
u16 ret;
- if (*len < 10) {
- /* Need more than 10chars for this format */
+ if (*len < 9) {
+ /* Need more than 9 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6208,7 +6208,7 @@ int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
int status = 0;
u8 *ver_p = version;
u16 remain_len = len;
- if (version == NULL || params == NULL)
+ if (version == NULL || params == NULL || len == 0)
return -EINVAL;
bp = params->bp;
@@ -11546,7 +11546,7 @@ static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
str[2] = (spirom_ver & 0xFF0000) >> 16;
str[3] = (spirom_ver & 0xFF000000) >> 24;
str[4] = '\0';
- *len -= 5;
+ *len -= 4;
return 0;
}
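
These formatter fixes all correct the same remaining-length bookkeeping: a formatter consumes only the characters it writes, not the terminating NUL, because the next append overwrites the terminator in place. An illustrative sketch of the invariant (the real formatters write fixed-width fields):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

static int demo_append_ver(u8 *str, u16 *len, const u8 *src, u16 n)
{
        if (*len < n + 1)       /* n characters plus room for the NUL */
                return -EINVAL;

        memcpy(str, src, n);
        str[n] = '\0';
        *len -= n;              /* the NUL slot remains reusable */
        return 0;
}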
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 39845d556baf..493b724848c8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -246,6 +246,49 @@ static const u16 bnxt_async_events_arr[] = {
static struct workqueue_struct *bnxt_pf_wq;
+#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
+#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
+
+const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
+ .ports = {
+ .src = 0,
+ .dst = 0,
+ },
+ .addrs = {
+ .v6addrs = {
+ .src = BNXT_IPV6_MASK_NONE,
+ .dst = BNXT_IPV6_MASK_NONE,
+ },
+ },
+};
+
+const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
+ .ports = {
+ .src = cpu_to_be16(0xffff),
+ .dst = cpu_to_be16(0xffff),
+ },
+ .addrs = {
+ .v6addrs = {
+ .src = BNXT_IPV6_MASK_ALL,
+ .dst = BNXT_IPV6_MASK_ALL,
+ },
+ },
+};
+
+const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
+ .ports = {
+ .src = cpu_to_be16(0xffff),
+ .dst = cpu_to_be16(0xffff),
+ },
+ .addrs = {
+ .v4addrs = {
+ .src = cpu_to_be32(0xffffffff),
+ .dst = cpu_to_be32(0xffffffff),
+ },
+ },
+};
+
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
@@ -4168,8 +4211,12 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS)
- num_vnics += bp->rx_nr_rings;
+ if (bp->flags & BNXT_FLAG_RFS) {
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ num_vnics++;
+ else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ num_vnics += bp->rx_nr_rings;
+ }
#endif
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
@@ -4186,6 +4233,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
static void bnxt_init_vnics(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
int i;
for (i = 0; i < bp->nr_vnics; i++) {
@@ -4199,20 +4247,33 @@ static void bnxt_init_vnics(struct bnxt *bp)
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
- if (!i) {
+ if (i == BNXT_VNIC_DEFAULT) {
u8 *key = (void *)vnic->rss_hash_key;
int k;
+ if (!bp->rss_hash_key_valid &&
+ !bp->rss_hash_key_updated) {
+ get_random_bytes(bp->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
+
+ memcpy(vnic->rss_hash_key, bp->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+
+ if (!bp->rss_hash_key_updated)
+ continue;
+
+ bp->rss_hash_key_updated = false;
+ bp->rss_hash_key_valid = true;
+
bp->toeplitz_prefix = 0;
- get_random_bytes(vnic->rss_hash_key,
- HW_HASH_KEY_SIZE);
for (k = 0; k < 8; k++) {
bp->toeplitz_prefix <<= 8;
bp->toeplitz_prefix |= key[k];
}
} else {
- memcpy(vnic->rss_hash_key,
- bp->vnic_info[0].rss_hash_key,
+ memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
HW_HASH_KEY_SIZE);
}
}
@@ -4798,6 +4859,44 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
}
}
+void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ u8 type = fltr->type, flags = fltr->flags;
+
+ INIT_LIST_HEAD(&fltr->list);
+ if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
+ (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
+ list_add_tail(&fltr->list, &bp->usr_fltr_list);
+}
+
+void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ if (!list_empty(&fltr->list))
+ list_del_init(&fltr->list);
+}
+
+void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
+{
+ struct bnxt_filter_base *usr_fltr, *tmp;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
+ if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
+ continue;
+ bnxt_del_one_usr_fltr(bp, usr_fltr);
+ }
+}
+
+static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ hlist_del(&fltr->hash);
+ bnxt_del_one_usr_fltr(bp, fltr);
+ if (fltr->flags) {
+ clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ kfree(fltr);
+}
+
static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
@@ -4813,12 +4912,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
head = &bp->ntp_fltr_hash_tbl[i];
hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
bnxt_del_l2_filter(bp, fltr->l2_fltr);
- if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
+ !list_empty(&fltr->base.list)))
continue;
- hlist_del(&fltr->base.hash);
- clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
- bp->ntp_fltr_count--;
- kfree(fltr);
+ bnxt_del_fltr(bp, &fltr->base);
}
}
if (!all)
@@ -4840,7 +4937,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
@@ -4859,14 +4956,10 @@ static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
head = &bp->l2_fltr_hash_tbl[i];
hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
- if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
+ !list_empty(&fltr->base.list)))
continue;
- hlist_del(&fltr->base.hash);
- if (fltr->base.flags) {
- clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
- bp->ntp_fltr_count--;
- }
- kfree(fltr);
+ bnxt_del_fltr(bp, &fltr->base);
}
}
}
@@ -5039,8 +5132,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
if (rc)
goto alloc_mem_err;
- bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
- BNXT_VNIC_UCAST_FLAG;
+ bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
+ BNXT_VNIC_MCAST_FLAG |
+ BNXT_VNIC_UCAST_FLAG;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
+ bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
+ BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
+
rc = bnxt_alloc_vnic_attributes(bp);
if (rc)
goto alloc_mem_err;
@@ -5342,6 +5440,7 @@ void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
return;
}
hlist_del_rcu(&fltr->base.hash);
+ bnxt_del_one_usr_fltr(bp, &fltr->base);
if (fltr->base.flags) {
clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
bp->ntp_fltr_count--;
@@ -5480,13 +5579,15 @@ static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
int bit_id;
bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
- BNXT_MAX_FLTR, 0);
+ bp->max_fltr, 0);
if (bit_id < 0)
return -ENOMEM;
fltr->base.sw_id = (u16)bit_id;
+ bp->ntp_fltr_count++;
}
head = &bp->l2_fltr_hash_tbl[idx];
hlist_add_head_rcu(&fltr->base.hash, head);
+ bnxt_insert_usr_fltr(bp, &fltr->base);
set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
atomic_set(&fltr->refcnt, 1);
return 0;
@@ -5519,6 +5620,40 @@ static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
return fltr;
}
+struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u16 flags)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+ int rc;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ fltr = __bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr) {
+ fltr = ERR_PTR(-EEXIST);
+ goto l2_filter_exit;
+ }
+ fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
+ if (!fltr) {
+ fltr = ERR_PTR(-ENOMEM);
+ goto l2_filter_exit;
+ }
+ fltr->base.flags = flags;
+ rc = bnxt_init_l2_filter(bp, fltr, key, idx);
+ if (rc) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ bnxt_del_l2_filter(bp, fltr);
+ return ERR_PTR(rc);
+ }
+
+l2_filter_exit:
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return fltr;
+}
+
static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
{
#ifdef CONFIG_BNXT_SRIOV
@@ -5650,15 +5785,38 @@ void bnxt_fill_ipv6_mask(__be32 mask[4])
mask[i] = cpu_to_be32(~0);
}
+static void
+bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
+ struct hwrm_cfa_ntuple_filter_alloc_input *req,
+ u16 rxq)
+{
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ struct bnxt_vnic_info *vnic;
+ u32 enables;
+
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
+ req->enables |= cpu_to_le32(enables);
+ req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
+ } else {
+ u32 flags;
+
+ flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
+ req->flags |= cpu_to_le32(flags);
+ req->dst_id = cpu_to_le16(rxq);
+ }
+}
+
int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct hwrm_cfa_ntuple_filter_alloc_input *req;
+ struct bnxt_flow_masks *masks = &fltr->fmasks;
struct flow_keys *keys = &fltr->fkeys;
struct bnxt_l2_filter *l2_fltr;
struct bnxt_vnic_info *vnic;
- u32 flags = 0;
int rc;
rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
@@ -5668,16 +5826,16 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
l2_fltr = fltr->l2_fltr;
req->l2_filter_id = l2_fltr->base.filter_id;
-
- if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
- flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
- req->dst_id = cpu_to_le16(fltr->base.rxq);
+ if (fltr->base.flags & BNXT_ACT_DROP) {
+ req->flags =
+ cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
+ } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
+ bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq);
} else {
vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
- req->flags = cpu_to_le32(flags);
- req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+ req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
req->ethertype = htons(ETH_P_IP);
req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
@@ -5687,25 +5845,15 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req->ethertype = htons(ETH_P_IPV6);
req->ip_addr_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- *(struct in6_addr *)&req->src_ipaddr[0] =
- keys->addrs.v6addrs.src;
- bnxt_fill_ipv6_mask(req->src_ipaddr_mask);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- *(struct in6_addr *)&req->dst_ipaddr[0] =
- keys->addrs.v6addrs.dst;
- bnxt_fill_ipv6_mask(req->dst_ipaddr_mask);
- }
+ *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
+ *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
+ *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
+ *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
} else {
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- req->src_ipaddr[0] = keys->addrs.v4addrs.src;
- req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- }
+ req->src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
+ req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
}
if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
@@ -5713,14 +5861,10 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
- req->src_port = keys->ports.src;
- req->src_port_mask = cpu_to_be16(0xffff);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
- req->dst_port = keys->ports.dst;
- req->dst_port_mask = cpu_to_be16(0xffff);
- }
+ req->src_port = keys->ports.src;
+ req->src_port_mask = masks->ports.src;
+ req->dst_port = keys->ports.dst;
+ req->dst_port_mask = masks->ports.dst;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
@@ -5971,7 +6115,10 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
for (i = 0; i < tbl_size; i++) {
u16 ring_id, j;
- j = bp->rss_indir_tbl[i];
+ if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+ j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
+ else
+ j = bp->rss_indir_tbl[i];
rxr = &bp->rx_ring[j];
ring_id = rxr->rx_ring_struct.fw_ring_id;
@@ -5985,10 +6132,13 @@ static void
__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
struct bnxt_vnic_info *vnic)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
bnxt_fill_hw_rss_tbl_p5(bp, vnic);
- else
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+ } else {
bnxt_fill_hw_rss_tbl(bp, vnic);
+ }
if (bp->rss_hash_delta) {
req->hash_type = cpu_to_le32(bp->rss_hash_delta);
@@ -6061,7 +6211,7 @@ exit:
static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct hwrm_vnic_rss_qcfg_output *resp;
struct hwrm_vnic_rss_qcfg_input *req;
@@ -6165,6 +6315,7 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
+ struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_cfg_input *req;
unsigned int ring = 0, grp_idx;
@@ -6194,8 +6345,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
- req->rss_rule =
- cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
+ req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
@@ -6292,7 +6442,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
vnic_no_ring_grps:
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
- if (vnic_id == 0)
+ if (vnic_id == BNXT_VNIC_DEFAULT)
req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
resp = hwrm_req_hold(bp, req);
@@ -6351,6 +6501,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
}
if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
}
hwrm_req_drop(bp, req);
return rc;
@@ -6918,6 +7076,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_hw_ring_grps =
le32_to_cpu(resp->alloc_hw_ring_grps);
hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
hw_resc->resv_irqs = cp;
@@ -6973,8 +7132,7 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
static bool bnxt_rfs_supported(struct bnxt *bp);
static struct hwrm_func_cfg_input *
-__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 enables = 0;
@@ -6983,52 +7141,42 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return NULL;
req->fid = cpu_to_le16(0xffff);
- enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- req->num_tx_rings = cpu_to_le16(tx_rings);
+ enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
if (BNXT_NEW_RM(bp)) {
- enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= hwr->cp_p5 ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= rx_rings ?
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
} else {
- enables |= cp_rings ?
+ enables |= hwr->cp ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
- }
- enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
-
- req->num_rx_rings = cpu_to_le16(rx_rings);
+ enables |= hwr->grp ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ }
+ enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
+ 0;
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
-
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_msix = cpu_to_le16(cp_rings);
- req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
+ req->num_msix = cpu_to_le16(hwr->cp);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(1);
- if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
- bnxt_rfs_supported(bp))
- req->num_rsscos_ctxs =
- cpu_to_le16(ring_grps + 1);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
}
req->enables = cpu_to_le32(enables);
return req;
}
static struct hwrm_func_vf_cfg_input *
-__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 enables = 0;
@@ -7036,51 +7184,46 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
return NULL;
- enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
- enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->cp_p5 ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
- enables |= cp_rings ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
+ enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= hwr->grp ?
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
}
- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
- req->num_tx_rings = cpu_to_le16(tx_rings);
- req->num_rx_rings = cpu_to_le16(rx_rings);
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
-
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
req->enables = cpu_to_le32(enables);
return req;
}
static int
-bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
int rc;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -7094,25 +7237,23 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return rc;
if (bp->hwrm_spec_code < 0x10601)
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return bnxt_hwrm_get_rings(bp);
}
static int
-bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
int rc;
if (!BNXT_NEW_RM(bp)) {
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return 0;
}
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -7123,15 +7264,12 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return bnxt_hwrm_get_rings(bp);
}
-static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
- int cp, int stat, int vnic)
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (BNXT_PF(bp))
- return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_pf_rings(bp, hwr);
else
- return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_vf_rings(bp, hwr);
}
int bnxt_nq_rings_in_use(struct bnxt *bp)
@@ -7174,6 +7312,24 @@ static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
return cp + ulp_stat;
}
+static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ if (!hwr->grp)
+ return 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
+
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ rss_ctx *= hwr->vnic;
+ return rss_ctx;
+ }
+ if (BNXT_VF(bp))
+ return BNXT_VF_MAX_RSS_CTX;
+ if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
+ return hwr->grp + 1;
+ return 1;
+}
+
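bnxt_get_total_rss_ctxs() centralizes arithmetic that was previously open-coded at each reservation site. On P5+ chips the context count scales with the number of ring groups (one context per fixed-size block of indirection entries via bnxt_get_nr_rss_ctxs(), 64 entries in the current driver as far as I can tell), and once the dedicated ntuple VNIC exists that cost multiplies by hwr->vnic, as also visible in the bnxt_rfs_capable() hunk below. A worked sketch, with the 64-entry block size treated as an assumption:

    #include <stdio.h>

    #define RSS_TABLE_ENTRIES_P5 64 /* assumed block size per RSS context */

    static int div_round_up(int n, int d) { return (n + d - 1) / d; }

    /* P5+ path of bnxt_get_total_rss_ctxs(): contexts scale with ring
     * groups, then multiply by the VNIC count when an ntuple VNIC exists. */
    static int total_rss_ctxs_p5(int ring_grps, int vnics, int ntuple_vnic)
    {
        int ctxs = div_round_up(ring_grps, RSS_TABLE_ENTRIES_P5);

        return ntuple_vnic ? ctxs * vnics : ctxs;
    }

    int main(void)
    {
        /* 8 ring groups, 2 VNICs (default + ntuple) -> 2 contexts. */
        printf("%d\n", total_rss_ctxs_p5(8, 2, 1));
        return 0;
    }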
/* Check if a default RSS map needs to be setup. This function is only
* used on older firmware that does not require reserving RX rings.
*/
@@ -7189,13 +7345,24 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
}
}
+static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
+{
+ if (bp->flags & BNXT_FLAG_RFS) {
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ return 2;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return rx_rings + 1;
+ }
+ return 1;
+}
+
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int cp = bnxt_cp_rings_in_use(bp);
int nq = bnxt_nq_rings_in_use(bp);
int rx = bp->rx_nr_rings, stat;
- int vnic = 1, grp = rx;
+ int vnic, grp = rx;
if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
bp->hwrm_spec_code >= 0x10601)
@@ -7210,9 +7377,9 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
bnxt_check_rss_tbl_no_rmgr(bp);
return false;
}
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- vnic = rx + 1;
+
+ vnic = bnxt_get_total_vnics(bp, rx);
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
stat = bnxt_get_func_stat_ctxs(bp);
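bnxt_get_total_vnics() makes the VNIC budget explicit: one default VNIC always, and with RFS enabled either a single extra ntuple VNIC on ring-table-index-V3 firmware, or one VNIC per RX ring on the legacy path. A mirror of that decision tree with stand-in booleans, worked for 8 rings:

    #include <stdio.h>
    #include <stdbool.h>

    /* Mirrors bnxt_get_total_vnics(); stand-in bools replace bp->flags
     * and the BNXT_SUPPORTS_NTUPLE_VNIC() capability check. */
    static int total_vnics(bool rfs, bool ntuple_vnic, bool p5_plus,
                           int rx_rings)
    {
        if (rfs) {
            if (ntuple_vnic)
                return 2;            /* default + one ntuple VNIC */
            if (!p5_plus)
                return rx_rings + 1; /* legacy: one VNIC per RX ring */
        }
        return 1;
    }

    int main(void)
    {
        printf("legacy RFS, 8 rings: %d VNICs\n",
               total_vnics(true, false, false, 8)); /* 9 */
        printf("V3 firmware, 8 rings: %d VNICs\n",
               total_vnics(true, true, false, 8));  /* 2 */
        return 0;
    }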
@@ -7227,47 +7394,65 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
return false;
}
-static int __bnxt_reserve_rings(struct bnxt *bp)
+static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int cp = bnxt_nq_rings_in_use(bp);
- int tx = bp->tx_nr_rings;
- int rx = bp->rx_nr_rings;
- int grp, rx_rings, rc;
- int vnic = 1, stat;
+
+ hwr->tx = hw_resc->resv_tx_rings;
+ if (BNXT_NEW_RM(bp)) {
+ hwr->rx = hw_resc->resv_rx_rings;
+ hwr->cp = hw_resc->resv_irqs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr->cp_p5 = hw_resc->resv_cp_rings;
+ hwr->grp = hw_resc->resv_hw_ring_grps;
+ hwr->vnic = hw_resc->resv_vnics;
+ hwr->stat = hw_resc->resv_stat_ctxs;
+ hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
+ }
+}
+
+static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
+ hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
+}
+
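__bnxt_reserve_rings() below now follows a request, read-back, validate shape: ask firmware for the desired counts, copy back what was actually granted via bnxt_copy_reserved_rings(), and fail with -ENOMEM if bnxt_rings_ok() finds any required resource granted as zero. A generic sketch of that pattern (the ceilings are invented):

    #include <stdio.h>

    struct rings { int tx, rx, cp, vnic; };

    /* Pretend firmware that can only grant up to fixed ceilings. */
    static void fw_reserve(const struct rings *want, struct rings *got)
    {
        const struct rings max = { .tx = 8, .rx = 8, .cp = 16, .vnic = 2 };

        got->tx = want->tx < max.tx ? want->tx : max.tx;
        got->rx = want->rx < max.rx ? want->rx : max.rx;
        got->cp = want->cp < max.cp ? want->cp : max.cp;
        got->vnic = want->vnic < max.vnic ? want->vnic : max.vnic;
    }

    /* Like bnxt_rings_ok(): every required resource must be nonzero. */
    static int rings_ok(const struct rings *r)
    {
        return r->tx && r->rx && r->cp && r->vnic;
    }

    int main(void)
    {
        struct rings want = { 12, 12, 24, 2 }, got;

        fw_reserve(&want, &got);
        printf("granted tx=%d rx=%d cp=%d vnic=%d ok=%d\n",
               got.tx, got.rx, got.cp, got.vnic, rings_ok(&got));
        return 0;
    }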
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
+ struct bnxt_hw_rings hwr = {0};
+ int rx_rings, rc;
bool sh = false;
int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
+ hwr.cp = bnxt_nq_rings_in_use(bp);
+ hwr.tx = bp->tx_nr_rings;
+ hwr.rx = bp->rx_nr_rings;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr.cp_p5 = hwr.rx + hwr.tx;
+
+ hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx <<= 1;
- grp = bp->rx_nr_rings;
- stat = bnxt_get_func_stat_ctxs(bp);
+ hwr.rx <<= 1;
+ hwr.grp = bp->rx_nr_rings;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
+ hwr.stat = bnxt_get_func_stat_ctxs(bp);
- rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
+ rc = bnxt_hwrm_reserve_rings(bp, &hwr);
if (rc)
return rc;
- tx = hw_resc->resv_tx_rings;
- if (BNXT_NEW_RM(bp)) {
- rx = hw_resc->resv_rx_rings;
- cp = hw_resc->resv_irqs;
- grp = hw_resc->resv_hw_ring_grps;
- vnic = hw_resc->resv_vnics;
- stat = hw_resc->resv_stat_ctxs;
- }
+ bnxt_copy_reserved_rings(bp, &hwr);
- rx_rings = rx;
+ rx_rings = hwr.rx;
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- if (rx >= 2) {
- rx_rings = rx >> 1;
+ if (hwr.rx >= 2) {
+ rx_rings = hwr.rx >> 1;
} else {
if (netif_running(bp->dev))
return -ENOMEM;
@@ -7279,17 +7464,17 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
bnxt_set_ring_params(bp);
}
}
- rx_rings = min_t(int, rx_rings, grp);
- cp = min_t(int, cp, bp->cp_nr_rings);
- if (stat > bnxt_get_ulp_stat_ctxs(bp))
- stat -= bnxt_get_ulp_stat_ctxs(bp);
- cp = min_t(int, cp, stat);
- rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+ rx_rings = min_t(int, rx_rings, hwr.grp);
+ hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
+ if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp = min_t(int, hwr.cp, hwr.stat);
+ rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx = rx_rings << 1;
- tx_cp = bnxt_num_tx_to_cp(bp, tx);
- cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
- bp->tx_nr_rings = tx;
+ hwr.rx = rx_rings << 1;
+ tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
+ hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
+ bp->tx_nr_rings = hwr.tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
* if absolutely necessary
@@ -7306,9 +7491,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
}
bp->rx_nr_rings = rx_rings;
- bp->cp_nr_rings = cp;
+ bp->cp_nr_rings = hwr.cp;
- if (!tx || !rx || !cp || !grp || !vnic || !stat)
+ if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
if (!netif_is_rxfh_configured(bp->dev))
@@ -7317,9 +7502,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
return rc;
}
-static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 flags;
@@ -7327,8 +7510,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (!BNXT_NEW_RM(bp))
return 0;
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
@@ -7342,15 +7524,12 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 flags;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
@@ -7368,20 +7547,15 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (bp->hwrm_spec_code < 0x10801)
return 0;
if (BNXT_PF(bp))
- return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
- ring_grps, cp_rings, stats,
- vnics);
+ return bnxt_hwrm_check_pf_rings(bp, hwr);
- return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ return bnxt_hwrm_check_vf_rings(bp, hwr);
}
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
@@ -8709,6 +8883,13 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
+ hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
+ hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+ hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+ hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+ hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -8717,12 +8898,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
- pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
- pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
- pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
- pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
- pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
- pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
@@ -8825,6 +9000,14 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
+
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
+
hwrm_cfa_adv_qcaps_exit:
hwrm_req_drop(bp, req);
return rc;
@@ -9689,10 +9872,28 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
return __bnxt_setup_vnic(bp, vnic_id);
}
+static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id,
+ u16 start_rx_ring_idx, int rx_rings)
+{
+ int rc;
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic_id, rc);
+ return rc;
+ }
+ return bnxt_setup_vnic(bp, vnic_id);
+}
+
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
int i, rc = 0;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0,
+ bp->rx_nr_rings);
+
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
@@ -9708,14 +9909,7 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic->flags |= BNXT_VNIC_RFS_FLAG;
if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
- rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
- break;
- }
- rc = bnxt_setup_vnic(bp, vnic_id);
- if (rc)
+ if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1))
break;
}
return rc;
@@ -9756,7 +9950,7 @@ static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
int rc = 0;
unsigned int rx_nr_rings = bp->rx_nr_rings;
@@ -9785,7 +9979,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rx_nr_rings--;
/* default vnic 0 */
- rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
goto err_out;
@@ -9794,7 +9988,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (BNXT_VF(bp))
bnxt_hwrm_func_qcfg(bp);
- rc = bnxt_setup_vnic(bp, 0);
+ rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT);
if (rc)
goto err_out;
if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
@@ -10621,10 +10815,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
- eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
@@ -10766,7 +10960,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->module_status = resp->module_status;
if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
u16 fw_speeds;
eee->eee_active = 0;
@@ -10775,8 +10969,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
eee->eee_active = 1;
fw_speeds = le16_to_cpu(
resp->link_partner_adv_eee_link_speed_mask);
- eee->lp_advertised =
- _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
}
/* Pull initial EEE config */
@@ -10786,8 +10979,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
eee->eee_enabled = 1;
fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
- eee->advertised =
- _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
if (resp->eee_config_phy_addr &
PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
@@ -10957,7 +11149,7 @@ int bnxt_hwrm_set_pause(struct bnxt *bp)
static void bnxt_hwrm_set_eee(struct bnxt *bp,
struct hwrm_port_phy_cfg_input *req)
{
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
if (eee->eee_enabled) {
u16 eee_speeds;
@@ -11087,6 +11279,7 @@ static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
hw_resc->resv_rx_rings = 0;
hw_resc->resv_hw_ring_grps = 0;
hw_resc->resv_vnics = 0;
+ hw_resc->resv_rsscos_ctxs = 0;
if (!fw_reset) {
bp->tx_nr_rings = 0;
bp->rx_nr_rings = 0;
@@ -11322,22 +11515,25 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return true;
if (eee->eee_enabled) {
- u32 advertising =
- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
+
+ _bnxt_fw_to_linkmode(advertising, link_info->advertising);
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
eee->eee_enabled = 0;
return false;
}
- if (eee->advertised & ~advertising) {
- eee->advertised = advertising & eee->supported;
+ if (linkmode_andnot(tmp, eee->advertised, advertising)) {
+ linkmode_and(eee->advertised, advertising,
+ eee->supported);
return false;
}
}
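The ethtool_eee to ethtool_keee conversion replaces u32 speed words with full linkmode bitmaps, which is required to represent link modes numbered above 31. linkmode_andnot() returns nonzero when the result is non-empty, so the test above reads as "is EEE advertising anything that autoneg is not advertising at all?". A userspace sketch of that bitmap check:

    #include <stdio.h>

    #define NBITS 128
    #define LONGS ((NBITS + 63) / 64)

    typedef unsigned long long lmask[LONGS];

    /* Like linkmode_andnot(): dst = a & ~b, returns 1 if dst non-empty. */
    static int lmask_andnot(lmask dst, const lmask a, const lmask b)
    {
        unsigned long long any = 0;

        for (int i = 0; i < LONGS; i++) {
            dst[i] = a[i] & ~b[i];
            any |= dst[i];
        }
        return any != 0;
    }

    int main(void)
    {
        lmask adv = { 0 }, eee = { 0 }, tmp;

        adv[0] = 1ULL << 5;                  /* autoneg advertises mode 5 */
        eee[0] = (1ULL << 5) | (1ULL << 40); /* EEE also claims mode 40,
                                              * unreachable in a u32 */
        /* Non-empty result -> EEE advertisement must be trimmed. */
        printf("needs trim: %d\n", lmask_andnot(tmp, eee, adv)); /* 1 */
        return 0;
    }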
@@ -11442,6 +11638,42 @@ static int bnxt_reinit_after_abort(struct bnxt *bp)
return rc;
}
+static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ struct bnxt_ntuple_filter *ntp_fltr;
+ struct bnxt_l2_filter *l2_fltr;
+
+ if (list_empty(&fltr->list))
+ return;
+
+ if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
+ ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ ntp_fltr->l2_fltr = l2_fltr;
+ if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
+ bnxt_del_ntp_filter(bp, ntp_fltr);
+ netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
+ fltr->sw_id);
+ }
+ } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
+ l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
+ if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
+ bnxt_del_l2_filter(bp, l2_fltr);
+ netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
+ fltr->sw_id);
+ }
+ }
+}
+
+static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
+{
+ struct bnxt_filter_base *usr_fltr, *tmp;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
+ bnxt_cfg_one_usr_fltr(bp, usr_fltr);
+}
+
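bnxt_cfg_one_usr_fltr() recovers the full filter object from the embedded bnxt_filter_base with container_of(); the driver additionally requires the base to be the first member (see the comments added in the bnxt.h hunks below), which keeps the base pointer and the outer pointer interchangeable. A standalone demo of the pattern:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct filter_base { int sw_id; };

    struct ntuple_filter {
        struct filter_base base; /* kept as the first member */
        unsigned int flow_id;
    };

    static void handle(struct filter_base *fltr)
    {
        /* Walk back from the embedded base to the outer object. */
        struct ntuple_filter *ntp =
            container_of(fltr, struct ntuple_filter, base);

        printf("sw_id=%d flow_id=%u\n", ntp->base.sw_id, ntp->flow_id);
    }

    int main(void)
    {
        struct ntuple_filter f = { .base = { .sw_id = 7 }, .flow_id = 42 };

        handle(&f.base);
        return 0;
    }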
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -11528,6 +11760,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_vf_reps_open(bp);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
+ bnxt_cfg_usr_fltrs(bp);
return 0;
open_err_irq:
@@ -11969,8 +12202,8 @@ void bnxt_get_ring_err_stats(struct bnxt *bp,
static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
struct netdev_hw_addr *ha;
u8 *haddr;
int mc_count = 0;
@@ -12004,7 +12237,7 @@ static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
static bool bnxt_uc_list_updated(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int off = 0;
@@ -12031,7 +12264,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (!test_bit(BNXT_STATE_OPEN, &bp->state))
return;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
mask = vnic->rx_mask;
mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
@@ -12062,7 +12295,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int i, off = 0, rc;
bool uc_update;
@@ -12174,21 +12407,32 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
- int vnics, max_vnics, max_rss_ctxs;
+ struct bnxt_hw_rings hwr = {0};
+ int max_vnics, max_rss_ctxs;
+ hwr.rss_ctx = 1;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ /* 2 VNICS: default + Ntuple */
+ hwr.vnic = 2;
+ hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+ hwr.vnic;
+ goto check_reserve_vnic;
+ }
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
- vnics = 1 + bp->rx_nr_rings;
+ hwr.vnic = 1 + bp->rx_nr_rings;
+check_reserve_vnic:
max_vnics = bnxt_get_max_func_vnics(bp);
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
- /* RSS contexts not a limiting factor */
- if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
- max_rss_ctxs = max_vnics;
- if (vnics > max_vnics || vnics > max_rss_ctxs) {
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP))
+ hwr.rss_ctx = hwr.vnic;
+
+ if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
netdev_warn(bp->dev,
"Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
@@ -12199,15 +12443,19 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (!BNXT_NEW_RM(bp))
return true;
- if (vnics == bp->hw_resc.resv_vnics)
+ if (hwr.vnic == bp->hw_resc.resv_vnics &&
+ hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
- if (vnics <= bp->hw_resc.resv_vnics)
+ bnxt_hwrm_reserve_rings(bp, &hwr);
+ if (hwr.vnic <= bp->hw_resc.resv_vnics &&
+ hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
+ hwr.vnic = 1;
+ hwr.rss_ctx = 0;
+ bnxt_hwrm_reserve_rings(bp, &hwr);
return false;
}
@@ -12246,14 +12494,24 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
return features;
}
+static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
+ bool link_re_init, u32 flags, bool update_tpa)
+{
+ bnxt_close_nic(bp, irq_re_init, link_re_init);
+ bp->flags = flags;
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+ return bnxt_open_nic(bp, irq_re_init, link_re_init);
+}
+
static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
{
+ bool update_tpa = false, update_ntuple = false;
struct bnxt *bp = netdev_priv(dev);
u32 flags = bp->flags;
u32 changes;
int rc = 0;
bool re_init = false;
- bool update_tpa = false;
flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
if (features & NETIF_F_GRO_HW)
@@ -12269,6 +12527,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (features & NETIF_F_NTUPLE)
flags |= BNXT_FLAG_RFS;
+ else
+ bnxt_clear_usr_fltrs(bp, true);
changes = flags ^ bp->flags;
if (changes & BNXT_FLAG_TPA) {
@@ -12282,6 +12542,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (changes & ~BNXT_FLAG_TPA)
re_init = true;
+ if (changes & BNXT_FLAG_RFS)
+ update_ntuple = true;
+
if (flags != bp->flags) {
u32 old_flags = bp->flags;
@@ -12292,14 +12555,12 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
- if (re_init) {
- bnxt_close_nic(bp, false, false);
- bp->flags = flags;
- if (update_tpa)
- bnxt_set_ring_params(bp);
+ if (update_ntuple)
+ return bnxt_reinit_features(bp, true, false, flags, update_tpa);
+
+ if (re_init)
+ return bnxt_reinit_features(bp, false, false, flags, update_tpa);
- return bnxt_open_nic(bp, false, false);
- }
if (update_tpa) {
bp->flags = flags;
rc = bnxt_set_tpa(bp,
@@ -13129,9 +13390,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
- int tx_rings_needed, stats;
+ struct bnxt_hw_rings hwr = {0};
int rx_rings = rx;
- int cp, vnics;
if (tcs)
tx_sets = tcs;
@@ -13144,26 +13404,27 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
- tx_rings_needed = tx * tx_sets + tx_xdp;
- if (max_tx < tx_rings_needed)
+ hwr.rx = rx_rings;
+ hwr.tx = tx * tx_sets + tx_xdp;
+ if (max_tx < hwr.tx)
return -ENOMEM;
- vnics = 1;
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) ==
- BNXT_FLAG_RFS)
- vnics += rx;
+ hwr.vnic = bnxt_get_total_vnics(bp, rx);
- tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp);
- cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
- if (max_cp < cp)
+ tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
+ hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
+ if (max_cp < hwr.cp)
return -ENOMEM;
- stats = cp;
+ hwr.stat = hwr.cp;
if (BNXT_NEW_RM(bp)) {
- cp += bnxt_get_ulp_msix_num(bp);
- stats += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp += bnxt_get_ulp_msix_num(bp);
+ hwr.stat += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.grp = rx;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
}
- return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
- stats, vnics);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr.cp_p5 = hwr.tx + rx;
+ return bnxt_hwrm_check_rings(bp, &hwr);
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -13766,6 +14027,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
eth_hw_addr_set(dev, addr->sa_data);
+ bnxt_clear_usr_fltrs(bp, true);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -13888,7 +14150,7 @@ u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
if (skb)
return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
}
@@ -13899,7 +14161,7 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
int bit_id;
spin_lock_bh(&bp->ntp_fltr_lock);
- bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
if (bit_id < 0) {
spin_unlock_bh(&bp->ntp_fltr_lock);
return -ENOMEM;
@@ -13911,6 +14173,7 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
head = &bp->ntp_fltr_hash_tbl[idx];
hlist_add_head_rcu(&fltr->base.hash, head);
set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ bnxt_insert_usr_fltr(bp, &fltr->base);
bp->ntp_fltr_count++;
spin_unlock_bh(&bp->ntp_fltr_lock);
return 0;
@@ -13919,45 +14182,39 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
{
+ struct bnxt_flow_masks *masks1 = &f1->fmasks;
+ struct bnxt_flow_masks *masks2 = &f2->fmasks;
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
- if (f1->ntuple_flags != f2->ntuple_flags)
- return false;
-
if (keys1->basic.n_proto != keys2->basic.n_proto ||
keys1->basic.ip_proto != keys2->basic.ip_proto)
return false;
if (keys1->basic.n_proto == htons(ETH_P_IP)) {
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
- keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
- keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst))
+ if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
+ masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
+ masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
return false;
} else {
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
- memcmp(&keys1->addrs.v6addrs.src,
- &keys2->addrs.v6addrs.src,
- sizeof(keys1->addrs.v6addrs.src))) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
- memcmp(&keys1->addrs.v6addrs.dst,
- &keys2->addrs.v6addrs.dst,
- sizeof(keys1->addrs.v6addrs.dst))))
+ if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
+ &keys2->addrs.v6addrs.src) ||
+ !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
+ &masks2->addrs.v6addrs.src) ||
+ !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
+ &keys2->addrs.v6addrs.dst) ||
+ !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
+ &masks2->addrs.v6addrs.dst))
return false;
}
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) &&
- keys1->ports.src != keys2->ports.src) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) &&
- keys1->ports.dst != keys2->ports.dst))
- return false;
-
- if (keys1->control.flags == keys2->control.flags &&
- f1->l2_fltr == f2->l2_fltr)
- return true;
-
- return false;
+ return keys1->ports.src == keys2->ports.src &&
+ masks1->ports.src == masks2->ports.src &&
+ keys1->ports.dst == keys2->ports.dst &&
+ masks1->ports.dst == masks2->ports.dst &&
+ keys1->control.flags == keys2->control.flags &&
+ f1->l2_fltr == f2->l2_fltr;
}
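Filter identity is now the (key, mask) pair: two rules that match the same address under different masks, say a /32 and a /24 on one source IP, are distinct filters rather than duplicates. A sketch of that equality test on a reduced IPv4 tuple:

    #include <stdint.h>
    #include <stdio.h>

    struct v4_rule {
        uint32_t sip, sip_mask;
        uint16_t sport, sport_mask;
    };

    /* Both keys and masks must agree for two rules to be duplicates. */
    static int rule_equal(const struct v4_rule *a, const struct v4_rule *b)
    {
        return a->sip == b->sip && a->sip_mask == b->sip_mask &&
               a->sport == b->sport && a->sport_mask == b->sport_mask;
    }

    int main(void)
    {
        struct v4_rule host = { 0x0a000001, 0xffffffff, 0, 0 };
        struct v4_rule net  = { 0x0a000001, 0xffffff00, 0, 0 };

        printf("duplicate: %d\n", rule_equal(&host, &net)); /* 0 */
        return 0;
    }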
struct bnxt_ntuple_filter *
@@ -13988,7 +14245,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u32 flags;
if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
- l2_fltr = bp->vnic_info[0].l2_filters[0];
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
atomic_inc(&l2_fltr->refcnt);
} else {
struct bnxt_l2_key key;
@@ -14022,10 +14279,13 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
- if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
- bp->hwrm_spec_code < 0x10601) {
- rc = -EPROTONOSUPPORT;
- goto err_free;
+ new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
+ if (bp->hwrm_spec_code < 0x10601) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+ new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
}
flags = fkeys->control.flags;
if (((flags & FLOW_DIS_ENCAPSULATION) &&
@@ -14033,9 +14293,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
-
new_fltr->l2_fltr = l2_fltr;
- new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
rcu_read_lock();
@@ -14070,6 +14328,7 @@ void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
return;
}
hlist_del_rcu(&fltr->base.hash);
+ bnxt_del_one_usr_fltr(bp, &fltr->base);
bp->ntp_fltr_count--;
spin_unlock_bh(&bp->ntp_fltr_lock);
bnxt_del_l2_filter(bp, fltr->l2_fltr);
@@ -14264,6 +14523,70 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_bridge_setlink = bnxt_bridge_setlink,
};
+static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_cp_ring_info *cpr;
+ u64 *sw;
+
+ cpr = &bp->bnapi[i]->cp_ring;
+ sw = cpr->stats.sw_stats;
+
+ stats->packets = 0;
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
+
+ stats->bytes = 0;
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
+
+ stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards;
+}
+
+static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_napi *bnapi;
+ u64 *sw;
+
+ bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
+ sw = bnapi->cp_ring.stats.sw_stats;
+
+ stats->packets = 0;
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
+
+ stats->bytes = 0;
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
+}
+
+static void bnxt_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ rx->packets = bp->net_stats_prev.rx_packets;
+ rx->bytes = bp->net_stats_prev.rx_bytes;
+ rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
+
+ tx->packets = bp->net_stats_prev.tx_packets;
+ tx->bytes = bp->net_stats_prev.tx_bytes;
+}
+
+static const struct netdev_stat_ops bnxt_stat_ops = {
+ .get_queue_stats_rx = bnxt_get_queue_stats_rx,
+ .get_queue_stats_tx = bnxt_get_queue_stats_tx,
+ .get_base_stats = bnxt_get_base_stats,
+};
+
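netdev_stat_ops is the per-queue statistics interface: the core queries live queues through get_queue_stats_rx/tx and adds get_base_stats(), which here returns the counters the driver saved away from earlier ring configurations (net_stats_prev), so device totals stay consistent across ring-count changes. A toy model of that base-plus-queues contract:

    #include <stdio.h>

    struct qstats { unsigned long long packets; };

    /* base + sum(active queues) must equal the device total. */
    static unsigned long long total(const struct qstats *base,
                                    const struct qstats *q, int nq)
    {
        unsigned long long sum = base->packets;

        for (int i = 0; i < nq; i++)
            sum += q[i].packets;
        return sum;
    }

    int main(void)
    {
        struct qstats base = { 1000 };        /* from retired rings */
        struct qstats q[2] = { { 10 }, { 20 } };

        printf("%llu\n", total(&base, q, 2)); /* 1030 */
        return 0;
    }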
static void bnxt_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -14669,6 +14992,7 @@ void bnxt_print_device_info(struct bnxt *bp)
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct bnxt_hw_resc *hw_resc;
struct net_device *dev;
struct bnxt *bp;
int rc, max_irqs;
@@ -14710,6 +15034,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_free;
dev->netdev_ops = &bnxt_netdev_ops;
+ dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
@@ -14827,6 +15152,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ hw_resc = &bp->hw_resc;
+ bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
+ BNXT_L2_FLTR_MAX_FLTR;
+ /* Older firmware may not report these filters properly */
+ if (bp->max_fltr < BNXT_MAX_FLTR)
+ bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
@@ -14879,6 +15210,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_dl;
+ INIT_LIST_HEAD(&bp->usr_fltr_list);
+
rc = register_netdev(dev);
if (rc)
goto init_err_cleanup;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 47338b48ca20..dd849e715c9b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1213,6 +1213,9 @@ struct bnxt_ring_grp_info {
u16 cp_fw_ring_id;
};
+#define BNXT_VNIC_DEFAULT 0
+#define BNXT_VNIC_NTUPLE 1
+
struct bnxt_vnic_info {
u16 fw_vnic_id; /* returned by Chimp during alloc */
#define BNXT_MAX_CTX_PER_VNIC 8
@@ -1252,11 +1255,24 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_MCAST_FLAG 4
#define BNXT_VNIC_UCAST_FLAG 8
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
+#define BNXT_VNIC_NTUPLE_FLAG 0x20
+};
+
+struct bnxt_hw_rings {
+ int tx;
+ int rx;
+ int grp;
+ int cp;
+ int cp_p5;
+ int stat;
+ int vnic;
+ int rss_ctx;
};
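This struct is the backbone of the refactor running through the bnxt.c hunks above: the reserve and check helpers used to thread six loose int parameters through every call, an easy place to swap two counts silently. A standalone before/after sketch (the stub and the counts are illustrative):

    #include <stdio.h>

    struct hw_rings { int tx, rx, grp, cp, cp_p5, stat, vnic, rss_ctx; };

    /* Stub standing in for bnxt_hwrm_reserve_rings(bp, &hwr); the old
     * signature took (bp, tx, rx, grp, cp, stat, vnic) positionally. */
    static int reserve_rings(const struct hw_rings *hwr)
    {
        printf("tx=%d rx=%d vnic=%d rss_ctx=%d\n",
               hwr->tx, hwr->rx, hwr->vnic, hwr->rss_ctx);
        return 0;
    }

    int main(void)
    {
        /* Named designators replace six easily-swapped positional ints,
         * and the struct carries the new cp_p5 and rss_ctx members too. */
        struct hw_rings hwr = {
            .tx = 8, .rx = 8, .grp = 8, .cp = 16, .cp_p5 = 16,
            .stat = 16, .vnic = 2, .rss_ctx = 2,
        };

        return reserve_rings(&hwr);
    }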
struct bnxt_hw_resc {
u16 min_rsscos_ctxs;
u16 max_rsscos_ctxs;
+ u16 resv_rsscos_ctxs;
u16 min_cp_rings;
u16 max_cp_rings;
u16 resv_cp_rings;
@@ -1281,6 +1297,12 @@ struct bnxt_hw_resc {
u16 max_nqs;
u16 max_irqs;
u16 resv_irqs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
};
#if defined(CONFIG_BNXT_SRIOV)
@@ -1315,12 +1337,6 @@ struct bnxt_pf_info {
u16 active_vfs;
u16 registered_vfs;
u16 max_vfs;
- u32 max_encap_records;
- u32 max_decap_records;
- u32 max_tx_em_flows;
- u32 max_tx_wm_flows;
- u32 max_rx_em_flows;
- u32 max_rx_wm_flows;
unsigned long *vf_event_bmap;
u16 hwrm_cmd_req_pages;
u8 vf_resv_strategy;
@@ -1334,6 +1350,7 @@ struct bnxt_pf_info {
struct bnxt_filter_base {
struct hlist_node hash;
+ struct list_head list;
__le64 filter_id;
u8 type;
#define BNXT_FLTR_TYPE_NTUPLE 1
@@ -1355,19 +1372,21 @@ struct bnxt_filter_base {
struct rcu_head rcu;
};
+struct bnxt_flow_masks {
+ struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_addrs addrs;
+};
+
+extern const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE;
+extern const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL;
+extern const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL;
+
struct bnxt_ntuple_filter {
+ /* base filter must be the first member */
struct bnxt_filter_base base;
struct flow_keys fkeys;
+ struct bnxt_flow_masks fmasks;
struct bnxt_l2_filter *l2_fltr;
- u32 ntuple_flags;
-#define BNXT_NTUPLE_MATCH_SRC_IP 1
-#define BNXT_NTUPLE_MATCH_DST_IP 2
-#define BNXT_NTUPLE_MATCH_SRC_PORT 4
-#define BNXT_NTUPLE_MATCH_DST_PORT 8
-#define BNXT_NTUPLE_MATCH_ALL (BNXT_NTUPLE_MATCH_SRC_IP | \
- BNXT_NTUPLE_MATCH_DST_IP | \
- BNXT_NTUPLE_MATCH_SRC_PORT | \
- BNXT_NTUPLE_MATCH_DST_PORT)
u32 flow_id;
};
@@ -1394,6 +1413,7 @@ struct bnxt_ipv6_tuple {
#define BNXT_L2_KEY_SIZE (sizeof(struct bnxt_l2_key) / 4)
struct bnxt_l2_filter {
+ /* base filter must be the first member */
struct bnxt_filter_base base;
struct bnxt_l2_key l2_key;
atomic_t refcnt;
@@ -2217,6 +2237,14 @@ struct bnxt {
#define BNXT_RSS_CAP_UDP_RSS_CAP BIT(1)
#define BNXT_RSS_CAP_NEW_RSS_CAP BIT(2)
#define BNXT_RSS_CAP_RSS_TCAM BIT(3)
+#define BNXT_RSS_CAP_AH_V4_RSS_CAP BIT(4)
+#define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5)
+#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
+#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
+
+ u8 rss_hash_key[HW_HASH_KEY_SIZE];
+ u8 rss_hash_key_valid:1;
+ u8 rss_hash_key_updated:1;
u16 max_mtu;
u8 max_tc;
@@ -2301,12 +2329,17 @@ struct bnxt {
#define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35)
#define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(36)
#define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
+ #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
#define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \
((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC))
+#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \
+ (BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3))
+
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
@@ -2428,6 +2461,7 @@ struct bnxt {
unsigned long *ntp_fltr_bmap;
int ntp_fltr_count;
+ int max_fltr;
#define BNXT_L2_FLTR_MAX_FLTR 1024
#define BNXT_L2_FLTR_HASH_SIZE 32
@@ -2437,12 +2471,14 @@ struct bnxt {
u32 hash_seed;
u64 toeplitz_prefix;
+ struct list_head usr_fltr_list;
+
/* To protect link related settings during link changes and
* ethtool settings changes.
*/
struct mutex link_lock;
struct bnxt_link_info link_info;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
@@ -2641,10 +2677,16 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
+void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
+void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u16 flags);
int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr);
int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr);
int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index dc4ca706b0e2..1d240a27455a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -968,6 +968,7 @@ static int bnxt_set_channels(struct net_device *dev,
return -EINVAL;
}
+ bnxt_clear_usr_fltrs(bp, true);
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1058,11 +1059,17 @@ static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
+ u32 count;
+
cmd->data = bp->ntp_fltr_count;
rcu_read_lock();
+ count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
+ cmd->rule_cnt);
cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
BNXT_NTP_FLTR_HASH_SIZE,
- rule_locs, 0, cmd->rule_cnt);
+ rule_locs, count,
+ cmd->rule_cnt);
rcu_read_unlock();
return 0;
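bnxt_grxclsrlall() now reports both filter families through one table: the L2 classification rule IDs are gathered first and their count becomes the starting offset for the ntuple IDs, so ethtool -n sees a single combined rule list. A minimal sketch of the offset-chained collection (names are illustrative):

    #include <stdio.h>

    /* Two-pass ID collection: L2 filter IDs first, ntuple IDs appended. */
    static unsigned int collect(const int *ids, unsigned int n,
                                unsigned int *locs, unsigned int start,
                                unsigned int max)
    {
        unsigned int k = start;

        for (unsigned int i = 0; i < n && k < max; i++)
            locs[k++] = ids[i];
        return k;
    }

    int main(void)
    {
        int l2[] = { 100, 101 }, ntp[] = { 7, 8, 9 };
        unsigned int locs[8], count;

        count = collect(l2, 2, locs, 0, 8);
        count = collect(ntp, 3, locs, count, 8);
        for (unsigned int i = 0; i < count; i++)
            printf("%u ", locs[i]); /* 100 101 7 8 9 */
        printf("\n");
        return 0;
    }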
@@ -1074,13 +1081,44 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
+ struct bnxt_flow_masks *fmasks;
struct flow_keys *fkeys;
int rc = -EINVAL;
- if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+ if (fs->location >= bp->max_fltr)
return rc;
rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE,
+ fs->location);
+ if (fltr_base) {
+ struct ethhdr *h_ether = &fs->h_u.ether_spec;
+ struct ethhdr *m_ether = &fs->m_u.ether_spec;
+ struct bnxt_l2_filter *l2_fltr;
+ struct bnxt_l2_key *l2_key;
+
+ l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
+ l2_key = &l2_fltr->l2_key;
+ fs->flow_type = ETHER_FLOW;
+ ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
+ eth_broadcast_addr(m_ether->h_dest);
+ if (l2_key->vlan) {
+ struct ethtool_flow_ext *m_ext = &fs->m_ext;
+ struct ethtool_flow_ext *h_ext = &fs->h_ext;
+
+ fs->flow_type |= FLOW_EXT;
+ m_ext->vlan_tci = htons(0xfff);
+ h_ext->vlan_tci = htons(l2_key->vlan);
+ }
+ if (fltr_base->flags & BNXT_ACT_RING_DST)
+ fs->ring_cookie = fltr_base->rxq;
+ if (fltr_base->flags & BNXT_ACT_FUNC_DST)
+ fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ rcu_read_unlock();
+ return 0;
+ }
fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
BNXT_NTP_FLTR_HASH_SIZE,
fs->location);
@@ -1091,59 +1129,74 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
fkeys = &fltr->fkeys;
+ fmasks = &fltr->fmasks;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ if (fkeys->basic.ip_proto == IPPROTO_ICMP ||
+ fkeys->basic.ip_proto == IPPROTO_RAW) {
+ fs->flow_type = IP_USER_FLOW;
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ if (fkeys->basic.ip_proto == IPPROTO_ICMP)
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
+ else
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW;
+ fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
+ } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V4_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
fs->flow_type = UDP_V4_FLOW;
- else
+ } else {
goto fltr_err;
-
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
- fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
- fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
+ if (fs->flow_type == TCP_V4_FLOW ||
+ fs->flow_type == UDP_V4_FLOW) {
fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
}
} else {
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 ||
+ fkeys->basic.ip_proto == IPPROTO_RAW) {
+ fs->flow_type = IPV6_USER_FLOW;
+ if (fkeys->basic.ip_proto == IPPROTO_ICMPV6)
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
+ else
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW;
+ fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
+ } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V6_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
fs->flow_type = UDP_V6_FLOW;
- else
+ } else {
goto fltr_err;
-
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
- fkeys->addrs.v6addrs.src;
- bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
- fkeys->addrs.v6addrs.dst;
- bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst);
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+ fkeys->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
+ fmasks->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+ fkeys->addrs.v6addrs.dst;
+ *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
+ fmasks->addrs.v6addrs.dst;
+ if (fs->flow_type == TCP_V6_FLOW ||
+ fs->flow_type == UDP_V6_FLOW) {
fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
+ fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
}
}
- fs->ring_cookie = fltr->base.rxq;
+ if (fltr->base.flags & BNXT_ACT_DROP)
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fs->ring_cookie = fltr->base.rxq;
rc = 0;
fltr_err:
@@ -1152,17 +1205,78 @@ fltr_err:
return rc;
}
-#define IPV4_ALL_MASK ((__force __be32)~0)
-#define L4_PORT_ALL_MASK ((__force __be16)~0)
+static int bnxt_add_l2_cls_rule(struct bnxt *bp,
+ struct ethtool_rx_flow_spec *fs)
+{
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ struct ethhdr *h_ether = &fs->h_u.ether_spec;
+ struct ethhdr *m_ether = &fs->m_u.ether_spec;
+ struct bnxt_l2_filter *fltr;
+ struct bnxt_l2_key key;
+ u16 vnic_id;
+ u8 flags;
+ int rc;
+
+ if (BNXT_CHIP_P5_PLUS(bp))
+ return -EOPNOTSUPP;
-static bool ipv6_mask_is_full(__be32 mask[4])
+ if (!is_broadcast_ether_addr(m_ether->h_dest))
+ return -EINVAL;
+ ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
+ key.vlan = 0;
+ if (fs->flow_type & FLOW_EXT) {
+ struct ethtool_flow_ext *m_ext = &fs->m_ext;
+ struct ethtool_flow_ext *h_ext = &fs->h_ext;
+
+ if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
+ return -EINVAL;
+ key.vlan = ntohs(h_ext->vlan_tci);
+ }
+
+ if (vf) {
+ flags = BNXT_ACT_FUNC_DST;
+ vnic_id = 0xffff;
+ vf--;
+ } else {
+ flags = BNXT_ACT_RING_DST;
+ vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
+ }
+ fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
+
+ fltr->base.fw_vnic_id = vnic_id;
+ fltr->base.rxq = ring;
+ fltr->base.vf_idx = vf;
+ rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
+ if (rc)
+ bnxt_del_l2_filter(bp, fltr);
+ else
+ fs->location = fltr->base.sw_id;
+ return rc;
+}
+
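ethtool's ring_cookie packs the destination: the low 32 bits carry the ring index and the byte above them the VF number, decoded above via ethtool_get_flow_spec_ring() and ethtool_get_flow_spec_ring_vf(). A sketch using the uapi masks:

    #include <stdint.h>
    #include <stdio.h>

    /* From include/uapi/linux/ethtool.h */
    #define ETHTOOL_RX_FLOW_SPEC_RING        0x00000000FFFFFFFFULL
    #define ETHTOOL_RX_FLOW_SPEC_RING_VF     0x000000FF00000000ULL
    #define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32

    int main(void)
    {
        uint64_t cookie = (3ULL << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) | 5;
        uint32_t ring = cookie & ETHTOOL_RX_FLOW_SPEC_RING;
        unsigned int vf = (cookie & ETHTOOL_RX_FLOW_SPEC_RING_VF) >>
                          ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;

        /* vf == 0 means "no VF"; bnxt_add_l2_cls_rule() does vf-- before
         * programming the filter, matching the off-by-one encoding. */
        printf("ring=%u vf=%u\n", ring, vf); /* ring=5 vf=3 */
        return 0;
    }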
+static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
+ struct ethtool_usrip4_spec *ip_mask)
{
- return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
+ if (ip_mask->l4_4_bytes || ip_mask->tos ||
+ ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
+ ip_mask->proto != BNXT_IP_PROTO_FULL_MASK ||
+ (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP))
+ return false;
+ return true;
}
-static bool ipv6_mask_is_zero(__be32 mask[4])
+static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
+ struct ethtool_usrip6_spec *ip_mask)
{
- return !(mask[0] | mask[1] | mask[2] | mask[3]);
+ if (ip_mask->l4_4_bytes || ip_mask->tclass ||
+ ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK ||
+ (ip_spec->l4_proto != IPPROTO_RAW &&
+ ip_spec->l4_proto != IPPROTO_ICMPV6))
+ return false;
+ return true;
}
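The two verify helpers admit exactly the new IP_USER_FLOW/IPV6_USER_FLOW shape: a fully-masked L4 protocol of ICMP, ICMPv6 or RAW, and no tos/tclass or extra L4 bytes. A hedged sketch of inserting such a rule through the standard ethtool ioctl; the interface name is illustrative, the 0xff mask assumes BNXT_IP_PROTO_FULL_MASK is the full protocol byte, and the path also needs firmware reporting BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO (per the bnxt_srxclsrlins() hunk below):

    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <netinet/in.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_rxnfc nfc;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        /* Drop all inbound ICMP: proto-only match, RX_CLS_FLOW_DISC action. */
        memset(&nfc, 0, sizeof(nfc));
        nfc.cmd = ETHTOOL_SRXCLSRLINS;
        nfc.fs.flow_type = IP_USER_FLOW;
        nfc.fs.h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
        nfc.fs.h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
        nfc.fs.m_u.usr_ip4_spec.proto = 0xff;  /* full proto mask assumed */
        nfc.fs.ring_cookie = RX_CLS_FLOW_DISC; /* drop action */
        nfc.fs.location = RX_CLS_LOC_ANY;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* illustrative */
        ifr.ifr_data = (void *)&nfc;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
            perror("SIOCETHTOOL");
        close(fd);
        return 0;
    }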
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
@@ -1172,6 +1286,7 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
struct bnxt_ntuple_filter *new_fltr, *fltr;
struct bnxt_l2_filter *l2_fltr;
+ struct bnxt_flow_masks *fmasks;
u32 flow_type = fs->flow_type;
struct flow_keys *fkeys;
u32 idx;
@@ -1183,17 +1298,42 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
return -EOPNOTSUPP;
+ if (flow_type == IP_USER_FLOW) {
+ if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec))
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_type == IPV6_USER_FLOW) {
+ if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
+ &fs->m_u.usr_ip6_spec))
+ return -EOPNOTSUPP;
+ }
+
new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
if (!new_fltr)
return -ENOMEM;
- l2_fltr = bp->vnic_info[0].l2_filters[0];
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
atomic_inc(&l2_fltr->refcnt);
new_fltr->l2_fltr = l2_fltr;
+ fmasks = &new_fltr->fmasks;
fkeys = &new_fltr->fkeys;
rc = -EOPNOTSUPP;
switch (flow_type) {
+ case IP_USER_FLOW: {
+ struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;
+
+ fkeys->basic.ip_proto = ip_spec->proto;
+ fkeys->basic.n_proto = htons(ETH_P_IP);
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ fmasks->addrs.v4addrs.src = ip_mask->ip4src;
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
+ break;
+ }
case TCP_V4_FLOW:
case UDP_V4_FLOW: {
struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
@@ -1203,32 +1343,26 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
if (flow_type == UDP_V4_FLOW)
fkeys->basic.ip_proto = IPPROTO_UDP;
fkeys->basic.n_proto = htons(ETH_P_IP);
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ fmasks->addrs.v4addrs.src = ip_mask->ip4src;
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
+ fkeys->ports.src = ip_spec->psrc;
+ fmasks->ports.src = ip_mask->psrc;
+ fkeys->ports.dst = ip_spec->pdst;
+ fmasks->ports.dst = ip_mask->pdst;
+ break;
+ }
+ case IPV6_USER_FLOW: {
+ struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;
- if (ip_mask->ip4src == IPV4_ALL_MASK) {
- fkeys->addrs.v4addrs.src = ip_spec->ip4src;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
- } else if (ip_mask->ip4src) {
- goto ntuple_err;
- }
- if (ip_mask->ip4dst == IPV4_ALL_MASK) {
- fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
- } else if (ip_mask->ip4dst) {
- goto ntuple_err;
- }
-
- if (ip_mask->psrc == L4_PORT_ALL_MASK) {
- fkeys->ports.src = ip_spec->psrc;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
- } else if (ip_mask->psrc) {
- goto ntuple_err;
- }
- if (ip_mask->pdst == L4_PORT_ALL_MASK) {
- fkeys->ports.dst = ip_spec->pdst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
- } else if (ip_mask->pdst) {
- goto ntuple_err;
- }
+ fkeys->basic.ip_proto = ip_spec->l4_proto;
+ fkeys->basic.n_proto = htons(ETH_P_IPV6);
+ fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
+ fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
+ fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
+ fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
break;
}
case TCP_V6_FLOW:
@@ -1241,40 +1375,21 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
fkeys->basic.ip_proto = IPPROTO_UDP;
fkeys->basic.n_proto = htons(ETH_P_IPV6);
- if (ipv6_mask_is_full(ip_mask->ip6src)) {
- fkeys->addrs.v6addrs.src =
- *(struct in6_addr *)&ip_spec->ip6src;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
- } else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
- goto ntuple_err;
- }
- if (ipv6_mask_is_full(ip_mask->ip6dst)) {
- fkeys->addrs.v6addrs.dst =
- *(struct in6_addr *)&ip_spec->ip6dst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
- } else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
- goto ntuple_err;
- }
-
- if (ip_mask->psrc == L4_PORT_ALL_MASK) {
- fkeys->ports.src = ip_spec->psrc;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
- } else if (ip_mask->psrc) {
- goto ntuple_err;
- }
- if (ip_mask->pdst == L4_PORT_ALL_MASK) {
- fkeys->ports.dst = ip_spec->pdst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
- } else if (ip_mask->pdst) {
- goto ntuple_err;
- }
+ fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
+ fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
+ fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
+ fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
+ fkeys->ports.src = ip_spec->psrc;
+ fmasks->ports.src = ip_mask->psrc;
+ fkeys->ports.dst = ip_spec->pdst;
+ fmasks->ports.dst = ip_mask->pdst;
break;
}
default:
rc = -EOPNOTSUPP;
goto ntuple_err;
}
- if (!new_fltr->ntuple_flags)
+ if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
goto ntuple_err;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
@@ -1287,8 +1402,11 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
}
rcu_read_unlock();
- new_fltr->base.rxq = ring;
new_fltr->base.flags = BNXT_ACT_NO_AGING;
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ new_fltr->base.flags |= BNXT_ACT_DROP;
+ else
+ new_fltr->base.rxq = ring;
__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
if (!rc) {
@@ -1321,6 +1439,18 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (fs->location != RX_CLS_LOC_ANY)
return -EINVAL;
+ flow_type = fs->flow_type;
+ if ((flow_type == IP_USER_FLOW ||
+ flow_type == IPV6_USER_FLOW) &&
+ !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
+ return -EOPNOTSUPP;
+ if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
+ return -EINVAL;
+ flow_type &= ~FLOW_EXT;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
+ return bnxt_add_ntuple_cls_rule(bp, fs);
+
ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
if (BNXT_VF(bp) && vf)
@@ -1330,12 +1460,8 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (!vf && ring >= bp->rx_nr_rings)
return -EINVAL;
- flow_type = fs->flow_type;
- if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
- return -EINVAL;
- flow_type &= ~FLOW_EXT;
if (flow_type == ETHER_FLOW)
- rc = -EOPNOTSUPP;
+ rc = bnxt_add_l2_cls_rule(bp, fs);
else
rc = bnxt_add_ntuple_cls_rule(bp, fs);
return rc;
@@ -1346,11 +1472,22 @@ static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
struct ethtool_rx_flow_spec *fs = &cmd->fs;
struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
+ u32 id = fs->location;
rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE, id);
+ if (fltr_base) {
+ struct bnxt_l2_filter *l2_fltr;
+
+ l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
+ rcu_read_unlock();
+ bnxt_hwrm_l2_filter_free(bp, l2_fltr);
+ bnxt_del_l2_filter(bp, l2_fltr);
+ return 0;
+ }
fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
- BNXT_NTP_FLTR_HASH_SIZE,
- fs->location);
+ BNXT_NTP_FLTR_HASH_SIZE, id);
if (!fltr_base) {
rcu_read_unlock();
return -ENOENT;
@@ -1396,8 +1533,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
- case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
+ if (bp->rss_hash_cfg &
+ (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
@@ -1415,8 +1558,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
- case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
+ if (bp->rss_hash_cfg &
+ (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
@@ -1463,6 +1612,24 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ } else if (cmd->flow_type == AH_ESP_V4_FLOW) {
+ if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
+ !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
+ return -EINVAL;
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
+ } else if (cmd->flow_type == AH_ESP_V6_FLOW) {
+ if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
+ !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
+ return -EINVAL;
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
} else if (tuple == 4) {
return -EINVAL;
}
@@ -1521,7 +1688,7 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->ntp_fltr_count;
- cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
+ cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRLALL:
@@ -1596,7 +1763,7 @@ static int bnxt_get_rxfh(struct net_device *dev,
if (!bp->vnic_info)
return 0;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
if (rxfh->indir && bp->rss_indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
@@ -1619,8 +1786,10 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->key)
- return -EOPNOTSUPP;
+ if (rxfh->key) {
+ memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
if (rxfh->indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
@@ -1631,7 +1800,7 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (pad)
memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
}
-
+ bnxt_clear_usr_fltrs(bp, false);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -1751,31 +1920,21 @@ static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
-u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
+/* TODO: support 25GB, 40GB, 50GB with different cable type */
+void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds)
{
- u32 speed_mask = 0;
+ linkmode_zero(mode);
- /* TODO: support 25GB, 40GB, 50GB with different cable type */
- /* set the advertised speeds */
if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
- speed_mask |= ADVERTISED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
- speed_mask |= ADVERTISED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
- speed_mask |= ADVERTISED_2500baseX_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
- speed_mask |= ADVERTISED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
- speed_mask |= ADVERTISED_40000baseCR4_Full;
-
- if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
- speed_mask |= ADVERTISED_Pause;
- else if (fw_pause & BNXT_LINK_PAUSE_TX)
- speed_mask |= ADVERTISED_Asym_Pause;
- else if (fw_pause & BNXT_LINK_PAUSE_RX)
- speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-
- return speed_mask;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode);
}
enum bnxt_media_type {
@@ -2643,23 +2802,22 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
return 0;
}
-u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode)
{
u16 fw_speed_mask = 0;
- /* only support autoneg at speed 100, 1000, and 10000 */
- if (advertising & (ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
- }
- if (advertising & (ADVERTISED_1000baseT_Full |
- ADVERTISED_1000baseT_Half)) {
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
- }
- if (advertising & ADVERTISED_10000baseT_Full)
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
- if (advertising & ADVERTISED_40000baseCR4_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
return fw_speed_mask;
@@ -3884,12 +4042,13 @@ static int bnxt_set_eeprom(struct net_device *dev,
eeprom->len);
}
-static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
struct bnxt *bp = netdev_priv(dev);
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
- u32 advertising;
int rc = 0;
if (!BNXT_PHY_CFG_ABLE(bp))
@@ -3899,7 +4058,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
- advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ _bnxt_fw_to_linkmode(advertising, link_info->advertising);
if (!edata->eee_enabled)
goto eee_ok;
@@ -3919,16 +4078,15 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
edata->tx_lpi_timer = eee->tx_lpi_timer;
}
}
- if (!edata->advertised) {
- edata->advertised = advertising & eee->supported;
- } else if (edata->advertised & ~advertising) {
- netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
- edata->advertised, advertising);
+ if (linkmode_empty(edata->advertised)) {
+ linkmode_and(edata->advertised, advertising, eee->supported);
+ } else if (linkmode_andnot(tmp, edata->advertised, advertising)) {
+ netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n");
rc = -EINVAL;
goto eee_exit;
}
- eee->advertised = edata->advertised;
+ linkmode_copy(eee->advertised, edata->advertised);
eee->tx_lpi_enabled = edata->tx_lpi_enabled;
eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
@@ -3942,7 +4100,7 @@ eee_exit:
return rc;
}
-static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3954,12 +4112,12 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
/* Preserve tx_lpi_timer so that the last value will be used
* by default when it is re-enabled.
*/
- edata->advertised = 0;
+ linkmode_zero(edata->advertised);
edata->tx_lpi_enabled = 0;
}
if (!bp->eee.eee_active)
- edata->lp_advertised = 0;
+ linkmode_zero(edata->lp_advertised);
return 0;
}
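
The bnxt changes above retire the u32 ADVERTISED_* masks in favor of linkmode bitmaps, so the EEE check "advertised must be a subset of autoneg" becomes a single linkmode_andnot() call. Below is a minimal standalone sketch of that subset test; the mode_* helpers are local stand-ins for the kernel's <linux/linkmode.h> API, and the 128-bit width is an assumption for illustration.

    /* Subset check in the style of linkmode_andnot(tmp, req, adv):
     * returns true when 'req' contains a bit that 'adv' lacks. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define NBITS  128                      /* assumed bitmap width */
    #define NWORDS (NBITS / (8 * (int)sizeof(unsigned long)))

    static void mode_zero(unsigned long *m)
    {
        memset(m, 0, NWORDS * sizeof(*m));
    }

    static void mode_set_bit(int b, unsigned long *m)
    {
        m[b / (8 * sizeof(unsigned long))] |= 1UL << (b % (8 * sizeof(unsigned long)));
    }

    static bool mode_andnot(unsigned long *dst, const unsigned long *a,
                            const unsigned long *b)
    {
        unsigned long any = 0;
        int i;

        for (i = 0; i < NWORDS; i++) {
            dst[i] = a[i] & ~b[i];
            any |= dst[i];
        }
        return any != 0;
    }

    int main(void)
    {
        unsigned long req[NWORDS], adv[NWORDS], tmp[NWORDS];

        mode_zero(req);
        mode_zero(adv);
        mode_set_bit(5, adv);   /* pretend autoneg advertises one mode */
        mode_set_bit(5, req);   /* EEE requests it: still a subset */
        mode_set_bit(12, req);  /* ...plus a mode autoneg does not cover */

        if (mode_andnot(tmp, req, adv))
            puts("reject: EEE advertisement is not a subset of autoneg");
        return 0;
    }
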
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index a8ecef8ab82c..e2ee030237d4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -43,12 +43,14 @@ struct bnxt_led_cfg {
#define BNXT_PXP_REG_LEN 0x3110
+#define BNXT_IP_PROTO_FULL_MASK 0xFF
+
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
-u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
+void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds);
u32 bnxt_fw_to_ethtool_speed(u16);
-u16 bnxt_get_fw_auto_link_speeds(u32);
+u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode);
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
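
BNXT_IP_PROTO_FULL_MASK defined above backs the new IP_USER_FLOW validation: a rule is accepted only when the protocol field is fully masked and names a protocol the firmware can match. A standalone sketch of that predicate follows; usrip4_spec is a local stand-in for struct ethtool_usrip4_spec, and the ip_ver check from the in-tree helper is omitted for brevity.

    #include <netinet/in.h>     /* IPPROTO_ICMP, IPPROTO_RAW */
    #include <stdbool.h>
    #include <stdint.h>

    #define IP_PROTO_FULL_MASK 0xFF

    struct usrip4_spec {        /* stand-in for struct ethtool_usrip4_spec */
        uint32_t l4_4_bytes;
        uint8_t  tos;
        uint8_t  proto;
    };

    static bool verify_ip4_user_flow(const struct usrip4_spec *spec,
                                     const struct usrip4_spec *mask)
    {
        /* Reject what the hardware cannot express: extra L4 bytes,
         * TOS matching, or a partially masked protocol. */
        if (mask->l4_4_bytes || mask->tos ||
            mask->proto != IP_PROTO_FULL_MASK ||
            (spec->proto != IPPROTO_RAW && spec->proto != IPPROTO_ICMP))
            return false;
        return true;
    }

    int main(void)
    {
        struct usrip4_spec spec = { .proto = IPPROTO_ICMP };
        struct usrip4_spec mask = { .proto = IP_PROTO_FULL_MASK };

        return verify_ip4_user_flow(&spec, &mask) ? 0 : 1; /* accepted */
    }
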
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2d7ae71287b1..7396e2823e32 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1313,14 +1313,13 @@ void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
}
priv->eee.eee_enabled = enable;
- priv->eee.eee_active = enable;
priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}
-static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_eee *p = &priv->eee;
+ struct ethtool_keee *p = &priv->eee;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1328,18 +1327,17 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
if (!dev->phydev)
return -ENODEV;
- e->eee_enabled = p->eee_enabled;
- e->eee_active = p->eee_active;
e->tx_lpi_enabled = p->tx_lpi_enabled;
e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
return phy_ethtool_get_eee(dev->phydev, e);
}
-static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_eee *p = &priv->eee;
+ struct ethtool_keee *p = &priv->eee;
+ bool active;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1352,9 +1350,9 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
if (!p->eee_enabled) {
bcmgenet_eee_enable_set(dev, false, false);
} else {
- p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
+ active = phy_init_eee(dev->phydev, false) >= 0;
bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
- bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
+ bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled);
}
return phy_ethtool_set_eee(dev->phydev, e);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1985c0ec4da2..7523b60b3c1c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -645,7 +645,7 @@ struct bcmgenet_priv {
struct bcmgenet_mib_counters mib;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define GENET_IO_MACRO(name, offset) \
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 97ea76d443ab..9ada89355747 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -30,6 +30,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
u32 reg, cmd_bits = 0;
+ bool active;
/* speed */
if (phydev->speed == SPEED_1000)
@@ -88,9 +89,9 @@ static void bcmgenet_mac_config(struct net_device *dev)
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+ active = phy_init_eee(phydev, 0) >= 0;
bcmgenet_eee_enable_set(dev,
- priv->eee.eee_enabled && priv->eee.eee_active,
+ priv->eee.eee_enabled && active,
priv->eee.tx_lpi_enabled);
}
@@ -475,6 +476,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
ppd.wait_func = bcmgenet_mii_wait;
ppd.wait_func_data = priv;
ppd.bus_name = "bcmgenet MII bus";
+ /* Pass a reference to our "main" clock which is used for MDIO
+ * transfers
+ */
+ ppd.clk = priv->clk;
/* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD
* and is 2 * 32-bits word long, 8 bytes total.
@@ -673,7 +678,5 @@ void bcmgenet_mii_exit(struct net_device *dev)
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
of_node_put(priv->phy_dn);
- clk_prepare_enable(priv->clk);
platform_device_unregister(priv->mii_pdev);
- clk_disable_unprepare(priv->clk);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 04964bbe08cf..eee759054aad 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2338,10 +2338,10 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
tg3_phy_toggle_auxctl_smdsp(tp, false);
}
-static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
+static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
{
u32 val;
- struct ethtool_eee *dest = &tp->eee;
+ struct ethtool_keee *dest = &tp->eee;
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
return;
@@ -2362,13 +2362,13 @@ static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
/* Pull lp advertised settings */
if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
return;
- dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
/* Pull advertised and eee_enabled settings */
if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
return;
dest->eee_enabled = !!val;
- dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
/* Pull tx_lpi_enabled */
val = tr32(TG3_CPMU_EEE_MODE);
@@ -4354,23 +4354,12 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (!err) {
u32 err2;
- val = 0;
- /* Advertise 100-BaseTX EEE ability */
- if (advertise & ADVERTISED_100baseT_Full)
- val |= MDIO_AN_EEE_ADV_100TX;
- /* Advertise 1000-BaseT EEE ability */
- if (advertise & ADVERTISED_1000baseT_Full)
- val |= MDIO_AN_EEE_ADV_1000T;
-
- if (!tp->eee.eee_enabled) {
+ if (!tp->eee.eee_enabled)
val = 0;
- tp->eee.advertised = 0;
- } else {
- tp->eee.advertised = advertise &
- (ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full);
- }
+ else
+ val = ethtool_adv_to_mmd_eee_adv_t(advertise);
+ mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
if (err)
val = 0;
@@ -4618,7 +4607,7 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
- struct ethtool_eee eee;
+ struct ethtool_keee eee = {};
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
return true;
@@ -4626,13 +4615,13 @@ static bool tg3_phy_eee_config_ok(struct tg3 *tp)
tg3_eee_pull_config(tp, &eee);
if (tp->eee.eee_enabled) {
- if (tp->eee.advertised != eee.advertised ||
+ if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
return false;
} else {
/* EEE is disabled but we're advertising */
- if (eee.advertised)
+ if (!linkmode_empty(eee.advertised))
return false;
}
@@ -14180,7 +14169,7 @@ static int tg3_set_coalesce(struct net_device *dev,
return 0;
}
-static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct tg3 *tp = netdev_priv(dev);
@@ -14189,7 +14178,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- if (edata->advertised != tp->eee.advertised) {
+ if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
netdev_warn(tp->dev,
"Direct manipulation of EEE advertisement is not supported\n");
return -EINVAL;
@@ -14202,7 +14191,9 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EINVAL;
}
- tp->eee = *edata;
+ tp->eee.eee_enabled = edata->eee_enabled;
+ tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
+ tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
tg3_warn_mgmt_link_flap(tp);
@@ -14217,7 +14208,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct tg3 *tp = netdev_priv(dev);
@@ -15655,10 +15646,13 @@ static int tg3_phy_probe(struct tg3 *tp)
tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
- tp->eee.supported = SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full;
- tp->eee.advertised = ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full;
+ linkmode_zero(tp->eee.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ tp->eee.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ tp->eee.supported);
+ linkmode_copy(tp->eee.advertised, tp->eee.supported);
+
tp->eee.eee_enabled = 1;
tp->eee.tx_lpi_enabled = 1;
tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 5016475e5005..cf1b2b123c7e 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3419,7 +3419,7 @@ struct tg3 {
unsigned int irq_cnt;
struct ethtool_coalesce coal;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
/* firmware info */
const char *fw_needed;
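
tg3's EEE state now lives in struct ethtool_keee, whose advertisement fields are linkmode bitmaps filled from the MDIO EEE registers by helpers such as mii_eee_cap1_mod_linkmode_t(). Below is a standalone sketch of that register-to-mode translation; the bit values follow the 802.3 EEE advertisement layout (0x0002 = 100TX, 0x0004 = 1000T) and the struct is purely illustrative.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EEE_ADV_100TX 0x0002    /* mirrors MDIO_AN_EEE_ADV_100TX */
    #define EEE_ADV_1000T 0x0004    /* mirrors MDIO_AN_EEE_ADV_1000T */

    struct eee_modes {
        bool adv_100baseT_Full;
        bool adv_1000baseT_Full;
    };

    static void eee_cap1_to_modes(uint16_t reg, struct eee_modes *m)
    {
        /* each register bit maps 1:1 onto a link mode bit */
        m->adv_100baseT_Full  = !!(reg & EEE_ADV_100TX);
        m->adv_1000baseT_Full = !!(reg & EEE_ADV_1000T);
    }

    int main(void)
    {
        struct eee_modes m;

        eee_cap1_to_modes(EEE_ADV_100TX | EEE_ADV_1000T, &m);
        printf("100TX=%d 1000T=%d\n", m.adv_100baseT_Full, m.adv_1000baseT_Full);
        return 0;
    }
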
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 31191b520b58..c32174484a96 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1091,10 +1091,10 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
* Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
*/
static void
-bnad_tx_cleanup(struct delayed_work *work)
+bnad_tx_cleanup(struct work_struct *work)
{
struct bnad_tx_info *tx_info =
- container_of(work, struct bnad_tx_info, tx_cleanup_work);
+ container_of(work, struct bnad_tx_info, tx_cleanup_work.work);
struct bnad *bnad = NULL;
struct bna_tcb *tcb;
unsigned long flags;
@@ -1170,7 +1170,7 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
* Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
*/
static void
-bnad_rx_cleanup(void *work)
+bnad_rx_cleanup(struct work_struct *work)
{
struct bnad_rx_info *rx_info =
container_of(work, struct bnad_rx_info, rx_cleanup_work);
@@ -1991,8 +1991,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
}
tx_info->tx = tx;
- INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
- (work_func_t)bnad_tx_cleanup);
+ INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup);
/* Register ISR for the Tx object */
if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -2248,8 +2247,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
rx_info->rx = rx;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- INIT_WORK(&rx_info->rx_cleanup_work,
- (work_func_t)(bnad_rx_cleanup));
+ INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup);
/*
* Init NAPI, so that state is set to NAPI_STATE_SCHED,
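
The bnad fix above removes the work_func_t casts by giving both cleanup handlers the canonical struct work_struct * signature and recovering the enclosing object with container_of() (for the delayed-work case via the nested .work member). A portable sketch of that recovery, with a local container_of and stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };           /* stand-in for work_struct */

    struct tx_info {
        int id;
        struct work cleanup_work;
    };

    static void tx_cleanup(struct work *w)
    {
        /* recover the outer object from the embedded member */
        struct tx_info *tx = container_of(w, struct tx_info, cleanup_work);

        printf("cleaning tx %d\n", tx->id);
    }

    int main(void)
    {
        struct tx_info tx = { .id = 3 };

        tx_cleanup(&tx.cleanup_work);       /* queue_work() stand-in */
        return 0;
    }
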
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b5ff2e1a9975..49d5808b7d11 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -804,20 +804,6 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
}
/**
- * calc_tx_descs - calculate the number of Tx descriptors for a packet
- * @skb: the packet
- * @chip_ver: chip version
- *
- * Returns the number of Tx descriptors needed for the given Ethernet
- * packet, including the needed WR and CPL headers.
- */
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
- unsigned int chip_ver)
-{
- return flits_to_desc(calc_tx_flits(skb, chip_ver));
-}
-
-/**
* cxgb4_write_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @q: the Tx queue we are writing into
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 37bd38d772e8..d266a87297a5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -872,7 +872,7 @@ error:
return NETDEV_TX_OK;
}
-/* dev_base_lock rwlock held, nominally process context */
+/* rcu_read_lock potentially held, nominally process context */
static void enic_get_stats(struct net_device *netdev,
struct rtnl_link_stats64 *net_stats)
{
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c
index 20fcb20b42ed..66b577835338 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c
@@ -49,7 +49,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
tlv->type = htons(type);
tlv->length = htons(length);
- memcpy(tlv->value, value, length);
+ unsafe_memcpy(tlv->value, value, length,
+ /* Flexible array of flexible arrays */);
vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
vp->length = htonl(ntohl(vp->length) +
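
The unsafe_memcpy() annotation above marks a copy whose destination is a flexible array nested inside another flexible-array structure, a layout FORTIFY_SOURCE cannot bound at compile time. A standalone sketch of the same TLV layout, where the destination size is established by the allocation and a plain memcpy stands in for the annotated kernel call:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct tlv {
        uint16_t type;
        uint16_t length;
        uint8_t  value[];       /* flexible array member */
    };

    static struct tlv *tlv_new(uint16_t type, const void *val, uint16_t len)
    {
        struct tlv *t = malloc(sizeof(*t) + len);

        if (!t)
            return NULL;
        t->type = type;
        t->length = len;
        memcpy(t->value, val, len); /* bounded by the malloc above */
        return t;
    }

    int main(void)
    {
        uint32_t v = 42;
        struct tlv *t = tlv_new(1, &v, sizeof(v));

        if (!t)
            return 1;
        free(t);
        return 0;
    }
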
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index c2c5c589a5e3..44af1d13d931 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -590,5 +590,6 @@ module_pci_driver(pci_driver);
module_param(polling_frequency, long, 0444);
MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
+MODULE_DESCRIPTION("Beckhoff CX5020 EtherCAT Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 9aeff2b37a61..4b15af6b7122 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -229,8 +229,10 @@ static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
* would delay a working loopback anyway, let's ensure that loopback
* is working immediately by setting link mode directly
*/
- if (!retval && enable)
+ if (!retval && enable) {
+ netif_carrier_on(adapter->netdev);
tsnep_set_link_mode(adapter);
+ }
return retval;
}
@@ -238,7 +240,7 @@ static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
struct phy_device *phydev;
- struct ethtool_eee ethtool_eee;
+ struct ethtool_keee ethtool_keee;
int retval;
retval = phy_connect_direct(adapter->netdev, adapter->phydev,
@@ -257,8 +259,8 @@ static int tsnep_phy_open(struct tsnep_adapter *adapter)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
/* disable EEE autoneg, EEE not supported by TSNEP */
- memset(&ethtool_eee, 0, sizeof(ethtool_eee));
- phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);
+ memset(&ethtool_keee, 0, sizeof(ethtool_keee));
+ phy_ethtool_set_eee(adapter->phydev, &ethtool_keee);
adapter->phydev->irq = PHY_MAC_INTERRUPT;
phy_start(adapter->phydev);
@@ -719,17 +721,25 @@ static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
struct xdp_buff *xdp,
- struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx,
+ bool zc)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
bool xmit;
+ u32 type;
if (unlikely(!xdpf))
return false;
+ /* no page pool for zero copy */
+ if (zc)
+ type = TSNEP_TX_TYPE_XDP_NDO;
+ else
+ type = TSNEP_TX_TYPE_XDP_TX;
+
__netif_tx_lock(tx_nq, smp_processor_id());
- xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);
+ xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type);
/* Avoid transmit queue timeout since we share it with the slow path */
if (xmit)
@@ -1258,6 +1268,14 @@ static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
return desc_refilled;
}
+static void tsnep_xsk_rx_need_wakeup(struct tsnep_rx *rx, int desc_available)
+{
+ if (desc_available)
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+}
+
static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
struct xdp_buff *xdp, int *status,
struct netdev_queue *tx_nq, struct tsnep_tx *tx)
@@ -1273,7 +1291,7 @@ static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
case XDP_PASS:
return false;
case XDP_TX:
- if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false))
goto out_failure;
*status |= TSNEP_XDP_TX;
return true;
@@ -1323,7 +1341,7 @@ static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
case XDP_PASS:
return false;
case XDP_TX:
- if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true))
goto out_failure;
*status |= TSNEP_XDP_TX;
return true;
@@ -1619,10 +1637,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
if (xsk_uses_need_wakeup(rx->xsk_pool)) {
- if (desc_available)
- xsk_set_rx_need_wakeup(rx->xsk_pool);
- else
- xsk_clear_rx_need_wakeup(rx->xsk_pool);
+ tsnep_xsk_rx_need_wakeup(rx, desc_available);
return done;
}
@@ -1767,14 +1782,8 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
* first polling would be too late as need wakeup signalisation would
* be delayed for an indefinite time
*/
- if (xsk_uses_need_wakeup(rx->xsk_pool)) {
- int desc_available = tsnep_rx_desc_available(rx);
-
- if (desc_available)
- xsk_set_rx_need_wakeup(rx->xsk_pool);
- else
- xsk_clear_rx_need_wakeup(rx->xsk_pool);
- }
+ if (xsk_uses_need_wakeup(rx->xsk_pool))
+ tsnep_xsk_rx_need_wakeup(rx, tsnep_rx_desc_available(rx));
}
static bool tsnep_pending(struct tsnep_queue *queue)
@@ -2562,8 +2571,7 @@ static int tsnep_probe(struct platform_device *pdev)
mutex_init(&adapter->rxnfc_lock);
INIT_LIST_HEAD(&adapter->rxnfc_rules);
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adapter->addr = devm_ioremap_resource(&pdev->dev, io);
+ adapter->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &io);
if (IS_ERR(adapter->addr))
return PTR_ERR(adapter->addr);
netdev->mem_start = io->start;
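
Among the tsnep changes, the duplicated AF_XDP need-wakeup branch is factored into tsnep_xsk_rx_need_wakeup(): the wakeup flag is set while descriptors remain unfilled and cleared once the ring is fully posted. A minimal standalone shape of that helper, with a stub replacing the xsk_{set,clear}_rx_need_wakeup() calls:

    #include <stdbool.h>
    #include <stdio.h>

    static void set_need_wakeup(bool on)    /* stub for the xsk pool calls */
    {
        printf("need_wakeup=%d\n", on);
    }

    static void rx_need_wakeup(int desc_available)
    {
        /* descriptors still unfilled: ask user space to wake the
         * driver once it supplies fresh buffers */
        set_need_wakeup(desc_available != 0);
    }

    int main(void)
    {
        rx_need_wakeup(16);  /* refill incomplete: set the flag */
        rx_need_wakeup(0);   /* ring fully refilled: clear it */
        return 0;
    }
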
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index bfdbdab443ae..9f07f4947b63 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2402,7 +2402,7 @@ static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
static int enetc_phylink_connect(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct ethtool_eee edata;
+ struct ethtool_keee edata;
int err;
if (!priv->phylink) {
@@ -2418,7 +2418,7 @@ static int enetc_phylink_connect(struct net_device *ndev)
}
/* disable EEE autoneg, until ENETC driver supports it */
- memset(&edata, 0, sizeof(struct ethtool_eee));
+ memset(&edata, 0, sizeof(struct ethtool_keee));
phylink_ethtool_set_eee(priv->phylink, &edata);
phylink_start(priv->phylink);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index a8fbcada6b01..a19cb2a786fd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -672,7 +672,7 @@ struct fec_enet_private {
unsigned int itr_clk_rate;
/* tx lpi eee mode */
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
unsigned int clk_ref_rate;
/* ptp clock period in ns*/
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 432523b2c789..d7693fdf640d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -85,8 +85,6 @@ static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
-/* Pause frame feild and FIFO threshold */
-#define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84
#define FEC_ENET_RSFL_V 16
#define FEC_ENET_RAEM_V 0x8
@@ -240,8 +238,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define PKT_MINBUF_SIZE 64
/* FEC receive acceleration */
-#define FEC_RACC_IPDIS (1 << 1)
-#define FEC_RACC_PRODIS (1 << 2)
+#define FEC_RACC_IPDIS BIT(1)
+#define FEC_RACC_PRODIS BIT(2)
#define FEC_RACC_SHIFT16 BIT(7)
#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
@@ -273,8 +271,23 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
/* FEC ECR bits definition */
-#define FEC_ECR_MAGICEN (1 << 2)
-#define FEC_ECR_SLEEP (1 << 3)
+#define FEC_ECR_RESET BIT(0)
+#define FEC_ECR_ETHEREN BIT(1)
+#define FEC_ECR_MAGICEN BIT(2)
+#define FEC_ECR_SLEEP BIT(3)
+#define FEC_ECR_EN1588 BIT(4)
+#define FEC_ECR_BYTESWP BIT(8)
+/* FEC RCR bits definition */
+#define FEC_RCR_LOOP BIT(0)
+#define FEC_RCR_HALFDPX BIT(1)
+#define FEC_RCR_MII BIT(2)
+#define FEC_RCR_PROMISC BIT(3)
+#define FEC_RCR_BC_REJ BIT(4)
+#define FEC_RCR_FLOWCTL BIT(5)
+#define FEC_RCR_RMII BIT(8)
+#define FEC_RCR_10BASET BIT(9)
+/* TX WMARK bits */
+#define FEC_TXWMRK_STRFWD BIT(8)
#define FEC_MII_TIMEOUT 30000 /* us */
@@ -1062,7 +1075,7 @@ fec_restart(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
- u32 ecntl = 0x2; /* ETHEREN */
+ u32 ecntl = FEC_ECR_ETHEREN;
/* Whack a reset. We should wait for this.
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC
@@ -1137,18 +1150,18 @@ fec_restart(struct net_device *ndev)
fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
rcntl |= (1 << 6);
else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
- rcntl |= (1 << 8);
+ rcntl |= FEC_RCR_RMII;
else
- rcntl &= ~(1 << 8);
+ rcntl &= ~FEC_RCR_RMII;
/* 1G, 100M or 10M */
if (ndev->phydev) {
if (ndev->phydev->speed == SPEED_1000)
ecntl |= (1 << 5);
else if (ndev->phydev->speed == SPEED_100)
- rcntl &= ~(1 << 9);
+ rcntl &= ~FEC_RCR_10BASET;
else
- rcntl |= (1 << 9);
+ rcntl |= FEC_RCR_10BASET;
}
} else {
#ifdef FEC_MIIGSK_ENR
@@ -1181,7 +1194,7 @@ fec_restart(struct net_device *ndev)
if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
ndev->phydev && ndev->phydev->pause)) {
- rcntl |= FEC_ENET_FCE;
+ rcntl |= FEC_RCR_FLOWCTL;
/* set FIFO threshold parameter to reduce overrun */
writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
@@ -1192,7 +1205,7 @@ fec_restart(struct net_device *ndev)
/* OPD */
writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
} else {
- rcntl &= ~FEC_ENET_FCE;
+ rcntl &= ~FEC_RCR_FLOWCTL;
}
#endif /* !defined(CONFIG_M5272) */
@@ -1207,13 +1220,13 @@ fec_restart(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
- ecntl |= (1 << 8);
+ ecntl |= FEC_ECR_BYTESWP;
/* enable ENET store and forward mode */
- writel(1 << 8, fep->hwp + FEC_X_WMRK);
+ writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
}
if (fep->bufdesc_ex)
- ecntl |= (1 << 4);
+ ecntl |= FEC_ECR_EN1588;
if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
fep->rgmii_txc_dly)
@@ -1312,7 +1325,7 @@ static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
u32 val;
/* We cannot expect a graceful transmit stop without link !!! */
@@ -1331,7 +1344,7 @@ fec_stop(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
writel(0, fep->hwp + FEC_ECNTRL);
} else {
- writel(1, fep->hwp + FEC_ECNTRL);
+ writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
udelay(10);
}
} else {
@@ -1345,12 +1358,11 @@ fec_stop(struct net_device *ndev)
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
- writel(2, fep->hwp + FEC_ECNTRL);
+ writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
}
-
static void
fec_timeout(struct net_device *ndev, unsigned int txqueue)
{
@@ -2005,6 +2017,37 @@ static int fec_get_mac(struct net_device *ndev)
/*
* Phy section
*/
+
+/* LPI Sleep Ts count based on tx clk (clk_ref).
+ * The lpi sleep cnt value = X us / (cycle_ns).
+ */
+static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return us * (fep->clk_ref_rate / 1000) / 1000;
+}
+
+static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct ethtool_keee *p = &fep->eee;
+ unsigned int sleep_cycle, wake_cycle;
+
+ if (enable) {
+ sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
+ wake_cycle = sleep_cycle;
+ } else {
+ sleep_cycle = 0;
+ wake_cycle = 0;
+ }
+
+ writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
+ writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
+
+ return 0;
+}
+
static void fec_enet_adjust_link(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -2044,6 +2087,8 @@ static void fec_enet_adjust_link(struct net_device *ndev)
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
+ if (fep->quirks & FEC_QUIRK_HAS_EEE)
+ fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
} else {
if (fep->link) {
netif_stop_queue(ndev);
@@ -2403,6 +2448,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
else
phy_set_max_speed(phy_dev, 100);
+ if (fep->quirks & FEC_QUIRK_HAS_EEE)
+ phy_support_eee(phy_dev);
+
fep->link = 0;
fep->full_duplex = 0;
@@ -3109,50 +3157,11 @@ static int fec_enet_set_coalesce(struct net_device *ndev,
return 0;
}
-/* LPI Sleep Ts count base on tx clk (clk_ref).
- * The lpi sleep cnt value = X us / (cycle_ns).
- */
-static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
-
- return us * (fep->clk_ref_rate / 1000) / 1000;
-}
-
-static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
- unsigned int sleep_cycle, wake_cycle;
- int ret = 0;
-
- if (enable) {
- ret = phy_init_eee(ndev->phydev, false);
- if (ret)
- return ret;
-
- sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
- wake_cycle = sleep_cycle;
- } else {
- sleep_cycle = 0;
- wake_cycle = 0;
- }
-
- p->tx_lpi_enabled = enable;
- p->eee_enabled = enable;
- p->eee_active = enable;
-
- writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
- writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
-
- return 0;
-}
-
static int
-fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
+ struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3160,20 +3169,16 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
if (!netif_running(ndev))
return -ENETDOWN;
- edata->eee_enabled = p->eee_enabled;
- edata->eee_active = p->eee_active;
edata->tx_lpi_timer = p->tx_lpi_timer;
- edata->tx_lpi_enabled = p->tx_lpi_enabled;
return phy_ethtool_get_eee(ndev->phydev, edata);
}
static int
-fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct ethtool_eee *p = &fep->eee;
- int ret = 0;
+ struct ethtool_keee *p = &fep->eee;
if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
return -EOPNOTSUPP;
@@ -3183,15 +3188,6 @@ fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
p->tx_lpi_timer = edata->tx_lpi_timer;
- if (!edata->eee_enabled || !edata->tx_lpi_enabled ||
- !edata->tx_lpi_timer)
- ret = fec_enet_eee_mode_set(ndev, false);
- else
- ret = fec_enet_eee_mode_set(ndev, true);
-
- if (ret)
- return ret;
-
return phy_ethtool_set_eee(ndev->phydev, edata);
}
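
The relocated fec_enet_us_to_tx_cycle() converts the tx_lpi_timer from microseconds into clk_ref cycles as us * (rate / 1000) / 1000, dividing the clock rate first so the 32-bit intermediate product stays small. A standalone check of that arithmetic, assuming a 125 MHz reference clock for the example:

    #include <stdio.h>

    static unsigned int us_to_tx_cycle(unsigned int us, unsigned int clk_ref_rate)
    {
        return us * (clk_ref_rate / 1000) / 1000;
    }

    int main(void)
    {
        /* 1 ms LPI timer at 125 MHz -> 125000 cycles */
        printf("%u cycles\n", us_to_tx_cycle(1000, 125000000));
        return 0;
    }
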
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 9ba15d3183d7..758535adc9ff 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1073,6 +1073,14 @@ int memac_initialization(struct mac_device *mac_dev,
unsigned long capabilities;
unsigned long *supported;
+ /* The internal connection to the serdes is XGMII, but this isn't
+ * really correct for the phy mode (which is the external connection).
+ * However, this is how all older device trees say that they want
+ * 10GBASE-R (aka XFI), so just convert it for them.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
+
mac_dev->phylink_ops = &memac_mac_ops;
mac_dev->set_promisc = memac_set_promiscuous;
mac_dev->change_addr = memac_modify_mac_address;
@@ -1139,7 +1147,7 @@ int memac_initialization(struct mac_device *mac_dev,
* (and therefore that xfi_pcs cannot be set). If we are defaulting to
* XGMII, assume this is for XFI. Otherwise, assume it is for SGMII.
*/
- if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_10GBASER)
memac->xfi_pcs = pcs;
else
memac->sgmii_pcs = pcs;
@@ -1153,14 +1161,6 @@ int memac_initialization(struct mac_device *mac_dev,
goto _return_fm_mac_free;
}
- /* The internal connection to the serdes is XGMII, but this isn't
- * really correct for the phy mode (which is the external connection).
- * However, this is how all older device trees say that they want
- * 10GBASE-R (aka XFI), so just convert it for them.
- */
- if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
- mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
-
/* TODO: The following interface modes are supported by (some) hardware
* but not by this driver:
* - 1000BASE-KX
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e3dfbd7a4236..a811238c018d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1649,7 +1649,7 @@ static int init_phy(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
phy_interface_t interface = priv->interface;
struct phy_device *phydev;
- struct ethtool_eee edata;
+ struct ethtool_keee edata;
linkmode_set_bit_array(phy_10_100_features_array,
ARRAY_SIZE(phy_10_100_features_array),
@@ -1681,7 +1681,7 @@ static int init_phy(struct net_device *dev)
phy_support_asym_pause(phydev);
/* disable EEE autoneg, EEE not supported by eTSEC */
- memset(&edata, 0, sizeof(struct ethtool_eee));
+ memset(&edata, 0, sizeof(struct ethtool_keee));
phy_ethtool_set_eee(phydev, &edata);
return 0;
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index b80349154604..4814c96d5fe7 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -9,6 +9,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
@@ -51,12 +52,16 @@
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+#define GVE_MAX_RX_BUFFER_SIZE 4096
+
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
+#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
+
#define DQO_QPL_DEFAULT_TX_PAGES 512
#define DQO_QPL_DEFAULT_RX_PAGES 2048
@@ -150,6 +155,11 @@ struct gve_rx_compl_queue_dqo {
u32 mask; /* Mask for indices to the size of the ring */
};
+struct gve_header_buf {
+ u8 *data;
+ dma_addr_t addr;
+};
+
/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
/* The page posted to HW. */
@@ -252,19 +262,26 @@ struct gve_rx_ring {
/* track number of used buffers */
u16 used_buf_states_cnt;
+
+ /* Address info of the buffers for header-split */
+ struct gve_header_buf hdr_bufs;
} dqo;
};
u64 rbytes; /* free-running bytes received */
+ u64 rx_hsplit_bytes; /* free-running header bytes received */
u64 rpackets; /* free-running packets received */
u32 cnt; /* free-running total number of completed packets */
u32 fill_cnt; /* free-running total number of descs and buffs posted */
u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
+ u64 rx_hsplit_pkt; /* free-running packets with headers split */
u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
u64 rx_copied_pkt; /* free-running total number of copied packets */
u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
+	/* free-running count of unsplit packets due to header buffer overflow or zero hdr_len */
+ u64 rx_hsplit_unsplit_pkt;
u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
@@ -622,6 +639,56 @@ struct gve_ptype_lut {
struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
+/* Parameters for allocating queue page lists */
+struct gve_qpls_alloc_cfg {
+ struct gve_qpl_config *qpl_cfg;
+ struct gve_queue_config *tx_cfg;
+ struct gve_queue_config *rx_cfg;
+
+ u16 num_xdp_queues;
+ bool raw_addressing;
+ bool is_gqi;
+
+ /* Allocated resources are returned here */
+ struct gve_queue_page_list *qpls;
+};
+
+/* Parameters for allocating resources for tx queues */
+struct gve_tx_alloc_rings_cfg {
+ struct gve_queue_config *qcfg;
+
+ /* qpls and qpl_cfg must already be allocated */
+ struct gve_queue_page_list *qpls;
+ struct gve_qpl_config *qpl_cfg;
+
+ u16 ring_size;
+ u16 start_idx;
+ u16 num_rings;
+ bool raw_addressing;
+
+ /* Allocated resources are returned here */
+ struct gve_tx_ring *tx;
+};
+
+/* Parameters for allocating resources for rx queues */
+struct gve_rx_alloc_rings_cfg {
+ /* tx config is also needed to determine QPL ids */
+ struct gve_queue_config *qcfg;
+ struct gve_queue_config *qcfg_tx;
+
+ /* qpls and qpl_cfg must already be allocated */
+ struct gve_queue_page_list *qpls;
+ struct gve_qpl_config *qpl_cfg;
+
+ u16 ring_size;
+ u16 packet_buffer_size;
+ bool raw_addressing;
+ bool enable_header_split;
+
+ /* Allocated resources are returned here */
+ struct gve_rx_ring *rx;
+};
+
/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
* when the entire configure_device_resources command is zeroed out and the
* queue_format is not specified.
@@ -729,13 +796,17 @@ struct gve_priv {
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
- int data_buffer_size_dqo;
+ u16 data_buffer_size_dqo;
+ u16 max_rx_buffer_size; /* device limit */
enum gve_queue_format queue_format;
/* Interrupt coalescing settings */
u32 tx_coalesce_usecs;
u32 rx_coalesce_usecs;
+
+ u16 header_buf_size; /* device configured, header-split supported if non-zero */
+ bool header_split_enabled; /* True if the header split is enabled by the user */
};
enum gve_service_task_flags_bit {
@@ -917,14 +988,14 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
priv->queue_format == GVE_DQO_QPL_FORMAT;
}
-/* Returns the number of tx queue page lists
- */
-static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
+/* Returns the number of tx queue page lists */
+static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
+ int num_xdp_queues,
+ bool is_qpl)
{
- if (!gve_is_qpl(priv))
+ if (!is_qpl)
return 0;
-
- return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+ return tx_cfg->num_queues + num_xdp_queues;
}
/* Returns the number of XDP tx queue page lists
@@ -937,14 +1008,13 @@ static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
return priv->num_xdp_queues;
}
-/* Returns the number of rx queue page lists
- */
-static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
+/* Returns the number of rx queue page lists */
+static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
+ bool is_qpl)
{
- if (!gve_is_qpl(priv))
+ if (!is_qpl)
return 0;
-
- return priv->rx_cfg.num_queues;
+ return rx_cfg->num_queues;
}
static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
@@ -957,59 +1027,59 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
+/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
+static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
+{
+ return tx_cfg->max_queues + rx_qid;
+}
+
static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{
return gve_tx_qpl_id(priv, 0);
}
-static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
+/* Returns the index into priv->qpls where the first rx queue's QPL resides */
+static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
- return gve_rx_qpl_id(priv, 0);
+ return gve_get_rx_qpl_id(tx_cfg, 0);
}
-/* Returns a pointer to the next available tx qpl in the list of qpls
- */
+/* Returns a pointer to the next available tx qpl in the list of qpls */
static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
+struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
+ int tx_qid)
{
- int id = gve_tx_qpl_id(priv, tx_qid);
-
/* QPL already in use */
- if (test_bit(id, priv->qpl_cfg.qpl_id_map))
+ if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
return NULL;
-
- set_bit(id, priv->qpl_cfg.qpl_id_map);
- return &priv->qpls[id];
+ set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
+ return &cfg->qpls[tx_qid];
}
-/* Returns a pointer to the next available rx qpl in the list of qpls
- */
+/* Returns a pointer to the next available rx qpl in the list of qpls */
static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
+struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
+ int rx_qid)
{
- int id = gve_rx_qpl_id(priv, rx_qid);
-
+ int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
/* QPL already in use */
- if (test_bit(id, priv->qpl_cfg.qpl_id_map))
+ if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
return NULL;
-
- set_bit(id, priv->qpl_cfg.qpl_id_map);
- return &priv->qpls[id];
+ set_bit(id, cfg->qpl_cfg->qpl_id_map);
+ return &cfg->qpls[id];
}
-/* Unassigns the qpl with the given id
- */
-static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
+/* Unassigns the qpl with the given id */
+static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
{
- clear_bit(id, priv->qpl_cfg.qpl_id_map);
+ clear_bit(id, qpl_cfg->qpl_id_map);
}
-/* Returns the correct dma direction for tx and rx qpls
- */
+/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
int id)
{
- if (id < gve_rx_start_qpl_id(priv))
+ if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
return DMA_TO_DEVICE;
else
return DMA_FROM_DEVICE;
@@ -1036,6 +1106,9 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
return gve_xdp_tx_queue_id(priv, 0);
}
+/* gqi napi handler defined in gve_main.c */
+int gve_napi_poll(struct napi_struct *napi, int budget);
+
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
@@ -1051,8 +1124,12 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
-void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
+int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_free_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
+void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
@@ -1061,7 +1138,15 @@ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
-void gve_rx_free_rings_gqi(struct gve_priv *priv);
+int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_free_rings_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
+void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
+u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
+bool gve_header_split_supported(const struct gve_priv *priv);
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
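
The reworked gve helpers above keep QPL bookkeeping in the alloc-config structs: tx queues own ids [0, tx_max) and rx queue i maps to id tx_max + i, with an id bitmap guarding reuse. A standalone sketch of that assignment scheme, sized to 64 QPLs so a single unsigned long can serve as the bitmap:

    #include <stdio.h>

    struct qpl_cfg {
        unsigned long id_map;   /* 1 bit per QPL id, ids < 64 assumed */
    };

    static int assign_rx_qpl(struct qpl_cfg *cfg, int tx_max_queues, int rx_qid)
    {
        int id = tx_max_queues + rx_qid;    /* rx ids follow all tx ids */

        if (cfg->id_map & (1UL << id))
            return -1;                      /* QPL already in use */
        cfg->id_map |= 1UL << id;
        return id;
    }

    int main(void)
    {
        struct qpl_cfg cfg = { 0 };

        printf("rx0 -> qpl %d\n", assign_rx_qpl(&cfg, 8, 0)); /* 8 */
        printf("rx0 -> qpl %d\n", assign_rx_qpl(&cfg, 8, 0)); /* -1 */
        return 0;
    }
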
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 12fbd723ecc6..ae12ac38e18b 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -40,7 +40,8 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
- struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
+ struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -147,6 +148,23 @@ void gve_parse_device_option(struct gve_priv *priv,
}
*dev_op_jumbo_frames = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_BUFFER_SIZES:
+ if (option_length < sizeof(**dev_op_buffer_sizes) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Buffer Sizes",
+ (int)sizeof(**dev_op_buffer_sizes),
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_buffer_sizes))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Buffer Sizes");
+ *dev_op_buffer_sizes = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -164,7 +182,8 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
- struct gve_device_option_dqo_qpl **dev_op_dqo_qpl)
+ struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -185,7 +204,7 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
- dev_op_dqo_qpl);
+ dev_op_dqo_qpl, dev_op_buffer_sizes);
dev_opt = next_opt;
}
@@ -640,6 +659,9 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cpu_to_be16(rx_buff_ring_entries);
cmd.create_rx_queue.enable_rsc =
!!(priv->dev->features & NETIF_F_LRO);
+ if (priv->header_split_enabled)
+ cmd.create_rx_queue.header_buffer_size =
+ cpu_to_be16(priv->header_buf_size);
}
return gve_adminq_issue_cmd(priv, &cmd);
@@ -755,7 +777,9 @@ static void gve_enable_supported_features(struct gve_priv *priv,
const struct gve_device_option_jumbo_frames
*dev_op_jumbo_frames,
const struct gve_device_option_dqo_qpl
- *dev_op_dqo_qpl)
+ *dev_op_dqo_qpl,
+ const struct gve_device_option_buffer_sizes
+ *dev_op_buffer_sizes)
{
/* Before control reaches this point, the page-size-capped max MTU from
* the gve_device_descriptor field has already been stored in
@@ -779,10 +803,22 @@ static void gve_enable_supported_features(struct gve_priv *priv,
if (priv->rx_pages_per_qpl == 0)
priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
}
+
+ if (dev_op_buffer_sizes &&
+ (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
+ priv->max_rx_buffer_size =
+ be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
+ priv->header_buf_size =
+ be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
+ dev_info(&priv->pdev->dev,
+ "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
+ priv->max_rx_buffer_size, priv->header_buf_size);
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
@@ -816,7 +852,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
&dev_op_gqi_qpl, &dev_op_dqo_rda,
&dev_op_jumbo_frames,
- &dev_op_dqo_qpl);
+ &dev_op_dqo_qpl,
+ &dev_op_buffer_sizes);
if (err)
goto free_device_descriptor;
@@ -885,7 +922,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
gve_enable_supported_features(priv, supported_features_mask,
- dev_op_jumbo_frames, dev_op_dqo_qpl);
+ dev_op_jumbo_frames, dev_op_dqo_qpl,
+ dev_op_buffer_sizes);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
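[Editor's note] The new Buffer Sizes case follows the same validate-then-accept pattern as every other device option here: check the advertised option_length against the struct size, check the required-features mask, warn and skip on mismatch, and only then expose the payload that follows the header. The pattern in isolation, as a sketch mirroring the three-field option header used in this file (example_ names are illustrative):

	/* Illustrative reduction of the checks gve_parse_device_option()
	 * performs for GVE_DEV_OPT_ID_BUFFER_SIZES; NULL means "skip".
	 */
	struct example_opt_hdr {
		__be16 option_id;
		__be16 option_length;
		__be32 required_features_mask;
	};

	static const void *example_accept_opt(const struct example_opt_hdr *opt,
					      size_t min_len, u32 req_feats)
	{
		if (be16_to_cpu(opt->option_length) < min_len ||
		    be32_to_cpu(opt->required_features_mask) != req_feats)
			return NULL;
		return opt + 1;		/* payload starts right after the header */
	}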
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 5865ccdccbd0..5ac972e45ff8 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -125,6 +125,15 @@ struct gve_device_option_jumbo_frames {
static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
+struct gve_device_option_buffer_sizes {
+ /* GVE_SUP_BUFFER_SIZES_MASK bit should be set */
+ __be32 supported_features_mask;
+ __be16 packet_buffer_size;
+ __be16 header_buffer_size;
+};
+
+static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -140,6 +149,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_DQO_RDA = 0x4,
GVE_DEV_OPT_ID_DQO_QPL = 0x7,
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
+ GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
};
enum gve_dev_opt_req_feat_mask {
@@ -149,10 +159,12 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
};
enum gve_sup_feature_mask {
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
+ GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -165,6 +177,7 @@ enum gve_driver_capbility {
gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
gve_driver_capability_dqo_rda = 3,
gve_driver_capability_alt_miss_compl = 4,
+ gve_driver_capability_flexible_buffer_size = 5,
};
#define GVE_CAP1(a) BIT((int)a)
@@ -176,7 +189,8 @@ enum gve_driver_capbility {
(GVE_CAP1(gve_driver_capability_gqi_qpl) | \
GVE_CAP1(gve_driver_capability_gqi_rda) | \
GVE_CAP1(gve_driver_capability_dqo_rda) | \
- GVE_CAP1(gve_driver_capability_alt_miss_compl))
+ GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
+ GVE_CAP1(gve_driver_capability_flexible_buffer_size))
#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
@@ -260,7 +274,9 @@ struct gve_adminq_create_rx_queue {
__be16 packet_buffer_size;
__be16 rx_buff_ring_size;
u8 enable_rsc;
- u8 padding[5];
+ u8 padding1;
+ __be16 header_buffer_size;
+ u8 padding2[2];
};
static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
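[Editor's note] Note how the admin-queue command keeps its wire size: the old padding[5] after enable_rsc is split into padding1 + header_buffer_size + padding2[2], so sizeof() stays 56 and the new __be16 presumably lands on a 2-byte boundary. The existing static_assert already pins the total size; extending the same guard to the field offset would look like the sketch below (the offsetof check is an editor's assumption, not part of the patch):

	/* Sketch: compile-time layout guards in the style the driver
	 * already uses. The offsetof check is illustrative only.
	 */
	#include <linux/build_bug.h>
	#include <linux/stddef.h>

	static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
	static_assert(offsetof(struct gve_adminq_create_rx_queue,
			       header_buffer_size) % 2 == 0);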
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index c36b93f0de15..b81584829c40 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -38,10 +38,18 @@ netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
netdev_features_t features);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
-void gve_tx_free_rings_dqo(struct gve_priv *priv);
-int gve_rx_alloc_rings_dqo(struct gve_priv *priv);
-void gve_rx_free_rings_dqo(struct gve_priv *priv);
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_free_rings_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx);
+void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx);
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_free_rings_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg);
+void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx);
+void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx);
int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct napi_struct *napi);
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
@@ -93,4 +101,6 @@ gve_set_itr_coalesce_usecs_dqo(struct gve_priv *priv,
gve_write_irq_doorbell_dqo(priv, block,
gve_setup_itr_interval_dqo(usecs));
}
+
+int gve_napi_poll_dqo(struct napi_struct *napi, int budget);
#endif /* _GVE_DQO_H_ */
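[Editor's note] All of the reworked prototypes take cfg structs instead of reading priv directly, which is what lets gve_adjust_config() later in this patch build a complete second configuration before tearing the first one down. The rx cfg's shape can be read off gve_rx_get_curr_alloc_cfg() in gve_main.c; the reconstruction below is an inference, since the real definition (in gve.h) sits outside these hunks and may carry more fields:

	/* Assumed shape of the rx alloc cfg, inferred from how this patch
	 * fills it in; field types are guesses consistent with their sources.
	 */
	struct example_rx_alloc_rings_cfg {
		struct gve_queue_config *qcfg;		/* rx queue counts */
		struct gve_queue_config *qcfg_tx;	/* tx counts, for ntfy ids */
		struct gve_queue_page_list *qpls;	/* NULL when raw addressing */
		struct gve_qpl_config *qpl_cfg;		/* QPL-id assignment bitmap */
		u16 ring_size;				/* descriptors per ring */
		u16 packet_buffer_size;
		bool raw_addressing;
		bool enable_header_split;
		struct gve_rx_ring *rx;			/* output: allocated rings */
	};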
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index e5397aa1e48f..9aebfb843d9d 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -4,7 +4,6 @@
* Copyright (C) 2015-2021 Google, Inc.
*/
-#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
@@ -40,17 +39,18 @@ static u32 gve_get_msglevel(struct net_device *netdev)
* as declared in enum xdp_action inside file uapi/linux/bpf.h .
*/
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
- "rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
- "rx_dropped", "tx_dropped", "tx_timeouts",
+ "rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
+ "tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
+ "rx_hsplit_unsplit_pkt",
"interface_up_cnt", "interface_down_cnt", "reset_cnt",
"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
- "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
- "rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
- "rx_frag_alloc_cnt[%u]",
+ "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
+ "rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
+ "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
@@ -154,11 +154,13 @@ static void
gve_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
- tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
+ u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
+ tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
+ tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
tmp_tx_pkts, tmp_tx_bytes;
- u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
- rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
+ u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
+ rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
+ tx_dropped;
int stats_idx, base_stats_idx, max_stats_idx;
struct stats *report_stats;
int *rx_qid_to_stats_idx;
@@ -185,8 +187,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
kfree(rx_qid_to_stats_idx);
return;
}
- for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
- rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
+ for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
+ rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
+ rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
+ ring = 0;
ring < priv->rx_cfg.num_queues; ring++) {
if (priv->rx) {
do {
@@ -195,18 +199,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
tmp_rx_pkts = rx->rpackets;
+ tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
+ tmp_rx_hsplit_unsplit_pkt =
+ rx->rx_hsplit_unsplit_pkt;
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
rx_pkts += tmp_rx_pkts;
+ rx_hsplit_pkt += tmp_rx_hsplit_pkt;
rx_bytes += tmp_rx_bytes;
rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
+ rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
}
}
for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
@@ -227,6 +236,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
i = 0;
data[i++] = rx_pkts;
+ data[i++] = rx_hsplit_pkt;
data[i++] = tx_pkts;
data[i++] = rx_bytes;
data[i++] = tx_bytes;
@@ -238,6 +248,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx_skb_alloc_fail;
data[i++] = rx_buf_alloc_fail;
data[i++] = rx_desc_err_dropped_pkt;
+ data[i++] = rx_hsplit_unsplit_pkt;
data[i++] = priv->interface_up_cnt;
data[i++] = priv->interface_down_cnt;
data[i++] = priv->reset_cnt;
@@ -277,6 +288,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
tmp_rx_bytes = rx->rbytes;
+ tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
@@ -284,6 +296,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
+ data[i++] = tmp_rx_hsplit_bytes;
data[i++] = rx->rx_cont_packet_cnt;
data[i++] = rx->rx_frag_flip_cnt;
data[i++] = rx->rx_frag_copy_cnt;
@@ -480,6 +493,29 @@ static void gve_get_ringparam(struct net_device *netdev,
cmd->tx_max_pending = priv->tx_desc_cnt;
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
+
+ if (!gve_header_split_supported(priv))
+ kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
+ else if (priv->header_split_enabled)
+ kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+ else
+ kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
+}
+
+static int gve_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *cmd,
+ struct kernel_ethtool_ringparam *kernel_cmd,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (priv->tx_desc_cnt != cmd->tx_pending ||
+ priv->rx_desc_cnt != cmd->rx_pending) {
+ dev_info(&priv->pdev->dev, "Modify ring size is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+
+ return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
@@ -655,6 +691,7 @@ static int gve_set_coalesce(struct net_device *netdev,
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
.get_sset_count = gve_get_sset_count,
@@ -667,6 +704,7 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_coalesce = gve_get_coalesce,
.set_coalesce = gve_set_coalesce,
.get_ringparam = gve_get_ringparam,
+ .set_ringparam = gve_set_ringparam,
.reset = gve_user_reset,
.get_tunable = gve_get_tunable,
.set_tunable = gve_set_tunable,
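[Editor's note] Each new hsplit counter is read inside the existing u64_stats_fetch_begin()/u64_stats_fetch_retry() loop, so a 64-bit value torn by a concurrent writer on 32-bit machines is re-read instead of reported. That snapshot pattern in isolation, as a sketch:

	/* Sketch of the lockless stats snapshot used above: retry the copy
	 * if the writer's sequence count moved while we were reading.
	 */
	static u64 example_read_hsplit_pkt(struct gve_rx_ring *rx)
	{
		unsigned int start;
		u64 val;

		do {
			start = u64_stats_fetch_begin(&rx->statss);
			val = rx->rx_hsplit_pkt;
		} while (u64_stats_fetch_retry(&rx->statss, start));

		return val;
	}

On the userspace side, the new set_ringparam hook is reached through the ring-parameter path, i.e. something like "ethtool -G <dev> tcp-data-split on" with a recent ethtool; actual ring-size changes are still rejected with -EOPNOTSUPP.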
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 619bf63ec935..166bd827a6d7 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -22,6 +22,7 @@
#include "gve_dqo.h"
#include "gve_adminq.h"
#include "gve_register.h"
+#include "gve_utils.h"
#define GVE_DEFAULT_RX_COPYBREAK (256)
@@ -252,7 +253,7 @@ static irqreturn_t gve_intr_dqo(int irq, void *arg)
return IRQ_HANDLED;
}
-static int gve_napi_poll(struct napi_struct *napi, int budget)
+int gve_napi_poll(struct napi_struct *napi, int budget)
{
struct gve_notify_block *block;
__be32 __iomem *irq_doorbell;
@@ -302,7 +303,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
+int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
{
struct gve_notify_block *block =
container_of(napi, struct gve_notify_block, napi);
@@ -581,19 +582,59 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
gve_clear_device_resources_ok(priv);
}
-static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
- int (*gve_poll)(struct napi_struct *, int))
+static int gve_unregister_qpl(struct gve_priv *priv, u32 i)
{
- struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ int err;
+
+ err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Failed to unregister queue page list %d\n",
+ priv->qpls[i].id);
+ return err;
+ }
- netif_napi_add(priv->dev, &block->napi, gve_poll);
+ priv->num_registered_pages -= priv->qpls[i].num_entries;
+ return 0;
}
-static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
+static int gve_register_qpl(struct gve_priv *priv, u32 i)
{
- struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ int num_rx_qpls;
+ int pages;
+ int err;
+
+ /* Rx QPLs succeed Tx QPLs in the priv->qpls array. */
+ num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
+ if (i >= gve_rx_start_qpl_id(&priv->tx_cfg) + num_rx_qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot register nonexisting QPL at index %d\n", i);
+ return -EINVAL;
+ }
+
+ pages = priv->qpls[i].num_entries;
+
+ if (pages + priv->num_registered_pages > priv->max_registered_pages) {
+ netif_err(priv, drv, priv->dev,
+ "Reached max number of registered pages %llu > %llu\n",
+ pages + priv->num_registered_pages,
+ priv->max_registered_pages);
+ return -EINVAL;
+ }
+
+ err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to register queue page list %d\n",
+ priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
- netif_napi_del(&block->napi);
+ priv->num_registered_pages += pages;
+ return 0;
}
static int gve_register_xdp_qpls(struct gve_priv *priv)
@@ -602,55 +643,41 @@ static int gve_register_xdp_qpls(struct gve_priv *priv)
int err;
int i;
- start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ err = gve_register_qpl(priv, i);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err)
return err;
- }
}
return 0;
}
static int gve_register_qpls(struct gve_priv *priv)
{
+ int num_tx_qpls, num_rx_qpls;
int start_id;
int err;
int i;
- start_id = gve_tx_start_qpl_id(priv);
- for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
+ gve_is_qpl(priv));
+ num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
+
+ for (i = 0; i < num_tx_qpls; i++) {
+ err = gve_register_qpl(priv, i);
+ if (err)
return err;
- }
}
- start_id = gve_rx_start_qpl_id(priv);
- for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ /* there might be a gap between the tx and rx qpl ids */
+ start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
+ for (i = 0; i < num_rx_qpls; i++) {
+ err = gve_register_qpl(priv, start_id + i);
+ if (err)
return err;
- }
}
+
return 0;
}
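[Editor's note] The loop above relies on a fixed QPL id layout: tx QPLs, including the XDP tx ones, occupy ids [0, num_tx_qpls), while rx QPL ids are anchored by gve_rx_start_qpl_id(&priv->tx_cfg), which appears to be derived from the maximum tx queue count rather than the live one, hence the gap the comment mentions. A hedged helper making that arithmetic explicit (the max_queues anchor is an assumption read off the tx_cfg argument):

	/* Sketch: assumed QPL id layout behind gve_register_qpls(). If
	 * fewer tx queues are live than tx_cfg->max_queues allows, the ids
	 * between the last tx QPL and the first rx QPL are simply unused.
	 */
	static u32 example_rx_qpl_id(const struct gve_queue_config *tx_cfg,
				     u32 rx_queue_idx)
	{
		return tx_cfg->max_queues + rx_queue_idx;
	}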
@@ -660,48 +687,40 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv)
int err;
int i;
- start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean up */
- if (err) {
- netif_err(priv, drv, priv->dev,
- "Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ err = gve_unregister_qpl(priv, i);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err)
return err;
- }
}
return 0;
}
static int gve_unregister_qpls(struct gve_priv *priv)
{
+ int num_tx_qpls, num_rx_qpls;
int start_id;
int err;
int i;
- start_id = gve_tx_start_qpl_id(priv);
- for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean up */
- if (err) {
- netif_err(priv, drv, priv->dev,
- "Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
+ gve_is_qpl(priv));
+ num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
+
+ for (i = 0; i < num_tx_qpls; i++) {
+ err = gve_unregister_qpl(priv, i);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err)
return err;
- }
}
- start_id = gve_rx_start_qpl_id(priv);
- for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean up */
- if (err) {
- netif_err(priv, drv, priv->dev,
- "Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
+ for (i = 0; i < num_rx_qpls; i++) {
+ err = gve_unregister_qpl(priv, start_id + i);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err)
return err;
- }
}
return 0;
}
@@ -776,120 +795,124 @@ static int gve_create_rings(struct gve_priv *priv)
return 0;
}
-static void add_napi_init_xdp_sync_stats(struct gve_priv *priv,
- int (*napi_poll)(struct napi_struct *napi,
- int budget))
+static void init_xdp_sync_stats(struct gve_priv *priv)
{
int start_id = gve_xdp_tx_start_queue_id(priv);
int i;
- /* Add xdp tx napi & init sync stats*/
+ /* Init stats */
for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss);
priv->tx[i].ntfy_id = ntfy_idx;
- gve_add_napi(priv, ntfy_idx, napi_poll);
}
}
-static void add_napi_init_sync_stats(struct gve_priv *priv,
- int (*napi_poll)(struct napi_struct *napi,
- int budget))
+static void gve_init_sync_stats(struct gve_priv *priv)
{
int i;
- /* Add tx napi & init sync stats*/
- for (i = 0; i < gve_num_tx_queues(priv); i++) {
- int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
-
+ for (i = 0; i < priv->tx_cfg.num_queues; i++)
u64_stats_init(&priv->tx[i].statss);
- priv->tx[i].ntfy_id = ntfy_idx;
- gve_add_napi(priv, ntfy_idx, napi_poll);
- }
- /* Add rx napi & init sync stats*/
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
+ /* Init stats for XDP TX queues */
+ init_xdp_sync_stats(priv);
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
u64_stats_init(&priv->rx[i].statss);
- priv->rx[i].ntfy_id = ntfy_idx;
- gve_add_napi(priv, ntfy_idx, napi_poll);
+}
+
+static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
+{
+ cfg->qcfg = &priv->tx_cfg;
+ cfg->raw_addressing = !gve_is_qpl(priv);
+ cfg->qpls = priv->qpls;
+ cfg->qpl_cfg = &priv->qpl_cfg;
+ cfg->ring_size = priv->tx_desc_cnt;
+ cfg->start_idx = 0;
+ cfg->num_rings = gve_num_tx_queues(priv);
+ cfg->tx = priv->tx;
+}
+
+static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
+{
+ int i;
+
+ if (!priv->tx)
+ return;
+
+ for (i = start_id; i < start_id + num_rings; i++) {
+ if (gve_is_gqi(priv))
+ gve_tx_stop_ring_gqi(priv, i);
+ else
+ gve_tx_stop_ring_dqo(priv, i);
}
}
-static void gve_tx_free_rings(struct gve_priv *priv, int start_id, int num_rings)
+static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
+ int num_rings)
{
- if (gve_is_gqi(priv)) {
- gve_tx_free_rings_gqi(priv, start_id, num_rings);
- } else {
- gve_tx_free_rings_dqo(priv);
+ int i;
+
+ for (i = start_id; i < start_id + num_rings; i++) {
+ if (gve_is_gqi(priv))
+ gve_tx_start_ring_gqi(priv, i);
+ else
+ gve_tx_start_ring_dqo(priv, i);
}
}
static int gve_alloc_xdp_rings(struct gve_priv *priv)
{
- int start_id;
+ struct gve_tx_alloc_rings_cfg cfg = {0};
int err = 0;
if (!priv->num_xdp_queues)
return 0;
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_tx_alloc_rings(priv, start_id, priv->num_xdp_queues);
+ gve_tx_get_curr_alloc_cfg(priv, &cfg);
+ cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
+ cfg.num_rings = priv->num_xdp_queues;
+
+ err = gve_tx_alloc_rings_gqi(priv, &cfg);
if (err)
return err;
- add_napi_init_xdp_sync_stats(priv, gve_napi_poll);
+
+ gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
+ init_xdp_sync_stats(priv);
return 0;
}
-static int gve_alloc_rings(struct gve_priv *priv)
+static int gve_alloc_rings(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
- /* Setup tx rings */
- priv->tx = kvcalloc(priv->tx_cfg.max_queues, sizeof(*priv->tx),
- GFP_KERNEL);
- if (!priv->tx)
- return -ENOMEM;
-
if (gve_is_gqi(priv))
- err = gve_tx_alloc_rings(priv, 0, gve_num_tx_queues(priv));
+ err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
else
- err = gve_tx_alloc_rings_dqo(priv);
+ err = gve_tx_alloc_rings_dqo(priv, tx_alloc_cfg);
if (err)
- goto free_tx;
-
- /* Setup rx rings */
- priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx),
- GFP_KERNEL);
- if (!priv->rx) {
- err = -ENOMEM;
- goto free_tx_queue;
- }
+ return err;
if (gve_is_gqi(priv))
- err = gve_rx_alloc_rings(priv);
+ err = gve_rx_alloc_rings_gqi(priv, rx_alloc_cfg);
else
- err = gve_rx_alloc_rings_dqo(priv);
+ err = gve_rx_alloc_rings_dqo(priv, rx_alloc_cfg);
if (err)
- goto free_rx;
-
- if (gve_is_gqi(priv))
- add_napi_init_sync_stats(priv, gve_napi_poll);
- else
- add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
+ goto free_tx;
return 0;
-free_rx:
- kvfree(priv->rx);
- priv->rx = NULL;
-free_tx_queue:
- gve_tx_free_rings(priv, 0, gve_num_tx_queues(priv));
free_tx:
- kvfree(priv->tx);
- priv->tx = NULL;
+ if (gve_is_gqi(priv))
+ gve_tx_free_rings_gqi(priv, tx_alloc_cfg);
+ else
+ gve_tx_free_rings_dqo(priv, tx_alloc_cfg);
return err;
}
@@ -937,52 +960,30 @@ static int gve_destroy_rings(struct gve_priv *priv)
return 0;
}
-static void gve_rx_free_rings(struct gve_priv *priv)
-{
- if (gve_is_gqi(priv))
- gve_rx_free_rings_gqi(priv);
- else
- gve_rx_free_rings_dqo(priv);
-}
-
static void gve_free_xdp_rings(struct gve_priv *priv)
{
- int ntfy_idx, start_id;
- int i;
+ struct gve_tx_alloc_rings_cfg cfg = {0};
+
+ gve_tx_get_curr_alloc_cfg(priv, &cfg);
+ cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
+ cfg.num_rings = priv->num_xdp_queues;
- start_id = gve_xdp_tx_start_queue_id(priv);
if (priv->tx) {
- for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
- ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
- gve_remove_napi(priv, ntfy_idx);
- }
- gve_tx_free_rings(priv, start_id, priv->num_xdp_queues);
+ gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
+ gve_tx_free_rings_gqi(priv, &cfg);
}
}
-static void gve_free_rings(struct gve_priv *priv)
+static void gve_free_rings(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_cfg)
{
- int num_tx_queues = gve_num_tx_queues(priv);
- int ntfy_idx;
- int i;
-
- if (priv->tx) {
- for (i = 0; i < num_tx_queues; i++) {
- ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
- gve_remove_napi(priv, ntfy_idx);
- }
- gve_tx_free_rings(priv, 0, num_tx_queues);
- kvfree(priv->tx);
- priv->tx = NULL;
- }
- if (priv->rx) {
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
- gve_remove_napi(priv, ntfy_idx);
- }
- gve_rx_free_rings(priv);
- kvfree(priv->rx);
- priv->rx = NULL;
+ if (gve_is_gqi(priv)) {
+ gve_tx_free_rings_gqi(priv, tx_cfg);
+ gve_rx_free_rings_gqi(priv, rx_cfg);
+ } else {
+ gve_tx_free_rings_dqo(priv, tx_cfg);
+ gve_rx_free_rings_dqo(priv, rx_cfg);
}
}
@@ -1004,21 +1005,13 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
return 0;
}
-static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
- int pages)
+static int gve_alloc_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ u32 id, int pages)
{
- struct gve_queue_page_list *qpl = &priv->qpls[id];
int err;
int i;
- if (pages + priv->num_registered_pages > priv->max_registered_pages) {
- netif_err(priv, drv, priv->dev,
- "Reached max number of registered pages %llu > %llu\n",
- pages + priv->num_registered_pages,
- priv->max_registered_pages);
- return -EINVAL;
- }
-
qpl->id = id;
qpl->num_entries = 0;
qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
@@ -1039,7 +1032,6 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
return -ENOMEM;
qpl->num_entries++;
}
- priv->num_registered_pages += pages;
return 0;
}
@@ -1053,9 +1045,10 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
put_page(page);
}
-static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
+static void gve_free_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ int id)
{
- struct gve_queue_page_list *qpl = &priv->qpls[id];
int i;
if (!qpl->pages)
@@ -1072,19 +1065,30 @@ static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
free_pages:
kvfree(qpl->pages);
qpl->pages = NULL;
- priv->num_registered_pages -= qpl->num_entries;
}
-static int gve_alloc_xdp_qpls(struct gve_priv *priv)
+static void gve_free_n_qpls(struct gve_priv *priv,
+ struct gve_queue_page_list *qpls,
+ int start_id,
+ int num_qpls)
+{
+ int i;
+
+ for (i = start_id; i < start_id + num_qpls; i++)
+ gve_free_queue_page_list(priv, &qpls[i], i);
+}
+
+static int gve_alloc_n_qpls(struct gve_priv *priv,
+ struct gve_queue_page_list *qpls,
+ int page_count,
+ int start_id,
+ int num_qpls)
{
- int start_id;
- int i, j;
int err;
+ int i;
- start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_alloc_queue_page_list(priv, i,
- priv->tx_pages_per_qpl);
+ for (i = start_id; i < start_id + num_qpls; i++) {
+ err = gve_alloc_queue_page_list(priv, &qpls[i], i, page_count);
if (err)
goto free_qpls;
}
@@ -1092,95 +1096,89 @@ static int gve_alloc_xdp_qpls(struct gve_priv *priv)
return 0;
free_qpls:
- for (j = start_id; j <= i; j++)
- gve_free_queue_page_list(priv, j);
+ /* Must include the failing QPL too, since gve_alloc_queue_page_list
+ * fails without cleaning up after itself.
+ */
+ gve_free_n_qpls(priv, qpls, start_id, i - start_id + 1);
return err;
}
-static int gve_alloc_qpls(struct gve_priv *priv)
+static int gve_alloc_qpls(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *cfg)
{
- int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
+ int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
+ int rx_start_id, tx_num_qpls, rx_num_qpls;
+ struct gve_queue_page_list *qpls;
int page_count;
- int start_id;
- int i, j;
int err;
- if (!gve_is_qpl(priv))
+ if (cfg->raw_addressing)
return 0;
- priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL);
- if (!priv->qpls)
+ qpls = kvcalloc(max_queues, sizeof(*qpls), GFP_KERNEL);
+ if (!qpls)
return -ENOMEM;
- start_id = gve_tx_start_qpl_id(priv);
- page_count = priv->tx_pages_per_qpl;
- for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
- err = gve_alloc_queue_page_list(priv, i,
- page_count);
- if (err)
- goto free_qpls;
+ cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) *
+ sizeof(unsigned long) * BITS_PER_BYTE;
+ cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!cfg->qpl_cfg->qpl_id_map) {
+ err = -ENOMEM;
+ goto free_qpl_array;
}
- start_id = gve_rx_start_qpl_id(priv);
+ /* Allocate TX QPLs */
+ page_count = priv->tx_pages_per_qpl;
+ tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
+ gve_is_qpl(priv));
+ err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
+ if (err)
+ goto free_qpl_map;
+ /* Allocate RX QPLs */
+ rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
/* For GQI_QPL number of pages allocated have 1:1 relationship with
* number of descriptors. For DQO, number of pages required are
* more than descriptors (because of out of order completions).
*/
- page_count = priv->queue_format == GVE_GQI_QPL_FORMAT ?
- priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
- for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
- err = gve_alloc_queue_page_list(priv, i,
- page_count);
- if (err)
- goto free_qpls;
- }
-
- priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(max_queues) *
- sizeof(unsigned long) * BITS_PER_BYTE;
- priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
- sizeof(unsigned long), GFP_KERNEL);
- if (!priv->qpl_cfg.qpl_id_map) {
- err = -ENOMEM;
- goto free_qpls;
- }
+ page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
+ rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
+ err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
+ if (err)
+ goto free_tx_qpls;
+ cfg->qpls = qpls;
return 0;
-free_qpls:
- for (j = 0; j <= i; j++)
- gve_free_queue_page_list(priv, j);
- kvfree(priv->qpls);
- priv->qpls = NULL;
+free_tx_qpls:
+ gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
+free_qpl_map:
+ kvfree(cfg->qpl_cfg->qpl_id_map);
+ cfg->qpl_cfg->qpl_id_map = NULL;
+free_qpl_array:
+ kvfree(qpls);
return err;
}
-static void gve_free_xdp_qpls(struct gve_priv *priv)
-{
- int start_id;
- int i;
-
- start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
- for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++)
- gve_free_queue_page_list(priv, i);
-}
-
-static void gve_free_qpls(struct gve_priv *priv)
+static void gve_free_qpls(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *cfg)
{
- int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
+ int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
+ struct gve_queue_page_list *qpls = cfg->qpls;
int i;
- if (!priv->qpls)
+ if (!qpls)
return;
- kvfree(priv->qpl_cfg.qpl_id_map);
- priv->qpl_cfg.qpl_id_map = NULL;
+ kvfree(cfg->qpl_cfg->qpl_id_map);
+ cfg->qpl_cfg->qpl_id_map = NULL;
for (i = 0; i < max_queues; i++)
- gve_free_queue_page_list(priv, i);
+ gve_free_queue_page_list(priv, &qpls[i], i);
- kvfree(priv->qpls);
- priv->qpls = NULL;
+ kvfree(qpls);
+ cfg->qpls = NULL;
}
/* Use this to schedule a reset when the device is capable of continuing
@@ -1278,58 +1276,178 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
static void gve_drain_page_cache(struct gve_priv *priv)
{
- struct page_frag_cache *nc;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- nc = &priv->rx[i].page_cache;
- if (nc->va) {
- __page_frag_cache_drain(virt_to_page(nc->va),
- nc->pagecnt_bias);
- nc->va = NULL;
- }
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ page_frag_cache_drain(&priv->rx[i].page_cache);
+}
+
+static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *cfg)
+{
+ cfg->raw_addressing = !gve_is_qpl(priv);
+ cfg->is_gqi = gve_is_gqi(priv);
+ cfg->num_xdp_queues = priv->num_xdp_queues;
+ cfg->qpl_cfg = &priv->qpl_cfg;
+ cfg->tx_cfg = &priv->tx_cfg;
+ cfg->rx_cfg = &priv->rx_cfg;
+ cfg->qpls = priv->qpls;
+}
+
+static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
+{
+ cfg->qcfg = &priv->rx_cfg;
+ cfg->qcfg_tx = &priv->tx_cfg;
+ cfg->raw_addressing = !gve_is_qpl(priv);
+ cfg->enable_header_split = priv->header_split_enabled;
+ cfg->qpls = priv->qpls;
+ cfg->qpl_cfg = &priv->qpl_cfg;
+ cfg->ring_size = priv->rx_desc_cnt;
+ cfg->packet_buffer_size = gve_is_gqi(priv) ?
+ GVE_DEFAULT_RX_BUFFER_SIZE :
+ priv->data_buffer_size_dqo;
+ cfg->rx = priv->rx;
+}
+
+static void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
+ gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
+ gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
+}
+
+static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
+{
+ int i;
+
+ for (i = 0; i < num_rings; i++) {
+ if (gve_is_gqi(priv))
+ gve_rx_start_ring_gqi(priv, i);
+ else
+ gve_rx_start_ring_dqo(priv, i);
}
}
-static int gve_open(struct net_device *dev)
+static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
{
- struct gve_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (!priv->rx)
+ return;
+
+ for (i = 0; i < num_rings; i++) {
+ if (gve_is_gqi(priv))
+ gve_rx_stop_ring_gqi(priv, i);
+ else
+ gve_rx_stop_ring_dqo(priv, i);
+ }
+}
+
+static void gve_queues_mem_free(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
+ gve_free_qpls(priv, qpls_alloc_cfg);
+}
+
+static int gve_queues_mem_alloc(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ int err;
+
+ err = gve_alloc_qpls(priv, qpls_alloc_cfg);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
+ return err;
+ }
+ tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
+ rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
+ err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "Failed to alloc rings\n");
+ goto free_qpls;
+ }
+
+ return 0;
+
+free_qpls:
+ gve_free_qpls(priv, qpls_alloc_cfg);
+ return err;
+}
+
+static void gve_queues_mem_remove(struct gve_priv *priv)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_queues_mem_free(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ priv->qpls = NULL;
+ priv->tx = NULL;
+ priv->rx = NULL;
+}
+
+/* The passed-in queue memory is stored into priv and the queues are made live.
+ * No memory is allocated. Passed-in memory is freed on errors.
+ */
+static int gve_queues_start(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ struct net_device *dev = priv->dev;
int err;
+ /* Record new resources into priv */
+ priv->qpls = qpls_alloc_cfg->qpls;
+ priv->tx = tx_alloc_cfg->tx;
+ priv->rx = rx_alloc_cfg->rx;
+
+ /* Record new configs into priv */
+ priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
+ priv->tx_cfg = *tx_alloc_cfg->qcfg;
+ priv->rx_cfg = *rx_alloc_cfg->qcfg;
+ priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
+ priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
+
if (priv->xdp_prog)
priv->num_xdp_queues = priv->rx_cfg.num_queues;
else
priv->num_xdp_queues = 0;
- err = gve_alloc_qpls(priv);
- if (err)
- return err;
-
- err = gve_alloc_rings(priv);
- if (err)
- goto free_qpls;
+ gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings);
+ gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues);
+ gve_init_sync_stats(priv);
err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
if (err)
- goto free_rings;
+ goto stop_and_free_rings;
err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
if (err)
- goto free_rings;
+ goto stop_and_free_rings;
err = gve_reg_xdp_info(priv, dev);
if (err)
- goto free_rings;
+ goto stop_and_free_rings;
err = gve_register_qpls(priv);
if (err)
goto reset;
- if (!gve_is_gqi(priv)) {
- /* Hard code this for now. This may be tuned in the future for
- * performance.
- */
- priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
- }
+ priv->header_split_enabled = rx_alloc_cfg->enable_header_split;
+ priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size;
+
err = gve_create_rings(priv);
if (err)
goto reset;
@@ -1346,32 +1464,53 @@ static int gve_open(struct net_device *dev)
priv->interface_up_cnt++;
return 0;
-free_rings:
- gve_free_rings(priv);
-free_qpls:
- gve_free_qpls(priv);
- return err;
-
reset:
- /* This must have been called from a reset due to the rtnl lock
- * so just return at this point.
- */
if (gve_get_reset_in_progress(priv))
- return err;
- /* Otherwise reset before returning */
+ goto stop_and_free_rings;
gve_reset_and_teardown(priv, true);
/* if this fails there is nothing we can do so just ignore the return */
gve_reset_recovery(priv, false);
/* return the original error */
return err;
+stop_and_free_rings:
+ gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
+ gve_queues_mem_remove(priv);
+ return err;
}
-static int gve_close(struct net_device *dev)
+static int gve_open(struct net_device *dev)
{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(dev);
int err;
- netif_carrier_off(dev);
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+
+ err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+
+ /* No need to free on error: ownership of resources is lost after
+ * calling gve_queues_start.
+ */
+ err = gve_queues_start(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int gve_queues_stop(struct gve_priv *priv)
+{
+ int err;
+
+ netif_carrier_off(priv->dev);
if (gve_get_device_rings_ok(priv)) {
gve_turndown(priv);
gve_drain_page_cache(priv);
@@ -1386,8 +1525,10 @@ static int gve_close(struct net_device *dev)
del_timer_sync(&priv->stats_report_timer);
gve_unreg_xdp_info(priv);
- gve_free_rings(priv);
- gve_free_qpls(priv);
+
+ gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv));
+ gve_rx_stop_rings(priv, priv->rx_cfg.num_queues);
+
priv->interface_down_cnt++;
return 0;
@@ -1402,10 +1543,26 @@ err:
return gve_reset_recovery(priv, false);
}
+static int gve_close(struct net_device *dev)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = gve_queues_stop(priv);
+ if (err)
+ return err;
+
+ gve_queues_mem_remove(priv);
+ return 0;
+}
+
static int gve_remove_xdp_queues(struct gve_priv *priv)
{
+ int qpl_start_id;
int err;
+ qpl_start_id = gve_xdp_tx_start_queue_id(priv);
+
err = gve_destroy_xdp_rings(priv);
if (err)
return err;
@@ -1416,18 +1573,22 @@ static int gve_remove_xdp_queues(struct gve_priv *priv)
gve_unreg_xdp_info(priv);
gve_free_xdp_rings(priv);
- gve_free_xdp_qpls(priv);
+
+ gve_free_n_qpls(priv, priv->qpls, qpl_start_id, gve_num_xdp_qpls(priv));
priv->num_xdp_queues = 0;
return 0;
}
static int gve_add_xdp_queues(struct gve_priv *priv)
{
+ int start_id;
int err;
- priv->num_xdp_queues = priv->tx_cfg.num_queues;
+ priv->num_xdp_queues = priv->rx_cfg.num_queues;
- err = gve_alloc_xdp_qpls(priv);
+ start_id = gve_xdp_tx_start_queue_id(priv);
+ err = gve_alloc_n_qpls(priv, priv->qpls, priv->tx_pages_per_qpl,
+ start_id, gve_num_xdp_qpls(priv));
if (err)
goto err;
@@ -1452,7 +1613,7 @@ static int gve_add_xdp_queues(struct gve_priv *priv)
free_xdp_rings:
gve_free_xdp_rings(priv);
free_xdp_qpls:
- gve_free_xdp_qpls(priv);
+ gve_free_n_qpls(priv, priv->qpls, start_id, gve_num_xdp_qpls(priv));
err:
priv->num_xdp_queues = 0;
return err;
@@ -1702,42 +1863,87 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+static int gve_adjust_config(struct gve_priv *priv,
+ struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ int err;
+
+ /* Allocate resources for the new configuration */
+ err = gve_queues_mem_alloc(priv, qpls_alloc_cfg,
+ tx_alloc_cfg, rx_alloc_cfg);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Adjust config failed to alloc new queues");
+ return err;
+ }
+
+ /* Teardown the device and free existing resources */
+ err = gve_close(priv->dev);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Adjust config failed to close old queues");
+ gve_queues_mem_free(priv, qpls_alloc_cfg,
+ tx_alloc_cfg, rx_alloc_cfg);
+ return err;
+ }
+
+ /* Bring the device back up again with the new resources. */
+ err = gve_queues_start(priv, qpls_alloc_cfg,
+ tx_alloc_cfg, rx_alloc_cfg);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
+ /* No need to free on error: ownership of resources is lost after
+ * calling gve_queues_start.
+ */
+ gve_turndown(priv);
+ return err;
+ }
+
+ return 0;
+}
+
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config)
{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+ struct gve_qpl_config new_qpl_cfg;
int err;
- if (netif_carrier_ok(priv->dev)) {
- /* To make this process as simple as possible we teardown the
- * device, set the new configuration, and then bring the device
- * up again.
- */
- err = gve_close(priv->dev);
- /* we have already tried to reset in close,
- * just fail at this point
- */
- if (err)
- return err;
- priv->tx_cfg = new_tx_config;
- priv->rx_cfg = new_rx_config;
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
- err = gve_open(priv->dev);
- if (err)
- goto err;
+ /* qpl_cfg is not read-only: it contains a map that is updated as
+ * rings are allocated, which is why we cannot reuse the one still
+ * held in priv.
+ */
+ qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+
+ /* Relay the new config from ethtool */
+ qpls_alloc_cfg.tx_cfg = &new_tx_config;
+ tx_alloc_cfg.qcfg = &new_tx_config;
+ rx_alloc_cfg.qcfg_tx = &new_tx_config;
+ qpls_alloc_cfg.rx_cfg = &new_rx_config;
+ rx_alloc_cfg.qcfg = &new_rx_config;
+ tx_alloc_cfg.num_rings = new_tx_config.num_queues;
- return 0;
+ if (netif_carrier_ok(priv->dev)) {
+ err = gve_adjust_config(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ return err;
}
/* Set the config for the next up. */
priv->tx_cfg = new_tx_config;
priv->rx_cfg = new_rx_config;
return 0;
-err:
- netif_err(priv, drv, priv->dev,
- "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
- gve_turndown(priv);
- return err;
}
static void gve_turndown(struct gve_priv *priv)
@@ -1853,40 +2059,91 @@ out:
priv->tx_timeo_cnt++;
}
+u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
+{
+ if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
+ return GVE_MAX_RX_BUFFER_SIZE;
+ else
+ return GVE_DEFAULT_RX_BUFFER_SIZE;
+}
+
+/* Header split is not yet supported on non-DQO_RDA queue formats, even if the device advertises it */
+bool gve_header_split_supported(const struct gve_priv *priv)
+{
+ return priv->header_buf_size && priv->queue_format == GVE_DQO_RDA_FORMAT;
+}
+
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
+ bool enable_hdr_split;
+ int err = 0;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ return 0;
+
+ if (!gve_header_split_supported(priv)) {
+ dev_err(&priv->pdev->dev, "Header-split not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ enable_hdr_split = true;
+ else
+ enable_hdr_split = false;
+
+ if (enable_hdr_split == priv->header_split_enabled)
+ return 0;
+
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+
+ rx_alloc_cfg.enable_header_split = enable_hdr_split;
+ rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
+
+ if (netif_running(priv->dev))
+ err = gve_adjust_config(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ return err;
+}
+
static int gve_set_features(struct net_device *netdev,
netdev_features_t features)
{
const netdev_features_t orig_features = netdev->features;
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(netdev);
+ struct gve_qpl_config new_qpl_cfg;
int err;
+ gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ /* qpl_cfg is not read-only: it contains a map that is updated as
+ * rings are allocated, which is why we cannot reuse the one still
+ * held in priv.
+ */
+ qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
netdev->features ^= NETIF_F_LRO;
if (netif_carrier_ok(netdev)) {
- /* To make this process as simple as possible we
- * teardown the device, set the new configuration,
- * and then bring the device up again.
- */
- err = gve_close(netdev);
- /* We have already tried to reset in close, just fail
- * at this point.
- */
- if (err)
- goto err;
-
- err = gve_open(netdev);
- if (err)
- goto err;
+ err = gve_adjust_config(priv, &qpls_alloc_cfg,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err) {
+ /* Revert the change on error. */
+ netdev->features = orig_features;
+ return err;
+ }
}
}
return 0;
-err:
- /* Reverts the change on error. */
- netdev->features = orig_features;
- netif_err(priv, drv, netdev,
- "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
- return err;
}
static const struct net_device_ops gve_netdev_ops = {
@@ -2051,6 +2308,8 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
goto err;
}
+ priv->num_registered_pages = 0;
+
if (skip_describe_device)
goto setup_device;
@@ -2080,7 +2339,6 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
if (!gve_is_gqi(priv))
netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
- priv->num_registered_pages = 0;
priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
/* gvnic has one Notification Block per MSI-x vector, except for the
* management vector
@@ -2297,6 +2555,8 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
priv->ethtool_flags = 0x0;
+ priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
+ priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_set_probe_in_progress(priv);
priv->gve_wq = alloc_ordered_workqueue("gve", 0);
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 76615d47e055..20f5a9e7fae9 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -23,7 +23,9 @@ static void gve_rx_free_buffer(struct device *dev,
gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}
-static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
+static void gve_rx_unfill_pages(struct gve_priv *priv,
+ struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
u32 slots = rx->mask + 1;
int i;
@@ -36,7 +38,7 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
for (i = 0; i < slots; i++)
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(priv, rx->data.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
rx->data.qpl = NULL;
for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
@@ -49,16 +51,26 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
rx->data.page_info = NULL;
}
-static void gve_rx_free_ring(struct gve_priv *priv, int idx)
+void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
+{
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+
+ if (!gve_rx_was_added_to_block(priv, idx))
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+ gve_rx_remove_from_block(priv, idx);
+}
+
+static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
- struct gve_rx_ring *rx = &priv->rx[idx];
struct device *dev = &priv->pdev->dev;
u32 slots = rx->mask + 1;
+ int idx = rx->q_num;
size_t bytes;
- gve_rx_remove_from_block(priv, idx);
-
- bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
+ bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
rx->desc.desc_ring = NULL;
@@ -66,7 +78,7 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL;
- gve_rx_unfill_pages(priv, rx);
+ gve_rx_unfill_pages(priv, rx, cfg);
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(dev, bytes, rx->data.data_ring,
@@ -93,7 +105,8 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
struct gve_rx_slot_page_info *page_info,
- union gve_rx_data_slot *data_slot)
+ union gve_rx_data_slot *data_slot,
+ struct gve_rx_ring *rx)
{
struct page *page;
dma_addr_t dma;
@@ -101,14 +114,19 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
GFP_ATOMIC);
- if (err)
+ if (err) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_buf_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
return err;
+ }
gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
return 0;
}
-static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
+static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct gve_priv *priv = rx->gve;
u32 slots;
@@ -127,7 +145,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
return -ENOMEM;
if (!rx->data.raw_addressing) {
- rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num);
+ rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
if (!rx->data.qpl) {
kvfree(rx->data.page_info);
rx->data.page_info = NULL;
@@ -143,8 +161,9 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
&rx->data.data_ring[i].qpl_offset);
continue;
}
- err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
- &rx->data.data_ring[i]);
+ err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
+ &rx->data.page_info[i],
+ &rx->data.data_ring[i], rx);
if (err)
goto alloc_err_rda;
}
@@ -185,7 +204,7 @@ alloc_err_qpl:
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(priv, rx->data.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
rx->data.qpl = NULL;
return err;
@@ -207,13 +226,23 @@ static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
ctx->drop_pkt = false;
}
-static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
+void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
+{
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+
+ gve_rx_add_to_block(priv, idx);
+ gve_add_napi(priv, ntfy_idx, gve_napi_poll);
+}
+
+static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
- struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev;
+ u32 slots = priv->rx_data_slot_cnt;
int filled_pages;
size_t bytes;
- u32 slots;
int err;
netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
@@ -223,9 +252,8 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
rx->gve = priv;
rx->q_num = idx;
- slots = priv->rx_data_slot_cnt;
rx->mask = slots - 1;
- rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
+ rx->data.raw_addressing = cfg->raw_addressing;
/* alloc rx data ring */
bytes = sizeof(*rx->data.data_ring) * slots;
@@ -246,7 +274,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
goto abort_with_slots;
}
- filled_pages = gve_prefill_rx_pages(rx);
+ filled_pages = gve_rx_prefill_pages(rx, cfg);
if (filled_pages < 0) {
err = -ENOMEM;
goto abort_with_copy_pool;
@@ -269,7 +297,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
(unsigned long)rx->data.data_bus);
/* alloc rx desc ring */
- bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
+ bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
GFP_KERNEL);
if (!rx->desc.desc_ring) {
@@ -277,15 +305,11 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
goto abort_with_q_resources;
}
rx->cnt = 0;
- rx->db_threshold = priv->rx_desc_cnt / 2;
+ rx->db_threshold = slots / 2;
rx->desc.seqno = 1;
- /* Allocating half-page buffers allows page-flipping which is faster
- * than copying or allocating new pages.
- */
rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
- gve_rx_add_to_block(priv, idx);
return 0;
@@ -294,7 +318,7 @@ abort_with_q_resources:
rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL;
abort_filled:
- gve_rx_unfill_pages(priv, rx);
+ gve_rx_unfill_pages(priv, rx, cfg);
abort_with_copy_pool:
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
@@ -306,36 +330,58 @@ abort_with_slots:
return err;
}
-int gve_rx_alloc_rings(struct gve_priv *priv)
+int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
+ struct gve_rx_ring *rx;
int err = 0;
- int i;
+ int i, j;
+
+ if (!cfg->raw_addressing && !cfg->qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc QPL ring before allocing QPLs\n");
+ return -EINVAL;
+ }
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- err = gve_rx_alloc_ring(priv, i);
+ rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to alloc rx ring=%d: err=%d\n",
i, err);
- break;
+ goto cleanup;
}
}
- /* Unallocate if there was an error */
- if (err) {
- int j;
- for (j = 0; j < i; j++)
- gve_rx_free_ring(priv, j);
- }
+ cfg->rx = rx;
+ return 0;
+
+cleanup:
+ for (j = 0; j < i; j++)
+ gve_rx_free_ring_gqi(priv, &rx[j], cfg);
+ kvfree(rx);
return err;
}
-void gve_rx_free_rings_gqi(struct gve_priv *priv)
+void gve_rx_free_rings_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
+ struct gve_rx_ring *rx = cfg->rx;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++)
- gve_rx_free_ring(priv, i);
+ if (!rx)
+ return;
+
+ for (i = 0; i < cfg->qcfg->num_queues; i++)
+ gve_rx_free_ring_gqi(priv, &rx[i], cfg);
+
+ kvfree(rx);
+ cfg->rx = NULL;
}
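
The GQI and DQO paths now share one ownership pattern: allocate the whole ring array from the config, unwind exactly the rings that were built on partial failure, and publish the array through cfg->rx only on full success. A minimal user-space sketch of that flow (names hypothetical; the driver itself uses kvcalloc() and per-ring DMA allocations):

#include <stdlib.h>

struct ring { void *buf; };

struct rings_cfg {
	int num_queues;
	struct ring *rx;	/* published only on full success */
};

static int ring_init(struct ring *r)
{
	r->buf = malloc(4096);	/* stands in for the DMA allocations */
	return r->buf ? 0 : -1;
}

static void ring_fini(struct ring *r)
{
	free(r->buf);
}

int rings_alloc(struct rings_cfg *cfg)
{
	struct ring *rx = calloc(cfg->num_queues, sizeof(*rx));
	int i, j;

	if (!rx)
		return -1;
	for (i = 0; i < cfg->num_queues; i++)
		if (ring_init(&rx[i]))
			goto cleanup;
	cfg->rx = rx;	/* hand ownership to the caller */
	return 0;

cleanup:
	for (j = 0; j < i; j++)	/* free only the rings that were built */
		ring_fini(&rx[j]);
	free(rx);
	return -1;
}
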
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
@@ -896,10 +942,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
gve_rx_free_buffer(dev, page_info, data_slot);
page_info->page = NULL;
if (gve_rx_alloc_buffer(priv, dev, page_info,
- data_slot)) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_buf_alloc_fail++;
- u64_stats_update_end(&rx->statss);
+ data_slot, rx)) {
break;
}
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index f281e42a7ef9..8e8071308aeb 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -199,20 +199,42 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
return 0;
}
-static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
+static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+ struct device *hdev = &priv->pdev->dev;
+ int buf_count = rx->dqo.bufq.mask + 1;
+
+ if (rx->dqo.hdr_bufs.data) {
+ dma_free_coherent(hdev, priv->header_buf_size * buf_count,
+ rx->dqo.hdr_bufs.data, rx->dqo.hdr_bufs.addr);
+ rx->dqo.hdr_bufs.data = NULL;
+ }
+}
+
+void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
+{
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+
+ if (!gve_rx_was_added_to_block(priv, idx))
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+ gve_rx_remove_from_block(priv, idx);
+}
+
+static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
- struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev;
size_t completion_queue_slots;
size_t buffer_queue_slots;
+ int idx = rx->q_num;
size_t size;
int i;
completion_queue_slots = rx->dqo.complq.mask + 1;
buffer_queue_slots = rx->dqo.bufq.mask + 1;
- gve_rx_remove_from_block(priv, idx);
-
if (rx->q_resources) {
dma_free_coherent(hdev, sizeof(*rx->q_resources),
rx->q_resources, rx->q_resources_bus);
@@ -226,7 +248,7 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
}
if (rx->dqo.qpl) {
- gve_unassign_qpl(priv, rx->dqo.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, rx->dqo.qpl->id);
rx->dqo.qpl = NULL;
}
@@ -248,20 +270,44 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
kvfree(rx->dqo.buf_states);
rx->dqo.buf_states = NULL;
+ gve_rx_free_hdr_bufs(priv, rx);
+
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+{
+ struct device *hdev = &priv->pdev->dev;
+ int buf_count = rx->dqo.bufq.mask + 1;
+
+ rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
+ &rx->dqo.hdr_bufs.addr, GFP_KERNEL);
+ if (!rx->dqo.hdr_bufs.data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx)
+{
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+
+ gve_rx_add_to_block(priv, idx);
+ gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
+}
+
+static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
- struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev;
size_t size;
int i;
- const u32 buffer_queue_slots =
- priv->queue_format == GVE_DQO_RDA_FORMAT ?
- priv->options_dqo_rda.rx_buff_ring_entries : priv->rx_desc_cnt;
- const u32 completion_queue_slots = priv->rx_desc_cnt;
+ const u32 buffer_queue_slots = cfg->raw_addressing ?
+ priv->options_dqo_rda.rx_buff_ring_entries : cfg->ring_size;
+ const u32 completion_queue_slots = cfg->ring_size;
netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
@@ -274,7 +320,7 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
- rx->dqo.num_buf_states = priv->queue_format == GVE_DQO_RDA_FORMAT ?
+ rx->dqo.num_buf_states = cfg->raw_addressing ?
min_t(s16, S16_MAX, buffer_queue_slots * 4) :
priv->rx_pages_per_qpl;
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
@@ -283,6 +329,11 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!rx->dqo.buf_states)
return -ENOMEM;
+ /* Allocate header buffers for header-split */
+ if (cfg->enable_header_split)
+ if (gve_rx_alloc_hdr_bufs(priv, rx))
+ goto err;
+
/* Set up linked list of buffer IDs */
for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
rx->dqo.buf_states[i].next = i + 1;
@@ -308,8 +359,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!rx->dqo.bufq.desc_ring)
goto err;
- if (priv->queue_format != GVE_DQO_RDA_FORMAT) {
- rx->dqo.qpl = gve_assign_rx_qpl(priv, rx->q_num);
+ if (!cfg->raw_addressing) {
+ rx->dqo.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
if (!rx->dqo.qpl)
goto err;
rx->dqo.next_qpl_page_idx = 0;
@@ -320,12 +371,10 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!rx->q_resources)
goto err;
- gve_rx_add_to_block(priv, idx);
-
return 0;
err:
- gve_rx_free_ring_dqo(priv, idx);
+ gve_rx_free_ring_dqo(priv, rx, cfg);
return -ENOMEM;
}
@@ -337,13 +386,26 @@ void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx)
iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]);
}
-int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
+int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
- int err = 0;
+ struct gve_rx_ring *rx;
+ int err;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- err = gve_rx_alloc_ring_dqo(priv, i);
+ if (!cfg->raw_addressing && !cfg->qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc QPL ring before allocing QPLs\n");
+ return -EINVAL;
+ }
+
+ rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
+ GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->qcfg->num_queues; i++) {
+ err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to alloc rx ring=%d: err=%d\n",
@@ -352,21 +414,30 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
}
}
+ cfg->rx = rx;
return 0;
err:
for (i--; i >= 0; i--)
- gve_rx_free_ring_dqo(priv, i);
-
+ gve_rx_free_ring_dqo(priv, &rx[i], cfg);
+ kvfree(rx);
return err;
}
-void gve_rx_free_rings_dqo(struct gve_priv *priv)
+void gve_rx_free_rings_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
+ struct gve_rx_ring *rx = cfg->rx;
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++)
- gve_rx_free_ring_dqo(priv, i);
+ if (!rx)
+ return;
+
+ for (i = 0; i < cfg->qcfg->num_queues; i++)
+ gve_rx_free_ring_dqo(priv, &rx[i], cfg);
+
+ kvfree(rx);
+ cfg->rx = NULL;
}
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
@@ -404,6 +475,10 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
desc->buf_addr = cpu_to_le64(buf_state->addr +
buf_state->page_info.page_offset);
+ if (rx->dqo.hdr_bufs.data)
+ desc->header_buf_addr =
+ cpu_to_le64(rx->dqo.hdr_bufs.addr +
+ priv->header_buf_size * bufq->tail);
bufq->tail = (bufq->tail + 1) & bufq->mask;
complq->num_free_slots--;
@@ -419,7 +494,7 @@ void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
static void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_buf_state_dqo *buf_state)
{
- const int data_buffer_size = priv->data_buffer_size_dqo;
+ const u16 data_buffer_size = priv->data_buffer_size_dqo;
int pagecount;
/* Can't reuse if we only fit one buffer per page */
@@ -606,13 +681,16 @@ static int gve_rx_append_frags(struct napi_struct *napi,
*/
static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
const struct gve_rx_compl_desc_dqo *compl_desc,
- int queue_idx)
+ u32 desc_idx, int queue_idx)
{
const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
+ const bool hbo = compl_desc->header_buffer_overflow;
const bool eop = compl_desc->end_of_packet != 0;
+ const bool hsplit = compl_desc->split_header;
struct gve_rx_buf_state_dqo *buf_state;
struct gve_priv *priv = rx->gve;
u16 buf_len;
+ u16 hdr_len;
if (unlikely(buffer_id >= rx->dqo.num_buf_states)) {
net_err_ratelimited("%s: Invalid RX buffer_id=%u\n",
@@ -633,12 +711,35 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
}
buf_len = compl_desc->packet_len;
+ hdr_len = compl_desc->header_len;
/* Page might have not been used for awhile and was likely last written
* by a different thread.
*/
prefetch(buf_state->page_info.page);
+ /* Copy the header into the skb in the case of header split */
+ if (hsplit) {
+ int unsplit = 0;
+
+ if (hdr_len && !hbo) {
+ rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
+ rx->dqo.hdr_bufs.data +
+ desc_idx * priv->header_buf_size,
+ hdr_len);
+ if (unlikely(!rx->ctx.skb_head))
+ goto error;
+ rx->ctx.skb_tail = rx->ctx.skb_head;
+ } else {
+ unsplit = 1;
+ }
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_hsplit_pkt++;
+ rx->rx_hsplit_unsplit_pkt += unsplit;
+ rx->rx_hsplit_bytes += hdr_len;
+ u64_stats_update_end(&rx->statss);
+ }
+
/* Sync the portion of dma buffer for CPU to read. */
dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
buf_state->page_info.page_offset,
@@ -781,7 +882,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* Do not read data until we own the descriptor */
dma_rmb();
- err = gve_rx_dqo(napi, rx, compl_desc, rx->q_num);
+ err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
if (err < 0) {
gve_rx_free_skb(rx);
u64_stats_update_begin(&rx->statss);
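
The header-split additions carve one coherent allocation into fixed-size per-slot header buffers: gve_rx_post_buffers_dqo() programs header_buf_addr from the buffer-queue tail, and gve_rx_dqo() copies the header back out using the completion index. A sketch of the addressing, assuming (as the patch appears to) that posting and completion walk the buffers in step; names are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* One coherent block holds mask+1 fixed-size header buffers; both the
 * posted descriptor and the completion handler index it by slot.
 */
static uint64_t hdr_buf_addr(uint64_t base, uint32_t buf_size, uint32_t slot)
{
	return base + (uint64_t)buf_size * slot;
}

int main(void)
{
	uint64_t base = 0x100000;	/* stands in for rx->dqo.hdr_bufs.addr */
	uint32_t buf_size = 128;	/* stands in for priv->header_buf_size */

	printf("slot 5 header buffer at %#llx\n",
	       (unsigned long long)hdr_buf_addr(base, buf_size, 5));
	return 0;
}
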
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 07ba124780df..4b9853adc113 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -196,29 +196,36 @@ static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do, bool try_to_wake);
-static void gve_tx_free_ring(struct gve_priv *priv, int idx)
+void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx];
+
+ if (!gve_tx_was_added_to_block(priv, idx))
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ netdev_tx_reset_queue(tx->netdev_txq);
+ gve_tx_remove_from_block(priv, idx);
+}
+
+static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct gve_tx_alloc_rings_cfg *cfg)
+{
struct device *hdev = &priv->pdev->dev;
+ int idx = tx->q_num;
size_t bytes;
u32 slots;
- gve_tx_remove_from_block(priv, idx);
slots = tx->mask + 1;
- if (tx->q_num < priv->tx_cfg.num_queues) {
- gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
- netdev_tx_reset_queue(tx->netdev_txq);
- } else {
- gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
- }
-
dma_free_coherent(hdev, sizeof(*tx->q_resources),
tx->q_resources, tx->q_resources_bus);
tx->q_resources = NULL;
if (!tx->raw_addressing) {
gve_tx_fifo_release(priv, &tx->tx_fifo);
- gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
tx->tx_fifo.qpl = NULL;
}
@@ -232,11 +239,23 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}
-static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
+void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx)
{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx];
+
+ gve_tx_add_to_block(priv, idx);
+
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ gve_add_napi(priv, ntfy_idx, gve_napi_poll);
+}
+
+static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg,
+ struct gve_tx_ring *tx,
+ int idx)
+{
struct device *hdev = &priv->pdev->dev;
- u32 slots = priv->tx_desc_cnt;
size_t bytes;
/* Make sure everything is zeroed to start */
@@ -245,23 +264,23 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
spin_lock_init(&tx->xdp_lock);
tx->q_num = idx;
- tx->mask = slots - 1;
+ tx->mask = cfg->ring_size - 1;
/* alloc metadata */
- tx->info = vcalloc(slots, sizeof(*tx->info));
+ tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info));
if (!tx->info)
return -ENOMEM;
/* alloc tx queue */
- bytes = sizeof(*tx->desc) * slots;
+ bytes = sizeof(*tx->desc) * cfg->ring_size;
tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
if (!tx->desc)
goto abort_with_info;
- tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
- tx->dev = &priv->pdev->dev;
+ tx->raw_addressing = cfg->raw_addressing;
+ tx->dev = hdev;
if (!tx->raw_addressing) {
- tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx);
+ tx->tx_fifo.qpl = gve_assign_tx_qpl(cfg, idx);
if (!tx->tx_fifo.qpl)
goto abort_with_desc;
/* map Tx FIFO */
@@ -277,12 +296,6 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
if (!tx->q_resources)
goto abort_with_fifo;
- netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
- (unsigned long)tx->bus);
- if (idx < priv->tx_cfg.num_queues)
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
- gve_tx_add_to_block(priv, idx);
-
return 0;
abort_with_fifo:
@@ -290,7 +303,7 @@ abort_with_fifo:
gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
if (!tx->raw_addressing)
- gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -300,36 +313,73 @@ abort_with_info:
return -ENOMEM;
}
-int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings)
+int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
{
+ struct gve_tx_ring *tx = cfg->tx;
int err = 0;
- int i;
+ int i, j;
+
+ if (!cfg->raw_addressing && !cfg->qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc QPL ring before allocing QPLs\n");
+ return -EINVAL;
+ }
+
+ if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc more than the max num of Tx rings\n");
+ return -EINVAL;
+ }
+
+ if (cfg->start_idx == 0) {
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
+ } else if (!tx) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc tx rings from a nonzero start idx without tx array\n");
+ return -EINVAL;
+ }
- for (i = start_id; i < start_id + num_rings; i++) {
- err = gve_tx_alloc_ring(priv, i);
+ for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to alloc tx ring=%d: err=%d\n",
i, err);
- break;
+ goto cleanup;
}
}
- /* Unallocate if there was an error */
- if (err) {
- int j;
- for (j = start_id; j < i; j++)
- gve_tx_free_ring(priv, j);
- }
+ cfg->tx = tx;
+ return 0;
+
+cleanup:
+ for (j = 0; j < i; j++)
+ gve_tx_free_ring_gqi(priv, &tx[j], cfg);
+ if (cfg->start_idx == 0)
+ kvfree(tx);
return err;
}
-void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings)
+void gve_tx_free_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
{
+ struct gve_tx_ring *tx = cfg->tx;
int i;
- for (i = start_id; i < start_id + num_rings; i++)
- gve_tx_free_ring(priv, i);
+ if (!tx)
+ return;
+
+ for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ gve_tx_free_ring_gqi(priv, &tx[i], cfg);
+
+ if (cfg->start_idx == 0) {
+ kvfree(tx);
+ cfg->tx = NULL;
+ }
}
/* gve_tx_avail - Calculates the number of slots available in the ring
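
cfg->start_idx exists so the tx array can be grown in place (for example, XDP rings appended after the regular netdev queues) while only the start_idx == 0 caller allocates or frees the array itself. A standalone sketch of that contract, with hypothetical names:

#include <stdlib.h>

struct tx_ring { int q_num; };

/* Initialize rings [start, start + n) in an array sized for max_queues.
 * Only the start == 0 caller creates the array; later callers extend
 * it in place, mirroring gve_tx_alloc_rings_gqi().
 */
int tx_rings_alloc(struct tx_ring **arr, int max_queues, int start, int n)
{
	struct tx_ring *tx = *arr;
	int i;

	if (start + n > max_queues)
		return -1;
	if (start == 0) {
		tx = calloc(max_queues, sizeof(*tx));
		if (!tx)
			return -1;
	} else if (!tx) {
		return -1;	/* cannot extend an array that was never made */
	}

	for (i = start; i < start + n; i++)
		tx[i].q_num = i;
	*arr = tx;
	return 0;
}
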
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index f59c4710f118..bc34b6cd3a3e 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -188,13 +188,27 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
}
}
-static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
+void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx];
- struct device *hdev = &priv->pdev->dev;
- size_t bytes;
+ if (!gve_tx_was_added_to_block(priv, idx))
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+ gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
+ netdev_tx_reset_queue(tx->netdev_txq);
+ gve_tx_clean_pending_packets(tx);
gve_tx_remove_from_block(priv, idx);
+}
+
+static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct gve_tx_alloc_rings_cfg *cfg)
+{
+ struct device *hdev = &priv->pdev->dev;
+ int idx = tx->q_num;
+ size_t bytes;
if (tx->q_resources) {
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -223,7 +237,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
tx->dqo.tx_qpl_buf_next = NULL;
if (tx->dqo.qpl) {
- gve_unassign_qpl(priv, tx->dqo.qpl->id);
+ gve_unassign_qpl(cfg->qpl_cfg, tx->dqo.qpl->id);
tx->dqo.qpl = NULL;
}
@@ -253,9 +267,22 @@ static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx)
return 0;
}
-static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
+void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx)
{
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx];
+
+ gve_tx_add_to_block(priv, idx);
+
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
+}
+
+static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg,
+ struct gve_tx_ring *tx,
+ int idx)
+{
struct device *hdev = &priv->pdev->dev;
int num_pending_packets;
size_t bytes;
@@ -263,12 +290,11 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
memset(tx, 0, sizeof(*tx));
tx->q_num = idx;
- tx->dev = &priv->pdev->dev;
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ tx->dev = hdev;
atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
/* Queue sizes must be a power of 2 */
- tx->mask = priv->tx_desc_cnt - 1;
+ tx->mask = cfg->ring_size - 1;
tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
priv->options_dqo_rda.tx_comp_ring_entries - 1 :
tx->mask;
@@ -327,8 +353,8 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!tx->q_resources)
goto err;
- if (gve_is_qpl(priv)) {
- tx->dqo.qpl = gve_assign_tx_qpl(priv, idx);
+ if (!cfg->raw_addressing) {
+ tx->dqo.qpl = gve_assign_tx_qpl(cfg, idx);
if (!tx->dqo.qpl)
goto err;
@@ -336,22 +362,45 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
goto err;
}
- gve_tx_add_to_block(priv, idx);
-
return 0;
err:
- gve_tx_free_ring_dqo(priv, idx);
+ gve_tx_free_ring_dqo(priv, tx, cfg);
return -ENOMEM;
}
-int gve_tx_alloc_rings_dqo(struct gve_priv *priv)
+int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
{
+ struct gve_tx_ring *tx = cfg->tx;
int err = 0;
- int i;
+ int i, j;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- err = gve_tx_alloc_ring_dqo(priv, i);
+ if (!cfg->raw_addressing && !cfg->qpls) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc QPL ring before allocing QPLs\n");
+ return -EINVAL;
+ }
+
+ if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc more than the max num of Tx rings\n");
+ return -EINVAL;
+ }
+
+ if (cfg->start_idx == 0) {
+ tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+ GFP_KERNEL);
+ if (!tx)
+ return -ENOMEM;
+ } else if (!tx) {
+ netif_err(priv, drv, priv->dev,
+ "Cannot alloc tx rings from a nonzero start idx without tx array\n");
+ return -EINVAL;
+ }
+
+ for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+ err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to alloc tx ring=%d: err=%d\n",
@@ -360,27 +409,32 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv)
}
}
+ cfg->tx = tx;
return 0;
err:
- for (i--; i >= 0; i--)
- gve_tx_free_ring_dqo(priv, i);
-
+ for (j = 0; j < i; j++)
+ gve_tx_free_ring_dqo(priv, &tx[j], cfg);
+ if (cfg->start_idx == 0)
+ kvfree(tx);
return err;
}
-void gve_tx_free_rings_dqo(struct gve_priv *priv)
+void gve_tx_free_rings_dqo(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
{
+ struct gve_tx_ring *tx = cfg->tx;
int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- struct gve_tx_ring *tx = &priv->tx[i];
+ if (!tx)
+ return;
- gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
- netdev_tx_reset_queue(tx->netdev_txq);
- gve_tx_clean_pending_packets(tx);
+ for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+ gve_tx_free_ring_dqo(priv, &tx[i], cfg);
- gve_tx_free_ring_dqo(priv, i);
+ if (cfg->start_idx == 0) {
+ kvfree(tx);
+ cfg->tx = NULL;
}
}
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 26e08d753270..2349750075a5 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -8,6 +8,14 @@
#include "gve_adminq.h"
#include "gve_utils.h"
+bool gve_tx_was_added_to_block(struct gve_priv *priv, int queue_idx)
+{
+ struct gve_notify_block *block =
+ &priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];
+
+ return block->tx != NULL;
+}
+
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
struct gve_notify_block *block =
@@ -30,6 +38,14 @@ void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
queue_idx);
}
+bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx)
+{
+ struct gve_notify_block *block =
+ &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];
+
+ return block->rx != NULL;
+}
+
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
struct gve_notify_block *block =
@@ -48,11 +64,9 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
rx->ntfy_id = ntfy_idx;
}
-struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
- struct gve_rx_slot_page_info *page_info, u16 len)
+struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
+ u8 *data, u16 len)
{
- void *va = page_info->page_address + page_info->page_offset +
- page_info->pad;
struct sk_buff *skb;
skb = napi_alloc_skb(napi, len);
@@ -60,12 +74,21 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
return NULL;
__skb_put(skb, len);
- skb_copy_to_linear_data_offset(skb, 0, va, len);
+ skb_copy_to_linear_data_offset(skb, 0, data, len);
skb->protocol = eth_type_trans(skb, dev);
return skb;
}
+struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
+ struct gve_rx_slot_page_info *page_info, u16 len)
+{
+ void *va = page_info->page_address + page_info->page_offset +
+ page_info->pad;
+
+ return gve_rx_copy_data(dev, napi, va, len);
+}
+
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
page_info->pagecnt_bias--;
@@ -81,3 +104,18 @@ void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
page_ref_add(page_info->page, INT_MAX - pagecount);
}
}
+
+void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
+ int (*gve_poll)(struct napi_struct *, int))
+{
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ netif_napi_add(priv->dev, &block->napi, gve_poll);
+}
+
+void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
+{
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ netif_napi_del(&block->napi);
+}
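
These helpers let the new stop paths be idempotent: each gve_*_stop_ring_*() tests gve_*_was_added_to_block() before removing NAPI, so stopping a ring that never started (say, after a failed allocation) is a no-op. A reduced sketch of that guard, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

struct block { void *ring; };	/* non-NULL while a ring is attached */

static bool ring_was_added(const struct block *b)
{
	return b->ring != NULL;
}

/* Safe to call on a ring that never started: the membership test makes
 * teardown idempotent, as in gve_rx_stop_ring_gqi().
 */
static void ring_stop(struct block *b)
{
	if (!ring_was_added(b))
		return;
	/* netif_napi_del() and block removal would go here */
	b->ring = NULL;
}
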
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
index 324fd98a6112..bf2e9a0adb36 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.h
+++ b/drivers/net/ethernet/google/gve/gve_utils.h
@@ -11,17 +11,25 @@
#include "gve.h"
+bool gve_tx_was_added_to_block(struct gve_priv *priv, int queue_idx);
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx);
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx);
+bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx);
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
+struct sk_buff *gve_rx_copy_data(struct net_device *dev, struct napi_struct *napi,
+ u8 *data, u16 len);
+
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len);
/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
+void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
+ int (*gve_poll)(struct napi_struct *, int));
+void gve_remove_napi(struct gve_priv *priv, int ntfy_idx);
#endif /* _GVE_UTILS_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 8a1027ad340d..d4293f76d69d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -12,7 +12,9 @@
#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
-static struct class *hnae_class;
+static const struct class hnae_class = {
+ .name = "hnae",
+};
static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
@@ -111,7 +113,7 @@ static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
WARN_ON(!fwnode);
- dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);
+ dev = class_find_device(&hnae_class, NULL, fwnode, __ae_match);
return dev ? cls_to_ae_dev(dev) : NULL;
}
@@ -415,7 +417,7 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
hdev->owner = owner;
hdev->id = (int)atomic_inc_return(&id);
hdev->cls_dev.parent = hdev->dev;
- hdev->cls_dev.class = hnae_class;
+ hdev->cls_dev.class = &hnae_class;
hdev->cls_dev.release = hnae_release;
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
ret = device_register(&hdev->cls_dev);
@@ -448,13 +450,12 @@ EXPORT_SYMBOL(hnae_ae_unregister);
static int __init hnae_init(void)
{
- hnae_class = class_create("hnae");
- return PTR_ERR_OR_ZERO(hnae_class);
+ return class_register(&hnae_class);
}
static void __exit hnae_exit(void)
{
- class_destroy(hnae_class);
+ class_unregister(&hnae_class);
}
subsys_initcall(hnae_init);
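
The hnae change follows the tree-wide move from class_create() to a statically defined const struct class: registration returns an errno rather than a pointer that must be checked with PTR_ERR_OR_ZERO(). A minimal illustrative module using the same pattern:

#include <linux/device/class.h>
#include <linux/module.h>

static const struct class demo_class = {
	.name = "demo",
};

static int __init demo_init(void)
{
	/* was: cls = class_create("demo"); return PTR_ERR_OR_ZERO(cls); */
	return class_register(&demo_class);
}

static void __exit demo_exit(void)
{
	class_unregister(&demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
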
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index d7e175a9cb49..f19f1e1d1f9f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -388,6 +388,7 @@ struct hnae3_dev_specs {
u16 mc_mac_size;
u32 mac_stats_num;
u8 tnl_num;
+ u8 hilink_version;
};
struct hnae3_client_ops {
@@ -819,6 +820,7 @@ struct hnae3_tc_info {
u8 max_tc; /* Total number of TCs */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
+ bool mqprio_destroy;
bool dcb_ets_active;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index d92ad6082d8e..652d71326231 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -351,7 +351,7 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
{
static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
- {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
+ {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT},
};
u32 i;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 533c19d25e4f..552396518e08 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -55,7 +55,7 @@
#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3
#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT 30000
-#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS 500000
+#define HCLGE_COMM_CMDQ_CFG_RST_TIMEOUT 1000000
enum hclge_opcode_type {
/* Generic commands */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index 3b6dbf158b98..f72dc0cee30e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -76,7 +76,7 @@ static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
if (hns3_nic_resetting(ndev))
return -EBUSY;
- if (h->kinfo.dcb_ops->ieee_setapp)
+ if (h->kinfo.dcb_ops->ieee_delapp)
return h->kinfo.dcb_ops->ieee_delapp(h, app);
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index c083d1d10767..807eb3bbb11c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1097,6 +1097,8 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
*pos += scnprintf(buf + *pos, len - *pos,
"TX timeout threshold: %d seconds\n",
dev->watchdog_timeo / HZ);
+ *pos += scnprintf(buf + *pos, len - *pos, "Hilink Version: %u\n",
+ dev_specs->hilink_version);
}
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index f1695c889d3a..19668a8d22f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2473,9 +2473,9 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb,
return features;
if (skb->encapsulation)
- len = skb_inner_transport_header(skb) - skb->data;
+ len = skb_inner_transport_offset(skb);
else
- len = skb_transport_header(skb) - skb->data;
+ len = skb_transport_offset(skb);
/* Assume L4 is 60 bytes as TCP is the only protocol with a
* flexible value, and its max len is 60 bytes.
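
skb_transport_offset() and skb_inner_transport_offset() are the canonical spellings of "header start minus skb->data"; the hunk above replaces the open-coded pointer arithmetic with them. A reduced model of what the helper computes (struct fields simplified; real skbs store the header as an offset already):

struct sk_buff_mini {
	unsigned char *data;
	unsigned char *transport_header;
};

/* Mirrors skb_transport_offset(): the L4 header's byte offset from the
 * start of the linear data.
 */
static inline int transport_offset(const struct sk_buff_mini *skb)
{
	return skb->transport_header - skb->data;
}
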
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 4d15eb73b972..9bb708fa42f2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -828,7 +828,8 @@ struct hclge_dev_specs_1_cmd {
__le16 mc_mac_size;
u8 rsv1[6];
u8 tnl_num;
- u8 rsv2[5];
+ u8 hilink_version;
+ u8 rsv2[4];
};
/* mac speed type defined in firmware command */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index b98301e205f7..eabbacb1c714 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -619,6 +619,8 @@ static int hclge_setup_tc(struct hnae3_handle *h,
return ret;
}
+ kinfo->tc_info.mqprio_destroy = !tc;
+
ret = hclge_notify_down_uinit(hdev);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 5ea9e59569ef..b4afb66efe5c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -645,8 +645,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count += 1;
- handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ if (hdev->ae_dev->dev_specs.hilink_version !=
+ HCLGE_HILINK_H60) {
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ }
+
count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
count += 1;
@@ -884,7 +888,7 @@ static const struct hclge_speed_bit_map speed_bit_map[] = {
{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
- {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
+ {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS},
};
static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
@@ -940,7 +944,7 @@ static void hclge_update_fec_support(struct hclge_mac *mac)
mac->supported);
}
-static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = {
+static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = {
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
@@ -948,10 +952,12 @@ static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = {
{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
- {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
};
-static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = {
+static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = {
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
@@ -959,11 +965,13 @@ static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = {
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
{HCLGE_SUPPORT_100G_R2_BIT,
ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
- {HCLGE_SUPPORT_200G_BIT,
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT,
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
};
-static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = {
+static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = {
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
@@ -971,10 +979,12 @@ static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = {
{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
- {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
};
-static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = {
+static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = {
{HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
@@ -983,7 +993,9 @@ static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = {
{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
- {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_EXT_BIT,
+ ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
};
static void hclge_convert_setting_sr(u16 speed_ability,
@@ -1154,7 +1166,7 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
static u32 hclge_get_max_speed(u16 speed_ability)
{
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ if (speed_ability & HCLGE_SUPPORT_200G_BITS)
return HCLGE_MAC_SPEED_200G;
if (speed_ability & HCLGE_SUPPORT_100G_BITS)
@@ -1350,6 +1362,7 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
ae_dev->dev_specs.tnl_num = req1->tnl_num;
+ ae_dev->dev_specs.hilink_version = req1->hilink_version;
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -2890,7 +2903,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
int ret;
hdev->support_sfp_query = true;
- hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
+ if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
if (ret)
@@ -12092,6 +12108,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ hclge_reset_tc_config(hdev);
+
ret = hclge_tm_init_hw(hdev, true);
if (ret) {
dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 51979cf71262..e821dd2f1528 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -191,9 +191,10 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
-#define HCLGE_SUPPORT_200G_BIT BIT(8)
+#define HCLGE_SUPPORT_200G_R4_EXT_BIT BIT(8)
#define HCLGE_SUPPORT_50G_R1_BIT BIT(9)
#define HCLGE_SUPPORT_100G_R2_BIT BIT(10)
+#define HCLGE_SUPPORT_200G_R4_BIT BIT(11)
#define HCLGE_SUPPORT_GE \
(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
@@ -201,6 +202,8 @@ enum HLCGE_PORT_TYPE {
(HCLGE_SUPPORT_50G_R2_BIT | HCLGE_SUPPORT_50G_R1_BIT)
#define HCLGE_SUPPORT_100G_BITS \
(HCLGE_SUPPORT_100G_R4_BIT | HCLGE_SUPPORT_100G_R2_BIT)
+#define HCLGE_SUPPORT_200G_BITS \
+ (HCLGE_SUPPORT_200G_R4_EXT_BIT | HCLGE_SUPPORT_200G_R4_BIT)
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
@@ -253,6 +256,12 @@ enum HCLGE_MAC_DUPLEX {
HCLGE_MAC_FULL
};
+/* hilink version */
+enum hclge_hilink_version {
+ HCLGE_HILINK_H32 = 0,
+ HCLGE_HILINK_H60 = 1,
+};
+
#define QUERY_SFP_SPEED 0
#define QUERY_ACTIVE_SPEED 1
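
With BIT(8) reinterpreted as the R4_EXT variant and BIT(11) added for R4, any "is 200G supported" test must use the combined mask or it silently drops one lane variant, which is what the hclge_get_max_speed() hunk fixes. A standalone sketch:

#include <stdbool.h>
#include <stdint.h>

#define SUPPORT_200G_R4_EXT	(1u << 8)
#define SUPPORT_200G_R4		(1u << 11)
#define SUPPORT_200G_BITS	(SUPPORT_200G_R4_EXT | SUPPORT_200G_R4)

/* Either firmware-reported variant counts as 200G capable. */
static bool supports_200g(uint16_t speed_ability)
{
	return speed_ability & SUPPORT_200G_BITS;
}
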
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 4b0d07ca2505..d4a0e0be7a72 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -1123,10 +1123,11 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
- if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+ if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B) ||
+ req->mbx_src_vfid > hdev->num_req_vfs)) {
dev_warn(&hdev->pdev->dev,
- "dropped invalid mailbox message, code = %u\n",
- req->msg.code);
+ "dropped invalid mailbox message, code = %u, vfid = %u\n",
+ req->msg.code, req->mbx_src_vfid);
/* dropping/not processing this invalid message */
crq->desc[crq->next_to_use].flag = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 80a2a0073d97..507d7ce26d83 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -108,7 +108,7 @@ void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
u64 ns = nsec;
u32 sec_h;
- if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
+ if (!hdev->ptp || !test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
return;
/* Since the BD does not have enough space for the higher 16 bits of
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index c58c31221762..00c3f2548bf6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -2143,3 +2143,19 @@ int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable)
return ret;
}
+
+void hclge_reset_tc_config(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_knic_private_info *kinfo;
+
+ kinfo = &vport->nic.kinfo;
+
+ if (!kinfo->tc_info.mqprio_destroy)
+ return;
+
+ /* clear tc info, including mqprio_destroy and mqprio_active */
+ memset(&kinfo->tc_info, 0, sizeof(kinfo->tc_info));
+ hclge_tm_schd_info_update(hdev, 0);
+ hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 53eec6df5194..0985916629d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -277,4 +277,5 @@ int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
int hclge_up_to_tc_map(struct hclge_dev *hdev);
int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable);
+void hclge_reset_tc_config(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 5e27470c6b1e..f2d4669c81cf 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -987,7 +987,7 @@ static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue)
{
#ifdef DEBUG
printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
- printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
+ printk("%s: command-stats: %04x\n", dev->name, swab16(p->xmit_cmds[0]->cmd_status));
printk("%s: check, whether you set the right interrupt number!\n",dev->name);
#endif
sun3_82586_close(dev);
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index d55638ad8704..639fbb12bd35 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -368,6 +368,15 @@ config IGC
To compile this driver as a module, choose M here. The module
will be called igc.
+
+config IGC_LEDS
+ def_bool LEDS_TRIGGER_NETDEV
+ depends on IGC && LEDS_CLASS
+ depends on LEDS_CLASS=y || IGC=m
+ help
+ Optional support for controlling the NIC LEDs with the netdev
+ LED trigger.
+
config IDPF
tristate "Intel(R) Infrastructure Data Path Function Support"
depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 01f0f12035ca..3fcb8daaa243 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -171,8 +171,8 @@ static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
-module_param(eeprom_bad_csum_allow, int, 0);
-module_param(use_io, int, 0);
+module_param(eeprom_bad_csum_allow, int, 0444);
+module_param(use_io, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
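
Changing the permission argument from 0 to 0444 makes the parameters visible (read-only) under /sys/module/e100/parameters/; perm 0 hides them from sysfs entirely. An illustrative fragment:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int use_io;
/* 0444: world-readable in /sys/module/<mod>/parameters/, never writable
 * at runtime; 0 would create no sysfs entry at all.
 */
module_param(use_io, int, 0444);
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
MODULE_LICENSE("GPL");
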
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index fc0f98ea6133..dc553c51d79a 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -2186,7 +2186,7 @@ static int e1000_get_rxnfc(struct net_device *netdev,
}
}
-static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2223,16 +2223,16 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
if (ret_val)
goto release;
- edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->supported, phy_data);
/* EEE Advertised */
- edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised, adapter->eee_advert);
/* EEE Link Partner Advertised */
ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
if (ret_val)
goto release;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
/* EEE PCS Status */
ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
@@ -2262,11 +2262,13 @@ release:
return ret_val;
}
-static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct e1000_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
s32 ret_val;
ret_val = e1000e_get_eee(netdev, &eee_curr);
@@ -2283,12 +2285,17 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EINVAL;
}
- if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+
+ if (linkmode_andnot(tmp, edata->advertised, supported)) {
e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n");
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
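
ethtool_keee replaces the old u32 advertisement words with full linkmode bitmaps, so the "advertising anything unsupported?" check becomes a bitmap set-difference instead of `advertised & ~mask`. A kernel-style sketch of the test the hunk performs:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/linkmode.h>

/* Reject any advertised mode outside the two this MAC supports. */
static int validate_eee_advert(const unsigned long *advertised)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
	__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};

	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);

	/* non-zero iff advertised has a bit that supported lacks */
	if (linkmode_andnot(tmp, advertised, supported))
		return -EINVAL;
	return 0;
}
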
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index a2788fd5f8bb..19e450a5bd31 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2559,7 +2559,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
(u16)(mac_reg & 0xFFFF));
hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
- FIELD_GET(E1000_RAH_AV, mac_reg));
+ (u16)((mac_reg & E1000_RAH_AV) >> 16));
}
e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index af5d9d97a0d6..cc8c531ec3df 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6688,14 +6688,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (adapter->hw.phy.type == e1000_phy_igp_3) {
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
} else if (hw->mac.type >= e1000_pch_lpt) {
- if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) {
/* ULP does not support wake from unicast, multicast
* or broadcast.
*/
retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
-
- if (retval)
- return retval;
+ if (retval)
+ return retval;
+ }
}
/* Ensure that the appropriate bits are set in LPI_CTRL
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 9b701615c7c6..ba24f3fa92c3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -687,6 +687,54 @@ struct i40e_pf {
};
/**
+ * __i40e_pf_next_vsi - get next valid VSI
+ * @pf: pointer to the PF struct
+ * @idx: pointer to start position number
+ *
+ * Find and return next non-NULL VSI pointer in pf->vsi array and
+ * update the idx position. Returns NULL if no VSI is found.
+ **/
+static __always_inline struct i40e_vsi *
+__i40e_pf_next_vsi(struct i40e_pf *pf, int *idx)
+{
+ while (*idx < pf->num_alloc_vsi) {
+ if (pf->vsi[*idx])
+ return pf->vsi[*idx];
+ (*idx)++;
+ }
+ return NULL;
+}
+
+#define i40e_pf_for_each_vsi(_pf, _i, _vsi) \
+ for (_i = 0, _vsi = __i40e_pf_next_vsi(_pf, &_i); \
+ _vsi; \
+ _i++, _vsi = __i40e_pf_next_vsi(_pf, &_i))
+
+/**
+ * __i40e_pf_next_veb - get next valid VEB
+ * @pf: pointer to the PF struct
+ * @idx: pointer to start position number
+ *
+ * Find and return next non-NULL VEB pointer in pf->veb array and
+ * update the idx position. Returns NULL if no VEB is found.
+ **/
+static __always_inline struct i40e_veb *
+__i40e_pf_next_veb(struct i40e_pf *pf, int *idx)
+{
+ while (*idx < I40E_MAX_VEB) {
+ if (pf->veb[*idx])
+ return pf->veb[*idx];
+ (*idx)++;
+ }
+ return NULL;
+}
+
+#define i40e_pf_for_each_veb(_pf, _i, _veb) \
+ for (_i = 0, _veb = __i40e_pf_next_veb(_pf, &_i); \
+ _veb; \
+ _i++, _veb = __i40e_pf_next_veb(_pf, &_i))
+
+/**
* i40e_mac_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
* @macaddr: the MAC Address as the base key
*
@@ -735,7 +783,6 @@ struct i40e_new_mac_filter {
struct i40e_veb {
struct i40e_pf *pf;
u16 idx;
- u16 veb_idx; /* index of VEB parent */
u16 seid;
u16 uplink_seid;
u16 stats_idx; /* index of VEB parent */
@@ -1120,14 +1167,12 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
static inline struct i40e_vsi *
i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- struct i40e_vsi *vsi = pf->vsi[i];
-
- if (vsi && vsi->type == type)
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->type == type)
return vsi;
- }
return NULL;
}
@@ -1309,4 +1354,40 @@ static inline struct i40e_pf *i40e_hw_to_pf(struct i40e_hw *hw)
struct device *i40e_hw_to_dev(struct i40e_hw *hw);
+/**
+ * i40e_pf_get_vsi_by_seid - find VSI by SEID
+ * @pf: pointer to a PF
+ * @seid: SEID of the VSI
+ **/
+static inline struct i40e_vsi *
+i40e_pf_get_vsi_by_seid(struct i40e_pf *pf, u16 seid)
+{
+ struct i40e_vsi *vsi;
+ int i;
+
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->seid == seid)
+ return vsi;
+
+ return NULL;
+}
+
+/**
+ * i40e_pf_get_veb_by_seid - find VEB by SEID
+ * @pf: pointer to a PF
+ * @seid: SEID of the VEB
+ **/
+static inline struct i40e_veb *
+i40e_pf_get_veb_by_seid(struct i40e_pf *pf, u16 seid)
+{
+ struct i40e_veb *veb;
+ int i;
+
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->seid == seid)
+ return veb;
+
+ return NULL;
+}
+
#endif /* _I40E_H_ */
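
The new iterators skip NULL slots inside the helper, which is what lets the callers below drop their `if (pf->vsi[i])` guards. A runnable model of the same shape (array and names invented for illustration):

#include <stdio.h>

#define MAX_SLOTS 8

static int *next_valid(int **slots, int *idx)
{
	while (*idx < MAX_SLOTS) {
		if (slots[*idx])
			return slots[*idx];
		(*idx)++;
	}
	return NULL;
}

/* Same shape as i40e_pf_for_each_vsi(): the helper advances past NULL
 * entries, so the loop body only ever sees valid pointers.
 */
#define for_each_valid(slots, i, p) \
	for (i = 0, p = next_valid(slots, &i); p; i++, p = next_valid(slots, &i))

int main(void)
{
	int a = 1, b = 2;
	int *slots[MAX_SLOTS] = { [2] = &a, [5] = &b };
	int *p;
	int i;

	for_each_valid(slots, i, p)
		printf("slot %d -> %d\n", i, *p);
	return 0;
}
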
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 306758428aef..b32071ee84af 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -148,8 +148,6 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
u32 reg_idx;
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1);
wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
}
@@ -576,8 +574,6 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
for (i = 0; i < qvlist_info->num_vectors; i++) {
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
v_idx = qv_info->v_idx;
/* Validate vector id belongs to this client */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 9d88ed6105fd..8db1eb0c1768 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1523,7 +1523,7 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
reg = rd32(hw, I40E_PRTDCB_RETSTCC(i));
reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
- I40E_PRTDCB_RETSTCC_ETSTC_SHIFT);
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK);
reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_BWSHARE_MASK,
bw_share[i]);
reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 6b60dc9b7736..d76497566e40 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -43,7 +43,7 @@
#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
#define I40E_LLDP_TLV_OUI_SHIFT 8
-#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
+#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFFU << I40E_LLDP_TLV_OUI_SHIFT)
/* Defines for IEEE ETS TLV */
#define I40E_IEEE_ETS_MAXTC_SHIFT 0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index b96a92187ab3..8aa43aefe84c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -947,16 +947,16 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi,
static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_dcb_app_priority_table *app)
{
+ struct i40e_vsi *vsi;
int v, err;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v] && pf->vsi[v]->netdev) {
- err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ if (vsi->netdev) {
+ err = i40e_dcbnl_vsi_del_app(vsi, app);
dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
- pf->vsi[v]->seid, err, app->selector,
+ vsi->seid, err, app->selector,
app->protocolid, app->priority);
}
- }
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index ef70ddbe9c2f..f9ba45f596c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -24,31 +24,13 @@ enum ring_type {
**/
static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
{
- int i;
-
- if (seid < 0)
+ if (seid < 0) {
dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
- else
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
- return pf->vsi[i];
-
- return NULL;
-}
-/**
- * i40e_dbg_find_veb - searches for the veb with the given seid
- * @pf: the PF structure to search for the veb
- * @seid: seid of the veb it is searching for
- **/
-static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
-{
- int i;
+ return NULL;
+ }
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == seid)
- return pf->veb[i];
- return NULL;
+ return i40e_pf_get_vsi_by_seid(pf, seid);
}
/**************************************************************
@@ -653,12 +635,11 @@ out:
**/
static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i])
- dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
- i, pf->vsi[i]->seid);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, vsi->seid);
}
/**
@@ -696,15 +677,14 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
{
struct i40e_veb *veb;
- veb = i40e_dbg_find_veb(pf, seid);
+ veb = i40e_pf_get_veb_by_seid(pf, seid);
if (!veb) {
dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
return;
}
dev_info(&pf->pdev->dev,
- "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
- veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
- veb->uplink_seid,
+ "veb idx=%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
+ veb->idx, veb->stats_idx, veb->seid, veb->uplink_seid,
veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
i40e_dbg_dump_eth_stats(pf, &veb->stats);
}
@@ -718,11 +698,8 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
struct i40e_veb *veb;
int i;
- for (i = 0; i < I40E_MAX_VEB; i++) {
- veb = pf->veb[i];
- if (veb)
- i40e_dbg_dump_veb_seid(pf, veb->seid);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ i40e_dbg_dump_veb_seid(pf, veb->seid);
}
/**
@@ -851,10 +828,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "add relay", 9) == 0) {
struct i40e_veb *veb;
- int uplink_seid, i;
+ u8 enabled_tc = 0x1;
+ int uplink_seid;
cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
- if (cnt != 2) {
+ if (cnt == 0) {
+ uplink_seid = 0;
+ vsi_seid = 0;
+ } else if (cnt != 2) {
dev_info(&pf->pdev->dev,
"add relay: bad command string, cnt=%d\n",
cnt);
@@ -866,33 +847,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- vsi = i40e_dbg_find_vsi(pf, vsi_seid);
- if (!vsi) {
- dev_info(&pf->pdev->dev,
- "add relay: VSI %d not found\n", vsi_seid);
- goto command_write_done;
- }
-
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
- break;
- if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
- uplink_seid != pf->mac_seid) {
+ if (uplink_seid != 0 && uplink_seid != pf->mac_seid) {
dev_info(&pf->pdev->dev,
"add relay: relay uplink %d not found\n",
uplink_seid);
goto command_write_done;
+ } else if (uplink_seid) {
+ vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "add relay: VSI %d not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+ enabled_tc = vsi->tc_config.enabled_tc;
+ } else if (vsi_seid) {
+ dev_info(&pf->pdev->dev,
+ "add relay: VSI must be 0 for floating relay\n");
+ goto command_write_done;
}
- veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
- vsi->tc_config.enabled_tc);
+ veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc);
if (veb)
dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
else
dev_info(&pf->pdev->dev, "add relay failed\n");
} else if (strncmp(cmd_buf, "del relay", 9) == 0) {
+ struct i40e_veb *veb;
int i;
+
cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev,
@@ -906,9 +890,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
/* find the veb */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->seid == veb_seid)
break;
+
if (i >= I40E_MAX_VEB) {
dev_info(&pf->pdev->dev,
"del relay: relay %d not found\n", veb_seid);
@@ -916,7 +901,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
- i40e_veb_release(pf->veb[i]);
+ i40e_veb_release(veb);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
unsigned int v;
int ret;
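
Net effect of the `add relay` rework above: invoking the command with no arguments leaves both SEIDs at zero, which the setup path treats as a request for a floating VEB. The resulting call reduces to:

    /* uplink_seid == 0 and vsi_seid == 0 select a floating relay;
     * enabled_tc keeps its default of TC0 only (0x1)
     */
    veb = i40e_veb_setup(pf, 0 /* flags */, 0 /* uplink_seid */,
                         0 /* vsi_seid */, 0x1 /* enabled_tc */);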
@@ -1251,8 +1236,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (cnt == 0) {
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- i40e_vsi_reset_stats(pf->vsi[i]);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ i40e_vsi_reset_stats(vsi);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
} else if (cnt == 1) {
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index c841779713f6..42e7e6cdaa6d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -5644,7 +5644,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
-static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp phy_cfg;
@@ -5664,16 +5664,12 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (phy_cfg.eee_capability == 0)
return -EOPNOTSUPP;
- edata->supported = SUPPORTED_Autoneg;
- edata->lp_advertised = edata->supported;
-
/* Get current configuration */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
if (status)
return -EAGAIN;
- edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U;
- edata->eee_enabled = !!edata->advertised;
+ edata->eee_enabled = !!phy_cfg.eee_capability;
edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status;
@@ -5682,7 +5678,7 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
}
static int i40e_is_eee_param_supported(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5691,7 +5687,6 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
u32 value;
const char *name;
} param[] = {
- {edata->advertised & ~SUPPORTED_Autoneg, "advertise"},
{edata->tx_lpi_timer, "tx-timer"},
{edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
};
@@ -5709,7 +5704,7 @@ static int i40e_is_eee_param_supported(struct net_device *netdev,
return 0;
}
-static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int i40e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
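
Background for the ethtool_eee to ethtool_keee conversion in the hunks above: keee expresses link modes as linkmode bitmaps rather than the legacy u32 SUPPORTED_*/ADVERTISED_* masks, which is why the SUPPORTED_Autoneg assignments are dropped rather than translated. A driver that did need to populate the new fields would do so roughly as follows (sketch, using only the documented linkmode helpers):

    #include <linux/ethtool.h>
    #include <linux/linkmode.h>

    static void example_fill_keee(struct ethtool_keee *edata)
    {
        /* set individual ETHTOOL_LINK_MODE_* bits... */
        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, edata->supported);
        /* ...or copy whole masks between fields */
        linkmode_copy(edata->lp_advertised, edata->supported);
    }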
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 6e7fd473abfd..f86578857e8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -310,11 +310,12 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
**/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
+ struct i40e_vsi *vsi;
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->id == id))
- return pf->vsi[i];
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->id == id)
+ return vsi;
return NULL;
}
@@ -552,24 +553,19 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
**/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
+ struct i40e_veb *veb;
int i;
memset(&pf->stats, 0, sizeof(pf->stats));
memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
pf->stat_offsets_loaded = false;
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (pf->veb[i]) {
- memset(&pf->veb[i]->stats, 0,
- sizeof(pf->veb[i]->stats));
- memset(&pf->veb[i]->stats_offsets, 0,
- sizeof(pf->veb[i]->stats_offsets));
- memset(&pf->veb[i]->tc_stats, 0,
- sizeof(pf->veb[i]->tc_stats));
- memset(&pf->veb[i]->tc_stats_offsets, 0,
- sizeof(pf->veb[i]->tc_stats_offsets));
- pf->veb[i]->stat_offsets_loaded = false;
- }
+ i40e_pf_for_each_veb(pf, i, veb) {
+ memset(&veb->stats, 0, sizeof(veb->stats));
+ memset(&veb->stats_offsets, 0, sizeof(veb->stats_offsets));
+ memset(&veb->tc_stats, 0, sizeof(veb->tc_stats));
+ memset(&veb->tc_stats_offsets, 0, sizeof(veb->tc_stats_offsets));
+ veb->stat_offsets_loaded = false;
}
pf->hw_csum_rx_error = 0;
}
@@ -2879,6 +2875,7 @@ err_no_memory_locked:
**/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
if (!pf)
@@ -2890,11 +2887,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
}
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v] &&
- (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
- !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
- int ret = i40e_sync_vsi_filters(pf->vsi[v]);
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ if ((vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
+ !test_bit(__I40E_VSI_RELEASING, vsi->state)) {
+ int ret = i40e_sync_vsi_filters(vsi);
if (ret) {
/* come back and try again later */
@@ -4926,27 +4922,23 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int pf_q, err, q_end;
+ u32 pf_q, tx_q_end, rx_q_end;
/* When port TX is suspended, don't wait */
if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
return i40e_vsi_stop_rings_no_wait(vsi);
- q_end = vsi->base_queue + vsi->num_queue_pairs;
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
- i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
+ tx_q_end = vsi->base_queue +
+ vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
+ for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false);
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
- err = i40e_control_wait_rx_q(pf, pf_q, false);
- if (err)
- dev_info(&pf->pdev->dev,
- "VSI seid %d Rx ring %d disable timeout\n",
- vsi->seid, pf_q);
- }
+ rx_q_end = vsi->base_queue + vsi->num_queue_pairs;
+ for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++)
+ i40e_control_rx_q(pf, pf_q, false);
msleep(I40E_DISABLE_TX_GAP_MSEC);
- pf_q = vsi->base_queue;
- for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
+ for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++)
wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
i40e_vsi_wait_queues_disabled(vsi);
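
The tx_q_end/rx_q_end split matters because an XDP-enabled VSI owns twice as many Tx queues as Rx queues: one regular and one XDP Tx ring per queue pair, allocated contiguously from vsi->base_queue. The predicate is presumably the usual xdp_prog check (sketch):

    /* an attached XDP program doubles the VSI's Tx queue footprint */
    static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
    {
        return !!vsi->xdp_prog;
    }

Hence the Tx range spans alloc_queue_pairs * 2 entries when XDP is on, while the Rx range stays at num_queue_pairs.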
@@ -5170,6 +5162,7 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
**/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int i;
if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
@@ -5179,9 +5172,10 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
I40E_IWARP_IRQ_PILE_ID);
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i])
- i40e_vsi_free_q_vectors(pf->vsi[i]);
+
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ i40e_vsi_free_q_vectors(vsi);
+
i40e_reset_interrupt_capability(pf);
}
@@ -5278,12 +5272,11 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
**/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
- i40e_quiesce_vsi(pf->vsi[v]);
- }
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ i40e_quiesce_vsi(vsi);
}
/**
@@ -5292,12 +5285,11 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
**/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v;
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
- i40e_unquiesce_vsi(pf->vsi[v]);
- }
+ i40e_pf_for_each_vsi(pf, v, vsi)
+ i40e_unquiesce_vsi(vsi);
}
/**
@@ -5358,14 +5350,13 @@ wait_rx:
**/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int v, ret = 0;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
- if (pf->vsi[v]) {
- ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
- if (ret)
- break;
- }
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ ret = i40e_vsi_wait_queues_disabled(vsi);
+ if (ret)
+ break;
}
return ret;
@@ -6782,32 +6773,29 @@ out:
**/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
u8 tc_map = 0;
int ret;
- u8 v;
+ int v;
/* Enable the TCs available on PF to all VEBs */
tc_map = i40e_pf_get_tc_map(pf);
if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
return;
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (!pf->veb[v])
- continue;
- ret = i40e_veb_config_tc(pf->veb[v], tc_map);
+ i40e_pf_for_each_veb(pf, v, veb) {
+ ret = i40e_veb_config_tc(veb, tc_map);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed configuring TC for VEB seid=%d\n",
- pf->veb[v]->seid);
+ veb->seid);
/* Will try to configure as many components */
}
}
/* Update each VSI */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (!pf->vsi[v])
- continue;
-
+ i40e_pf_for_each_vsi(pf, v, vsi) {
/* - Enable all TCs for the LAN VSI
* - For all others keep them at TC0 for now
*/
@@ -6816,17 +6804,17 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
else
tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
- ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
+ ret = i40e_vsi_config_tc(vsi, tc_map);
if (ret) {
dev_info(&pf->pdev->dev,
"Failed configuring TC for VSI seid=%d\n",
- pf->vsi[v]->seid);
+ vsi->seid);
/* Will try to configure as many components */
} else {
/* Re-configure VSI vectors based on updated TC map */
- i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
- if (pf->vsi[v]->netdev)
- i40e_dcbnl_set_all(pf->vsi[v]);
+ i40e_vsi_map_rings_to_vectors(vsi);
+ if (vsi->netdev)
+ i40e_dcbnl_set_all(vsi);
}
}
}
@@ -9261,7 +9249,9 @@ int i40e_close(struct net_device *netdev)
**/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
+ struct i40e_vsi *vsi;
u32 val;
+ int i;
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -9317,29 +9307,20 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
"FW LLDP is enabled\n");
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
- int v;
-
/* Find the VSI(s) that requested a re-init */
- dev_info(&pf->pdev->dev,
- "VSI reinit requested\n");
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
+ dev_info(&pf->pdev->dev, "VSI reinit requested\n");
- if (vsi != NULL &&
- test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
vsi->state))
- i40e_vsi_reinit_locked(pf->vsi[v]);
+ i40e_vsi_reinit_locked(vsi);
}
} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
- int v;
-
/* Find the VSI(s) that needs to be brought down */
dev_info(&pf->pdev->dev, "VSI down requested\n");
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- struct i40e_vsi *vsi = pf->vsi[v];
- if (vsi != NULL &&
- test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
vsi->state)) {
set_bit(__I40E_VSI_DOWN, vsi->state);
i40e_down(vsi);
@@ -9892,6 +9873,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
**/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
+ struct i40e_vsi *vsi;
struct i40e_pf *pf;
int i;
@@ -9899,15 +9881,10 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
return;
pf = veb->pf;
- /* depth first... */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
- i40e_veb_link_event(pf->veb[i], link_up);
-
- /* ... now the local VSIs */
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
- i40e_vsi_link_event(pf->vsi[i], link_up);
+ /* Send link event to contained VSIs */
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == veb->seid)
+ i40e_vsi_link_event(vsi, link_up);
}
/**
@@ -9999,6 +9976,8 @@ static void i40e_link_event(struct i40e_pf *pf)
**/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int i;
/* if interface is down do nothing */
@@ -10019,15 +9998,14 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
- for (i = 0; i < pf->num_alloc_vsi; i++)
- if (pf->vsi[i] && pf->vsi[i]->netdev)
- i40e_update_stats(pf->vsi[i]);
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->netdev)
+ i40e_update_stats(vsi);
if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) {
/* Update the stats for the active switching components */
- for (i = 0; i < I40E_MAX_VEB; i++)
- if (pf->veb[i])
- i40e_update_veb_stats(pf->veb[i]);
+ i40e_pf_for_each_veb(pf, i, veb)
+ i40e_update_veb_stats(veb);
}
i40e_ptp_rx_hang(pf);
@@ -10372,89 +10350,84 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb)
}
/**
- * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
+ * i40e_reconstitute_veb - rebuild the VEB and VSIs connected to it
* @veb: pointer to the VEB instance
*
- * This is a recursive function that first builds the attached VSIs then
- * recurses in to build the next layer of VEB. We track the connections
- * through our own index numbers because the seid's from the HW could
- * change across the reset.
+ * This function builds the VSIs attached to the VEB. We track the
+ * connections through our own index numbers because the SEIDs from the
+ * HW could change across the reset.
**/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
struct i40e_vsi *ctl_vsi = NULL;
struct i40e_pf *pf = veb->pf;
- int v, veb_idx;
- int ret;
+ struct i40e_vsi *vsi;
+ int v, ret;
- /* build VSI that owns this VEB, temporarily attached to base VEB */
- for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
- if (pf->vsi[v] &&
- pf->vsi[v]->veb_idx == veb->idx &&
- pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
- ctl_vsi = pf->vsi[v];
- break;
- }
- }
- if (!ctl_vsi) {
- dev_info(&pf->pdev->dev,
- "missing owner VSI for veb_idx %d\n", veb->idx);
- ret = -ENOENT;
- goto end_reconstitute;
+ /* As we do not maintain a PV (port virtualizer) switch element,
+ * there can be only one non-floating VEB that has an uplink to the
+ * MAC SEID, and its control VSI is the main one.
+ */
+ if (WARN_ON(veb->uplink_seid && veb->uplink_seid != pf->mac_seid)) {
+ dev_err(&pf->pdev->dev,
+ "Invalid uplink SEID for VEB %d\n", veb->idx);
+ return -ENOENT;
}
- if (ctl_vsi != pf->vsi[pf->lan_vsi])
- ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
- ret = i40e_add_vsi(ctl_vsi);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "rebuild of veb_idx %d owner VSI failed: %d\n",
- veb->idx, ret);
- goto end_reconstitute;
+
+ if (veb->uplink_seid == pf->mac_seid) {
+ /* Check that the LAN VSI has VEB owning flag set */
+ ctl_vsi = pf->vsi[pf->lan_vsi];
+
+ if (WARN_ON(ctl_vsi->veb_idx != veb->idx ||
+ !(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) {
+ dev_err(&pf->pdev->dev,
+ "Invalid control VSI for VEB %d\n", veb->idx);
+ return -ENOENT;
+ }
+
+ /* Add the control VSI to switch */
+ ret = i40e_add_vsi(ctl_vsi);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Rebuild of owner VSI for VEB %d failed: %d\n",
+ veb->idx, ret);
+ return ret;
+ }
+
+ i40e_vsi_reset_stats(ctl_vsi);
}
- i40e_vsi_reset_stats(ctl_vsi);
/* create the VEB in the switch and move the VSI onto the VEB */
ret = i40e_add_veb(veb, ctl_vsi);
if (ret)
- goto end_reconstitute;
+ return ret;
- if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
- veb->bridge_mode = BRIDGE_MODE_VEB;
- else
- veb->bridge_mode = BRIDGE_MODE_VEPA;
- i40e_config_bridge_mode(veb);
+ if (veb->uplink_seid) {
+ if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
+ veb->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ veb->bridge_mode = BRIDGE_MODE_VEPA;
+ i40e_config_bridge_mode(veb);
+ }
/* create the remaining VSIs attached to this VEB */
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ if (vsi == ctl_vsi)
continue;
- if (pf->vsi[v]->veb_idx == veb->idx) {
- struct i40e_vsi *vsi = pf->vsi[v];
-
+ if (vsi->veb_idx == veb->idx) {
vsi->uplink_seid = veb->seid;
ret = i40e_add_vsi(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
"rebuild of vsi_idx %d failed: %d\n",
v, ret);
- goto end_reconstitute;
+ return ret;
}
i40e_vsi_reset_stats(vsi);
}
}
- /* create any VEBs attached to this VEB - RECURSION */
- for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
- if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
- pf->veb[veb_idx]->uplink_seid = veb->seid;
- ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
- if (ret)
- break;
- }
- }
-
-end_reconstitute:
return ret;
}
@@ -10722,6 +10695,7 @@ static void i40e_clean_xps_state(struct i40e_vsi *vsi)
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
int ret = 0;
u32 v;
@@ -10736,11 +10710,9 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
- for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v]) {
- i40e_clean_xps_state(pf->vsi[v]);
- pf->vsi[v]->seid = 0;
- }
+ i40e_pf_for_each_vsi(pf, v, vsi) {
+ i40e_clean_xps_state(vsi);
+ vsi->seid = 0;
}
i40e_shutdown_adminq(&pf->hw);
@@ -10854,6 +10826,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
+ struct i40e_veb *veb;
int ret;
u32 val;
int v;
@@ -10995,35 +10968,29 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
*/
if (vsi->uplink_seid != pf->mac_seid) {
dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
- /* find the one VEB connected to the MAC, and find orphans */
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (!pf->veb[v])
- continue;
- if (pf->veb[v]->uplink_seid == pf->mac_seid ||
- pf->veb[v]->uplink_seid == 0) {
- ret = i40e_reconstitute_veb(pf->veb[v]);
-
- if (!ret)
- continue;
+ /* Rebuild VEBs */
+ i40e_pf_for_each_veb(pf, v, veb) {
+ ret = i40e_reconstitute_veb(veb);
+ if (!ret)
+ continue;
- /* If Main VEB failed, we're in deep doodoo,
- * so give up rebuilding the switch and set up
- * for minimal rebuild of PF VSI.
- * If orphan failed, we'll report the error
- * but try to keep going.
- */
- if (pf->veb[v]->uplink_seid == pf->mac_seid) {
- dev_info(&pf->pdev->dev,
- "rebuild of switch failed: %d, will try to set up simple PF connection\n",
- ret);
- vsi->uplink_seid = pf->mac_seid;
- break;
- } else if (pf->veb[v]->uplink_seid == 0) {
- dev_info(&pf->pdev->dev,
- "rebuild of orphan VEB failed: %d\n",
- ret);
- }
+ /* If Main VEB failed, we're in deep doodoo,
+ * so give up rebuilding the switch and set up
+ * for minimal rebuild of PF VSI.
+ * If orphan failed, we'll report the error
+ * but try to keep going.
+ */
+ if (veb->uplink_seid == pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of switch failed: %d, will try to set up simple PF connection\n",
+ ret);
+ vsi->uplink_seid = pf->mac_seid;
+ break;
+ } else if (veb->uplink_seid == 0) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of orphan VEB failed: %d\n",
+ ret);
}
}
}
@@ -12102,6 +12069,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
*/
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi;
int err, i;
/* We cleared the MSI and MSI-X flags when disabling the old interrupt
@@ -12118,13 +12086,12 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
/* Now that we've re-acquired IRQs, we need to remap the vectors and
* rings together again.
*/
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i]) {
- err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
- if (err)
- goto err_unwind;
- i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ err = i40e_vsi_alloc_q_vectors(vsi);
+ if (err)
+ goto err_unwind;
+
+ i40e_vsi_map_rings_to_vectors(vsi);
}
err = i40e_setup_misc_vector(pf);
@@ -13126,19 +13093,16 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = NULL;
struct nlattr *attr, *br_spec;
- int i, rem;
+ struct i40e_veb *veb;
+ int rem;
/* Only for PF VSI for now */
if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
return -EOPNOTSUPP;
/* Find the HW bridge for PF VSI */
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!br_spec)
@@ -13203,19 +13167,14 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = NULL;
- int i;
+ struct i40e_veb *veb;
/* Only for PF VSI for now */
if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
return -EOPNOTSUPP;
/* Find the HW bridge for the PF VSI */
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
-
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
if (!veb)
return 0;
@@ -13249,12 +13208,12 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
/* MACLEN can support at most 63 words */
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len & ~(63 * 2))
goto out_err;
/* IPLEN and EIPLEN can support at most 127 dwords */
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len & ~(127 * 4))
goto out_err;
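
The two header-length rewrites are pure equivalences, swapping open-coded pointer arithmetic for existing skbuff helpers. An illustrative self-check (not part of the patch) of the identities involved:

    static inline void check_hdr_len_identities(const struct sk_buff *skb)
    {
        /* both helpers are thin wrappers over the same pointer math */
        WARN_ON(skb_network_offset(skb) !=
                skb_network_header(skb) - skb->data);
        WARN_ON(skb_network_header_len(skb) !=
                skb_transport_header(skb) - skb_network_header(skb));
    }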
@@ -13564,9 +13523,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
return err;
i40e_queue_pair_disable_irq(vsi, queue_pair);
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
- i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);
@@ -14149,7 +14108,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
- struct i40e_veb *veb = NULL;
+ struct i40e_veb *veb;
struct i40e_pf *pf;
u16 uplink_seid;
int i, n, bkt;
@@ -14213,29 +14172,28 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
/* If this was the last thing on the VEB, except for the
* controlling VSI, remove the VEB, which puts the controlling
- * VSI onto the next level down in the switch.
+ * VSI onto the uplink port.
*
* Well, okay, there's one more exception here: don't remove
- * the orphan VEBs yet. We'll wait for an explicit remove request
+ * the floating VEBs yet. We'll wait for an explicit remove request
* from up the network stack.
*/
- for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] &&
- pf->vsi[i]->uplink_seid == uplink_seid &&
- (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
- n++; /* count the VSIs */
- }
- }
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
- if (pf->veb[i]->uplink_seid == uplink_seid)
- n++; /* count the VEBs */
- if (pf->veb[i]->seid == uplink_seid)
- veb = pf->veb[i];
+ veb = i40e_pf_get_veb_by_seid(pf, uplink_seid);
+ if (veb && veb->uplink_seid) {
+ n = 0;
+
+ /* Count non-controlling VSIs present on the VEB */
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == uplink_seid &&
+ (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ n++;
+
+ /* If there is no VSI except the control one, release the
+ * VEB and put the control VSI onto the VEB uplink.
+ */
+ if (!n)
+ i40e_veb_release(veb);
}
- if (n == 0 && veb && veb->uplink_seid != 0)
- i40e_veb_release(veb);
return 0;
}
@@ -14393,8 +14351,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
struct i40e_vsi *vsi = NULL;
struct i40e_veb *veb = NULL;
u16 alloc_queue_pairs;
- int ret, i;
int v_idx;
+ int ret;
/* The requested uplink_seid must be either
* - the PF's port seid
@@ -14409,21 +14367,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
*
* Find which uplink_seid we were given and create a new VEB if needed
*/
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
- veb = pf->veb[i];
- break;
- }
- }
-
+ veb = i40e_pf_get_veb_by_seid(pf, uplink_seid);
if (!veb && uplink_seid != pf->mac_seid) {
-
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
- vsi = pf->vsi[i];
- break;
- }
- }
+ vsi = i40e_pf_get_vsi_by_seid(pf, uplink_seid);
if (!vsi) {
dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
uplink_seid);
@@ -14452,10 +14398,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
}
i40e_config_bridge_mode(veb);
}
- for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
- if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
- veb = pf->veb[i];
- }
+ veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid);
if (!veb) {
dev_info(&pf->pdev->dev, "couldn't add VEB\n");
return NULL;
@@ -14685,29 +14628,24 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
struct i40e_pf *pf = branch->pf;
u16 branch_seid = branch->seid;
u16 veb_idx = branch->idx;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int i;
/* release any VEBs on this VEB - RECURSION */
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
- if (pf->veb[i]->uplink_seid == branch->seid)
- i40e_switch_branch_release(pf->veb[i]);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->uplink_seid == branch->seid)
+ i40e_switch_branch_release(veb);
/* Release the VSIs on this VEB, but not the owner VSI.
*
* NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
* the VEB itself, so don't use (*branch) after this loop.
*/
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (!pf->vsi[i])
- continue;
- if (pf->vsi[i]->uplink_seid == branch_seid &&
- (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
- i40e_vsi_release(pf->vsi[i]);
- }
- }
+ i40e_pf_for_each_vsi(pf, i, vsi)
+ if (vsi->uplink_seid == branch_seid &&
+ (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ i40e_vsi_release(vsi);
/* There's one corner case where the VEB might not have been
* removed, so double check it here and remove it if needed.
@@ -14745,38 +14683,35 @@ static void i40e_veb_clear(struct i40e_veb *veb)
**/
void i40e_veb_release(struct i40e_veb *veb)
{
- struct i40e_vsi *vsi = NULL;
+ struct i40e_vsi *vsi, *vsi_it;
struct i40e_pf *pf;
int i, n = 0;
pf = veb->pf;
/* find the remaining VSI and check for extras */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
+ i40e_pf_for_each_vsi(pf, i, vsi_it)
+ if (vsi_it->uplink_seid == veb->seid) {
+ if (vsi_it->flags & I40E_VSI_FLAG_VEB_OWNER)
+ vsi = vsi_it;
n++;
- vsi = pf->vsi[i];
}
- }
- if (n != 1) {
+
+ /* A floating VEB has to be empty and a regular one must have
+ * a single owner VSI.
+ */
+ if ((veb->uplink_seid && n != 1) || (!veb->uplink_seid && n != 0)) {
dev_info(&pf->pdev->dev,
"can't remove VEB %d with %d VSIs left\n",
veb->seid, n);
return;
}
- /* move the remaining VSI to uplink veb */
- vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
+ /* For a regular VEB, move the owner VSI to the uplink port */
if (veb->uplink_seid) {
+ vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
vsi->uplink_seid = veb->uplink_seid;
- if (veb->uplink_seid == pf->mac_seid)
- vsi->veb_idx = I40E_NO_VEB;
- else
- vsi->veb_idx = veb->veb_idx;
- } else {
- /* floating VEB */
- vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
- vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
+ vsi->veb_idx = I40E_NO_VEB;
}
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
@@ -14794,8 +14729,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
int ret;
- ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, false,
+ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi ? vsi->seid : 0,
+ veb->enabled_tc, vsi ? false : true,
&veb->seid, enable_stats, NULL);
/* get a VEB from the hardware */
@@ -14827,9 +14762,11 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
return -ENOENT;
}
- vsi->uplink_seid = veb->seid;
- vsi->veb_idx = veb->idx;
- vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ if (vsi) {
+ vsi->uplink_seid = veb->seid;
+ vsi->veb_idx = veb->idx;
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ }
return 0;
}
@@ -14854,8 +14791,9 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
u16 uplink_seid, u16 vsi_seid,
u8 enabled_tc)
{
- struct i40e_veb *veb, *uplink_veb = NULL;
- int vsi_idx, veb_idx;
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_veb *veb;
+ int veb_idx;
int ret;
/* if one seid is 0, the other must be 0 to create a floating relay */
@@ -14868,26 +14806,11 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
}
/* make sure there is such a vsi and uplink */
- for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
- if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
- break;
- if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
- dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
- vsi_seid);
- return NULL;
- }
-
- if (uplink_seid && uplink_seid != pf->mac_seid) {
- for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
- if (pf->veb[veb_idx] &&
- pf->veb[veb_idx]->seid == uplink_seid) {
- uplink_veb = pf->veb[veb_idx];
- break;
- }
- }
- if (!uplink_veb) {
- dev_info(&pf->pdev->dev,
- "uplink seid %d not found\n", uplink_seid);
+ if (vsi_seid) {
+ vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid);
+ if (!vsi) {
+ dev_err(&pf->pdev->dev, "vsi seid %d not found\n",
+ vsi_seid);
return NULL;
}
}
@@ -14899,14 +14822,14 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
veb = pf->veb[veb_idx];
veb->flags = flags;
veb->uplink_seid = uplink_seid;
- veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
/* create the VEB in the switch */
- ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
+ ret = i40e_add_veb(veb, vsi);
if (ret)
goto err_veb;
- if (vsi_idx == pf->lan_vsi)
+
+ if (vsi && vsi->idx == pf->lan_vsi)
pf->lan_veb = veb->idx;
return veb;
@@ -14934,6 +14857,7 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
u8 element_type = ele->element_type;
u16 seid = le16_to_cpu(ele->seid);
+ struct i40e_veb *veb;
if (printconfig)
dev_info(&pf->pdev->dev,
@@ -14952,13 +14876,10 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
int v;
/* find existing or else empty VEB */
- for (v = 0; v < I40E_MAX_VEB; v++) {
- if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
- pf->lan_veb = v;
- break;
- }
- }
- if (pf->lan_veb >= I40E_MAX_VEB) {
+ veb = i40e_pf_get_veb_by_seid(pf, seid);
+ if (veb) {
+ pf->lan_veb = veb->idx;
+ } else {
v = i40e_veb_mem_alloc(pf);
if (v < 0)
break;
@@ -14971,7 +14892,6 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
pf->veb[pf->lan_veb]->seid = seid;
pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
pf->veb[pf->lan_veb]->pf = pf;
- pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
break;
case I40E_SWITCH_ELEMENT_TYPE_VSI:
if (num_reported != 1)
@@ -15634,6 +15554,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_I40E_DCB
enum i40e_get_fw_lldp_status_resp lldp_status;
#endif /* CONFIG_I40E_DCB */
+ struct i40e_vsi *vsi;
struct i40e_pf *pf;
struct i40e_hw *hw;
u16 wol_nvm_bits;
@@ -15644,7 +15565,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif /* CONFIG_I40E_DCB */
int err;
u32 val;
- u32 i;
err = pci_enable_device_mem(pdev);
if (err)
@@ -15994,12 +15914,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
/* if FDIR VSI was set up, start it now */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
- i40e_vsi_open(pf->vsi[i]);
- break;
- }
- }
+ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
+ if (vsi)
+ i40e_vsi_open(vsi);
/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
@@ -16245,6 +16162,8 @@ static void i40e_remove(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
int ret_code;
int i;
@@ -16302,24 +16221,19 @@ static void i40e_remove(struct pci_dev *pdev)
/* If there is a switch structure or any orphans, remove them.
* This will leave only the PF's VSI remaining.
*/
- for (i = 0; i < I40E_MAX_VEB; i++) {
- if (!pf->veb[i])
- continue;
-
- if (pf->veb[i]->uplink_seid == pf->mac_seid ||
- pf->veb[i]->uplink_seid == 0)
- i40e_switch_branch_release(pf->veb[i]);
- }
+ i40e_pf_for_each_veb(pf, i, veb)
+ if (veb->uplink_seid == pf->mac_seid ||
+ veb->uplink_seid == 0)
+ i40e_switch_branch_release(veb);
/* Now we can shutdown the PF's VSIs, just before we kill
* adminq and hmc.
*/
- for (i = pf->num_alloc_vsi; i--;)
- if (pf->vsi[i]) {
- i40e_vsi_close(pf->vsi[i]);
- i40e_vsi_release(pf->vsi[i]);
- pf->vsi[i] = NULL;
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ i40e_vsi_close(vsi);
+ i40e_vsi_release(vsi);
+ pf->vsi[i] = NULL;
+ }
i40e_cloud_filter_exit(pf);
@@ -16356,18 +16270,17 @@ unmap:
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
rtnl_lock();
i40e_clear_interrupt_scheme(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) {
- if (pf->vsi[i]) {
- if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
- i40e_vsi_clear_rings(pf->vsi[i]);
- i40e_vsi_clear(pf->vsi[i]);
- pf->vsi[i] = NULL;
- }
+ i40e_pf_for_each_vsi(pf, i, vsi) {
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ i40e_vsi_clear_rings(vsi);
+
+ i40e_vsi_clear(vsi);
+ pf->vsi[i] = NULL;
}
rtnl_unlock();
- for (i = 0; i < I40E_MAX_VEB; i++) {
- kfree(pf->veb[i]);
+ i40e_pf_for_each_veb(pf, i, veb) {
+ kfree(veb);
pf->veb[i] = NULL;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index af4269330581..ce1f11b8ad65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -567,8 +567,7 @@ static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min)
**/
static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min)
{
- return (hw->aq.fw_maj_ver > maj ||
- (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min));
+ return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min);
}
#endif /* _I40E_PROTOTYPE_H_ */
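
The one-liner above is a real bug fix: the old expression also returned true whenever the firmware major version was merely greater, so an "equal" check could pass against a newer major release. A standalone model of both predicates demonstrates it:

    #include <assert.h>
    #include <stdbool.h>

    /* userspace model of the old and new i40e_is_fw_ver_eq() logic */
    static bool old_eq(int fmaj, int fmin, int maj, int min)
    {
        return fmaj > maj || (fmaj == maj && fmin == min);
    }

    static bool new_eq(int fmaj, int fmin, int maj, int min)
    {
        return fmaj == maj && fmin == min;
    }

    int main(void)
    {
        assert(old_eq(5, 1, 4, 0));   /* the bug: FW 5.1 "equals" 4.0 */
        assert(!new_eq(5, 1, 4, 0));  /* fixed */
        assert(new_eq(4, 0, 4, 0));   /* genuine equality still holds */
        return 0;
    }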
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 908cdbd3ec5d..83a34e98bdc7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -491,8 +491,6 @@ static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
u32 v_idx, reg_idx, reg;
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
v_idx = qv_info->v_idx;
if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
/* Figure out the queue after CEQ and make that the
@@ -562,8 +560,6 @@ i40e_config_rdma_qvlist(struct i40e_vf *vf,
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
for (i = 0; i < qvlist_info->num_vectors; i++) {
qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
/* Validate vector id belongs to this vf */
if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
@@ -2848,6 +2844,24 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
+/**
+ * i40e_can_vf_change_mac - check if a VF may change its MAC filters
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise.
+ */
+static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
+{
+ /* If the VF MAC address has been set administratively (via the
+ * ndo_set_vf_mac command), then deny permission to the VF to
+ * add/delete unicast MAC addresses, unless the VF is trusted.
+ */
+ if (vf->pf_set_mac && !vf->trusted)
+ return false;
+
+ return true;
+}
+
#define I40E_MAX_MACVLAN_PER_HW 3072
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
(num_ports))
@@ -2907,8 +2921,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
+ if (!i40e_can_vf_change_mac(vf) &&
+ !is_multicast_ether_addr(addr) &&
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
@@ -3114,19 +3128,29 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
ret = -EINVAL;
goto error_param;
}
- if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
- was_unimac_deleted = true;
}
vsi = pf->vsi[vf->lan_vsi_idx];
spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */
- for (i = 0; i < al->num_elements; i++)
+ for (i = 0; i < al->num_elements; i++) {
+ const u8 *addr = al->list[i].addr;
+
+ /* Allow deleting the VF primary MAC only if it was not set
+ * administratively by the PF or if the VF is trusted.
+ */
+ if (ether_addr_equal(addr, vf->default_lan_addr.addr) &&
+ i40e_can_vf_change_mac(vf))
+ was_unimac_deleted = true;
+ else
+ continue;
+
if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
ret = -EINVAL;
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
}
+ }
spin_unlock_bh(&vsi->mac_filter_hash_lock);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 335fd13e86f7..ef2440f3abf8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2170,19 +2170,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_add_cloud_filter(adapter);
return 0;
}
-
- if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
- iavf_del_cloud_filter(adapter);
- return 0;
- }
if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
iavf_del_cloud_filter(adapter);
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
- iavf_add_cloud_filter(adapter);
- return 0;
- }
if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
iavf_add_fdir_filter(adapter);
return IAVF_SUCCESS;
@@ -4423,12 +4414,12 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb,
features &= ~NETIF_F_GSO_MASK;
/* MACLEN can support at most 63 words */
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len & ~(63 * 2))
goto out_err;
/* IPLEN and EIPLEN can support at most 127 dwords */
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len & ~(127 * 4))
goto out_err;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 367b613d92c0..365c03d1c462 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -493,7 +493,6 @@ enum ice_pf_flags {
ICE_FLAG_DCB_ENA,
ICE_FLAG_FD_ENA,
ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
- ICE_FLAG_PTP, /* PTP is enabled by software */
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
ICE_FLAG_CLS_FLOWER,
@@ -606,6 +605,7 @@ struct ice_pf {
wait_queue_head_t reset_wait_queue;
u32 hw_csum_rx_error;
+ u32 hw_rx_eipe_error;
u32 oicr_err_reg;
struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */
struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
@@ -896,6 +896,7 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
}
void ice_debugfs_fwlog_init(struct ice_pf *pf);
+void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
@@ -983,6 +984,8 @@ void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
+int ice_init_dev(struct ice_pf *pf);
+void ice_deinit_dev(struct ice_pf *pf);
/**
* ice_set_rdma_cap - enable RDMA support
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index cca0e753f38f..7cee365cc7d1 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2018-2020, Intel Corporation. */
#include "ice.h"
+#include <net/rps.h>
/**
* ice_is_arfs_active - helper to check is aRFS is active
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 7ac847718882..d2fd315556a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -190,15 +190,13 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
q_vector = vsi->q_vectors[v_idx];
ice_for_each_tx_ring(tx_ring, q_vector->tx) {
- if (vsi->netdev)
- netif_queue_set_napi(vsi->netdev, tx_ring->q_index,
- NETDEV_QUEUE_TYPE_TX, NULL);
+ ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX,
+ NULL);
tx_ring->q_vector = NULL;
}
ice_for_each_rx_ring(rx_ring, q_vector->rx) {
- if (vsi->netdev)
- netif_queue_set_napi(vsi->netdev, rx_ring->q_index,
- NETDEV_QUEUE_TYPE_RX, NULL);
+ ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX,
+ NULL);
rx_ring->q_vector = NULL;
}
@@ -538,7 +536,7 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
*
* Return 0 on success and a negative value on error.
*/
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
@@ -633,6 +631,62 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+{
+ if (q_idx >= vsi->num_rxq)
+ return -EINVAL;
+
+ return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ vsi->rx_buf_len = ICE_RXBUF_1664;
+#if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ vsi->rx_buf_len = ICE_RXBUF_3072;
+ }
+}
+
+/**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error.
+ * Configure the Rx VSI for operation.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+ u16 i;
+
+ if (vsi->type == ICE_VSI_VF)
+ goto setup_rings;
+
+ ice_vsi_cfg_frame_size(vsi);
+setup_rings:
+ /* set up individual rings */
+ ice_for_each_rxq(vsi, i) {
+ int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/**
* __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
* @qs_cfg: gathered variables needed for pf->vsi queues assignment
@@ -828,7 +882,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @ring: Tx ring to be configured
* @qg_buf: queue group buffer
*/
-int
+static int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
@@ -899,6 +953,80 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
return 0;
}
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+ u16 q_idx)
+{
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+
+ if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
+ return -EINVAL;
+
+ qg_buf->num_txqs = 1;
+
+ return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
+}
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @count: number of Tx ring array elements
+ *
+ * Return 0 on success and a negative value on error.
+ * Configure the Tx VSI for operation.
+ */
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
+{
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ int err = 0;
+ u16 q_idx;
+
+ qg_buf->num_txqs = 1;
+
+ for (q_idx = 0; q_idx < count; q_idx++) {
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error.
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
+}
+
+/**
+ * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in the given VSI
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error.
+ * Configure the Tx queues dedicated for XDP in the given VSI for operation.
+ */
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+{
+ int ret;
+ int i;
+
+ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
+ if (ret)
+ return ret;
+
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
+
+ return 0;
+}
+
/**
* ice_cfg_itr - configure the initial interrupt throttle values
* @hw: pointer to the HW structure
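
DEFINE_FLEX(), used by the new Tx helpers above, declares an on-stack, zero-initialized instance of a structure that ends in a flexible array, sized for a fixed element count (here a single ice_aqc_add_tx_qgrp queue entry). A conceptual model of the expansion (not the literal macro, see <linux/overflow.h>):

    /* reserve stack space for the struct plus one trailing
     * flex-array element, zero it, and expose a typed pointer
     */
    union {
        u8 bytes[struct_size_t(struct ice_aqc_add_tx_qgrp, txqs, 1)];
        struct ice_aqc_add_tx_qgrp obj;
    } __qg_buf = {};
    struct ice_aqc_add_tx_qgrp *qg_buf = &__qg_buf.obj;

This removes the need for a heap allocation when the element count is known at compile time.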
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 17321ba75602..b711bc921928 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,8 @@
#include "ice.h"
-int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -14,9 +15,10 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
-int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
- struct ice_aqc_add_tx_qgrp *qg_buf);
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
+ u16 q_idx);
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
+int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 10c32cd80fff..4d8111aeb0ff 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -154,6 +154,12 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E823L_SFP:
hw->mac_type = ICE_MAC_GENERIC;
break;
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ hw->mac_type = ICE_MAC_GENERIC_3K_E825;
+ break;
case ICE_DEV_ID_E830_BACKPLANE:
case ICE_DEV_ID_E830_QSFP56:
case ICE_DEV_ID_E830_SFP:
@@ -170,6 +176,18 @@ static int ice_set_mac_type(struct ice_hw *hw)
}
/**
+ * ice_is_generic_mac - check if device's mac_type is generic
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if mac_type is generic (with SBQ support), false if not
+ */
+bool ice_is_generic_mac(struct ice_hw *hw)
+{
+ return (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->mac_type == ICE_MAC_GENERIC_3K_E825);
+}
+
+/**
* ice_is_e810
* @hw: pointer to the hardware structure
*
@@ -241,6 +259,25 @@ bool ice_is_e823(struct ice_hw *hw)
}
/**
+ * ice_is_e825c - Check if a device is E825C family device
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if the device is E825-C based, false if not.
+ */
+bool ice_is_e825c(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -965,9 +1002,9 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
*/
int ice_init_hw(struct ice_hw *hw)
{
- struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
+ void *mac_buf __free(kfree);
u16 mac_buf_len;
- void *mac_buf;
int status;
/* Set MAC type based on DeviceID */
@@ -1045,7 +1082,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) {
status = -ENOMEM;
goto err_unroll_sched;
@@ -1055,7 +1092,6 @@ int ice_init_hw(struct ice_hw *hw)
status = ice_aq_get_phy_caps(hw->port_info, false,
ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
NULL);
- devm_kfree(ice_hw_to_dev(hw), pcaps);
if (status)
dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
status);
@@ -1082,18 +1118,15 @@ int ice_init_hw(struct ice_hw *hw)
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
- mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
- sizeof(struct ice_aqc_manage_mac_read_resp),
- GFP_KERNEL);
- mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
-
+ mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp),
+ GFP_KERNEL);
if (!mac_buf) {
status = -ENOMEM;
goto err_unroll_fltr_mgmt_struct;
}
+ mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
- devm_kfree(ice_hw_to_dev(hw), mac_buf);
if (status)
goto err_unroll_fltr_mgmt_struct;
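
The __free(kfree) annotations adopted throughout this file come from <linux/cleanup.h>: the pointer is kfree()d automatically when it leaves scope, which is why the explicit devm_kfree()/kfree() calls disappear from every exit path. The idiom in isolation (sketch with a hypothetical struct foo):

    #include <linux/cleanup.h>
    #include <linux/slab.h>

    struct foo { int x; };

    static int demo(void)
    {
        struct foo *p __free(kfree) = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
            return -ENOMEM;

        /* ...use p; every return path below frees it automatically... */
        return 0;
    }

Note the allocation happens in the declaration itself, so there is no window where the variable is live but uninitialized.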
@@ -1362,9 +1395,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* it to HW register space and enables the hardware to prefetch descriptors
* instead of only fetching them on demand
*/
-int
-ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
- u32 rxq_index)
+int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index)
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
@@ -3240,19 +3272,14 @@ int ice_update_link_info(struct ice_port_info *pi)
return status;
if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
- struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_hw *hw;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
- hw = pi->hw;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
- GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
-
- devm_kfree(ice_hw_to_dev(hw), pcaps);
}
return status;
@@ -3393,8 +3420,8 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
- struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_hw *hw;
int status;
@@ -3404,7 +3431,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
*aq_failures = 0;
hw = pi->hw;
- pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
@@ -3456,7 +3483,6 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
}
out:
- devm_kfree(ice_hw_to_dev(hw), pcaps);
return status;
}
@@ -3535,7 +3561,7 @@ int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
- struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_get_phy_caps_data *pcaps __free(kfree);
struct ice_hw *hw;
int status;
@@ -3604,8 +3630,6 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
}
out:
- kfree(pcaps);
-
return status;
}
@@ -4325,13 +4349,13 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
/* End of FW Admin Queue command wrappers */
/**
- * ice_write_byte - write a byte to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_byte - write a byte to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u8 src_byte, dest_byte, mask;
u8 *from, *dest;
@@ -4342,14 +4366,11 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = (u8)(BIT(ce_info->width) - 1);
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
src_byte = *from;
- src_byte &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_byte <<= shift_width;
+ src_byte &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4364,13 +4385,13 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_word - write a word to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_word - write a word to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u16 src_word, mask;
__le16 dest_word;
@@ -4382,17 +4403,14 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
- mask = BIT(ce_info->width) - 1;
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_word = *(u16 *)from;
- src_word &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_word <<= shift_width;
+ src_word &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4407,13 +4425,13 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_dword - write a dword to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_dword - write a dword to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u32 src_dword, mask;
__le32 dest_dword;
@@ -4425,25 +4443,14 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
-
- /* if the field width is exactly 32 on an x86 machine, then the shift
- * operation will not work because the SHL instructions count is masked
- * to 5 bits so the shift will do nothing
- */
- if (ce_info->width < 32)
- mask = BIT(ce_info->width) - 1;
- else
- mask = (u32)~0;
+ mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_dword = *(u32 *)from;
- src_dword &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_dword <<= shift_width;
+ src_dword &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
@@ -4458,13 +4465,13 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
- * ice_write_qword - write a qword to a packed context structure
- * @src_ctx: the context structure to read from
- * @dest_ctx: the context to be written to
- * @ce_info: a description of the struct to be filled
+ * ice_pack_ctx_qword - write a qword to a packed context structure
+ * @src_ctx: unpacked source context structure
+ * @dest_ctx: packed destination context data
+ * @ce_info: context element description
*/
-static void
-ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
u64 src_qword, mask;
__le64 dest_qword;
@@ -4476,25 +4483,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* prepare the bits and mask */
shift_width = ce_info->lsb % 8;
-
- /* if the field width is exactly 64 on an x86 machine, then the shift
- * operation will not work because the SHL instructions count is masked
- * to 6 bits so the shift will do nothing
- */
- if (ce_info->width < 64)
- mask = BIT_ULL(ce_info->width) - 1;
- else
- mask = (u64)~0;
+ mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
/* don't swizzle the bits until after the mask because the mask bits
* will be in a different bit position on big endian machines
*/
src_qword = *(u64 *)from;
- src_qword &= mask;
-
- /* shift to correct alignment */
- mask <<= shift_width;
src_qword <<= shift_width;
+ src_qword &= mask;
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
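All four pack helpers move from building a mask at bit 0 and relocating both the value and the mask, to generating the mask already in position with GENMASK()/GENMASK_ULL() and masking after the shift. That also removes the width == 32/64 special casing: BIT(width) - 1 needs a full-width shift, which is undefined, while GENMASK_ULL(width - 1 + shift, shift) is well defined for any in-range width. A reduced sketch of the new shape:

	/* mask generated already in position; the old form computed
	 * BIT_ULL(width) - 1 at bit 0 and needed a special case at
	 * width == 64, because a shift by 64 is undefined
	 */
	static u64 pack_field(u64 val, unsigned int width,
			      unsigned int shift_width)
	{
		u64 mask = GENMASK_ULL(width - 1 + shift_width, shift_width);

		val <<= shift_width;
		return val & mask;
	}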
@@ -4513,11 +4509,10 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @hw: pointer to the hardware structure
* @src_ctx: pointer to a generic non-packed context structure
* @dest_ctx: pointer to memory for the packed structure
- * @ce_info: a description of the structure to be transformed
+ * @ce_info: list of context element descriptions
*/
-int
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
int f;
@@ -4533,16 +4528,16 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
}
switch (ce_info[f].size_of) {
case sizeof(u8):
- ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u16):
- ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u32):
- ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
break;
case sizeof(u64):
- ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
+ ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
break;
default:
return -EINVAL;
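ice_set_ctx() stays a table-driven packer: each ice_ctx_ele entry describes one field of the unpacked source structure and its destination in the packed layout, and the switch dispatches purely on the unpacked member size. Roughly, treating the field names as a model rather than the exact driver definition:

	struct ctx_ele {		/* modeled on the driver's ice_ctx_ele */
		u16 offset;	/* byte offset of the member in src_ctx */
		u16 size_of;	/* sizeof() the unpacked member: 1/2/4/8 */
		u16 width;	/* field width in bits once packed */
		u16 lsb;	/* bit position of the field's lsb in dest_ctx */
	};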
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 3e933f75e948..ffb22c7ce28b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -53,9 +53,8 @@ int ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-int
-ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
- u32 rxq_index);
+int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index);
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
@@ -72,9 +71,8 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int
-ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info);
+int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
@@ -112,6 +110,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
+bool ice_is_generic_mac(struct ice_hw *hw);
bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
@@ -251,6 +250,7 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
bool ice_is_e823(struct ice_hw *hw);
+bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index e7d2474c431c..ffe660f34992 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -666,7 +666,7 @@ bool ice_is_sbq_supported(struct ice_hw *hw)
/* The device sideband queue is only supported on devices with the
* generic MAC type.
*/
- return hw->mac_type == ICE_MAC_GENERIC;
+ return ice_is_generic_mac(hw);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 8b7504a9df31..7532d11ad7f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1825,6 +1825,7 @@ static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
seg_id = SEGMENT_TYPE_ICE_E830;
break;
case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
default:
seg_id = SEGMENT_TYPE_ICE_E810;
break;
@@ -1845,6 +1846,9 @@ static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
case ICE_MAC_E830:
sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825;
+ break;
case ICE_MAC_GENERIC:
default:
sign_type = SEGMENT_SIGN_TYPE_RSA2K;
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index c2bfba6b9ead..d252d98218d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -64,9 +64,6 @@ static const char * const ice_fwlog_level_string[] = {
"verbose",
};
-/* the order in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_fwlog_level
- */
static const char * const ice_fwlog_log_size[] = {
"128K",
"256K",
@@ -648,6 +645,16 @@ err_create_module_files:
}
/**
+ * ice_debugfs_pf_deinit - cleanup PF's debugfs
+ * @pf: pointer to the PF struct
+ */
+void ice_debugfs_pf_deinit(struct ice_pf *pf)
+{
+ debugfs_remove_recursive(pf->ice_debugfs_pf);
+ pf->ice_debugfs_pf = NULL;
+}
+
+/**
* ice_debugfs_init - create root directory for debugfs entries
*/
void ice_debugfs_init(void)
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index a2d384dbfc76..9dfae9bce758 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -71,5 +71,13 @@
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
+/* Intel(R) Ethernet Connection E825-C for backplane */
+#define ICE_DEV_ID_E825C_BACKPLANE 0x579c
+/* Intel(R) Ethernet Connection E825-C for QSFP */
+#define ICE_DEV_ID_E825C_QSFP 0x579d
+/* Intel(R) Ethernet Connection E825-C for SFP */
+#define ICE_DEV_ID_E825C_SFP 0x579e
+/* Intel(R) Ethernet Connection E825-C 1GbE */
+#define ICE_DEV_ID_E825C_SGMII 0x579f
#endif /* _ICE_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 65be56f2af9e..b516e42b41f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -445,6 +445,20 @@ ice_devlink_reload_empr_start(struct ice_pf *pf,
}
/**
+ * ice_devlink_reinit_down - unload given PF
+ * @pf: pointer to the PF struct
+ */
+static void ice_devlink_reinit_down(struct ice_pf *pf)
+{
+ /* No need to take devl_lock, it's already taken by devlink API */
+ ice_unload(pf);
+ rtnl_lock();
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+ rtnl_unlock();
+ ice_deinit_dev(pf);
+}
+
+/**
* ice_devlink_reload_down - prepare for reload
* @devlink: pointer to the devlink instance to reload
* @netns_change: if true, the network namespace is changing
@@ -477,7 +491,7 @@ ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
"Remove all VFs before doing reinit\n");
return -EOPNOTSUPP;
}
- ice_unload(pf);
+ ice_devlink_reinit_down(pf);
return 0;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
return ice_devlink_reload_empr_start(pf, extack);
@@ -1270,6 +1284,45 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
}
/**
+ * ice_devlink_reinit_up - do reinit of the given PF
+ * @pf: pointer to the PF struct
+ */
+static int ice_devlink_reinit_up(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct ice_vsi_cfg_params params;
+ int err;
+
+ err = ice_init_dev(pf);
+ if (err)
+ return err;
+
+ params = ice_vsi_to_params(vsi);
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ rtnl_lock();
+ err = ice_vsi_cfg(vsi, &params);
+ rtnl_unlock();
+ if (err)
+ goto err_vsi_cfg;
+
+ /* No need to take devl_lock, it's already taken by devlink API */
+ err = ice_load(pf);
+ if (err)
+ goto err_load;
+
+ return 0;
+
+err_load:
+ rtnl_lock();
+ ice_vsi_decfg(vsi);
+ rtnl_unlock();
+err_vsi_cfg:
+ ice_deinit_dev(pf);
+ return err;
+}
+
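With the down/up pair in place, DEVLINK_RELOAD_ACTION_DRIVER_REINIT tears the PF down fully (unload, VSI decfg, device deinit) and rebuilds it in mirror order, rather than doing a bare ice_unload()/ice_load(). From userspace the path is exercised with the devlink reload command, for example (device address illustrative):

	devlink dev reload pci/0000:01:00.0 action driver_reinit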
+/**
* ice_devlink_reload_up - do reload up after reinit
* @devlink: pointer to the devlink instance reloading
* @action: the action requested
@@ -1289,7 +1342,7 @@ ice_devlink_reload_up(struct devlink *devlink,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return ice_load(pf);
+ return ice_devlink_reinit_up(pf);
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
return ice_devlink_reload_empr_finish(pf, extack);
@@ -1569,6 +1622,7 @@ static const struct devlink_port_ops ice_devlink_port_ops = {
* @pf: the PF to create a devlink port for
*
* Create and register a devlink_port for this PF.
+ * This function has to be called under devl_lock.
*
* Return: zero on success or an error code on failure.
*/
@@ -1581,6 +1635,8 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
struct device *dev;
int err;
+ devlink = priv_to_devlink(pf);
+
dev = ice_pf_to_dev(pf);
devlink_port = &pf->devlink_port;
@@ -1601,10 +1657,9 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
ice_devlink_set_switch_id(pf, &attrs.switch_id);
devlink_port_attrs_set(devlink_port, &attrs);
- devlink = priv_to_devlink(pf);
- err = devlink_port_register_with_ops(devlink, devlink_port, vsi->idx,
- &ice_devlink_port_ops);
+ err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_ops);
if (err) {
dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
pf->hw.pf_id, err);
@@ -1619,10 +1674,11 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
* @pf: the PF to cleanup
*
* Unregisters the devlink_port structure associated with this PF.
+ * This function has to be called under devl_lock.
*/
void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
- devlink_port_unregister(&pf->devlink_port);
+ devl_port_unregister(&pf->devlink_port);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index b9c5eced6326..e92be6f130a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -31,6 +31,26 @@ static const char * const pin_type_name[] = {
};
/**
+ * ice_dpll_is_reset - check if reset is in progress
+ * @pf: private board structure
+ * @extack: error reporting
+ *
+ * If reset is in progress, fill extack with error.
+ *
+ * Return:
+ * * false - no reset in progress
+ * * true - reset in progress
+ */
+static bool ice_dpll_is_reset(struct ice_pf *pf, struct netlink_ext_ack *extack)
+{
+ if (ice_is_reset_in_progress(pf->state)) {
+ NL_SET_ERR_MSG(extack, "PF reset in progress");
+ return true;
+ }
+ return false;
+}
+
+/**
* ice_dpll_pin_freq_set - set pin's frequency
* @pf: private board structure
* @pin: pointer to a pin
@@ -109,6 +129,9 @@ ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack);
mutex_unlock(&pf->dplls.lock);
@@ -254,6 +277,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
* ice_dpll_pin_enable - enable a pin on dplls
* @hw: board private hw structure
* @pin: pointer to a pin
+ * @dpll_idx: index of the dpll to connect to the output pin
* @pin_type: type of pin being enabled
* @extack: error reporting
*
@@ -266,7 +290,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
*/
static int
ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
- enum ice_dpll_pin_type pin_type,
+ u8 dpll_idx, enum ice_dpll_pin_type pin_type,
struct netlink_ext_ack *extack)
{
u8 flags = 0;
@@ -280,10 +304,12 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
+ flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL;
if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
- ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+ ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, dpll_idx,
+ 0, 0);
break;
default:
return -EINVAL;
@@ -370,7 +396,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
case ICE_DPLL_PIN_TYPE_INPUT:
ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
NULL, &pin->flags[0],
- &pin->freq, NULL);
+ &pin->freq, &pin->phase_adjust);
if (ret)
goto err;
if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) {
@@ -398,14 +424,27 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx,
- &pin->flags[0], NULL,
+ &pin->flags[0], &parent,
&pin->freq, NULL);
if (ret)
goto err;
- if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0])
- pin->state[0] = DPLL_PIN_STATE_CONNECTED;
- else
- pin->state[0] = DPLL_PIN_STATE_DISCONNECTED;
+
+ parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL;
+ if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ parent == pf->dplls.eec.dpll_idx ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ parent == pf->dplls.pps.dpll_idx ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_DISCONNECTED;
+ } else {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ }
break;
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
for (parent = 0; parent < pf->dplls.rclk.num_parents;
@@ -488,6 +527,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
* @dpll: registered dpll pointer
* @dpll_priv: private data pointer passed on dpll registration
* @status: on success holds dpll's lock status
+ * @status_error: holds the detailed reason when the dpll is not locked
* @extack: error reporting
*
* Dpll subsystem callback, provides dpll's lock status.
@@ -500,6 +540,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
static int
ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
struct netlink_ext_ack *extack)
{
struct ice_dpll *d = dpll_priv;
@@ -568,9 +609,13 @@ ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
if (enable)
- ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack);
+ ret = ice_dpll_pin_enable(&pf->hw, p, d->dpll_idx, pin_type,
+ extack);
else
ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack);
if (!ret)
@@ -603,6 +648,11 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
struct netlink_ext_ack *extack)
{
bool enable = state == DPLL_PIN_STATE_CONNECTED;
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+
+ if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED)
+ return 0;
return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
extack, ICE_DPLL_PIN_TYPE_OUTPUT);
@@ -665,14 +715,16 @@ ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
if (ret)
goto unlock;
- if (pin_type == ICE_DPLL_PIN_TYPE_INPUT)
+ if (pin_type == ICE_DPLL_PIN_TYPE_INPUT ||
+ pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
*state = p->state[d->dpll_idx];
- else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
- *state = p->state[0];
ret = 0;
unlock:
mutex_unlock(&pf->dplls.lock);
@@ -790,6 +842,9 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
mutex_unlock(&pf->dplls.lock);
@@ -910,6 +965,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
u8 flag, flags_en = 0;
int ret;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
switch (type) {
case ICE_DPLL_PIN_TYPE_INPUT:
@@ -1069,6 +1127,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
int ret = -EINVAL;
u32 hw_idx;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
hw_idx = parent->idx - pf->dplls.base_rclk_idx;
if (hw_idx >= pf->dplls.num_inputs)
@@ -1123,6 +1184,9 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
int ret = -EINVAL;
u32 hw_idx;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
mutex_lock(&pf->dplls.lock);
hw_idx = parent->idx - pf->dplls.base_rclk_idx;
if (hw_idx >= pf->dplls.num_inputs)
@@ -1305,8 +1369,10 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
struct ice_dpll *de = &pf->dplls.eec;
struct ice_dpll *dp = &pf->dplls.pps;
- int ret;
+ int ret = 0;
+ if (ice_is_reset_in_progress(pf->state))
+ goto resched;
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_update_state(pf, de, false);
if (!ret)
@@ -1326,6 +1392,7 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
ice_dpll_notify_changes(de);
ice_dpll_notify_changes(dp);
+resched:
/* Run twice a second or reschedule if update failed */
kthread_queue_delayed_work(d->kworker, &d->work,
ret ? msecs_to_jiffies(10) :
@@ -1532,7 +1599,7 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
}
if (WARN_ON_ONCE(!vsi || !vsi->netdev))
return;
- netdev_dpll_pin_clear(vsi->netdev);
+ dpll_netdev_pin_clear(vsi->netdev);
dpll_pin_put(rclk->pin);
}
@@ -1576,7 +1643,7 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (WARN_ON((!vsi || !vsi->netdev)))
return -EINVAL;
- netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+ dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
return 0;
@@ -2055,6 +2122,7 @@ void ice_dpll_init(struct ice_pf *pf)
struct ice_dplls *d = &pf->dplls;
int err = 0;
+ mutex_init(&d->lock);
err = ice_dpll_init_info(pf, cgu);
if (err)
goto err_exit;
@@ -2067,7 +2135,6 @@ void ice_dpll_init(struct ice_pf *pf)
err = ice_dpll_init_pins(pf, cgu);
if (err)
goto deinit_pps;
- mutex_init(&d->lock);
if (cgu) {
err = ice_dpll_init_worker(pf);
if (err)
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a19b06f18e40..255a9c8151b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -129,6 +129,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize),
ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber),
ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error),
+ ICE_PF_STAT("rx_eipe_error.nic", hw_rx_eipe_error),
ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards),
ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors),
ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes),
@@ -801,7 +802,7 @@ static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
if (!pf)
return -EINVAL;
- data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL);
+ data = kzalloc(size, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -944,11 +945,9 @@ static u64 ice_loopback_test(struct net_device *netdev)
int num_frames, valid_frames;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- struct device *dev;
- u8 *tx_frame;
+ u8 *tx_frame __free(kfree);
int i;
- dev = ice_pf_to_dev(pf);
netdev_info(netdev, "loopback test\n");
test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
@@ -993,7 +992,7 @@ static u64 ice_loopback_test(struct net_device *netdev)
for (i = 0; i < num_frames; i++) {
if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) {
ret = 8;
- goto lbtest_free_frame;
+ goto remove_mac_filters;
}
}
@@ -1003,8 +1002,6 @@ static u64 ice_loopback_test(struct net_device *netdev)
else if (valid_frames != num_frames)
ret = 10;
-lbtest_free_frame:
- devm_kfree(dev, tx_frame);
remove_mac_filters:
if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI))
netdev_err(netdev, "Could not remove MAC filter for the test VSI\n");
@@ -2486,6 +2483,24 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
case SCTP_V4_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4;
break;
+ case GTPU_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPC_TEID_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_EH_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_UL_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4;
+ break;
+ case GTPU_DL_V4_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4;
+ break;
case TCP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6;
break;
@@ -2495,6 +2510,24 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
case SCTP_V6_FLOW:
hdrs |= ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6;
break;
+ case GTPU_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPC_TEID_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_EH_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_UL_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6;
+ break;
+ case GTPU_DL_V6_FLOW:
+ hdrs |= ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6;
+ break;
default:
break;
}
@@ -2518,6 +2551,12 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
+ case GTPU_V4_FLOW:
+ case GTPC_V4_FLOW:
+ case GTPC_TEID_V4_FLOW:
+ case GTPU_EH_V4_FLOW:
+ case GTPU_UL_V4_FLOW:
+ case GTPU_DL_V4_FLOW:
if (nfc->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;
if (nfc->data & RXH_IP_DST)
@@ -2526,6 +2565,12 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
+ case GTPU_V6_FLOW:
+ case GTPC_V6_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ case GTPU_EH_V6_FLOW:
+ case GTPU_UL_V6_FLOW:
+ case GTPU_DL_V6_FLOW:
if (nfc->data & RXH_IP_SRC)
hfld |= ICE_FLOW_HASH_FLD_IPV6_SA;
if (nfc->data & RXH_IP_DST)
@@ -2564,6 +2609,33 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
}
}
+ if (nfc->data & RXH_GTP_TEID) {
+ switch (nfc->flow_type) {
+ case GTPC_TEID_V4_FLOW:
+ case GTPC_TEID_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPC_TEID;
+ break;
+ case GTPU_V4_FLOW:
+ case GTPU_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_IP_TEID;
+ break;
+ case GTPU_EH_V4_FLOW:
+ case GTPU_EH_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_EH_TEID;
+ break;
+ case GTPU_UL_V4_FLOW:
+ case GTPU_UL_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_UP_TEID;
+ break;
+ case GTPU_DL_V4_FLOW:
+ case GTPU_DL_V6_FLOW:
+ hfld |= ICE_FLOW_HASH_FLD_GTPU_DWN_TEID;
+ break;
+ default:
+ break;
+ }
+ }
+
return hfld;
}
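With the RXH_GTP_TEID handling in place, a GTP-aware RSS request composes its hash field set from the IP addresses plus the tunnel endpoint identifier. As a sketch of the mapping only (the IPv4 DA flag is assumed by symmetry with the SA flag used above): a GTPU_EH_V4_FLOW request asking for RXH_IP_SRC | RXH_IP_DST | RXH_GTP_TEID would resolve to:

	u64 hfld = 0;

	hfld |= ICE_FLOW_HASH_FLD_IPV4_SA;	/* RXH_IP_SRC */
	hfld |= ICE_FLOW_HASH_FLD_IPV4_DA;	/* RXH_IP_DST */
	hfld |= ICE_FLOW_HASH_FLD_GTPU_EH_TEID;	/* RXH_GTP_TEID */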
@@ -2676,6 +2748,13 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
hash_flds & ICE_FLOW_HASH_FLD_UDP_DST_PORT ||
hash_flds & ICE_FLOW_HASH_FLD_SCTP_DST_PORT)
nfc->data |= (u64)RXH_L4_B_2_3;
+
+ if (hash_flds & ICE_FLOW_HASH_FLD_GTPC_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_IP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_EH_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
+ hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
+ nfc->data |= (u64)RXH_GTP_TEID;
}
/**
@@ -3360,7 +3439,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
struct ice_pf *pf = ice_netdev_to_pf(dev);
/* only report timestamping if PTP is enabled */
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return ethtool_op_get_ts_info(dev, info);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index ff82915ab497..2fd2e0cb483d 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -37,13 +37,13 @@
#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
-#define ICE_FLOW_HASH_GTP_TEID \
+#define ICE_FLOW_HASH_GTP_C_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
-#define ICE_FLOW_HASH_GTP_IPV4_TEID \
- (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
-#define ICE_FLOW_HASH_GTP_IPV6_TEID \
- (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
+#define ICE_FLOW_HASH_GTP_C_IPV4_TEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_C_TEID)
+#define ICE_FLOW_HASH_GTP_C_IPV6_TEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_C_TEID)
#define ICE_FLOW_HASH_GTP_U_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))
@@ -66,6 +66,20 @@
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
ICE_FLOW_HASH_GTP_U_EH_QFI)
+#define ICE_FLOW_HASH_GTP_U_UP \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID))
+#define ICE_FLOW_HASH_GTP_U_DWN \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID))
+
+#define ICE_FLOW_HASH_GTP_U_IPV4_UP \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_UP)
+#define ICE_FLOW_HASH_GTP_U_IPV6_UP \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_UP)
+#define ICE_FLOW_HASH_GTP_U_IPV4_DWN \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_DWN)
+#define ICE_FLOW_HASH_GTP_U_IPV6_DWN \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_DWN)
+
#define ICE_FLOW_HASH_PPPOE_SESS_ID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))
@@ -242,6 +256,13 @@ enum ice_flow_field {
#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \
BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
+#define ICE_FLOW_HASH_FLD_GTPC_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_IP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_EH_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_UP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID)
+#define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)
+
/* Flow headers and fields for AVF support */
enum ice_flow_avf_hdr_field {
/* Values 0 - 28 are reserved for future use */
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
index 92b5dac481cd..4fd15387a7e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.c
+++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c
@@ -188,6 +188,8 @@ void ice_fwlog_deinit(struct ice_hw *hw)
if (hw->bus.func)
return;
+ ice_debugfs_pf_deinit(hw->back);
+
/* make sure FW logging is disabled to not put the FW in a weird state
* for the next driver load
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 2a25323105e5..467372d541d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -152,6 +152,27 @@ ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport)
}
/**
+ * ice_pkg_has_lport_extract - check if lport extraction is supported
+ * @hw: HW struct
+ */
+static bool ice_pkg_has_lport_extract(struct ice_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) {
+ u16 offset;
+ u8 fv_prot;
+
+ ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i,
+ &fv_prot, &offset);
+ if (fv_prot == ICE_FV_PROT_MDID &&
+ offset == ICE_LP_EXT_BUF_OFFSET)
+ return true;
+ }
+ return false;
+}
+
+/**
* ice_lag_find_primary - returns pointer to the primary interface's lag struct
* @lag: local interfaces lag struct
*/
@@ -1206,7 +1227,7 @@ static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
}
/**
- * ice_lag_init_feature_support_flag - Check for NVM support for LAG
+ * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG
* @pf: PF struct
*/
static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
@@ -1219,7 +1240,7 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
else
ice_clear_feature_support(pf, ICE_F_ROCE_LAG);
- if (caps->sriov_lag)
+ if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw))
ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
else
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index ede833dfa658..183b38792ef2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -17,6 +17,9 @@ enum ice_lag_role {
#define ICE_LAG_INVALID_PORT 0xFF
#define ICE_LAG_RESET_RETRIES 5
+#define ICE_SW_DEFAULT_PROFILE 0
+#define ICE_FV_PROT_MDID 255
+#define ICE_LP_EXT_BUF_OFFSET 32
struct ice_pf;
struct ice_vf;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 9be724291ef8..ee3f0d3e3f6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1618,6 +1618,25 @@ static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
*/
{ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc4 with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_IPV4, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc4t with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_C_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4 with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4e with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_EH, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4u with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_UP, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu4d with input set IPv4 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV4,
+ ICE_FLOW_HASH_GTP_U_IPV4_DWN, ICE_RSS_OUTER_HEADERS, false},
+
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
{ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
ICE_HASH_TCP_IPV6, ICE_RSS_ANY_HEADERS, false},
@@ -1632,6 +1651,24 @@ static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
/* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
{ICE_FLOW_SEG_HDR_ESP,
ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc6 with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_IPV6, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpc6t with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_C_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6 with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_TEID, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6e with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_EH, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6u with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_UP | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_UP, ICE_RSS_OUTER_HEADERS, false},
+ /* configure RSS for gtpu6d with input set IPv6 src/dst */
+ {ICE_FLOW_SEG_HDR_GTPU_DWN | ICE_FLOW_SEG_HDR_IPV6,
+ ICE_FLOW_HASH_GTP_U_IPV6_DWN, ICE_RSS_OUTER_HEADERS, false},
};
/**
@@ -1672,27 +1709,6 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
}
/**
- * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
- * @vsi: VSI
- */
-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
-{
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- vsi->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
- } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
- } else {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_3072;
- }
-}
-
-/**
* ice_pf_state_is_nominal - checks the PF for nominal state
* @pf: pointer to PF to check
*
@@ -1795,114 +1811,6 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
-{
- if (q_idx >= vsi->num_rxq)
- return -EINVAL;
-
- return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
-}
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
-{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
-
- if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
- return -EINVAL;
-
- qg_buf->num_txqs = 1;
-
- return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
-}
-
-/**
- * ice_vsi_cfg_rxqs - Configure the VSI for Rx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Rx VSI for operation.
- */
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
-{
- u16 i;
-
- if (vsi->type == ICE_VSI_VF)
- goto setup_rings;
-
- ice_vsi_cfg_frame_size(vsi);
-setup_rings:
- /* set up individual rings */
- ice_for_each_rxq(vsi, i) {
- int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
-
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_vsi_cfg_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- * @rings: Tx ring array to be configured
- * @count: number of Tx ring array elements
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
-{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
- int err = 0;
- u16 q_idx;
-
- qg_buf->num_txqs = 1;
-
- for (q_idx = 0; q_idx < count; q_idx++) {
- err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
- if (err)
- break;
- }
-
- return err;
-}
-
-/**
- * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx VSI for operation.
- */
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
-{
- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
-}
-
-/**
- * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
- * @vsi: the VSI being configured
- *
- * Return 0 on success and a negative value on error
- * Configure the Tx queues dedicated for XDP in given VSI for operation.
- */
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
-{
- int ret;
- int i;
-
- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
- if (ret)
- return ret;
-
- ice_for_each_rxq(vsi, i)
- ice_tx_xsk_pool(vsi, i);
-
- return 0;
-}
-
/**
* ice_intrl_usec_to_reg - convert interrupt rate limit to register value
* @intrl: interrupt rate limit in usecs
@@ -2426,7 +2334,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
ice_vsi_map_rings_to_vectors(vsi);
/* Associate q_vector rings to napi */
- ice_vsi_set_napi_queues(vsi, true);
+ ice_vsi_set_napi_queues(vsi);
vsi->stat_offsets_loaded = false;
@@ -2849,74 +2757,19 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
}
/**
- * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
- * @vsi: the VSI being un-configured
- */
-void ice_vsi_dis_irq(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
- u32 val;
- int i;
-
- /* disable interrupt causation from each queue */
- if (vsi->tx_rings) {
- ice_for_each_txq(vsi, i) {
- if (vsi->tx_rings[i]) {
- u16 reg;
-
- reg = vsi->tx_rings[i]->reg_idx;
- val = rd32(hw, QINT_TQCTL(reg));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(reg), val);
- }
- }
- }
-
- if (vsi->rx_rings) {
- ice_for_each_rxq(vsi, i) {
- if (vsi->rx_rings[i]) {
- u16 reg;
-
- reg = vsi->rx_rings[i]->reg_idx;
- val = rd32(hw, QINT_RQCTL(reg));
- val &= ~QINT_RQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_RQCTL(reg), val);
- }
- }
- }
-
- /* disable each interrupt */
- ice_for_each_q_vector(vsi, i) {
- if (!vsi->q_vectors[i])
- continue;
- wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
- }
-
- ice_flush(hw);
-
- /* don't call synchronize_irq() for VF's from the host */
- if (vsi->type == ICE_VSI_VF)
- return;
-
- ice_for_each_q_vector(vsi, i)
- synchronize_irq(vsi->q_vectors[i]->irq.virq);
-}
-
-/**
- * ice_queue_set_napi - Set the napi instance for the queue
+ * __ice_queue_set_napi - Set the napi instance for the queue
* @dev: device to which NAPI and queue belong
* @queue_index: Index of queue
* @type: queue type as RX or TX
* @napi: NAPI context
* @locked: is the rtnl_lock already held
*
- * Set the napi instance for the queue
+ * Set the napi instance for the queue. Caller indicates the lock status.
*/
static void
-ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
- enum netdev_queue_type type, struct napi_struct *napi,
- bool locked)
+__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi,
+ bool locked)
{
if (!locked)
rtnl_lock();
@@ -2926,26 +2779,79 @@ ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
}
/**
- * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * ice_queue_set_napi - Set the napi instance for the queue
+ * @vsi: VSI being configured
+ * @queue_index: Index of queue
+ * @type: queue type as RX or TX
+ * @napi: NAPI context
+ *
+ * Set the napi instance for the queue. The rtnl lock state is derived from the
+ * execution path.
+ */
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi)
+{
+ struct ice_pf *pf = vsi->back;
+
+ if (!vsi->netdev)
+ return;
+
+ if (current_work() == &pf->serv_task ||
+ test_bit(ICE_PREPARED_FOR_RESET, pf->state) ||
+ test_bit(ICE_DOWN, pf->state) ||
+ test_bit(ICE_SUSPENDED, pf->state))
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ false);
+ else
+ __ice_queue_set_napi(vsi->netdev, queue_index, type, napi,
+ true);
+}
+
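The split yields two entry points: __ice_queue_set_napi() for callers that already know their rtnl state, and ice_queue_set_napi() below, which infers it (the service task, reset, down and suspend paths run without rtnl held, so the wrapper takes it). The primitive underneath is netif_queue_set_napi(), which must be called under rtnl; its conditional-lock shape, reduced to the core:

	static void set_queue_napi(struct net_device *dev, unsigned int idx,
				   enum netdev_queue_type type,
				   struct napi_struct *napi, bool rtnl_held)
	{
		/* netif_queue_set_napi() requires the rtnl lock */
		if (!rtnl_held)
			rtnl_lock();
		netif_queue_set_napi(dev, idx, type, napi);
		if (!rtnl_held)
			rtnl_unlock();
	}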
+/**
+ * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
* @q_vector: q_vector pointer
* @locked: is the rtnl_lock already held
*
+ * Associate the q_vector napi with all the queue[s] on the vector.
+ * Caller indicates the lock status.
+ */
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+{
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
+ locked);
+
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
+ locked);
+ /* Also set the interrupt number for the NAPI */
+ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
+}
+
+/**
+ * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
+ * @q_vector: q_vector pointer
+ *
* Associate the q_vector napi with all the queue[s] on the vector
*/
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector)
{
struct ice_rx_ring *rx_ring;
struct ice_tx_ring *tx_ring;
ice_for_each_rx_ring(rx_ring, q_vector->rx)
- ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
- NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
- locked);
+ ice_queue_set_napi(q_vector->vsi, rx_ring->q_index,
+ NETDEV_QUEUE_TYPE_RX, &q_vector->napi);
ice_for_each_tx_ring(tx_ring, q_vector->tx)
- ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
- NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
- locked);
+ ice_queue_set_napi(q_vector->vsi, tx_ring->q_index,
+ NETDEV_QUEUE_TYPE_TX, &q_vector->napi);
/* Also set the interrupt number for the NAPI */
netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
}
@@ -2953,11 +2859,10 @@ void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
/**
* ice_vsi_set_napi_queues
* @vsi: VSI pointer
- * @locked: is the rtnl_lock already held
*
* Associate queue[s] with napi for all vectors
*/
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
{
int i;
@@ -2965,7 +2870,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
return;
ice_for_each_q_vector(vsi, i)
- ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked);
+ ice_q_vector_set_napi_queues(vsi->q_vectors[i]);
}
/**
@@ -3140,7 +3045,7 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
}
}
- tx_ring_stats = vsi_stat->rx_ring_stats;
+ tx_ring_stats = vsi_stat->tx_ring_stats;
vsi_stat->tx_ring_stats =
krealloc_array(vsi_stat->tx_ring_stats, req_txq,
sizeof(*vsi_stat->tx_ring_stats),
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 71bd27244941..9cd23afe5f15 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -54,14 +54,6 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf);
void ice_update_eth_stats(struct ice_vsi *vsi);
-int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
-
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
-
-int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
-
-int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
-
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
@@ -72,8 +64,6 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
-int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
-
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
@@ -91,9 +81,15 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
-void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
+void
+ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index,
+ enum netdev_queue_type type, struct napi_struct *napi);
+
+void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked);
-void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked);
+void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector);
+
+void ice_vsi_set_napi_queues(struct ice_vsi *vsi);
int ice_vsi_release(struct ice_vsi *vsi);
@@ -114,8 +110,6 @@ void
ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
bool ena_ts);
-void ice_vsi_dis_irq(struct ice_vsi *vsi);
-
void ice_vsi_free_irq(struct ice_vsi *vsi);
void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index dd4a9bc0dfdc..33a164fa325a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -613,7 +613,7 @@ skip:
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_prepare_for_reset(pf);
+ ice_ptp_prepare_for_reset(pf, reset_type);
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_exit(pf);
@@ -1649,8 +1649,10 @@ static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
- /* Nothing to do here if sideband queue is not supported */
- if (!ice_is_sbq_supported(hw)) {
+ /* if mac_type is not generic, sideband is not supported
+ * and there's nothing to do here
+ */
+ if (!ice_is_generic_mac(hw)) {
clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
return;
}
@@ -3495,7 +3497,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx) {
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
ice_napi_poll);
- ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
+ __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false);
}
}
@@ -4572,90 +4574,6 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-static int ice_start_eth(struct ice_vsi *vsi)
-{
- int err;
-
- err = ice_init_mac_fltr(vsi->back);
- if (err)
- return err;
-
- err = ice_vsi_open(vsi);
- if (err)
- ice_fltr_remove_all(vsi);
-
- return err;
-}
-
-static void ice_stop_eth(struct ice_vsi *vsi)
-{
- ice_fltr_remove_all(vsi);
- ice_vsi_close(vsi);
-}
-
-static int ice_init_eth(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- int err;
-
- if (!vsi)
- return -EINVAL;
-
- /* init channel list */
- INIT_LIST_HEAD(&vsi->ch_list);
-
- err = ice_cfg_netdev(vsi);
- if (err)
- return err;
- /* Setup DCB netlink interface */
- ice_dcbnl_setup(vsi);
-
- err = ice_init_mac_fltr(pf);
- if (err)
- goto err_init_mac_fltr;
-
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create_pf_port;
-
- SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
-
- err = ice_register_netdev(vsi);
- if (err)
- goto err_register_netdev;
-
- err = ice_tc_indir_block_register(vsi);
- if (err)
- goto err_tc_indir_block_register;
-
- ice_napi_add(vsi);
-
- return 0;
-
-err_tc_indir_block_register:
- ice_unregister_netdev(vsi);
-err_register_netdev:
- ice_devlink_destroy_pf_port(pf);
-err_devlink_create_pf_port:
-err_init_mac_fltr:
- ice_decfg_netdev(vsi);
- return err;
-}
-
-static void ice_deinit_eth(struct ice_pf *pf)
-{
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
-
- if (!vsi)
- return;
-
- ice_vsi_close(vsi);
- ice_unregister_netdev(vsi);
- ice_devlink_destroy_pf_port(pf);
- ice_tc_indir_block_unregister(vsi);
- ice_decfg_netdev(vsi);
-}
-
/**
* ice_wait_for_fw - wait for full FW readiness
* @hw: pointer to the hardware structure
@@ -4681,7 +4599,7 @@ static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
return -ETIMEDOUT;
}
-static int ice_init_dev(struct ice_pf *pf)
+int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
@@ -4774,7 +4692,7 @@ err_init_pf:
return err;
}
-static void ice_deinit_dev(struct ice_pf *pf)
+void ice_deinit_dev(struct ice_pf *pf)
{
ice_free_irq_msix_misc(pf);
ice_deinit_pf(pf);
@@ -5079,31 +4997,47 @@ static void ice_deinit(struct ice_pf *pf)
/**
* ice_load - load pf by init hw and starting VSI
* @pf: pointer to the pf instance
+ *
+ * This function has to be called under devl_lock.
*/
int ice_load(struct ice_pf *pf)
{
- struct ice_vsi_cfg_params params = {};
struct ice_vsi *vsi;
int err;
- err = ice_init_dev(pf);
+ devl_assert_locked(priv_to_devlink(pf));
+
+ vsi = ice_get_main_vsi(pf);
+
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
+ err = ice_cfg_netdev(vsi);
if (err)
return err;
- vsi = ice_get_main_vsi(pf);
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
- params = ice_vsi_to_params(vsi);
- params.flags = ICE_VSI_FLAG_INIT;
+ err = ice_init_mac_fltr(pf);
+ if (err)
+ goto err_init_mac_fltr;
- rtnl_lock();
- err = ice_vsi_cfg(vsi, &params);
+ err = ice_devlink_create_pf_port(pf);
if (err)
- goto err_vsi_cfg;
+ goto err_devlink_create_pf_port;
+
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+
+ err = ice_register_netdev(vsi);
+ if (err)
+ goto err_register_netdev;
- err = ice_start_eth(ice_get_main_vsi(pf));
+ err = ice_tc_indir_block_register(vsi);
if (err)
- goto err_start_eth;
- rtnl_unlock();
+ goto err_tc_indir_block_register;
+
+ ice_napi_add(vsi);
err = ice_init_rdma(pf);
if (err)
@@ -5117,29 +5051,35 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
- ice_vsi_close(ice_get_main_vsi(pf));
- rtnl_lock();
-err_start_eth:
- ice_vsi_decfg(ice_get_main_vsi(pf));
-err_vsi_cfg:
- rtnl_unlock();
- ice_deinit_dev(pf);
+ ice_tc_indir_block_unregister(vsi);
+err_tc_indir_block_register:
+ ice_unregister_netdev(vsi);
+err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create_pf_port:
+err_init_mac_fltr:
+ ice_decfg_netdev(vsi);
return err;
}
/**
* ice_unload - unload pf by stopping VSI and deinit hw
* @pf: pointer to the pf instance
+ *
+ * This function has to be called under devl_lock.
*/
void ice_unload(struct ice_pf *pf)
{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ devl_assert_locked(priv_to_devlink(pf));
+
ice_deinit_features(pf);
ice_deinit_rdma(pf);
- rtnl_lock();
- ice_stop_eth(ice_get_main_vsi(pf));
- ice_vsi_decfg(ice_get_main_vsi(pf));
- rtnl_unlock();
- ice_deinit_dev(pf);
+ ice_tc_indir_block_unregister(vsi);
+ ice_unregister_netdev(vsi);
+ ice_devlink_destroy_pf_port(pf);
+ ice_decfg_netdev(vsi);
}
/**
@@ -5237,27 +5177,23 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err)
goto err_init;
- err = ice_init_eth(pf);
- if (err)
- goto err_init_eth;
-
- err = ice_init_rdma(pf);
+ devl_lock(priv_to_devlink(pf));
+ err = ice_load(pf);
+ devl_unlock(priv_to_devlink(pf));
if (err)
- goto err_init_rdma;
+ goto err_load;
err = ice_init_devlink(pf);
if (err)
goto err_init_devlink;
- ice_init_features(pf);
-
return 0;
err_init_devlink:
- ice_deinit_rdma(pf);
-err_init_rdma:
- ice_deinit_eth(pf);
-err_init_eth:
+ devl_lock(priv_to_devlink(pf));
+ ice_unload(pf);
+ devl_unlock(priv_to_devlink(pf));
+err_load:
ice_deinit(pf);
err_init:
pci_disable_device(pdev);
@@ -5340,8 +5276,6 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
- ice_debugfs_exit();
-
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -5355,12 +5289,14 @@ static void ice_remove(struct pci_dev *pdev)
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
- ice_deinit_features(pf);
+
ice_deinit_devlink(pf);
- ice_deinit_rdma(pf);
- ice_deinit_eth(pf);
- ice_deinit(pf);
+ devl_lock(priv_to_devlink(pf));
+ ice_unload(pf);
+ devl_unlock(priv_to_devlink(pf));
+
+ ice_deinit(pf);
ice_vsi_release_all(pf);
ice_setup_mc_magic_wake(pf);
@@ -5447,6 +5383,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
if (ret)
goto err_reinit;
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ ice_vsi_set_napi_queues(pf->vsi[v]);
}
ret = ice_req_irq_msix_misc(pf);
@@ -5752,6 +5689,10 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
@@ -5841,6 +5782,7 @@ module_init(ice_module_init);
static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
+ ice_debugfs_exit();
destroy_workqueue(ice_wq);
destroy_workqueue(ice_lag_wq);
pr_info("module unloaded\n");
@@ -6736,6 +6678,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *net_stats, *stats_prev;
struct rtnl_link_stats64 *vsi_stats;
+ struct ice_pf *pf = vsi->back;
u64 pkts, bytes;
int i;
@@ -6781,21 +6724,18 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
net_stats = &vsi->net_stats;
stats_prev = &vsi->net_stats_prev;
- /* clear prev counters after reset */
- if (vsi_stats->tx_packets < stats_prev->tx_packets ||
- vsi_stats->rx_packets < stats_prev->rx_packets) {
- stats_prev->tx_packets = 0;
- stats_prev->tx_bytes = 0;
- stats_prev->rx_packets = 0;
- stats_prev->rx_bytes = 0;
+ /* Update netdev counters, but keep in mind that the raw values can
+ * restart at an arbitrary point after a PF reset. Since we increase
+ * the reported stats by the Cur - Prev delta, Prev must be valid;
+ * if it is not, skip this round.
+ */
+ if (likely(pf->stat_prev_loaded)) {
+ net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
+ net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
+ net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
+ net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
}
- /* update netdev counters */
- net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
- net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
- net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
- net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
-
stats_prev->tx_packets = vsi_stats->tx_packets;
stats_prev->tx_bytes = vsi_stats->tx_bytes;
stats_prev->rx_packets = vsi_stats->rx_packets;
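The rebuilt accounting only folds a delta into the netdev counters when the previous snapshot is known to be valid (pf->stat_prev_loaded), instead of trying to detect a reset by watching for counters that appear to move backwards. A compact model of the scheme, with hypothetical type and field names:

	struct counter_state {
		u64 reported;		/* monotonic value exposed to the stack */
		u64 prev;		/* last raw snapshot read from the rings */
		bool prev_valid;	/* false until a good snapshot exists */
	};

	static void counter_update(struct counter_state *c, u64 cur)
	{
		/* raw ring counters can restart at an arbitrary value
		 * after a PF reset, so only trust the delta when the
		 * previous snapshot is valid
		 */
		if (c->prev_valid)
			c->reported += cur - c->prev;
		c->prev = cur;
		c->prev_valid = true;
	}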
@@ -7060,6 +7000,50 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+static void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 val;
+ int i;
+
+ /* disable interrupt causation from each Rx queue; Tx queues are
+ * handled in ice_vsi_stop_tx_ring()
+ */
+ if (vsi->rx_rings) {
+ ice_for_each_rxq(vsi, i) {
+ if (vsi->rx_rings[i]) {
+ u16 reg;
+
+ reg = vsi->rx_rings[i]->reg_idx;
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+ }
+ }
+ }
+
+ /* disable each interrupt */
+ ice_for_each_q_vector(vsi, i) {
+ if (!vsi->q_vectors[i])
+ continue;
+ wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
+ }
+
+ ice_flush(hw);
+
+ /* don't call synchronize_irq() for VFs from the host */
+ if (vsi->type == ICE_VSI_VF)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ synchronize_irq(vsi->q_vectors[i]->irq.virq);
+}
+
+/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
*
@@ -7548,7 +7532,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
* fail.
*/
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_reset(pf);
+ ice_ptp_rebuild(pf, reset_type);
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_init(pf);
@@ -8012,6 +7996,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
pf_sw = pf->first_sw;
/* find the attribute in the netlink message */
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index 82bc54fec7f3..a2562f04267f 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -24,7 +24,7 @@
#define rd64(a, reg) readq((a)->hw_addr + (reg))
#define ice_flush(a) rd32((a), GLGEN_STAT)
-#define ICE_M(m, s) ((m) << (s))
+#define ICE_M(m, s) ((m ## U) << (s))
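The one-character change above matters: a plain int literal shifted into bit 31 produces a negative value that sign-extends when widened. A minimal standalone sketch (macro names are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define MASK_SIGNED(m, s)   ((m) << (s))      /* old style: int arithmetic */
	#define MASK_UNSIGNED(m, s) ((m ## U) << (s)) /* new style: force unsigned */

	int main(void)
	{
		/* 0xff << 24 shifts into the sign bit of a 32-bit int (undefined
		 * behavior in C); in practice the negative result sign-extends
		 * to 0xffffffffff000000 when widened to 64 bits.
		 */
		uint64_t bad  = (uint64_t)MASK_SIGNED(0xff, 24);
		uint64_t good = (uint64_t)MASK_UNSIGNED(0xff, 24);

		printf("signed:   %#llx\n", (unsigned long long)bad);
		printf("unsigned: %#llx\n", (unsigned long long)good);
		return 0;
	}

Note that the `m ## U` paste only works when the mask argument is a literal constant, which holds for the register masks this macro is used with. The same fix appears below for ICE_OROM_VER_MASK.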
struct ice_dma_mem {
void *va;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 3b6605c8585e..c11eba07283c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -601,17 +601,13 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
/* Read the low 32 bit value */
raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
- /* For PHYs which don't implement a proper timestamp ready bitmap,
- * verify that the timestamp value is different from the last cached
- * timestamp. If it is not, skip this for now assuming it hasn't yet
- * been captured by hardware.
+ /* Devices using this interface always verify that the timestamp
+ * differs from the last cached timestamp value.
+ */
- if (!drop_ts && tx->verify_cached &&
- raw_tstamp == tx->tstamps[idx].cached_tstamp)
+ if (raw_tstamp == tx->tstamps[idx].cached_tstamp)
return;
- if (tx->verify_cached && raw_tstamp)
- tx->tstamps[idx].cached_tstamp = raw_tstamp;
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
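A hedged sketch of the cached-timestamp check this hunk simplifies (helper name is illustrative): with no ready bitmap, a repeat of the last raw value is treated as "not yet captured by hardware":

	#include <stdbool.h>
	#include <stdint.h>

	/* Return true if @raw is a newly captured timestamp, updating the
	 * cached copy; a repeated value is assumed to be stale.
	 */
	static bool tstamp_is_new(uint64_t raw, uint64_t *cached)
	{
		if (raw == *cached)
			return false;
		if (raw)
			*cached = raw;
		return true;
	}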
@@ -701,9 +697,11 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
hw = &pf->hw;
/* Read the Tx ready status first */
- err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
- if (err)
- return;
+ if (tx->has_ready_bitmap) {
+ err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
+ if (err)
+ return;
+ }
/* Drop packets if the link went down */
link_up = ptp_port->link_up;
@@ -731,7 +729,8 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
* If we do not, the hardware logic for generating a new
* interrupt can get stuck on some devices.
*/
- if (!(tstamp_ready & BIT_ULL(phy_idx))) {
+ if (tx->has_ready_bitmap &&
+ !(tstamp_ready & BIT_ULL(phy_idx))) {
if (drop_ts)
goto skip_ts_read;
@@ -751,7 +750,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
* from the last cached timestamp. If it is not, skip this for
* now assuming it hasn't yet been captured by hardware.
*/
- if (!drop_ts && tx->verify_cached &&
+ if (!drop_ts && !tx->has_ready_bitmap &&
raw_tstamp == tx->tstamps[idx].cached_tstamp)
continue;
@@ -761,7 +760,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
skip_ts_read:
spin_lock_irqsave(&tx->lock, flags);
- if (tx->verify_cached && raw_tstamp)
+ if (!tx->has_ready_bitmap && raw_tstamp)
tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
@@ -965,6 +964,22 @@ ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
}
/**
+ * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock
+ * @pf: Board private structure
+ *
+ * Called by the clock owner to flush all the Tx timestamp trackers associated
+ * with the clock.
+ */
+static void
+ice_ptp_flush_all_tx_tracker(struct ice_pf *pf)
+{
+ struct ice_ptp_port *port;
+
+ list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member)
+ ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx);
+}
+
+/**
* ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
* @pf: Board private structure
* @tx: Tx tracking structure to release
@@ -1014,7 +1029,7 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
tx->block = port / ICE_PORTS_PER_QUAD;
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
tx->len = INDEX_PER_PORT_E82X;
- tx->verify_cached = 0;
+ tx->has_ready_bitmap = 1;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1037,7 +1052,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
* verify new timestamps against cached copy of the last read
* timestamp.
*/
- tx->verify_cached = 1;
+ tx->has_ready_bitmap = 0;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1430,7 +1445,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
@@ -1456,14 +1471,14 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
}
/**
- * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
+ * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings
* @pf: PF private structure
* @ena: bool value to enable or disable interrupt
* @threshold: Minimum number of packets at which intr is triggered
*
* Utility function to enable or disable Tx timestamp interrupt and threshold
*/
-static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
+static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
struct ice_hw *hw = &pf->hw;
int err = 0;
@@ -2162,7 +2177,7 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
struct hwtstamp_config *config;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return -EIO;
config = &pf->ptp.tstamp_config;
@@ -2232,7 +2247,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
struct hwtstamp_config config;
int err;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return -EAGAIN;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -2616,7 +2631,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
int err;
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
err = ice_ptp_update_cached_phctime(pf);
@@ -2629,36 +2644,72 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
- * ice_ptp_reset - Initialize PTP hardware clock support after reset
+ * ice_ptp_prepare_for_reset - Prepare PTP for reset
+ * @pf: Board private structure
+ * @reset_type: the reset type being performed
+ */
+void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ u8 src_tmr;
+
+ if (ptp->state != ICE_PTP_READY)
+ return;
+
+ ptp->state = ICE_PTP_RESETTING;
+
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_disable_timestamp_mode(pf);
+
+ kthread_cancel_delayed_work_sync(&ptp->work);
+
+ if (reset_type == ICE_RESET_PFR)
+ return;
+
+ ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+
+ /* Disable periodic outputs */
+ ice_ptp_disable_all_clkout(pf);
+
+ src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
+
+ /* Disable source clock */
+ wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Acquire PHC and system timer to restore after reset */
+ ptp->reset_time = ktime_get_real_ns();
+}
+
+/**
+ * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset
* @pf: Board private structure
+ *
+ * Companion function for ice_ptp_rebuild() which handles tasks that only the
+ * PTP clock owner instance should perform.
*/
-void ice_ptp_reset(struct ice_pf *pf)
+static int ice_ptp_rebuild_owner(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- int err, itr = 1;
u64 time_diff;
-
- if (test_bit(ICE_PFR_REQ, pf->state) ||
- !ice_pf_src_tmr_owned(pf))
- goto pfr;
+ int err;
err = ice_ptp_init_phc(hw);
if (err)
- goto err;
+ return err;
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
- goto err;
+ return err;
}
/* Write the increment time value to PHY and LAN */
err = ice_ptp_write_incval(hw, ice_base_incval(pf));
if (err) {
ice_ptp_unlock(hw);
- goto err;
+ return err;
}
/* Write the initial Time value to PHY and LAN using the cached PHC
@@ -2674,38 +2725,54 @@ void ice_ptp_reset(struct ice_pf *pf)
err = ice_ptp_write_init(pf, &ts);
if (err) {
ice_ptp_unlock(hw);
- goto err;
+ return err;
}
/* Release the global hardware lock */
ice_ptp_unlock(hw);
+ /* Flush software tracking of any outstanding timestamps since we're
+ * about to flush the PHY timestamp block.
+ */
+ ice_ptp_flush_all_tx_tracker(pf);
+
if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
- err = ice_ptp_tx_ena_intr(pf, true, itr);
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
if (err)
- goto err;
- }
+ return err;
-pfr:
- /* Init Tx structures */
- if (ice_is_e810(&pf->hw)) {
- err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
- } else {
- kthread_init_delayed_work(&ptp->port.ov_work,
- ice_ptp_wait_for_offsets);
- err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx,
- ptp->port.port_num);
+ ice_ptp_restart_all_phy(pf);
}
- if (err)
+
+ return 0;
+}
+
+/**
+ * ice_ptp_rebuild - Initialize PTP hardware clock support after reset
+ * @pf: Board private structure
+ * @reset_type: the reset type being performed
+ */
+void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+{
+ struct ice_ptp *ptp = &pf->ptp;
+ int err;
+
+ if (ptp->state == ICE_PTP_READY) {
+ ice_ptp_prepare_for_reset(pf, reset_type);
+ } else if (ptp->state != ICE_PTP_RESETTING) {
+ err = -EINVAL;
+ dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n");
goto err;
+ }
- set_bit(ICE_FLAG_PTP, pf->flags);
+ if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) {
+ err = ice_ptp_rebuild_owner(pf);
+ if (err)
+ goto err;
+ }
- /* Restart the PHY timestamping block */
- if (!test_bit(ICE_PFR_REQ, pf->state) &&
- ice_pf_src_tmr_owned(pf))
- ice_ptp_restart_all_phy(pf);
+ ptp->state = ICE_PTP_READY;
/* Start periodic work going */
kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
@@ -2714,6 +2781,7 @@ pfr:
return;
err:
+ ptp->state = ICE_PTP_ERROR;
dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
@@ -2923,39 +2991,6 @@ int ice_ptp_clock_index(struct ice_pf *pf)
}
/**
- * ice_ptp_prepare_for_reset - Prepare PTP for reset
- * @pf: Board private structure
- */
-void ice_ptp_prepare_for_reset(struct ice_pf *pf)
-{
- struct ice_ptp *ptp = &pf->ptp;
- u8 src_tmr;
-
- clear_bit(ICE_FLAG_PTP, pf->flags);
-
- /* Disable timestamping for both Tx and Rx */
- ice_ptp_disable_timestamp_mode(pf);
-
- kthread_cancel_delayed_work_sync(&ptp->work);
-
- if (test_bit(ICE_PFR_REQ, pf->state))
- return;
-
- ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
-
- /* Disable periodic outputs */
- ice_ptp_disable_all_clkout(pf);
-
- src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
-
- /* Disable source clock */
- wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
-
- /* Acquire PHC and system timer to restore after reset */
- ptp->reset_time = ktime_get_real_ns();
-}
-
-/**
* ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
* @pf: Board private structure
*
@@ -2967,7 +3002,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- int err, itr = 1;
+ int err;
err = ice_ptp_init_phc(hw);
if (err) {
@@ -3002,7 +3037,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
if (!ice_is_e810(hw)) {
/* Enable quad interrupts */
- err = ice_ptp_tx_ena_intr(pf, true, itr);
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
if (err)
goto err_exit;
}
@@ -3195,6 +3230,8 @@ void ice_ptp_init(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
int err;
+ ptp->state = ICE_PTP_INITIALIZING;
+
ice_ptp_init_phy_model(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3219,12 +3256,13 @@ void ice_ptp_init(struct ice_pf *pf)
/* Configure initial Tx interrupt settings */
ice_ptp_cfg_tx_interrupt(pf);
- set_bit(ICE_FLAG_PTP, pf->flags);
- err = ice_ptp_init_work(pf, ptp);
+ err = ice_ptp_create_auxbus_device(pf);
if (err)
goto err;
- err = ice_ptp_create_auxbus_device(pf);
+ ptp->state = ICE_PTP_READY;
+
+ err = ice_ptp_init_work(pf, ptp);
if (err)
goto err;
@@ -3237,7 +3275,7 @@ err:
ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- clear_bit(ICE_FLAG_PTP, pf->flags);
+ ptp->state = ICE_PTP_ERROR;
dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
@@ -3250,9 +3288,11 @@ err:
*/
void ice_ptp_release(struct ice_pf *pf)
{
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ if (pf->ptp.state != ICE_PTP_READY)
return;
+ pf->ptp.state = ICE_PTP_UNINIT;
+
/* Disable timestamping for both Tx and Rx */
ice_ptp_disable_timestamp_mode(pf);
@@ -3260,8 +3300,6 @@ void ice_ptp_release(struct ice_pf *pf)
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
- clear_bit(ICE_FLAG_PTP, pf->flags);
-
kthread_cancel_delayed_work_sync(&pf->ptp.work);
ice_ptp_port_phy_stop(&pf->ptp.port);
@@ -3271,6 +3309,9 @@ void ice_ptp_release(struct ice_pf *pf)
pf->ptp.kworker = NULL;
}
+ if (ice_pf_src_tmr_owned(pf))
+ ice_ptp_unregister_auxbus_driver(pf);
+
if (!pf->ptp.clock)
return;
@@ -3280,7 +3321,5 @@ void ice_ptp_release(struct ice_pf *pf)
ptp_clock_unregister(pf->ptp.clock);
pf->ptp.clock = NULL;
- ice_ptp_unregister_auxbus_driver(pf);
-
dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 087dd32d8762..3af20025043a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -100,7 +100,7 @@ struct ice_perout_channel {
* the last timestamp we read for a given index. If the current timestamp
* value is the same as the cached value, we assume a new timestamp hasn't
* been captured. This avoids reporting stale timestamps to the stack. This is
- * only done if the verify_cached flag is set in ice_ptp_tx structure.
+ * only done if the has_ready_bitmap flag is not set in the ice_ptp_tx structure.
*/
struct ice_tx_tstamp {
struct sk_buff *skb;
@@ -130,7 +130,9 @@ enum ice_tx_tstamp_work {
* @init: if true, the tracker is initialized;
* @calibrating: if true, the PHY is calibrating the Tx offset. During this
* window, timestamps are temporarily disabled.
- * @verify_cached: if true, verify new timestamp differs from last read value
+ * @has_ready_bitmap: if true, the hardware has a valid Tx timestamp ready
+ * bitmap register. If false, fall back to verifying new
+ * timestamp values against the previously cached copy.
* @last_ll_ts_idx_read: index of the last LL TS read by the FW
*/
struct ice_ptp_tx {
@@ -143,7 +145,7 @@ struct ice_ptp_tx {
u8 len;
u8 init : 1;
u8 calibrating : 1;
- u8 verify_cached : 1;
+ u8 has_ready_bitmap : 1;
s8 last_ll_ts_idx_read;
};
@@ -203,8 +205,17 @@ struct ice_ptp_port_owner {
#define GLTSYN_TGT_H_IDX_MAX 4
+enum ice_ptp_state {
+ ICE_PTP_UNINIT = 0,
+ ICE_PTP_INITIALIZING,
+ ICE_PTP_READY,
+ ICE_PTP_RESETTING,
+ ICE_PTP_ERROR,
+};
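The legal transitions are implied by the call sites rather than enforced; as an illustrative (non-driver) summary of the flows in ice_ptp_init(), ice_ptp_prepare_for_reset(), ice_ptp_rebuild() and ice_ptp_release():

	/* Illustrative only: not a helper that exists in the driver. */
	static bool ice_ptp_state_transition_ok(enum ice_ptp_state from,
						enum ice_ptp_state to)
	{
		switch (to) {
		case ICE_PTP_INITIALIZING:
			return from == ICE_PTP_UNINIT;	  /* ice_ptp_init() */
		case ICE_PTP_READY:
			return from == ICE_PTP_INITIALIZING ||
			       from == ICE_PTP_RESETTING; /* init or rebuild */
		case ICE_PTP_RESETTING:
			return from == ICE_PTP_READY;	  /* prepare_for_reset */
		case ICE_PTP_ERROR:
			return true;			  /* any state can fail */
		case ICE_PTP_UNINIT:
			return from == ICE_PTP_READY;	  /* ice_ptp_release() */
		}
		return false;
	}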
+
/**
* struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
+ * @state: current state of PTP state machine
* @tx_interrupt_mode: the TX interrupt mode for the PTP clock
* @port: data for the PHY port initialization procedure
* @ports_owner: data for the auxiliary driver owner
@@ -227,6 +238,7 @@ struct ice_ptp_port_owner {
* @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
+ enum ice_ptp_state state;
enum ice_ptp_tx_interrupt tx_interrupt_mode;
struct ice_ptp_port port;
struct ice_ptp_port_owner ports_owner;
@@ -304,8 +316,9 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx);
-void ice_ptp_reset(struct ice_pf *pf);
-void ice_ptp_prepare_for_reset(struct ice_pf *pf);
+void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
+void ice_ptp_prepare_for_reset(struct ice_pf *pf,
+ enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
@@ -345,8 +358,15 @@ ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
return 0;
}
-static inline void ice_ptp_reset(struct ice_pf *pf) { }
-static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
+static inline void ice_ptp_rebuild(struct ice_pf *pf,
+ enum ice_reset_req reset_type)
+{
+}
+
+static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
+ enum ice_reset_req reset_type)
+{
+}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index a94a1c48c3de..a958fcf3e6be 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -240,7 +240,6 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
}
vf->lan_vsi_idx = vsi->idx;
- vf->lan_vsi_num = vsi->vsi_num;
return vsi;
}
@@ -1068,6 +1067,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
struct ice_pf *pf = pci_get_drvdata(pdev);
u16 prev_msix, prev_queues, queues;
bool needs_rebuild = false;
+ struct ice_vsi *vsi;
struct ice_vf *vf;
int id;
@@ -1102,6 +1102,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (!vf)
return -ENOENT;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -ENOENT;
+
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;
@@ -1122,7 +1126,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (vf->first_vector_idx < 0)
goto unroll;
- if (ice_vf_reconfig_vsi(vf)) {
+ if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) {
/* Try to rebuild with previous values */
needs_rebuild = true;
goto unroll;
@@ -1148,8 +1152,10 @@ unroll:
if (vf->first_vector_idx < 0)
return -EINVAL;
- if (needs_rebuild)
+ if (needs_rebuild) {
ice_vf_reconfig_vsi(vf);
+ ice_vf_init_host_cfg(vf, vsi);
+ }
ice_ena_vf_mappings(vf);
ice_put_vf(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 839e5da24ad5..f8f1d2bdc1be 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -143,8 +143,12 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
(decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
- if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
- BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
+ ring->vsi->back->hw_rx_eipe_error++;
+ return;
+ }
+
+ if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S))))
goto checksum_fail;
if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 41ab6d7bbd9e..9ff92dba5823 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -132,6 +132,7 @@ enum ice_mac_type {
ICE_MAC_E810,
ICE_MAC_E830,
ICE_MAC_GENERIC,
+ ICE_MAC_GENERIC_3K_E825,
};
/* Media Types */
@@ -1072,7 +1073,7 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_OROM_VER_BUILD_SHIFT 8
#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT)
#define ICE_OROM_VER_SHIFT 24
-#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
+#define ICE_OROM_VER_MASK (0xffU << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_1ST_NVM_BANK_PTR 0x42
#define ICE_SR_NVM_BANK_SIZE 0x43
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 2ffdae9a82df..21d26e19338a 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -280,12 +280,6 @@ int ice_vf_reconfig_vsi(struct ice_vf *vf)
return err;
}
- /* Update the lan_vsi_num field since it might have been changed. The
- * PF lan_vsi_idx number remains the same so we don't need to change
- * that.
- */
- vf->lan_vsi_num = vsi->vsi_num;
-
return 0;
}
@@ -315,7 +309,6 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
* vf->lan_vsi_idx
*/
vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
- vf->lan_vsi_num = vsi->vsi_num;
return 0;
}
@@ -1315,13 +1308,12 @@ int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
}
/**
- * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access
* @vf: VF to remove access to VSI for
*/
void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
vf->lan_vsi_idx = ICE_NO_VSI;
- vf->lan_vsi_num = ICE_NO_VSI;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 0cc9034065c5..fec16919ec19 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -109,11 +109,6 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
- /* VSI indices - actual VSI pointers are maintained in the PF structure
- * When assigned, these will be non-zero, because VSI 0 is always
- * the main LAN VSI for the PF.
- */
- u16 lan_vsi_num; /* ID as used by firmware */
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index c925813ec9ca..1ff9818b4c84 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vf->driver_caps = *(u32 *)msg;
else
vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
- VIRTCHNL_VF_OFFLOAD_RSS_REG |
VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
@@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
vf->driver_caps);
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
- } else {
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
- else
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
- }
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
@@ -506,7 +499,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
- vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID;
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
@@ -552,27 +545,20 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
*/
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_find_vsi(pf, vsi_id);
-
- return (vsi && (vsi->vf == vf));
+ return vsi_id == ICE_VF_VSI_ID;
}
/**
* ice_vc_isvalid_q_id
- * @vf: pointer to the VF info
- * @vsi_id: VSI ID
+ * @vsi: VSI to check queue ID against
* @qid: VSI relative queue ID
*
* check for the valid queue ID
*/
-static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
{
- struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
/* allocated Tx and Rx queues should be always equal for VF VSI */
- return (vsi && (qid < vsi->alloc_txq));
+ return qid < vsi->alloc_txq;
}
/**
@@ -1330,7 +1316,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
*/
q_map = vqs->rx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1352,7 +1338,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1457,7 +1443,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1483,7 +1469,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
} else if (q_map) {
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1539,7 +1525,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
vsi_q_id = vsi_q_id_idx;
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
return VIRTCHNL_STATUS_ERR_PARAM;
q_vector->num_ring_rx++;
@@ -1553,7 +1539,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
vsi_q_id = vsi_q_id_idx;
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
return VIRTCHNL_STATUS_ERR_PARAM;
q_vector->num_ring_tx++;
@@ -1710,7 +1696,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
qpi->txq.headwb_enabled ||
!ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
!ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
- !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+ !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
goto error_param;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index 60dfbe05980a..3a4115869153 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -19,6 +19,15 @@
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* VFs only get a single VSI. For ice hardware, the VF does not need to know
+ * its VSI index. However, the virtchnl interface requires a VSI number,
+ * mainly due to legacy hardware.
+ *
+ * Since the VF doesn't need this information, report a static value to the VF
+ * instead of leaking any information about the PF or hardware setup.
+ */
+#define ICE_VF_VSI_ID 1
+
struct ice_virtchnl_ops {
int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 5e19d48a05b4..d796dbd2a440 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -13,8 +13,6 @@
* - opcodes needed by VF when caps are activated
*
* Caps that don't use new opcodes (no opcodes should be allowed):
- * - VIRTCHNL_VF_OFFLOAD_RSS_AQ
- * - VIRTCHNL_VF_OFFLOAD_RSS_REG
* - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
* - VIRTCHNL_VF_OFFLOAD_CRC
* - VIRTCHNL_VF_OFFLOAD_RX_POLLING
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index f001553e1a1a..8e4ff3af86c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -94,9 +94,6 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
return -EINVAL;
- if (vsi_id != vf->lan_vsi_num)
- return -EINVAL;
-
if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 8b81a1677045..1857220d27fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -179,6 +179,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
return -EBUSY;
usleep_range(1000, 2000);
}
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
@@ -195,13 +199,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
- ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
ice_qp_reset_stats(vsi, q_idx);
@@ -217,53 +218,39 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
*/
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
- u16 size = __struct_size(qg_buf);
struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
int err;
- if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
- return -EINVAL;
-
- qg_buf->num_txqs = 1;
-
- tx_ring = vsi->tx_rings[q_idx];
- rx_ring = vsi->rx_rings[q_idx];
- q_vector = rx_ring->q_vector;
-
- err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
if (err)
return err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
- memset(qg_buf, 0, size);
- qg_buf->num_txqs = 1;
- err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
+ err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
if (err)
return err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
- err = ice_vsi_cfg_rxq(rx_ring);
+ err = ice_vsi_cfg_single_rxq(vsi, q_idx);
if (err)
return err;
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
ice_qvec_cfg_msix(vsi, q_vector);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
if (err)
return err;
- clear_bit(ICE_CFG_BUSY, vsi->state);
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ clear_bit(ICE_CFG_BUSY, vsi->state);
return 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 0acc125decb3..e7a036538246 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -37,8 +37,6 @@ struct idpf_vport_max_q;
#define IDPF_MB_MAX_ERR 20
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
-#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000
-#define IDPF_WAIT_FOR_EVENT_TIMEO 60000
#define IDPF_MAX_WAIT 500
@@ -66,14 +64,12 @@ struct idpf_mac_filter {
/**
* enum idpf_state - State machine to handle bring up
- * @__IDPF_STARTUP: Start the state machine
* @__IDPF_VER_CHECK: Negotiate virtchnl version
* @__IDPF_GET_CAPS: Negotiate capabilities
* @__IDPF_INIT_SW: Init based on given capabilities
* @__IDPF_STATE_LAST: Must be last, used to determine size
*/
enum idpf_state {
- __IDPF_STARTUP,
__IDPF_VER_CHECK,
__IDPF_GET_CAPS,
__IDPF_INIT_SW,
@@ -87,6 +83,7 @@ enum idpf_state {
* @IDPF_HR_RESET_IN_PROG: Reset in progress
* @IDPF_REMOVE_IN_PROG: Driver remove in progress
* @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
+ * @IDPF_VC_CORE_INIT: virtchnl core has been initialized
* @IDPF_FLAGS_NBITS: Must be last
*/
enum idpf_flags {
@@ -95,6 +92,7 @@ enum idpf_flags {
IDPF_HR_RESET_IN_PROG,
IDPF_REMOVE_IN_PROG,
IDPF_MB_INTR_MODE,
+ IDPF_VC_CORE_INIT,
IDPF_FLAGS_NBITS,
};
@@ -209,71 +207,6 @@ struct idpf_dev_ops {
struct idpf_reg_ops reg_ops;
};
-/* These macros allow us to generate an enum and a matching char * array of
- * stringified enums that are always in sync. Checkpatch issues a bogus warning
- * about this being a complex macro; but it's wrong, these are never used as a
- * statement and instead only used to define the enum and array.
- */
-#define IDPF_FOREACH_VPORT_VC_STATE(STATE) \
- STATE(IDPF_VC_CREATE_VPORT) \
- STATE(IDPF_VC_CREATE_VPORT_ERR) \
- STATE(IDPF_VC_ENA_VPORT) \
- STATE(IDPF_VC_ENA_VPORT_ERR) \
- STATE(IDPF_VC_DIS_VPORT) \
- STATE(IDPF_VC_DIS_VPORT_ERR) \
- STATE(IDPF_VC_DESTROY_VPORT) \
- STATE(IDPF_VC_DESTROY_VPORT_ERR) \
- STATE(IDPF_VC_CONFIG_TXQ) \
- STATE(IDPF_VC_CONFIG_TXQ_ERR) \
- STATE(IDPF_VC_CONFIG_RXQ) \
- STATE(IDPF_VC_CONFIG_RXQ_ERR) \
- STATE(IDPF_VC_ENA_QUEUES) \
- STATE(IDPF_VC_ENA_QUEUES_ERR) \
- STATE(IDPF_VC_DIS_QUEUES) \
- STATE(IDPF_VC_DIS_QUEUES_ERR) \
- STATE(IDPF_VC_MAP_IRQ) \
- STATE(IDPF_VC_MAP_IRQ_ERR) \
- STATE(IDPF_VC_UNMAP_IRQ) \
- STATE(IDPF_VC_UNMAP_IRQ_ERR) \
- STATE(IDPF_VC_ADD_QUEUES) \
- STATE(IDPF_VC_ADD_QUEUES_ERR) \
- STATE(IDPF_VC_DEL_QUEUES) \
- STATE(IDPF_VC_DEL_QUEUES_ERR) \
- STATE(IDPF_VC_ALLOC_VECTORS) \
- STATE(IDPF_VC_ALLOC_VECTORS_ERR) \
- STATE(IDPF_VC_DEALLOC_VECTORS) \
- STATE(IDPF_VC_DEALLOC_VECTORS_ERR) \
- STATE(IDPF_VC_SET_SRIOV_VFS) \
- STATE(IDPF_VC_SET_SRIOV_VFS_ERR) \
- STATE(IDPF_VC_GET_RSS_LUT) \
- STATE(IDPF_VC_GET_RSS_LUT_ERR) \
- STATE(IDPF_VC_SET_RSS_LUT) \
- STATE(IDPF_VC_SET_RSS_LUT_ERR) \
- STATE(IDPF_VC_GET_RSS_KEY) \
- STATE(IDPF_VC_GET_RSS_KEY_ERR) \
- STATE(IDPF_VC_SET_RSS_KEY) \
- STATE(IDPF_VC_SET_RSS_KEY_ERR) \
- STATE(IDPF_VC_GET_STATS) \
- STATE(IDPF_VC_GET_STATS_ERR) \
- STATE(IDPF_VC_ADD_MAC_ADDR) \
- STATE(IDPF_VC_ADD_MAC_ADDR_ERR) \
- STATE(IDPF_VC_DEL_MAC_ADDR) \
- STATE(IDPF_VC_DEL_MAC_ADDR_ERR) \
- STATE(IDPF_VC_GET_PTYPE_INFO) \
- STATE(IDPF_VC_GET_PTYPE_INFO_ERR) \
- STATE(IDPF_VC_LOOPBACK_STATE) \
- STATE(IDPF_VC_LOOPBACK_STATE_ERR) \
- STATE(IDPF_VC_NBITS)
-
-#define IDPF_GEN_ENUM(ENUM) ENUM,
-#define IDPF_GEN_STRING(STRING) #STRING,
-
-enum idpf_vport_vc_state {
- IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM)
-};
-
-extern const char * const idpf_vport_vc_state_str[];
-
/**
* enum idpf_vport_reset_cause - Vport soft reset causes
* @IDPF_SR_Q_CHANGE: Soft reset queue change
@@ -358,11 +291,7 @@ struct idpf_port_stats {
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
* @link_speed_mbps: Link speed in mbps
- * @vc_msg: Virtchnl message buffer
- * @vc_state: Virtchnl message state
- * @vchnl_wq: Wait queue for virtchnl messages
* @sw_marker_wq: workqueue for marker packets
- * @vc_buf_lock: Lock to protect virtchnl buffer
*/
struct idpf_vport {
u16 num_txq;
@@ -408,12 +337,7 @@ struct idpf_vport {
bool link_up;
u32 link_speed_mbps;
- char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
- DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
-
- wait_queue_head_t vchnl_wq;
wait_queue_head_t sw_marker_wq;
- struct mutex vc_buf_lock;
};
/**
@@ -476,15 +400,11 @@ struct idpf_vport_user_config_data {
* enum idpf_vport_config_flags - Vport config flags
* @IDPF_VPORT_REG_NETDEV: Register netdev
* @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
- * @IDPF_VPORT_ADD_MAC_REQ: Asynchronous add ether address in flight
- * @IDPF_VPORT_DEL_MAC_REQ: Asynchronous delete ether address in flight
* @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
*/
enum idpf_vport_config_flags {
IDPF_VPORT_REG_NETDEV,
IDPF_VPORT_UP_REQUESTED,
- IDPF_VPORT_ADD_MAC_REQ,
- IDPF_VPORT_DEL_MAC_REQ,
IDPF_VPORT_CONFIG_FLAGS_NBITS,
};
@@ -555,11 +475,13 @@ struct idpf_vector_lifo {
struct idpf_vport_config {
struct idpf_vport_user_config_data user_config;
struct idpf_vport_max_q max_q;
- void *req_qs_chunks;
+ struct virtchnl2_add_queues *req_qs_chunks;
spinlock_t mac_filter_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};
+struct idpf_vc_xn_manager;
+
/**
* struct idpf_adapter - Device data struct generated on probe
* @pdev: PCI device struct given on probe
@@ -601,9 +523,7 @@ struct idpf_vport_config {
* @stats_task: Periodic statistics retrieval task
* @stats_wq: Workqueue for statistics task
* @caps: Negotiated capabilities with device
- * @vchnl_wq: Wait queue for virtchnl messages
- * @vc_state: Virtchnl message state
- * @vc_msg: Virtchnl message buffer
+ * @vcxn_mngr: Virtchnl transaction manager
* @dev_ops: See idpf_dev_ops
* @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
* to VFs but is used to initialize them
@@ -659,10 +579,8 @@ struct idpf_adapter {
struct delayed_work stats_task;
struct workqueue_struct *stats_wq;
struct virtchnl2_get_capabilities caps;
+ struct idpf_vc_xn_manager *vcxn_mngr;
- wait_queue_head_t vchnl_wq;
- DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
- char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
struct idpf_dev_ops dev_ops;
int num_vfs;
bool crc_enable;
@@ -903,68 +821,18 @@ void idpf_mbx_task(struct work_struct *work);
void idpf_vc_event_task(struct work_struct *work);
void idpf_dev_ops_init(struct idpf_adapter *adapter);
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
-int idpf_vport_adjust_qs(struct idpf_vport *vport);
-int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
-void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
-int idpf_vc_core_init(struct idpf_adapter *adapter);
-void idpf_vc_core_deinit(struct idpf_adapter *adapter);
int idpf_intr_req(struct idpf_adapter *adapter);
void idpf_intr_rel(struct idpf_adapter *adapter);
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
- struct idpf_vec_regs *reg_vals);
u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter);
-int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
- u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_reset_cause reset_cause);
-int idpf_send_enable_vport_msg(struct idpf_vport *vport);
-int idpf_send_disable_vport_msg(struct idpf_vport *vport);
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
-int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
-int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
void idpf_deinit_task(struct idpf_adapter *adapter);
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
u16 *q_vector_idxs,
struct idpf_vector_info *vec_info);
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
-int idpf_send_get_stats_msg(struct idpf_vport *vport);
-int idpf_get_vec_ids(struct idpf_adapter *adapter,
- u16 *vecids, int num_vecids,
- struct virtchnl2_vector_chunks *chunks);
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
- void *msg, int msg_size);
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg);
void idpf_set_ethtool_ops(struct net_device *netdev);
-int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
- struct idpf_netdev_priv *np,
- bool add, bool async);
-int idpf_set_promiscuous(struct idpf_adapter *adapter,
- struct idpf_vport_user_config_data *config_data,
- u32 vport_id);
-int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
-u32 idpf_get_vport_id(struct idpf_vport *vport);
-int idpf_vport_queue_ids_init(struct idpf_vport *vport);
-int idpf_queue_reg_init(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
-int idpf_send_enable_queues_msg(struct idpf_vport *vport);
-int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-int idpf_check_supported_desc_ids(struct idpf_vport *vport);
void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
u16 itr, bool tx);
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
-int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
index c7f43d2fcd13..4849590a5591 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -516,6 +516,8 @@ post_buffs_out:
/* Wrap to end of ring since current ntp is 0 */
cq->next_to_post = cq->ring_size - 1;
+ dma_wmb();
+
wr32(hw, cq->reg.tail, cq->next_to_post);
}
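The dma_wmb() added above follows the standard descriptor-ring doorbell pattern; schematically (a fragment, not driver code):

	/* 1. CPU fills the descriptor(s) in coherent memory */
	desc->addr = cpu_to_le64(dma_addr);
	desc->len  = cpu_to_le16(len);

	/* 2. barrier: descriptor writes must be visible to the device
	 *    before the tail write that triggers a hardware fetch
	 */
	dma_wmb();

	/* 3. bump the tail register to notify hardware */
	wr32(hw, cq->reg.tail, cq->next_to_post);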
@@ -546,11 +548,6 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
int err = 0;
u16 i;
- if (*num_q_msg == 0)
- return 0;
- else if (*num_q_msg > cq->ring_size)
- return -EBADR;
-
/* take the lock before we start messing with the ring */
mutex_lock(&cq->cq_lock);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
index 8dee098bbfb0..e8e046ef2f0d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -69,6 +69,11 @@ struct idpf_ctlq_msg {
u8 context[IDPF_INDIRECT_CTX_SIZE];
struct idpf_dma_mem *payload;
} indirect;
+ struct {
+ u32 rsvd;
+ u16 data;
+ u16 flags;
+ } sw_cookie;
} ctx;
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 34ad1ac46b78..3df9935685e9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_lan_pf_regs.h"
+#include "idpf_virtchnl.h"
#define IDPF_PF_ITR_IDX_SPACING 0x4
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 58179bd733ff..5d3532c27d57 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2,14 +2,11 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
static const struct net_device_ops idpf_netdev_ops_splitq;
static const struct net_device_ops idpf_netdev_ops_singleq;
-const char * const idpf_vport_vc_state_str[] = {
- IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
-};
-
/**
* idpf_init_vector_stack - Fill the MSIX vector stack with vector index
* @adapter: private data struct
@@ -82,19 +79,12 @@ static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
*/
void idpf_intr_rel(struct idpf_adapter *adapter)
{
- int err;
-
if (!adapter->msix_entries)
return;
idpf_mb_intr_rel_irq(adapter);
pci_free_irq_vectors(adapter->pdev);
-
- err = idpf_send_dealloc_vectors_msg(adapter);
- if (err)
- dev_err(&adapter->pdev->dev,
- "Failed to deallocate vectors: %d\n", err);
-
+ idpf_send_dealloc_vectors_msg(adapter);
idpf_deinit_vector_stack(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -975,7 +965,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
struct idpf_rss_data *rss_data;
struct idpf_vport_max_q max_q;
u16 idx = vport->idx;
- int i;
vport_config = adapter->vport_config[vport->idx];
idpf_deinit_rss(vport);
@@ -985,20 +974,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
idpf_send_destroy_vport_msg(vport);
- /* Set all bits as we dont know on which vc_state the vport vhnl_wq
- * is waiting on and wakeup the virtchnl workqueue even if it is
- * waiting for the response as we are going down
- */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- set_bit(i, vport->vc_state);
- wake_up(&vport->vchnl_wq);
-
- mutex_destroy(&vport->vc_buf_lock);
-
- /* Clear all the bits */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- clear_bit(i, vport->vc_state);
-
/* Release all max queues allocated to the adapter's pool */
max_q.max_rxq = vport_config->max_q.max_rxq;
max_q.max_txq = vport_config->max_q.max_txq;
@@ -1253,7 +1228,7 @@ void idpf_mbx_task(struct work_struct *work)
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
msecs_to_jiffies(300));
- idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
+ idpf_recv_mb_msg(adapter);
}
/**
@@ -1543,9 +1518,7 @@ void idpf_init_task(struct work_struct *work)
vport_config = adapter->vport_config[index];
init_waitqueue_head(&vport->sw_marker_wq);
- init_waitqueue_head(&vport->vchnl_wq);
- mutex_init(&vport->vc_buf_lock);
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
@@ -1823,6 +1796,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
goto unlock_mutex;
}
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+
/* Initialize the state machine, also allocate memory and request
* resources
*/
@@ -1902,7 +1877,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
* mess with. Nothing below should use those variables from new_vport
* and should instead always refer to them in vport if they need to.
*/
- memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state));
+ memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
@@ -1951,7 +1926,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
/* Same comment as above regarding avoiding copying the wait_queues and
* mutexes applies here. We do not want to mess with those if possible.
*/
- memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state));
+ memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Since idpf_vport_queues_alloc was called with new_port, the queue
* back pointers are currently pointing to the local new_vport. Reset
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index e1febc74cefd..f784eea044bd 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_devids.h"
+#include "idpf_virtchnl.h"
#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
@@ -30,6 +31,7 @@ static void idpf_remove(struct pci_dev *pdev)
idpf_sriov_configure(pdev, 0);
idpf_vc_core_deinit(adapter);
+
/* Be a good citizen and leave the device clean on exit */
adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
idpf_deinit_dflt_mbx(adapter);
@@ -66,6 +68,8 @@ destroy_wqs:
adapter->vport_config = NULL;
kfree(adapter->netdevs);
adapter->netdevs = NULL;
+ kfree(adapter->vcxn_mngr);
+ adapter->vcxn_mngr = NULL;
mutex_destroy(&adapter->vport_ctrl_lock);
mutex_destroy(&adapter->vector_lock);
@@ -229,8 +233,6 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&adapter->queue_lock);
mutex_init(&adapter->vc_buf_lock);
- init_waitqueue_head(&adapter->vchnl_wq);
-
INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 2f8ad79ae3f0..6dd7a66bb897 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
/**
* idpf_buf_lifo_push - push a buffer pointer onto stack
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 8ade4e3a9fe1..629cb5cb7c9f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_lan_vf_regs.h"
+#include "idpf_virtchnl.h"
#define IDPF_VF_ITR_IDX_SPACING 0x40
@@ -137,7 +138,7 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
/* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
if (trig_cause == IDPF_HR_FUNC_RESET &&
!test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
- idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL);
+ idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index d0cdd63b3d5b..a5f9b7a5effe 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2,46 +2,192 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
+
+#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
+#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
+#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
+#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
+#define IDPF_VC_XN_RING_LEN U8_MAX
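The cookie is a 16-bit value split by the masks above: ring index in the low byte, per-message salt in the high byte. A standalone sketch of the packing, using plain shifts in place of the kernel's FIELD_PREP()/FIELD_GET() helpers (names are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define XN_IDX_M  0x00ffu	/* GENMASK(7, 0)  */
	#define XN_SALT_M 0xff00u	/* GENMASK(15, 8) */

	static uint16_t xn_cookie_pack(uint8_t idx, uint8_t salt)
	{
		return (uint16_t)idx | ((uint16_t)salt << 8);
	}

	int main(void)
	{
		uint16_t cookie = xn_cookie_pack(0x2a, 0x5c);

		printf("idx=%#x salt=%#x\n",
		       cookie & XN_IDX_M, (cookie & XN_SALT_M) >> 8);
		return 0;
	}

On reply receive, the index recovers the transaction slot in the ring and the salt rejects stale or mismatched replies.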
+
+/**
+ * enum idpf_vc_xn_state - Virtchnl transaction status
+ * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
+ * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
+ * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
+ * buffer updated
+ * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
+ * was an error, buffer not updated
+ * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
+ * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
+ * return context; a callback may be provided to handle
+ * return
+ */
+enum idpf_vc_xn_state {
+ IDPF_VC_XN_IDLE = 1,
+ IDPF_VC_XN_WAITING,
+ IDPF_VC_XN_COMPLETED_SUCCESS,
+ IDPF_VC_XN_COMPLETED_FAILED,
+ IDPF_VC_XN_SHUTDOWN,
+ IDPF_VC_XN_ASYNC,
+};
+
+struct idpf_vc_xn;
+/* Callback for asynchronous messages */
+typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ const struct idpf_ctlq_msg *);
+
+/**
+ * struct idpf_vc_xn - Data structure representing virtchnl transactions
+ * @completed: virtchnl event loop uses that to signal when a reply is
+ * available, uses kernel completion API
+ * @state: virtchnl event loop stores the data below, protected by the
+ * completion's lock.
+ * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
+ * truncated on its way to the receiver thread according to
+ * reply_buf.iov_len.
+ * @reply: Reference to the buffer(s) where the reply data should be written
+ * to. May be 0-length (then NULL address permitted) if the reply data
+ * should be ignored.
+ * @async_handler: if sent asynchronously, a callback can be provided to handle
+ * the reply when it's received
+ * @vc_op: corresponding opcode sent with this transaction
+ * @idx: index used to retrieve the transaction on reply receive; part of
+ * the cookie
+ * @salt: changed on every message to keep the cookie unique; part of the
+ * cookie
+ */
+struct idpf_vc_xn {
+ struct completion completed;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+ async_vc_cb async_handler;
+ u32 vc_op;
+ u8 idx;
+ u8 salt;
+};
+
+/**
+ * struct idpf_vc_xn_params - Parameters for executing transaction
+ * @send_buf: kvec for send buffer
+ * @recv_buf: kvec for recv buffer; may be NULL, in which case its length
+ * must be zero
+ * @timeout_ms: timeout to wait for reply
+ * @async: send message asynchronously, will not wait on completion
+ * @async_handler: If sent asynchronously, optional callback handler. The user
+ * must be careful when using async handlers as the memory for
+ * the recv_buf _cannot_ be on stack if this is async.
+ * @vc_op: virtchnl op to send
+ */
+struct idpf_vc_xn_params {
+ struct kvec send_buf;
+ struct kvec recv_buf;
+ int timeout_ms;
+ bool async;
+ async_vc_cb async_handler;
+ u32 vc_op;
+};
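A hedged usage sketch for a synchronous transaction: the caller fills the params and hands them to the executor this series introduces (idpf_vc_xn_exec(), which returns the reply size or a negative error); a fragment, not compilable on its own:

	struct idpf_vc_xn_params xn_params = { };
	struct virtchnl2_vport v2_vport = { };
	ssize_t reply_sz;

	v2_vport.vport_id = cpu_to_le32(vport->vport_id);

	xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
	xn_params.send_buf.iov_base = &v2_vport;
	xn_params.send_buf.iov_len = sizeof(v2_vport);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	/* no recv_buf: the reply payload is ignored for this opcode */

	reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;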
+
+/**
+ * struct idpf_vc_xn_manager - Manager for tracking transactions
+ * @ring: backing and lookup for transactions
+ * @free_xn_bm: bitmap for free transactions
+ * @xn_bm_lock: make bitmap access synchronous where necessary
+ * @salt: used to make the cookie unique on every message
+ */
+struct idpf_vc_xn_manager {
+ struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
+ DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
+ spinlock_t xn_bm_lock;
+ u8 salt;
+};
+
+/**
+ * idpf_vid_to_vport - Translate vport id to vport pointer
+ * @adapter: private data struct
+ * @v_id: vport id to translate
+ *
+ * Returns vport matching v_id, NULL if not found.
+ */
+static
+struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
+{
+ u16 num_max_vports = idpf_get_max_vports(adapter);
+ int i;
+
+ for (i = 0; i < num_max_vports; i++)
+ if (adapter->vport_ids[i] == v_id)
+ return adapter->vports[i];
+
+ return NULL;
+}
+
+/**
+ * idpf_handle_event_link - Handle link event message
+ * @adapter: private data struct
+ * @v2e: virtchnl event message
+ */
+static void idpf_handle_event_link(struct idpf_adapter *adapter,
+ const struct virtchnl2_event *v2e)
+{
+ struct idpf_netdev_priv *np;
+ struct idpf_vport *vport;
+
+ vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
+ if (!vport) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
+ v2e->vport_id);
+ return;
+ }
+ np = netdev_priv(vport->netdev);
+
+ vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+
+ if (vport->link_up == v2e->link_status)
+ return;
+
+ vport->link_up = v2e->link_status;
+
+ if (np->state != __IDPF_VPORT_UP)
+ return;
+
+ if (vport->link_up) {
+ netif_tx_start_all_queues(vport->netdev);
+ netif_carrier_on(vport->netdev);
+ } else {
+ netif_tx_stop_all_queues(vport->netdev);
+ netif_carrier_off(vport->netdev);
+ }
+}
/**
* idpf_recv_event_msg - Receive virtchnl event message
- * @vport: virtual port structure
+ * @adapter: Driver specific private structure
* @ctlq_msg: message to copy from
*
* Receive virtchnl event message
*/
-static void idpf_recv_event_msg(struct idpf_vport *vport,
+static void idpf_recv_event_msg(struct idpf_adapter *adapter,
struct idpf_ctlq_msg *ctlq_msg)
{
- struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ int payload_size = ctlq_msg->ctx.indirect.payload->size;
struct virtchnl2_event *v2e;
- bool link_status;
u32 event;
+ if (payload_size < sizeof(*v2e)) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode,
+ payload_size);
+ return;
+ }
+
v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
event = le32_to_cpu(v2e->event);
switch (event) {
case VIRTCHNL2_EVENT_LINK_CHANGE:
- vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
- link_status = v2e->link_status;
-
- if (vport->link_up == link_status)
- break;
-
- vport->link_up = link_status;
- if (np->state == __IDPF_VPORT_UP) {
- if (vport->link_up) {
- netif_carrier_on(vport->netdev);
- netif_tx_start_all_queues(vport->netdev);
- } else {
- netif_tx_stop_all_queues(vport->netdev);
- netif_carrier_off(vport->netdev);
- }
- }
- break;
+ idpf_handle_event_link(adapter, v2e);
+ return;
default:
- dev_err(&vport->adapter->pdev->dev,
+ dev_err(&adapter->pdev->dev,
"Unknown event %d from PF\n", event);
break;
}
@@ -93,13 +239,14 @@ err_kfree:
* @op: virtchnl opcode
* @msg_size: size of the payload
* @msg: pointer to buffer holding the payload
+ * @cookie: unique SW generated cookie per message
*
* Will prepare the control queue message and initiates the send api
*
* Returns 0 on success, negative on failure
*/
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg)
+ u16 msg_size, u8 *msg, u16 cookie)
{
struct idpf_ctlq_msg *ctlq_msg;
struct idpf_dma_mem *dma_mem;
@@ -139,8 +286,12 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
err = -ENOMEM;
goto dma_alloc_error;
}
- memcpy(dma_mem->va, msg, msg_size);
+
+ /* It's possible we're just sending an opcode but no buffer */
+ if (msg && msg_size)
+ memcpy(dma_mem->va, msg, msg_size);
ctlq_msg->ctx.indirect.payload = dma_mem;
+ ctlq_msg->ctx.sw_cookie.data = cookie;
err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
if (err)
@@ -159,592 +310,432 @@ dma_mem_error:
return err;
}
-/**
- * idpf_find_vport - Find vport pointer from control queue message
- * @adapter: driver specific private structure
- * @vport: address of vport pointer to copy the vport from adapters vport list
- * @ctlq_msg: control queue message
+/* API for virtchnl "transaction" support ("xn" for short).
*
- * Return 0 on success, error value on failure. Also this function does check
- * for the opcodes which expect to receive payload and return error value if
- * it is not the case.
+ * We are reusing the completion lock to serialize the accesses to the
+ * transaction state for simplicity, but it could be its own separate synchro
+ * as well. For now, this API is only used from within a workqueue context;
+ * raw_spin_lock() is enough.
*/
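+
+/*
+ * Illustrative sketch (not part of this patch): a typical synchronous
+ * caller fills an idpf_vc_xn_params on the stack and lets
+ * idpf_vc_xn_exec() do the send/wait/copy work, e.g. (names as in
+ * idpf_send_ver_msg() below):
+ *
+ *	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
+ *	xn_params.send_buf.iov_base = &vvi;
+ *	xn_params.send_buf.iov_len = sizeof(vvi);
+ *	xn_params.recv_buf = xn_params.send_buf;
+ *	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ *	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ */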
-static int idpf_find_vport(struct idpf_adapter *adapter,
- struct idpf_vport **vport,
- struct idpf_ctlq_msg *ctlq_msg)
-{
- bool no_op = false, vid_found = false;
- int i, err = 0;
- char *vc_msg;
- u32 v_id;
+/**
+ * idpf_vc_xn_lock - Request exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+#define idpf_vc_xn_lock(xn) \
+ raw_spin_lock(&(xn)->completed.wait.lock)
- vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL);
- if (!vc_msg)
- return -ENOMEM;
+/**
+ * idpf_vc_xn_unlock - Release exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+#define idpf_vc_xn_unlock(xn) \
+ raw_spin_unlock(&(xn)->completed.wait.lock)
- if (ctlq_msg->data_len) {
- size_t payload_size = ctlq_msg->ctx.indirect.payload->size;
+/**
+ * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
+ * reset the transaction state.
+ * @xn: struct idpf_vc_xn to update
+ */
+static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
+{
+ xn->reply.iov_base = NULL;
+ xn->reply.iov_len = 0;
- if (!payload_size) {
- dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n");
- kfree(vc_msg);
+ if (xn->state != IDPF_VC_XN_SHUTDOWN)
+ xn->state = IDPF_VC_XN_IDLE;
+}
- return -EINVAL;
- }
+/**
+ * idpf_vc_xn_init - Initialize virtchnl transaction object
+ * @vcxn_mngr: pointer to vc transaction manager struct
+ */
+static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
+{
+ int i;
- memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN));
- }
-
- switch (ctlq_msg->cookie.mbx.chnl_opcode) {
- case VIRTCHNL2_OP_VERSION:
- case VIRTCHNL2_OP_GET_CAPS:
- case VIRTCHNL2_OP_CREATE_VPORT:
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- goto free_vc_msg;
- case VIRTCHNL2_OP_ENABLE_VPORT:
- case VIRTCHNL2_OP_DISABLE_VPORT:
- case VIRTCHNL2_OP_DESTROY_VPORT:
- v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- case VIRTCHNL2_OP_DEL_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- case VIRTCHNL2_OP_SET_RSS_LUT:
- v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- case VIRTCHNL2_OP_SET_RSS_KEY:
- v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_EVENT:
- v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_LOOPBACK:
- v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
- v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ADD_MAC_ADDR:
- case VIRTCHNL2_OP_DEL_MAC_ADDR:
- v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id);
- break;
- default:
- no_op = true;
- break;
- }
+ spin_lock_init(&vcxn_mngr->xn_bm_lock);
- if (no_op)
- goto free_vc_msg;
+ for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
+ struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
- for (i = 0; i < idpf_get_max_vports(adapter); i++) {
- if (adapter->vport_ids[i] == v_id) {
- vid_found = true;
- break;
- }
+ xn->state = IDPF_VC_XN_IDLE;
+ xn->idx = i;
+ idpf_vc_xn_release_bufs(xn);
+ init_completion(&xn->completed);
}
- if (vid_found)
- *vport = adapter->vports[i];
- else
- err = -EINVAL;
-
-free_vc_msg:
- kfree(vc_msg);
-
- return err;
+ bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
}
/**
- * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer.
- * @adapter: driver specific private structure
- * @vport: virtual port structure
- * @ctlq_msg: msg to copy from
- * @err_enum: err bit to set on error
+ * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
+ * @vcxn_mngr: pointer to vc transaction manager struct
*
- * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success,
- * negative on failure.
+ * All waiting threads will be woken up and their transactions aborted. Further
+ * operations on those objects will fail.
*/
-static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- struct idpf_ctlq_msg *ctlq_msg,
- enum idpf_vport_vc_state err_enum)
+static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
- if (ctlq_msg->cookie.mbx.chnl_retval) {
- if (vport)
- set_bit(err_enum, vport->vc_state);
- else
- set_bit(err_enum, adapter->vc_state);
+ int i;
- return -EINVAL;
- }
+ spin_lock_bh(&vcxn_mngr->xn_bm_lock);
+ bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
+ spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
- if (vport)
- memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(int, ctlq_msg->ctx.indirect.payload->size,
- IDPF_CTLQ_MAX_BUF_LEN));
- else
- memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(int, ctlq_msg->ctx.indirect.payload->size,
- IDPF_CTLQ_MAX_BUF_LEN));
+ for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
+ struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
- return 0;
+ idpf_vc_xn_lock(xn);
+ xn->state = IDPF_VC_XN_SHUTDOWN;
+ idpf_vc_xn_release_bufs(xn);
+ idpf_vc_xn_unlock(xn);
+ complete_all(&xn->completed);
+ }
}
/**
- * idpf_recv_vchnl_op - helper function with common logic when handling the
- * reception of VIRTCHNL OPs.
- * @adapter: driver specific private structure
- * @vport: virtual port structure
- * @ctlq_msg: msg to copy from
- * @state: state bit used on timeout check
- * @err_state: err bit to set on error
+ * idpf_vc_xn_pop_free - Pop a free transaction from free list
+ * @vcxn_mngr: transaction manager to pop from
+ *
+ * Returns NULL if no free transactions are available
*/
-static void idpf_recv_vchnl_op(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- struct idpf_ctlq_msg *ctlq_msg,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_state)
+static
+struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
- wait_queue_head_t *vchnl_wq;
- int err;
+ struct idpf_vc_xn *xn = NULL;
+ unsigned long free_idx;
- if (vport)
- vchnl_wq = &vport->vchnl_wq;
- else
- vchnl_wq = &adapter->vchnl_wq;
+ spin_lock_bh(&vcxn_mngr->xn_bm_lock);
+ free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
+ if (free_idx == IDPF_VC_XN_RING_LEN)
+ goto do_unlock;
- err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state);
- if (wq_has_sleeper(vchnl_wq)) {
- if (vport)
- set_bit(state, vport->vc_state);
- else
- set_bit(state, adapter->vc_state);
+ clear_bit(free_idx, vcxn_mngr->free_xn_bm);
+ xn = &vcxn_mngr->ring[free_idx];
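+ /* Stamp a fresh salt so a late reply meant for the previous user of
+ * this slot fails the salt check instead of being delivered here.
+ */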
+ xn->salt = vcxn_mngr->salt++;
- wake_up(vchnl_wq);
- } else {
- if (!err) {
- dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n",
- ctlq_msg->cookie.mbx.chnl_opcode);
- } else {
- /* Clear the errors since there is no sleeper to pass
- * them on
- */
- if (vport)
- clear_bit(err_state, vport->vc_state);
- else
- clear_bit(err_state, adapter->vc_state);
- }
- }
+do_unlock:
+ spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
+
+ return xn;
}
/**
- * idpf_recv_mb_msg - Receive message over mailbox
- * @adapter: Driver specific private structure
- * @op: virtchannel operation code
- * @msg: Received message holding buffer
- * @msg_size: message size
- *
- * Will receive control queue message and posts the receive buffer. Returns 0
- * on success and negative on failure.
+ * idpf_vc_xn_push_free - Push a free transaction to free list
+ * @vcxn_mngr: transaction manager to push to
+ * @xn: transaction to push
*/
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
- void *msg, int msg_size)
+static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
+ struct idpf_vc_xn *xn)
{
- struct idpf_vport *vport = NULL;
- struct idpf_ctlq_msg ctlq_msg;
- struct idpf_dma_mem *dma_mem;
- bool work_done = false;
- int num_retry = 2000;
- u16 num_q_msg;
- int err;
-
- while (1) {
- struct idpf_vport_config *vport_config;
- int payload_size = 0;
-
- /* Try to get one message */
- num_q_msg = 1;
- dma_mem = NULL;
- err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg);
- /* If no message then decide if we have to retry based on
- * opcode
- */
- if (err || !num_q_msg) {
- /* Increasing num_retry to consider the delayed
- * responses because of large number of VF's mailbox
- * messages. If the mailbox message is received from
- * the other side, we come out of the sleep cycle
- * immediately else we wait for more time.
- */
- if (!op || !num_retry--)
- break;
- if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
- err = -EIO;
- break;
- }
- msleep(20);
- continue;
- }
+ idpf_vc_xn_release_bufs(xn);
+ set_bit(xn->idx, vcxn_mngr->free_xn_bm);
+}
- /* If we are here a message is received. Check if we are looking
- * for a specific message based on opcode. If it is different
- * ignore and post buffers
+/**
+ * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @params: parameters for this particular transaction including
+ * -vc_op: virtchannel operation to send
+ * -send_buf: kvec iov for send buf and len
+ * -recv_buf: kvec iov for recv buf and len (ignored if NULL)
+ * -timeout_ms: timeout waiting for a reply (milliseconds)
+ * -async: don't wait for message reply, will lose caller context
+ * -async_handler: callback to handle async replies
+ *
+ * @returns >= 0 for success, the size of the initial reply (may or may not be
+ * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
+ * error.
+ */
+static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params)
+{
+ const struct kvec *send_buf = &params->send_buf;
+ struct idpf_vc_xn *xn;
+ ssize_t retval;
+ u16 cookie;
+
+ xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
+ /* no free transactions available */
+ if (!xn)
+ return -ENOSPC;
+
+ idpf_vc_xn_lock(xn);
+ if (xn->state == IDPF_VC_XN_SHUTDOWN) {
+ retval = -ENXIO;
+ goto only_unlock;
+ } else if (xn->state != IDPF_VC_XN_IDLE) {
+ /* We're just going to clobber this transaction even though
+ * it's not IDLE. If we don't reuse it we could theoretically
+ * eventually leak all the free transactions and not be able to
+ * send any messages. At least this way we make an attempt to
+ * remain functional even though something really bad is
+ * happening that's corrupting what was supposed to be free
+ * transactions.
*/
- if (op && ctlq_msg.cookie.mbx.chnl_opcode != op)
- goto post_buffs;
+ WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
+ xn->idx, xn->vc_op);
+ }
- err = idpf_find_vport(adapter, &vport, &ctlq_msg);
- if (err)
- goto post_buffs;
+ xn->reply = params->recv_buf;
+ xn->reply_sz = 0;
+ xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
+ xn->vc_op = params->vc_op;
+ xn->async_handler = params->async_handler;
+ idpf_vc_xn_unlock(xn);
- if (ctlq_msg.data_len)
- payload_size = ctlq_msg.ctx.indirect.payload->size;
+ if (!params->async)
+ reinit_completion(&xn->completed);
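+ /* The reply echoes this 16-bit cookie back: the index field locates
+ * the transaction and the salt field rejects stale replies in
+ * idpf_vc_xn_forward_reply().
+ */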
+ cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
+ FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
- /* All conditions are met. Either a message requested is
- * received or we received a message to be processed
- */
- switch (ctlq_msg.cookie.mbx.chnl_opcode) {
- case VIRTCHNL2_OP_VERSION:
- case VIRTCHNL2_OP_GET_CAPS:
- if (ctlq_msg.cookie.mbx.chnl_retval) {
- dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n",
- ctlq_msg.cookie.mbx.chnl_opcode,
- ctlq_msg.cookie.mbx.chnl_retval);
- err = -EBADMSG;
- } else if (msg) {
- memcpy(msg, ctlq_msg.ctx.indirect.payload->va,
- min_t(int, payload_size, msg_size));
- }
- work_done = true;
- break;
- case VIRTCHNL2_OP_CREATE_VPORT:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_CREATE_VPORT,
- IDPF_VC_CREATE_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_ENABLE_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ENA_VPORT,
- IDPF_VC_ENA_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_DISABLE_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DIS_VPORT,
- IDPF_VC_DIS_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_DESTROY_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DESTROY_VPORT,
- IDPF_VC_DESTROY_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_CONFIG_TXQ,
- IDPF_VC_CONFIG_TXQ_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_CONFIG_RXQ,
- IDPF_VC_CONFIG_RXQ_ERR);
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ENA_QUEUES,
- IDPF_VC_ENA_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DIS_QUEUES,
- IDPF_VC_DIS_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ADD_QUEUES,
- IDPF_VC_ADD_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_DEL_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DEL_QUEUES,
- IDPF_VC_DEL_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_MAP_IRQ,
- IDPF_VC_MAP_IRQ_ERR);
- break;
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_UNMAP_IRQ,
- IDPF_VC_UNMAP_IRQ_ERR);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_STATS,
- IDPF_VC_GET_STATS_ERR);
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_RSS_LUT,
- IDPF_VC_GET_RSS_LUT_ERR);
- break;
- case VIRTCHNL2_OP_SET_RSS_LUT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_SET_RSS_LUT,
- IDPF_VC_SET_RSS_LUT_ERR);
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_RSS_KEY,
- IDPF_VC_GET_RSS_KEY_ERR);
- break;
- case VIRTCHNL2_OP_SET_RSS_KEY:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_SET_RSS_KEY,
- IDPF_VC_SET_RSS_KEY_ERR);
- break;
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_SET_SRIOV_VFS,
- IDPF_VC_SET_SRIOV_VFS_ERR);
- break;
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_ALLOC_VECTORS,
- IDPF_VC_ALLOC_VECTORS_ERR);
- break;
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_DEALLOC_VECTORS,
- IDPF_VC_DEALLOC_VECTORS_ERR);
- break;
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_GET_PTYPE_INFO,
- IDPF_VC_GET_PTYPE_INFO_ERR);
- break;
- case VIRTCHNL2_OP_LOOPBACK:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_LOOPBACK_STATE,
- IDPF_VC_LOOPBACK_STATE_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
- /* This message can only be sent asynchronously. As
- * such we'll have lost the context in which it was
- * called and thus can only really report if it looks
- * like an error occurred. Don't bother setting ERR bit
- * or waking chnl_wq since no work queue will be waiting
- * to read the message.
- */
- if (ctlq_msg.cookie.mbx.chnl_retval) {
- dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- }
- break;
- case VIRTCHNL2_OP_ADD_MAC_ADDR:
- vport_config = adapter->vport_config[vport->idx];
- if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ,
- vport_config->flags)) {
- /* Message was sent asynchronously. We don't
- * normally print errors here, instead
- * prefer to handle errors in the function
- * calling wait_for_event. However, if
- * asynchronous, the context in which the
- * message was sent is lost. We can't really do
- * anything about at it this point, but we
- * should at a minimum indicate that it looks
- * like something went wrong. Also don't bother
- * setting ERR bit or waking vchnl_wq since no
- * one will be waiting to read the async
- * message.
- */
- if (ctlq_msg.cookie.mbx.chnl_retval)
- dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- break;
- }
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ADD_MAC_ADDR,
- IDPF_VC_ADD_MAC_ADDR_ERR);
- break;
- case VIRTCHNL2_OP_DEL_MAC_ADDR:
- vport_config = adapter->vport_config[vport->idx];
- if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ,
- vport_config->flags)) {
- /* Message was sent asynchronously like the
- * VIRTCHNL2_OP_ADD_MAC_ADDR
- */
- if (ctlq_msg.cookie.mbx.chnl_retval)
- dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- break;
- }
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DEL_MAC_ADDR,
- IDPF_VC_DEL_MAC_ADDR_ERR);
- break;
- case VIRTCHNL2_OP_EVENT:
- idpf_recv_event_msg(vport, &ctlq_msg);
- break;
- default:
- dev_warn(&adapter->pdev->dev,
- "Unhandled virtchnl response %d\n",
- ctlq_msg.cookie.mbx.chnl_opcode);
- break;
- }
+ retval = idpf_send_mb_msg(adapter, params->vc_op,
+ send_buf->iov_len, send_buf->iov_base,
+ cookie);
+ if (retval) {
+ idpf_vc_xn_lock(xn);
+ goto release_and_unlock;
+ }
-post_buffs:
- if (ctlq_msg.data_len)
- dma_mem = ctlq_msg.ctx.indirect.payload;
- else
- num_q_msg = 0;
+ if (params->async)
+ return 0;
- err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq,
- &num_q_msg, &dma_mem);
- /* If post failed clear the only buffer we supplied */
- if (err && dma_mem)
- dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
- dma_mem->va, dma_mem->pa);
+ wait_for_completion_timeout(&xn->completed,
+ msecs_to_jiffies(params->timeout_ms));
- /* Applies only if we are looking for a specific opcode */
- if (work_done)
- break;
+ /* No need to check the return value; we check the final state of the
+ * transaction below. It's possible the transaction effectively gets more
+ * time than the specified timeout if we are preempted here, after
+ * wait_for_completion_timeout returns. This should be a non-issue,
+ * however.
+ */
+ idpf_vc_xn_lock(xn);
+ switch (xn->state) {
+ case IDPF_VC_XN_SHUTDOWN:
+ retval = -ENXIO;
+ goto only_unlock;
+ case IDPF_VC_XN_WAITING:
+ dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed out (op %d, %dms)\n",
+ params->vc_op, params->timeout_ms);
+ retval = -ETIME;
+ break;
+ case IDPF_VC_XN_COMPLETED_SUCCESS:
+ retval = xn->reply_sz;
+ break;
+ case IDPF_VC_XN_COMPLETED_FAILED:
+ dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
+ params->vc_op);
+ retval = -EIO;
+ break;
+ default:
+ /* Invalid state. */
+ WARN_ON_ONCE(1);
+ retval = -EIO;
+ break;
}
- return err;
+release_and_unlock:
+ idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+ /* If we receive a VC reply after here, it will be dropped. */
+only_unlock:
+ idpf_vc_xn_unlock(xn);
+
+ return retval;
}
/**
- * __idpf_wait_for_event - wrapper function for wait on virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
- * @timeout: Max time to wait
+ * idpf_vc_xn_forward_async - Handle async reply receives
+ * @adapter: private data struct
+ * @xn: transaction to handle
+ * @ctlq_msg: corresponding ctlq_msg
*
- * Checks if state is set upon expiry of timeout. Returns 0 on success,
- * negative on failure.
+ * For async sends we're going to lose the caller's context, so if an
+ * async_handler was provided it can deal with the reply; otherwise we'll just
+ * check and report if there is an error.
 */
-static int __idpf_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check,
- int timeout)
+static int
+idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
{
- int time_to_wait, num_waits;
- wait_queue_head_t *vchnl_wq;
- unsigned long *vc_state;
+ int err = 0;
- time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT);
- num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT);
+ if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+ xn->reply_sz = 0;
+ err = -EINVAL;
+ goto release_bufs;
+ }
- if (vport) {
- vchnl_wq = &vport->vchnl_wq;
- vc_state = vport->vc_state;
- } else {
- vchnl_wq = &adapter->vchnl_wq;
- vc_state = adapter->vc_state;
+ if (xn->async_handler) {
+ err = xn->async_handler(adapter, xn, ctlq_msg);
+ goto release_bufs;
+ }
+
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ xn->reply_sz = 0;
+ dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EINVAL;
}
- while (num_waits) {
- int event;
+release_bufs:
+ idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+
+ return err;
+}
+
+/**
+ * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @ctlq_msg: controlq message to send back to receiving thread
+ */
+static int
+idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ const void *payload = NULL;
+ size_t payload_size = 0;
+ struct idpf_vc_xn *xn;
+ u16 msg_info;
+ int err = 0;
+ u16 xn_idx;
+ u16 salt;
+
+ msg_info = ctlq_msg->ctx.sw_cookie.data;
+ xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
+ if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
+ xn_idx);
+ return -EINVAL;
+ }
+ xn = &adapter->vcxn_mngr->ring[xn_idx];
+ salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
+ if (xn->salt != salt) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
+ xn->salt, salt);
+ return -EINVAL;
+ }
- /* If we are here and a reset is detected do not wait but
- * return. Reset timing is out of drivers control. So
- * while we are cleaning resources as part of reset if the
- * underlying HW mailbox is gone, wait on mailbox messages
- * is not meaningful
+ idpf_vc_xn_lock(xn);
+ switch (xn->state) {
+ case IDPF_VC_XN_WAITING:
+ /* success */
+ break;
+ case IDPF_VC_XN_IDLE:
+ dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ case IDPF_VC_XN_SHUTDOWN:
+ /* ENXIO is a bit special here as the recv msg loop uses that to
+ * know if it should stop trying to clean the ring if we lost
+ * the virtchnl. We need to stop playing with registers and
+ * yield.
*/
- if (idpf_is_reset_detected(adapter))
- return 0;
+ err = -ENXIO;
+ goto out_unlock;
+ case IDPF_VC_XN_ASYNC:
+ err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
+ idpf_vc_xn_unlock(xn);
+ return err;
+ default:
+ dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EBUSY;
+ goto out_unlock;
+ }
- event = wait_event_timeout(*vchnl_wq,
- test_and_clear_bit(state, vc_state),
- msecs_to_jiffies(time_to_wait));
- if (event) {
- if (test_and_clear_bit(err_check, vc_state)) {
- dev_err(&adapter->pdev->dev, "VC response error %s\n",
- idpf_vport_vc_state_str[err_check]);
+ if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+ xn->reply_sz = 0;
+ xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+ err = -EINVAL;
+ goto out_unlock;
+ }
- return -EINVAL;
- }
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ xn->reply_sz = 0;
+ xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+ err = -EINVAL;
+ goto out_unlock;
+ }
- return 0;
- }
- num_waits--;
+ if (ctlq_msg->data_len) {
+ payload = ctlq_msg->ctx.indirect.payload->va;
+ payload_size = ctlq_msg->ctx.indirect.payload->size;
}
- /* Timeout occurred */
- dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n",
- idpf_vport_vc_state_str[state]);
+ xn->reply_sz = payload_size;
+ xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
- return -ETIMEDOUT;
+ if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
+ memcpy(xn->reply.iov_base, payload,
+ min_t(size_t, xn->reply.iov_len, payload_size));
+
+out_unlock:
+ idpf_vc_xn_unlock(xn);
+ /* we _cannot_ hold the xn lock while calling complete(): it takes
+ * the same wait.lock we reuse for the transaction state
+ */
+ complete(&xn->completed);
+
+ return err;
}
/**
- * idpf_min_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
+ * idpf_recv_mb_msg - Receive message over mailbox
+ * @adapter: Driver specific private structure
*
- * Returns 0 on success, negative on failure.
+ * Will receive control queue messages and post the receive buffers. Returns 0
+ * on success and negative on failure.
*/
-static int idpf_min_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check)
+int idpf_recv_mb_msg(struct idpf_adapter *adapter)
{
- return __idpf_wait_for_event(adapter, vport, state, err_check,
- IDPF_WAIT_FOR_EVENT_TIMEO_MIN);
-}
+ struct idpf_ctlq_msg ctlq_msg;
+ struct idpf_dma_mem *dma_mem;
+ int post_err, err;
+ u16 num_recv;
-/**
- * idpf_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout after 500ms
- * @err_check: check if this specific error bit is set
- *
- * Returns 0 on success, negative on failure.
- */
-static int idpf_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check)
-{
- /* Increasing the timeout in __IDPF_INIT_SW flow to consider large
- * number of VF's mailbox message responses. When a message is received
- * on mailbox, this thread is woken up by the idpf_recv_mb_msg before
- * the timeout expires. Only in the error case i.e. if no message is
- * received on mailbox, we wait for the complete timeout which is
- * less likely to happen.
- */
- return __idpf_wait_for_event(adapter, vport, state, err_check,
- IDPF_WAIT_FOR_EVENT_TIMEO);
+ while (1) {
+ /* This will get at most num_recv messages and write how many
+ * were actually received back into num_recv.
+ */
+ num_recv = 1;
+ err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
+ if (err || !num_recv)
+ break;
+
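+ /* Recycle the indirect payload buffer if the message had one;
+ * otherwise there is nothing to post back (num_recv = 0 below).
+ */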
+ if (ctlq_msg.data_len) {
+ dma_mem = ctlq_msg.ctx.indirect.payload;
+ } else {
+ dma_mem = NULL;
+ num_recv = 0;
+ }
+
+ if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
+ idpf_recv_event_msg(adapter, &ctlq_msg);
+ else
+ err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
+
+ post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
+ adapter->hw.arq,
+ &num_recv, &dma_mem);
+
+ /* If post failed clear the only buffer we supplied */
+ if (post_err) {
+ if (dma_mem)
+ dmam_free_coherent(&adapter->pdev->dev,
+ dma_mem->size, dma_mem->va,
+ dma_mem->pa);
+ break;
+ }
+
+ /* virtchnl trying to shutdown, stop cleaning */
+ if (err == -ENXIO)
+ break;
+ }
+
+ return err;
}
/**
@@ -785,7 +776,11 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
*/
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_version_info vvi;
+ ssize_t reply_sz;
+ u32 major, minor;
+ int err = 0;
if (adapter->virt_ver_maj) {
vvi.major = cpu_to_le32(adapter->virt_ver_maj);
@@ -795,43 +790,29 @@ static int idpf_send_ver_msg(struct idpf_adapter *adapter)
vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
}
- return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi),
- (u8 *)&vvi);
-}
-
-/**
- * idpf_recv_ver_msg - Receive virtchnl version message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need
- * to send version message again, otherwise negative on failure.
- */
-static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
-{
- struct virtchnl2_version_info vvi;
- u32 major, minor;
- int err;
+ xn_params.vc_op = VIRTCHNL2_OP_VERSION;
+ xn_params.send_buf.iov_base = &vvi;
+ xn_params.send_buf.iov_len = sizeof(vvi);
+ xn_params.recv_buf = xn_params.send_buf;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi,
- sizeof(vvi));
- if (err)
- return err;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(vvi))
+ return -EIO;
major = le32_to_cpu(vvi.major);
minor = le32_to_cpu(vvi.minor);
if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
- dev_warn(&adapter->pdev->dev,
- "Virtchnl major version (%d) greater than supported\n",
- major);
-
+ dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
return -EINVAL;
}
if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
minor > IDPF_VIRTCHNL_VERSION_MINOR)
- dev_warn(&adapter->pdev->dev,
- "Virtchnl minor version (%d) didn't match\n", minor);
+ dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");
/* If we have a mismatch, resend version to update receiver on what
* version we will use.
@@ -856,7 +837,9 @@ static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
*/
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
- struct virtchnl2_get_capabilities caps = { };
+ struct virtchnl2_get_capabilities caps = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
caps.csum_caps =
cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
@@ -913,21 +896,20 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_PROMISC |
VIRTCHNL2_CAP_LOOPBACK);
- return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps),
- (u8 *)&caps);
-}
+ xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
+ xn_params.send_buf.iov_base = &caps;
+ xn_params.send_buf.iov_len = sizeof(caps);
+ xn_params.recv_buf.iov_base = &adapter->caps;
+ xn_params.recv_buf.iov_len = sizeof(adapter->caps);
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
-/**
- * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl get capabilities message. Returns 0 on success, negative on
- * failure.
- */
-static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter)
-{
- return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps,
- sizeof(struct virtchnl2_get_capabilities));
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(adapter->caps))
+ return -EIO;
+
+ return 0;
}
/**
@@ -1254,8 +1236,10 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q)
{
struct virtchnl2_create_vport *vport_msg;
+ struct idpf_vc_xn_params xn_params = {};
u16 idx = adapter->next_vport;
int err, buf_size;
+ ssize_t reply_sz;
buf_size = sizeof(struct virtchnl2_create_vport);
if (!adapter->vport_params_reqd[idx]) {
@@ -1286,35 +1270,38 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
return err;
}
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, buf_size,
- (u8 *)vport_msg);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT,
- IDPF_VC_CREATE_VPORT_ERR);
- if (err) {
- dev_err(&adapter->pdev->dev, "Failed to receive create vport message");
-
- goto rel_lock;
- }
-
if (!adapter->vport_params_recvd[idx]) {
adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
GFP_KERNEL);
if (!adapter->vport_params_recvd[idx]) {
err = -ENOMEM;
- goto rel_lock;
+ goto free_vport_params;
}
}
- vport_msg = adapter->vport_params_recvd[idx];
- memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+ xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
+ xn_params.send_buf.iov_base = vport_msg;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0) {
+ err = reply_sz;
+ goto free_vport_params;
+ }
+ if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
+ err = -EIO;
+ goto free_vport_params;
+ }
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
+ return 0;
+
+free_vport_params:
+ kfree(adapter->vport_params_recvd[idx]);
+ adapter->vport_params_recvd[idx] = NULL;
+ kfree(adapter->vport_params_reqd[idx]);
+ adapter->vport_params_reqd[idx] = NULL;
return err;
}
@@ -1366,26 +1353,19 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
*/
int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT,
- IDPF_VC_DESTROY_VPORT_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1397,26 +1377,19 @@ rel_lock:
*/
int idpf_send_enable_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT,
- IDPF_VC_ENA_VPORT_ERR);
+ xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1428,26 +1401,19 @@ rel_lock:
*/
int idpf_send_disable_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT,
- IDPF_VC_DIS_VPORT_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1459,11 +1425,13 @@ rel_lock:
*/
static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
{
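+ /* __free(kfree) frees these automatically on every return path, so
+ * the error exits below can simply return without cleanup labels.
+ */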
- struct virtchnl2_config_tx_queues *ctq;
+ struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
+ struct virtchnl2_txq_info *qi __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
int totqs, num_msgs, num_chunks;
- struct virtchnl2_txq_info *qi;
- int err = 0, i, k = 0;
+ ssize_t reply_sz;
+ int i, k = 0;
totqs = vport->num_txq + vport->num_complq;
qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
@@ -1524,10 +1492,8 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
}
/* Make sure accounting agrees */
- if (k != totqs) {
- err = -EINVAL;
- goto error;
- }
+ if (k != totqs)
+ return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
* sending a control queue message buffer that is too large
@@ -1541,12 +1507,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
buf_sz = struct_size(ctq, qinfo, num_chunks);
ctq = kzalloc(buf_sz, GFP_KERNEL);
- if (!ctq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!ctq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
for (i = 0, k = 0; i < num_msgs; i++) {
memset(ctq, 0, buf_sz);
@@ -1554,17 +1519,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
ctq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(vport->adapter,
- VIRTCHNL2_OP_CONFIG_TX_QUEUES,
- buf_sz, (u8 *)ctq);
- if (err)
- goto mbx_error;
-
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_CONFIG_TXQ,
- IDPF_VC_CONFIG_TXQ_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = ctq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
totqs -= num_chunks;
@@ -1573,13 +1532,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
buf_sz = struct_size(ctq, qinfo, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(ctq);
-error:
- kfree(qi);
-
- return err;
+ return 0;
}
/**
@@ -1591,11 +1544,13 @@ error:
*/
static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
{
- struct virtchnl2_config_rx_queues *crq;
+ struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
+ struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
int totqs, num_msgs, num_chunks;
- struct virtchnl2_rxq_info *qi;
- int err = 0, i, k = 0;
+ ssize_t reply_sz;
+ int i, k = 0;
totqs = vport->num_rxq + vport->num_bufq;
qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
@@ -1676,10 +1631,8 @@ common_qi_fields:
}
/* Make sure accounting agrees */
- if (k != totqs) {
- err = -EINVAL;
- goto error;
- }
+ if (k != totqs)
+ return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
* sending a control queue message buffer that is too large
@@ -1693,12 +1646,11 @@ common_qi_fields:
buf_sz = struct_size(crq, qinfo, num_chunks);
crq = kzalloc(buf_sz, GFP_KERNEL);
- if (!crq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!crq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
for (i = 0, k = 0; i < num_msgs; i++) {
memset(crq, 0, buf_sz);
@@ -1706,17 +1658,11 @@ common_qi_fields:
crq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(vport->adapter,
- VIRTCHNL2_OP_CONFIG_RX_QUEUES,
- buf_sz, (u8 *)crq);
- if (err)
- goto mbx_error;
-
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_CONFIG_RXQ,
- IDPF_VC_CONFIG_RXQ_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = crq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
totqs -= num_chunks;
@@ -1725,42 +1671,28 @@ common_qi_fields:
buf_sz = struct_size(crq, qinfo, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(crq);
-error:
- kfree(qi);
-
- return err;
+ return 0;
}
/**
* idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
* queues message
* @vport: virtual port data structure
- * @vc_op: virtchnl op code to send
+ * @ena: if true enable, false disable
*
* Send enable or disable queues virtchnl message. Returns 0 on success,
* negative on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
{
+ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
+ struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_queue_chunks *qcs;
- struct virtchnl2_queue_chunk *qc;
u32 config_sz, chunk_sz, buf_sz;
- int i, j, k = 0, err = 0;
-
- /* validate virtchnl op */
- switch (vc_op) {
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- break;
- default:
- return -EINVAL;
- }
+ ssize_t reply_sz;
+ int i, j, k = 0;
num_txq = vport->num_txq + vport->num_complq;
num_rxq = vport->num_rxq + vport->num_bufq;
@@ -1779,10 +1711,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
}
- if (vport->num_txq != k) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_txq != k)
+ return -EINVAL;
if (!idpf_is_queue_model_split(vport->txq_model))
goto setup_rx;
@@ -1794,10 +1724,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
- if (vport->num_complq != (k - vport->num_txq)) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_complq != (k - vport->num_txq))
+ return -EINVAL;
setup_rx:
for (i = 0; i < vport->num_rxq_grp; i++) {
@@ -1823,10 +1751,8 @@ setup_rx:
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
}
- if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
+ return -EINVAL;
if (!idpf_is_queue_model_split(vport->rxq_model))
goto send_msg;
@@ -1845,10 +1771,8 @@ setup_rx:
}
if (vport->num_bufq != k - (vport->num_txq +
vport->num_complq +
- vport->num_rxq)) {
- err = -EINVAL;
- goto error;
- }
+ vport->num_rxq))
+ return -EINVAL;
send_msg:
/* Chunk up the queue info into multiple messages */
@@ -1861,12 +1785,16 @@ send_msg:
buf_sz = struct_size(eq, chunks.chunks, num_chunks);
eq = kzalloc(buf_sz, GFP_KERNEL);
- if (!eq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!eq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ if (ena) {
+ xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ } else {
+ xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ }
for (i = 0, k = 0; i < num_msgs; i++) {
memset(eq, 0, buf_sz);
@@ -1875,20 +1803,11 @@ send_msg:
qcs = &eq->chunks;
memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq);
- if (err)
- goto mbx_error;
-
- if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES)
- err = idpf_wait_for_event(adapter, vport,
- IDPF_VC_ENA_QUEUES,
- IDPF_VC_ENA_QUEUES_ERR);
- else
- err = idpf_min_wait_for_event(adapter, vport,
- IDPF_VC_DIS_QUEUES,
- IDPF_VC_DIS_QUEUES_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = eq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
num_q -= num_chunks;
@@ -1897,13 +1816,7 @@ send_msg:
buf_sz = struct_size(eq, chunks.chunks, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(eq);
-error:
- kfree(qc);
-
- return err;
+ return 0;
}
/**
@@ -1917,12 +1830,13 @@ error:
*/
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_queue_vector_maps *vqvm;
- struct virtchnl2_queue_vector *vqv;
+ struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
+ struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
u32 num_msgs, num_chunks, num_q;
- int i, j, k = 0, err = 0;
+ ssize_t reply_sz;
+ int i, j, k = 0;
num_q = vport->num_txq + vport->num_rxq;
@@ -1952,10 +1866,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
}
- if (vport->num_txq != k) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_txq != k)
+ return -EINVAL;
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
@@ -1982,15 +1894,11 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
if (idpf_is_queue_model_split(vport->txq_model)) {
- if (vport->num_rxq != k - vport->num_complq) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - vport->num_complq)
+ return -EINVAL;
} else {
- if (vport->num_rxq != k - vport->num_txq) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - vport->num_txq)
+ return -EINVAL;
}
/* Chunk up the vector info into multiple messages */
@@ -2003,39 +1911,28 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
buf_sz = struct_size(vqvm, qv_maps, num_chunks);
vqvm = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqvm) {
- err = -ENOMEM;
- goto error;
- }
+ if (!vqvm)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ if (map) {
+ xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ } else {
+ xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ }
for (i = 0, k = 0; i < num_msgs; i++) {
memset(vqvm, 0, buf_sz);
+ xn_params.send_buf.iov_base = vqvm;
+ xn_params.send_buf.iov_len = buf_sz;
vqvm->vport_id = cpu_to_le32(vport->vport_id);
vqvm->num_qv_maps = cpu_to_le16(num_chunks);
memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
- if (map) {
- err = idpf_send_mb_msg(adapter,
- VIRTCHNL2_OP_MAP_QUEUE_VECTOR,
- buf_sz, (u8 *)vqvm);
- if (!err)
- err = idpf_wait_for_event(adapter, vport,
- IDPF_VC_MAP_IRQ,
- IDPF_VC_MAP_IRQ_ERR);
- } else {
- err = idpf_send_mb_msg(adapter,
- VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
- buf_sz, (u8 *)vqvm);
- if (!err)
- err =
- idpf_min_wait_for_event(adapter, vport,
- IDPF_VC_UNMAP_IRQ,
- IDPF_VC_UNMAP_IRQ_ERR);
- }
- if (err)
- goto mbx_error;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
num_q -= num_chunks;
@@ -2044,13 +1941,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
buf_sz = struct_size(vqvm, qv_maps, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(vqvm);
-error:
- kfree(vqv);
-
- return err;
+ return 0;
}
/**
@@ -2062,7 +1953,7 @@ error:
*/
int idpf_send_enable_queues_msg(struct idpf_vport *vport)
{
- return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES);
+ return idpf_send_ena_dis_queues_msg(vport, true);
}
/**
@@ -2076,7 +1967,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
int err, i;
- err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
+ err = idpf_send_ena_dis_queues_msg(vport, false);
if (err)
return err;
@@ -2087,8 +1978,10 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
/* schedule the napi to receive all the marker packets */
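+ /* Disabling bottom halves around the loop batches the wakeups and
+ * guarantees the raised NET_RX softirqs run as soon as
+ * local_bh_enable() is called.
+ */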
+ local_bh_disable();
for (i = 0; i < vport->num_q_vectors; i++)
napi_schedule(&vport->q_vectors[i].napi);
+ local_bh_enable();
return idpf_wait_for_marker_event(vport);
}
@@ -2122,22 +2015,21 @@ static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchun
*/
int idpf_send_delete_queues_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
struct virtchnl2_create_vport *vport_params;
struct virtchnl2_queue_reg_chunks *chunks;
- struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
u16 vport_idx = vport->idx;
- int buf_size, err;
+ ssize_t reply_sz;
u16 num_chunks;
+ int buf_size;
- vport_config = adapter->vport_config[vport_idx];
+ vport_config = vport->adapter->vport_config[vport_idx];
if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
+ chunks = &vport_config->req_qs_chunks->chunks;
} else {
- vport_params = adapter->vport_params_recvd[vport_idx];
+ vport_params = vport->adapter->vport_params_recvd[vport_idx];
chunks = &vport_params->chunks;
}
@@ -2154,21 +2046,13 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
num_chunks);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES,
- buf_size, (u8 *)eq);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES,
- IDPF_VC_DEL_QUEUES_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(eq);
+ xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = eq;
+ xn_params.send_buf.iov_len = buf_size;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2203,14 +2087,21 @@ int idpf_send_config_queues_msg(struct idpf_vport *vport)
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
- struct virtchnl2_add_queues aq = { };
- struct virtchnl2_add_queues *vc_msg;
+ struct virtchnl2_add_queues aq = {};
u16 vport_idx = vport->idx;
- int size, err;
+ ssize_t reply_sz;
+ int size;
+
+ vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!vc_msg)
+ return -ENOMEM;
- vport_config = adapter->vport_config[vport_idx];
+ vport_config = vport->adapter->vport_config[vport_idx];
+ kfree(vport_config->req_qs_chunks);
+ vport_config->req_qs_chunks = NULL;
aq.vport_id = cpu_to_le32(vport->vport_id);
aq.num_tx_q = cpu_to_le16(num_tx_q);
@@ -2218,47 +2109,33 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
aq.num_rx_q = cpu_to_le16(num_rx_q);
aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
- mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES,
- sizeof(struct virtchnl2_add_queues), (u8 *)&aq);
- if (err)
- goto rel_lock;
-
- /* We want vport to be const to prevent incidental code changes making
- * changes to the vport config. We're making a special exception here
- * to discard const to use the virtchnl.
- */
- err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport,
- IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR);
- if (err)
- goto rel_lock;
-
- kfree(vport_config->req_qs_chunks);
- vport_config->req_qs_chunks = NULL;
+ xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &aq;
+ xn_params.send_buf.iov_len = sizeof(aq);
+ xn_params.recv_buf.iov_base = vc_msg;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg;
/* compare vc_msg num queues with vport num queues */
if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
- le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) {
- err = -EINVAL;
- goto rel_lock;
- }
+ le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
+ return -EINVAL;
size = struct_size(vc_msg, chunks.chunks,
le16_to_cpu(vc_msg->chunks.num_chunks));
- vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
- if (!vport_config->req_qs_chunks) {
- err = -ENOMEM;
- goto rel_lock;
- }
+ if (reply_sz < size)
+ return -EIO;
-rel_lock:
- mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock);
+ vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
+ if (!vport_config->req_qs_chunks)
+ return -ENOMEM;
- return err;
+ return 0;
}
/**
@@ -2270,53 +2147,49 @@ rel_lock:
*/
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
{
- struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec;
- struct virtchnl2_alloc_vectors ac = { };
+ struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
+ struct virtchnl2_alloc_vectors ac = {};
+ ssize_t reply_sz;
u16 num_vchunks;
- int size, err;
+ int size;
ac.num_vectors = cpu_to_le16(num_vectors);
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS,
- sizeof(ac), (u8 *)&ac);
- if (err)
- goto rel_lock;
+ rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcvd_vec)
+ return -ENOMEM;
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS,
- IDPF_VC_ALLOC_VECTORS_ERR);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
+ xn_params.send_buf.iov_base = &ac;
+ xn_params.send_buf.iov_len = sizeof(ac);
+ xn_params.recv_buf.iov_base = rcvd_vec;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg;
num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
-
size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
- if (size > sizeof(adapter->vc_msg)) {
- err = -EINVAL;
- goto rel_lock;
- }
+ if (reply_sz < size)
+ return -EIO;
+
+ if (size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
kfree(adapter->req_vec_chunks);
- adapter->req_vec_chunks = NULL;
- adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL);
- if (!adapter->req_vec_chunks) {
- err = -ENOMEM;
- goto rel_lock;
- }
+ adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
+ if (!adapter->req_vec_chunks)
+ return -ENOMEM;
- alloc_vec = adapter->req_vec_chunks;
- if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) {
+ if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
- err = -EINVAL;
+ return -EINVAL;
}
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2329,29 +2202,24 @@ int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
{
struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
- int buf_size, err;
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+ int buf_size;
buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size,
- (u8 *)vcs);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS,
- IDPF_VC_DEALLOC_VECTORS_ERR);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
+ xn_params.send_buf.iov_base = vcs;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2374,25 +2242,18 @@ static int idpf_get_max_vfs(struct idpf_adapter *adapter)
*/
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
{
- struct virtchnl2_sriov_vfs_info svi = { };
- int err;
+ struct virtchnl2_sriov_vfs_info svi = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
svi.num_vfs = cpu_to_le16(num_vfs);
+ xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &svi;
+ xn_params.send_buf.iov_len = sizeof(svi);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS,
- sizeof(svi), (u8 *)&svi);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS,
- IDPF_VC_SET_SRIOV_VFS_ERR);
-
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2405,10 +2266,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct rtnl_link_stats64 *netstats = &np->netstats;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_vport_stats stats_msg = { };
- struct virtchnl2_vport_stats *stats;
- int err;
+ struct virtchnl2_vport_stats stats_msg = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+
/* Don't send get_stats message if the link is down */
if (np->state <= __IDPF_VPORT_DOWN)
@@ -2416,46 +2277,38 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
stats_msg.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
+ xn_params.send_buf.iov_base = &stats_msg;
+ xn_params.send_buf.iov_len = sizeof(stats_msg);
+ xn_params.recv_buf = xn_params.send_buf;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS,
- sizeof(struct virtchnl2_vport_stats),
- (u8 *)&stats_msg);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS,
- IDPF_VC_GET_STATS_ERR);
- if (err)
- goto rel_lock;
-
- stats = (struct virtchnl2_vport_stats *)vport->vc_msg;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(stats_msg))
+ return -EIO;
spin_lock_bh(&np->stats_lock);
- netstats->rx_packets = le64_to_cpu(stats->rx_unicast) +
- le64_to_cpu(stats->rx_multicast) +
- le64_to_cpu(stats->rx_broadcast);
- netstats->rx_bytes = le64_to_cpu(stats->rx_bytes);
- netstats->rx_dropped = le64_to_cpu(stats->rx_discards);
- netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop);
- netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length);
-
- netstats->tx_packets = le64_to_cpu(stats->tx_unicast) +
- le64_to_cpu(stats->tx_multicast) +
- le64_to_cpu(stats->tx_broadcast);
- netstats->tx_bytes = le64_to_cpu(stats->tx_bytes);
- netstats->tx_errors = le64_to_cpu(stats->tx_errors);
- netstats->tx_dropped = le64_to_cpu(stats->tx_discards);
-
- vport->port_stats.vport_stats = *stats;
+ netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
+ le64_to_cpu(stats_msg.rx_multicast) +
+ le64_to_cpu(stats_msg.rx_broadcast);
+ netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
+ le64_to_cpu(stats_msg.tx_multicast) +
+ le64_to_cpu(stats_msg.tx_broadcast);
+ netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
+ netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
+ netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
+ netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
+ netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
+ netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
+
+ vport->port_stats.vport_stats = stats_msg;
spin_unlock_bh(&np->stats_lock);
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return 0;
}
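idpf_send_get_stats_msg() above also shows a small space optimization: the reply buffer aliases the request (xn_params.recv_buf = xn_params.send_buf). That is safe here because virtchnl2_vport_stats serves as both the request and the reply layout, and the reply is only read after idpf_vc_xn_exec() returns. A sketch of the idiom (vport_id is a placeholder):

	struct virtchnl2_vport_stats msg = {};

	msg.vport_id = cpu_to_le32(vport_id);
	xn_params.send_buf.iov_base = &msg;
	xn_params.send_buf.iov_len = sizeof(msg);
	xn_params.recv_buf = xn_params.send_buf;	/* reply overwrites request */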
/**
@@ -2467,70 +2320,70 @@ rel_lock:
*/
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_rss_lut *recv_rl;
+ struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
+ struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_rss_data *rss_data;
- struct virtchnl2_rss_lut *rl;
int buf_size, lut_buf_size;
- int i, err;
+ ssize_t reply_sz;
+ int i;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
rl = kzalloc(buf_size, GFP_KERNEL);
if (!rl)
return -ENOMEM;
rl->vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
- if (!get) {
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = rl;
+ xn_params.send_buf.iov_len = buf_size;
+
+ if (get) {
+ recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!recv_rl)
+ return -ENOMEM;
+ xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
+ xn_params.recv_buf.iov_base = recv_rl;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ } else {
rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
for (i = 0; i < rss_data->rss_lut_size; i++)
rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT,
- buf_size, (u8 *)rl);
- if (err)
- goto free_mem;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT,
- IDPF_VC_SET_RSS_LUT_ERR);
-
- goto free_mem;
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
}
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (!get)
+ return 0;
+ if (reply_sz < sizeof(struct virtchnl2_rss_lut))
+ return -EIO;
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT,
- buf_size, (u8 *)rl);
- if (err)
- goto free_mem;
+ lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
+ if (reply_sz < lut_buf_size)
+ return -EIO;
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT,
- IDPF_VC_GET_RSS_LUT_ERR);
- if (err)
- goto free_mem;
-
- recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg;
+	/* size didn't change, so we can reuse the existing LUT buffer */
if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
goto do_memcpy;
rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
kfree(rss_data->rss_lut);
- lut_buf_size = rss_data->rss_lut_size * sizeof(u32);
rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
if (!rss_data->rss_lut) {
rss_data->rss_lut_size = 0;
- err = -ENOMEM;
- goto free_mem;
+ return -ENOMEM;
}
do_memcpy:
- memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size);
-free_mem:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(rl);
+ memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);
- return err;
+ return 0;
}
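The recv_rl/rl locals above rely on the kernel's scope-based cleanup support: declaring a pointer with __free(kfree) arranges for kfree() to run automatically when the variable goes out of scope, which is what lets these functions return straight out of the middle of the logic without goto-based unwinding. A minimal standalone sketch of the idiom, assuming linux/cleanup.h and linux/slab.h (example_alloc_use() is illustrative only; the driver initializes such pointers to NULL first and assigns later, which is equally valid):

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static int example_alloc_use(size_t len)
	{
		/* kfree(buf) runs automatically on every return path */
		u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		if (len < 16)
			return -EINVAL;	/* no explicit kfree() needed */

		buf[0] = 0xab;		/* ... use the buffer ... */
		return 0;
	}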
/**
@@ -2542,68 +2395,70 @@ free_mem:
*/
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_rss_key *recv_rk;
+ struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
+ struct virtchnl2_rss_key *rk __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_rss_data *rss_data;
- struct virtchnl2_rss_key *rk;
- int i, buf_size, err;
+ ssize_t reply_sz;
+ int i, buf_size;
+ u16 key_size;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
rk = kzalloc(buf_size, GFP_KERNEL);
if (!rk)
return -ENOMEM;
rk->vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
-
+ xn_params.send_buf.iov_base = rk;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
if (get) {
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY,
- buf_size, (u8 *)rk);
- if (err)
- goto error;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY,
- IDPF_VC_GET_RSS_KEY_ERR);
- if (err)
- goto error;
-
- recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg;
- if (rss_data->rss_key_size !=
- le16_to_cpu(recv_rk->key_len)) {
- rss_data->rss_key_size =
- min_t(u16, NETDEV_RSS_KEY_LEN,
- le16_to_cpu(recv_rk->key_len));
- kfree(rss_data->rss_key);
- rss_data->rss_key = kzalloc(rss_data->rss_key_size,
- GFP_KERNEL);
- if (!rss_data->rss_key) {
- rss_data->rss_key_size = 0;
- err = -ENOMEM;
- goto error;
- }
- }
- memcpy(rss_data->rss_key, recv_rk->key_flex,
- rss_data->rss_key_size);
+ recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!recv_rk)
+ return -ENOMEM;
+
+ xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
+ xn_params.recv_buf.iov_base = recv_rk;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
} else {
rk->key_len = cpu_to_le16(rss_data->rss_key_size);
for (i = 0; i < rss_data->rss_key_size; i++)
rk->key_flex[i] = rss_data->rss_key[i];
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY,
- buf_size, (u8 *)rk);
- if (err)
- goto error;
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
+ }
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (!get)
+ return 0;
+ if (reply_sz < sizeof(struct virtchnl2_rss_key))
+ return -EIO;
+
+ key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
+ le16_to_cpu(recv_rk->key_len));
+ if (reply_sz < key_size)
+ return -EIO;
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY,
- IDPF_VC_SET_RSS_KEY_ERR);
+	/* key length didn't change, so reuse the existing buffer */
+ if (rss_data->rss_key_size == key_size)
+ goto do_memcpy;
+
+ rss_data->rss_key_size = key_size;
+ kfree(rss_data->rss_key);
+ rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
+ if (!rss_data->rss_key) {
+ rss_data->rss_key_size = 0;
+ return -ENOMEM;
}
-error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(rk);
+do_memcpy:
+ memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);
- return err;
+ return 0;
}
/**
@@ -2655,13 +2510,15 @@ static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
*/
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
+ struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
+ struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
- struct virtchnl2_get_ptype_info get_ptype_info;
int max_ptype, ptypes_recvd = 0, ptype_offset;
struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_get_ptype_info *ptype_info;
+ struct idpf_vc_xn_params xn_params = {};
u16 next_ptype_id = 0;
- int err = 0, i, j, k;
+ ssize_t reply_sz;
+ int i, j, k;
if (idpf_is_queue_model_split(vport->rxq_model))
max_ptype = IDPF_RX_MAX_PTYPE;
@@ -2670,43 +2527,44 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
+ if (!get_ptype_info)
+ return -ENOMEM;
+
ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
if (!ptype_info)
return -ENOMEM;
- mutex_lock(&adapter->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
+ xn_params.send_buf.iov_base = get_ptype_info;
+ xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
+ xn_params.recv_buf.iov_base = ptype_info;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
while (next_ptype_id < max_ptype) {
- get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id);
+ get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);
if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
- get_ptype_info.num_ptypes =
+ get_ptype_info->num_ptypes =
cpu_to_le16(max_ptype - next_ptype_id);
else
- get_ptype_info.num_ptypes =
+ get_ptype_info->num_ptypes =
cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
- sizeof(struct virtchnl2_get_ptype_info),
- (u8 *)&get_ptype_info);
- if (err)
- goto vc_buf_unlock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO,
- IDPF_VC_GET_PTYPE_INFO_ERR);
- if (err)
- goto vc_buf_unlock;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+ if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN)
+ return -EIO;
ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
- if (ptypes_recvd > max_ptype) {
- err = -EINVAL;
- goto vc_buf_unlock;
- }
+ if (ptypes_recvd > max_ptype)
+ return -EINVAL;
- next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) +
- le16_to_cpu(get_ptype_info.num_ptypes);
+ next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
+ le16_to_cpu(get_ptype_info->num_ptypes);
ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
@@ -2719,17 +2577,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
((u8 *)ptype_info + ptype_offset);
ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
- if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) {
- err = -EINVAL;
- goto vc_buf_unlock;
- }
+ if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
/* 0xFFFF indicates end of ptypes */
if (le16_to_cpu(ptype->ptype_id_10) ==
- IDPF_INVALID_PTYPE_ID) {
- err = 0;
- goto vc_buf_unlock;
- }
+ IDPF_INVALID_PTYPE_ID)
+ return 0;
if (idpf_is_queue_model_split(vport->rxq_model))
k = le16_to_cpu(ptype->ptype_id_10);
@@ -2857,11 +2711,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
}
}
-vc_buf_unlock:
- mutex_unlock(&adapter->vc_buf_lock);
- kfree(ptype_info);
-
- return err;
+ return 0;
}
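The loop above fetches the ptype table in fixed windows because a single mailbox reply is capped at IDPF_CTLQ_MAX_BUF_LEN; each pass advances start_ptype_id by the number of ptypes the previous request covered. The chunking arithmetic in isolation (a sketch; max_ptype and the per-buffer cap come from the driver):

	u16 next_id = 0, want;

	while (next_id < max_ptype) {
		want = min_t(u16, IDPF_RX_MAX_PTYPES_PER_BUF,
			     max_ptype - next_id);
		/* request ptypes [next_id, next_id + want) ... */
		next_id += want;
	}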
/**
@@ -2873,27 +2723,20 @@ vc_buf_unlock:
*/
int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_loopback loopback;
- int err;
+ ssize_t reply_sz;
loopback.vport_id = cpu_to_le32(vport->vport_id);
loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK,
- sizeof(loopback), (u8 *)&loopback);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &loopback;
+ xn_params.send_buf.iov_len = sizeof(loopback);
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_LOOPBACK_STATE,
- IDPF_VC_LOOPBACK_STATE_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2958,7 +2801,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
return -ENOENT;
}
- adapter->state = __IDPF_STARTUP;
+ adapter->state = __IDPF_VER_CHECK;
return 0;
}
@@ -3055,35 +2898,42 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
u16 num_max_vports;
int err = 0;
+ if (!adapter->vcxn_mngr) {
+ adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
+ if (!adapter->vcxn_mngr) {
+ err = -ENOMEM;
+ goto init_failed;
+ }
+ }
+ idpf_vc_xn_init(adapter->vcxn_mngr);
+
while (adapter->state != __IDPF_INIT_SW) {
switch (adapter->state) {
- case __IDPF_STARTUP:
- if (idpf_send_ver_msg(adapter))
- goto init_failed;
- adapter->state = __IDPF_VER_CHECK;
- goto restart;
case __IDPF_VER_CHECK:
- err = idpf_recv_ver_msg(adapter);
- if (err == -EIO) {
- return err;
- } else if (err == -EAGAIN) {
- adapter->state = __IDPF_STARTUP;
+ err = idpf_send_ver_msg(adapter);
+ switch (err) {
+ case 0:
+ /* success, move state machine forward */
+ adapter->state = __IDPF_GET_CAPS;
+ fallthrough;
+ case -EAGAIN:
goto restart;
- } else if (err) {
+ default:
+			/* Something bad happened; try again, but only a
+			 * few times.
+			 */
goto init_failed;
}
- if (idpf_send_get_caps_msg(adapter))
- goto init_failed;
- adapter->state = __IDPF_GET_CAPS;
- goto restart;
case __IDPF_GET_CAPS:
- if (idpf_recv_get_caps_msg(adapter))
+ err = idpf_send_get_caps_msg(adapter);
+ if (err)
goto init_failed;
adapter->state = __IDPF_INIT_SW;
break;
default:
dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
adapter->state);
+ err = -EINVAL;
goto init_failed;
}
break;
@@ -3142,7 +2992,9 @@ restart:
queue_delayed_work(adapter->init_wq, &adapter->init_task,
msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
- goto no_err;
+ set_bit(IDPF_VC_CORE_INIT, adapter->flags);
+
+ return 0;
err_intr_req:
cancel_delayed_work_sync(&adapter->serv_task);
@@ -3151,7 +3003,6 @@ err_intr_req:
err_netdev_alloc:
kfree(adapter->vports);
adapter->vports = NULL;
-no_err:
return err;
init_failed:
@@ -3168,7 +3019,9 @@ init_failed:
* register writes might not have taken effect. Retry to initialize
* the mailbox again
*/
- adapter->state = __IDPF_STARTUP;
+ adapter->state = __IDPF_VER_CHECK;
+ if (adapter->vcxn_mngr)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_dflt_mbx(adapter);
set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
@@ -3184,29 +3037,22 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
- int i;
+ if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
+ return;
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
- /* Set all bits as we dont know on which vc_state the vhnl_wq is
- * waiting on and wakeup the virtchnl workqueue even if it is waiting
- * for the response as we are going down
- */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- set_bit(i, adapter->vc_state);
- wake_up(&adapter->vchnl_wq);
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
idpf_vport_params_buf_rel(adapter);
- /* Clear all the bits */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- clear_bit(i, adapter->vc_state);
-
kfree(adapter->vports);
adapter->vports = NULL;
+
+ clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
}
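idpf_vc_core_deinit() is now guarded by the IDPF_VC_CORE_INIT flag that idpf_vc_core_init() sets only on full success, which makes teardown idempotent and safe to call even when init never completed. A sketch of the resulting pairing (example_remove() is hypothetical):

	static void example_remove(struct idpf_adapter *adapter)
	{
		/* Returns immediately unless idpf_vc_core_init()
		 * previously set IDPF_VC_CORE_INIT.
		 */
		idpf_vc_core_deinit(adapter);
	}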
/**
@@ -3622,6 +3468,75 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
}
/**
+ * idpf_mac_filter_async_handler - Async callback for mac filters
+ * @adapter: private data struct
+ * @xn: transaction for message
+ * @ctlq_msg: received message
+ *
+ * In some scenarios the driver can't sleep and wait for a reply (e.g. the
+ * stack is holding rtnl_lock) when adding a new mac filter, which makes it
+ * hard to deal with errors returned in the reply. The best we can
+ * ultimately do is remove the offending filter from our list and report
+ * the error.
+ */
+static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
+ struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct virtchnl2_mac_addr_list *ma_list;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_mac_addr *mac_addr;
+ struct idpf_mac_filter *f, *tmp;
+ struct list_head *ma_list_head;
+ struct idpf_vport *vport;
+ u16 num_entries;
+ int i;
+
+	/* if the request succeeded we're done; we only get here on failure */
+ if (!ctlq_msg->cookie.mbx.chnl_retval)
+ return 0;
+
+	/* make sure at least the base struct is there */
+ if (xn->reply_sz < sizeof(*ma_list))
+ goto invalid_payload;
+
+ ma_list = ctlq_msg->ctx.indirect.payload->va;
+ mac_addr = ma_list->mac_addr_list;
+ num_entries = le16_to_cpu(ma_list->num_mac_addr);
+ /* we should have received a buffer at least this big */
+ if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
+ goto invalid_payload;
+
+ vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
+ if (!vport)
+ goto invalid_payload;
+
+ vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)];
+ ma_list_head = &vport_config->user_config.mac_filter_list;
+
+	/* We can't do much to reconcile bad filters at this point; however, we
+	 * should at least remove them from our list one way or the other so we
+	 * have some idea which filters are actually good.
+ */
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+ list_for_each_entry_safe(f, tmp, ma_list_head, list)
+ for (i = 0; i < num_entries; i++)
+ if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
+ list_del(&f->list);
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+ dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
+ xn->vc_op);
+
+ return 0;
+
+invalid_payload:
+ dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
+ xn->vc_op, xn->reply_sz);
+
+ return -EINVAL;
+}
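An async transaction wires the handler above in through the transaction parameters instead of blocking for the reply: idpf_vc_xn_exec() returns once the message is posted, and the handler runs later from mailbox processing when the reply (or error) arrives. A sketch of the registration, mirroring idpf_add_del_mac_filters() below (fragment only):

	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	xn_params.vc_op = VIRTCHNL2_OP_ADD_MAC_ADDR;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.async = true;
	xn_params.async_handler = idpf_mac_filter_async_handler;
	/* send_buf filled as usual; no recv_buf for an async send */
	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);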
+
+/**
* idpf_add_del_mac_filters - Add/del mac filters
* @vport: Virtual port data structure
* @np: Netdev private structure
@@ -3634,17 +3549,21 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
struct idpf_netdev_priv *np,
bool add, bool async)
{
- struct virtchnl2_mac_addr_list *ma_list = NULL;
+ struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
+ struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
struct idpf_adapter *adapter = np->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
- enum idpf_vport_config_flags mac_flag;
- struct pci_dev *pdev = adapter->pdev;
- enum idpf_vport_vc_state vc, vc_err;
- struct virtchnl2_mac_addr *mac_addr;
- struct idpf_mac_filter *f, *tmp;
u32 num_msgs, total_filters = 0;
- int i = 0, k, err = 0;
- u32 vop;
+ struct idpf_mac_filter *f;
+ ssize_t reply_sz;
+ int i = 0, k;
+
+ xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
+ VIRTCHNL2_OP_DEL_MAC_ADDR;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.async = async;
+ xn_params.async_handler = idpf_mac_filter_async_handler;
vport_config = adapter->vport_config[np->vport_idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
@@ -3668,13 +3587,13 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
GFP_ATOMIC);
if (!mac_addr) {
- err = -ENOMEM;
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- goto error;
+
+ return -ENOMEM;
}
- list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list,
- list) {
+ list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
+ list) {
if (add && f->add) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
i++;
@@ -3693,26 +3612,11 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- if (add) {
- vop = VIRTCHNL2_OP_ADD_MAC_ADDR;
- vc = IDPF_VC_ADD_MAC_ADDR;
- vc_err = IDPF_VC_ADD_MAC_ADDR_ERR;
- mac_flag = IDPF_VPORT_ADD_MAC_REQ;
- } else {
- vop = VIRTCHNL2_OP_DEL_MAC_ADDR;
- vc = IDPF_VC_DEL_MAC_ADDR;
- vc_err = IDPF_VC_DEL_MAC_ADDR_ERR;
- mac_flag = IDPF_VPORT_DEL_MAC_REQ;
- }
-
/* Chunk up the filters into multiple messages to avoid
* sending a control queue message buffer that is too large
*/
num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
- if (!async)
- mutex_lock(&vport->vc_buf_lock);
-
for (i = 0, k = 0; i < num_msgs; i++) {
u32 entries_size, buf_size, num_entries;
@@ -3724,10 +3628,8 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
kfree(ma_list);
ma_list = kzalloc(buf_size, GFP_ATOMIC);
- if (!ma_list) {
- err = -ENOMEM;
- goto list_prep_error;
- }
+ if (!ma_list)
+ return -ENOMEM;
} else {
memset(ma_list, 0, buf_size);
}
@@ -3736,34 +3638,17 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
ma_list->num_mac_addr = cpu_to_le16(num_entries);
memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
- if (async)
- set_bit(mac_flag, vport_config->flags);
-
- err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list);
- if (err)
- goto mbx_error;
-
- if (!async) {
- err = idpf_wait_for_event(adapter, vport, vc, vc_err);
- if (err)
- goto mbx_error;
- }
+ xn_params.send_buf.iov_base = ma_list;
+ xn_params.send_buf.iov_len = buf_size;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_entries;
total_filters -= num_entries;
}
-mbx_error:
- if (!async)
- mutex_unlock(&vport->vc_buf_lock);
- kfree(ma_list);
-list_prep_error:
- kfree(mac_addr);
-error:
- if (err)
- dev_err(&pdev->dev, "Failed to add or del mac filters %d", err);
-
- return err;
+ return 0;
}
/**
@@ -3780,9 +3665,10 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
struct idpf_vport_user_config_data *config_data,
u32 vport_id)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_promisc_info vpi;
+ ssize_t reply_sz;
u16 flags = 0;
- int err;
if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
flags |= VIRTCHNL2_UNICAST_PROMISC;
@@ -3792,9 +3678,13 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
vpi.vport_id = cpu_to_le32(vport_id);
vpi.flags = cpu_to_le16(flags);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE,
- sizeof(struct virtchnl2_promisc_info),
- (u8 *)&vpi);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &vpi;
+ xn_params.send_buf.iov_len = sizeof(vpi);
+ /* setting promiscuous is only ever done asynchronously */
+ xn_params.async = true;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
new file mode 100644
index 000000000000..83da5d8da56b
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _IDPF_VIRTCHNL_H_
+#define _IDPF_VIRTCHNL_H_
+
+struct idpf_adapter;
+struct idpf_netdev_priv;
+struct idpf_vec_regs;
+struct idpf_vport;
+struct idpf_vport_max_q;
+struct idpf_vport_user_config_data;
+
+int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
+void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
+int idpf_vc_core_init(struct idpf_adapter *adapter);
+void idpf_vc_core_deinit(struct idpf_adapter *adapter);
+
+int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+ struct idpf_vec_regs *reg_vals);
+int idpf_queue_reg_init(struct idpf_vport *vport);
+int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+
+int idpf_recv_mb_msg(struct idpf_adapter *adapter);
+int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
+ u16 msg_size, u8 *msg, u16 cookie);
+
+void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
+u32 idpf_get_vport_id(struct idpf_vport *vport);
+int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
+int idpf_send_enable_vport_msg(struct idpf_vport *vport);
+int idpf_send_disable_vport_msg(struct idpf_vport *vport);
+
+int idpf_vport_adjust_qs(struct idpf_vport *vport);
+int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
+ u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
+int idpf_send_delete_queues_msg(struct idpf_vport *vport);
+int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+int idpf_send_disable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport);
+
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
+int idpf_get_vec_ids(struct idpf_adapter *adapter,
+ u16 *vecids, int num_vecids,
+ struct virtchnl2_vector_chunks *chunks);
+int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
+int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
+
+int idpf_add_del_mac_filters(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ bool add, bool async);
+int idpf_set_promiscuous(struct idpf_adapter *adapter,
+ struct idpf_vport_user_config_data *config_data,
+ u32 vport_id);
+int idpf_check_supported_desc_ids(struct idpf_vport *vport);
+int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
+int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
+int idpf_send_get_stats_msg(struct idpf_vport *vport);
+int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
+int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
+int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+
+#endif /* _IDPF_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index a2b759531cb7..3c2dc7bdebb5 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -637,7 +637,7 @@ struct igb_adapter {
struct timespec64 period;
} perout[IGB_N_PEROUT];
- char fw_version[32];
+ char fw_version[48];
#ifdef CONFIG_IGB_HWMON
struct hwmon_buff *igb_hwmon_buff;
bool ets;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index b66199c9bb3a..99977a22b843 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -3027,7 +3027,7 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
-static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int igb_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -3038,11 +3038,13 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
- edata->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_100baseT_Full);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->supported);
if (!hw->dev_spec._82575.eee_disable)
- edata->advertised =
- mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised,
+ adapter->eee_advert);
/* The IPCNFG and EEER registers are not supported on I354. */
if (hw->mac.type == e1000_i354) {
@@ -3068,7 +3070,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (ret_val)
return -ENODATA;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
break;
case e1000_i354:
case e1000_i210:
@@ -3079,7 +3081,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
if (ret_val)
return -ENODATA;
- edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data);
break;
default:
@@ -3099,18 +3101,20 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
edata->eee_enabled = false;
edata->eee_active = false;
edata->tx_lpi_enabled = false;
- edata->advertised &= ~edata->advertised;
+ linkmode_zero(edata->advertised);
}
return 0;
}
static int igb_set_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct e1000_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
bool adv1g_eee = true, adv100m_eee = true;
s32 ret_val;
@@ -3118,7 +3122,7 @@ static int igb_set_eee(struct net_device *netdev,
(hw->phy.media_type != e1000_media_type_copper))
return -EOPNOTSUPP;
- memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+ memset(&eee_curr, 0, sizeof(struct ethtool_keee));
ret_val = igb_get_eee(netdev, &eee_curr);
if (ret_val)
@@ -3138,14 +3142,21 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- if (!edata->advertised || (edata->advertised &
- ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+ if (linkmode_andnot(tmp, edata->advertised, supported)) {
dev_err(&adapter->pdev->dev,
"EEE Advertisement supports only 100Tx and/or 100T full duplex\n");
return -EINVAL;
}
- adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL);
- adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL);
+ adv100m_eee = linkmode_test_bit(
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->advertised);
+ adv1g_eee = linkmode_test_bit(
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised);
} else if (!edata->eee_enabled) {
dev_err(&adapter->pdev->dev,
@@ -3153,7 +3164,7 @@ static int igb_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
adapter->flags |= IGB_FLAG_EEE;
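The ethtool_keee conversion above replaces the old u32 advertisement masks with full linkmode bitmaps, so membership tests and validation go through the linkmode helpers instead of raw bitwise ops. A standalone sketch of the validation idiom used in igb_set_eee(), assuming linux/linkmode.h:

	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
	__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};

	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported);

	/* Nonzero iff any advertised bit falls outside the supported set. */
	if (linkmode_andnot(tmp, edata->advertised, supported))
		return -EINVAL;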
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 4df8d4153aa5..a3f100769e39 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -202,7 +202,7 @@ static struct notifier_block dca_notifier = {
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
-module_param(max_vfs, uint, 0);
+module_param(max_vfs, uint, 0444);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
@@ -2538,7 +2538,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -3069,7 +3069,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_fw_version fw;
- char *lbuf;
igb_get_fw_version(hw, &fw);
@@ -3077,34 +3076,36 @@ void igb_set_fw_version(struct igb_adapter *adapter)
case e1000_i210:
case e1000_i211:
if (!(igb_get_flash_presence_i210(hw))) {
- lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor,
- fw.invm_img_type);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
break;
}
fallthrough;
default:
/* if option rom is valid, display its version too */
if (fw.or_valid) {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d",
- fw.eep_major, fw.eep_minor,
- fw.etrack_id, fw.or_major, fw.or_build,
- fw.or_patch);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
/* no option rom */
} else if (fw.etrack_id != 0X0000) {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor,
- fw.etrack_id);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
} else {
- lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major,
- fw.eep_minor, fw.eep_build);
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
}
break;
}
-
- /* the truncate happens here if it doesn't fit */
- strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version));
- kfree(lbuf);
}
/**
@@ -6984,44 +6985,31 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 ack = 0, tsicr = rd32(E1000_TSICR);
+ u32 tsicr = rd32(E1000_TSICR);
struct ptp_clock_event event;
if (tsicr & TSINTR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= TSINTR_SYS_WRAP;
}
if (tsicr & E1000_TSICR_TXTS) {
/* retrieve hardware timestamp */
schedule_work(&adapter->ptp_tx_work);
- ack |= E1000_TSICR_TXTS;
}
- if (tsicr & TSINTR_TT0) {
+ if (tsicr & TSINTR_TT0)
igb_perout(adapter, 0);
- ack |= TSINTR_TT0;
- }
- if (tsicr & TSINTR_TT1) {
+ if (tsicr & TSINTR_TT1)
igb_perout(adapter, 1);
- ack |= TSINTR_TT1;
- }
- if (tsicr & TSINTR_AUTT0) {
+ if (tsicr & TSINTR_AUTT0)
igb_extts(adapter, 0);
- ack |= TSINTR_AUTT0;
- }
- if (tsicr & TSINTR_AUTT1) {
+ if (tsicr & TSINTR_AUTT1)
igb_extts(adapter, 1);
- ack |= TSINTR_AUTT1;
- }
-
- /* acknowledge the interrupts */
- wr32(E1000_TSICR, ack);
}
static irqreturn_t igb_msix_other(int irq, void *data)
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 319c544b9f04..f94570556120 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -957,7 +957,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
/* adjust timestamp for the TX latency based on link speed */
- if (adapter->hw.mac.type == e1000_i210) {
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_TX_LATENCY_10;
@@ -1003,6 +1003,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
ktime_t *timestamp)
{
struct igb_adapter *adapter = q_vector->adapter;
+ struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps ts;
__le64 *regval = (__le64 *)va;
int adjust = 0;
@@ -1022,7 +1023,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
/* adjust timestamp for the RX latency based on link speed */
- if (adapter->hw.mac.type == e1000_i210) {
+ if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
switch (adapter->link_speed) {
case SPEED_10:
adjust = IGB_I210_RX_LATENCY_10;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index a4d4f00e6a87..b0cf310e6f7b 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2655,7 +2655,7 @@ igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index 95d1e8c490a4..ebffd3054285 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -6,6 +6,7 @@
#
obj-$(CONFIG_IGC) += igc.o
+igc-$(CONFIG_IGC_LEDS) += igc_leds.o
igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 45430e246e9c..90316dc58630 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -168,7 +168,7 @@ struct igc_ring {
struct igc_adapter {
struct net_device *netdev;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
u16 eee_advert;
unsigned long state;
@@ -295,6 +295,9 @@ struct igc_adapter {
struct timespec64 start;
struct timespec64 period;
} perout[IGC_N_PEROUT];
+
+ /* LEDs */
+ struct mutex led_mutex;
};
void igc_up(struct igc_adapter *adapter);
@@ -567,7 +570,6 @@ struct igc_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
- struct net_device poll_dev;
/* for dynamic allocation of rings associated with this q_vector */
struct igc_ring ring[] ____cacheline_internodealigned_in_smp;
@@ -585,7 +587,7 @@ enum igc_filter_match_flags {
struct igc_nfc_filter {
u8 match_flags;
u16 etype;
- __be16 vlan_etype;
+ u16 vlan_etype;
u16 vlan_tci;
u16 vlan_tci_mask;
u8 src_addr[ETH_ALEN];
@@ -720,6 +722,8 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter);
void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
+int igc_led_setup(struct igc_adapter *adapter);
+
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
#define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS)
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index b95d2c86e803..1a64f1ca6ca8 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -981,7 +981,7 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter,
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
fsp->flow_type |= FLOW_EXT;
- fsp->h_ext.vlan_etype = rule->filter.vlan_etype;
+ fsp->h_ext.vlan_etype = htons(rule->filter.vlan_etype);
fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK;
}
@@ -1249,7 +1249,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule,
/* VLAN etype matching */
if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) {
- rule->filter.vlan_etype = fsp->h_ext.vlan_etype;
+ rule->filter.vlan_etype = ntohs(fsp->h_ext.vlan_etype);
rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE;
}
@@ -1623,18 +1623,17 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
}
static int igc_ethtool_get_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
u32 eeer;
if (hw->dev_spec._base.eee_enable)
- edata->advertised =
- mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+ mii_eee_cap1_mod_linkmode_t(edata->advertised,
+ adapter->eee_advert);
*edata = adapter->eee;
- edata->supported = SUPPORTED_Autoneg;
eeer = rd32(IGC_EEER);
@@ -1647,9 +1646,6 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
edata->eee_enabled = hw->dev_spec._base.eee_enable;
- edata->advertised = SUPPORTED_Autoneg;
- edata->lp_advertised = SUPPORTED_Autoneg;
-
/* Report correct negotiated EEE status for devices that
* wrongly report EEE at half-duplex
*/
@@ -1657,21 +1653,21 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
edata->eee_enabled = false;
edata->eee_active = false;
edata->tx_lpi_enabled = false;
- edata->advertised &= ~edata->advertised;
+ linkmode_zero(edata->advertised);
}
return 0;
}
static int igc_ethtool_set_eee(struct net_device *netdev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_hw *hw = &adapter->hw;
- struct ethtool_eee eee_curr;
+ struct ethtool_keee eee_curr;
s32 ret_val;
- memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+ memset(&eee_curr, 0, sizeof(struct ethtool_keee));
ret_val = igc_ethtool_get_eee(netdev, &eee_curr);
if (ret_val) {
@@ -1699,7 +1695,8 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+ adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
+
if (hw->dev_spec._base.eee_enable != edata->eee_enabled) {
hw->dev_spec._base.eee_enable = edata->eee_enabled;
adapter->flags |= IGC_FLAG_EEE;
diff --git a/drivers/net/ethernet/intel/igc/igc_leds.c b/drivers/net/ethernet/intel/igc/igc_leds.c
new file mode 100644
index 000000000000..bf240c5daf86
--- /dev/null
+++ b/drivers/net/ethernet/intel/igc/igc_leds.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2024 Linutronix GmbH */
+
+#include <linux/bits.h>
+#include <linux/leds.h>
+#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
+#include <uapi/linux/uleds.h>
+
+#include "igc.h"
+
+#define IGC_NUM_LEDS 3
+
+#define IGC_LEDCTL_LED0_MODE_SHIFT 0
+#define IGC_LEDCTL_LED0_MODE_MASK GENMASK(3, 0)
+#define IGC_LEDCTL_LED0_BLINK BIT(7)
+#define IGC_LEDCTL_LED1_MODE_SHIFT 8
+#define IGC_LEDCTL_LED1_MODE_MASK GENMASK(11, 8)
+#define IGC_LEDCTL_LED1_BLINK BIT(15)
+#define IGC_LEDCTL_LED2_MODE_SHIFT 16
+#define IGC_LEDCTL_LED2_MODE_MASK GENMASK(19, 16)
+#define IGC_LEDCTL_LED2_BLINK BIT(23)
+
+#define IGC_LEDCTL_MODE_ON 0x00
+#define IGC_LEDCTL_MODE_OFF 0x01
+#define IGC_LEDCTL_MODE_LINK_10 0x05
+#define IGC_LEDCTL_MODE_LINK_100 0x06
+#define IGC_LEDCTL_MODE_LINK_1000 0x07
+#define IGC_LEDCTL_MODE_LINK_2500 0x08
+#define IGC_LEDCTL_MODE_ACTIVITY 0x0b
+
+#define IGC_SUPPORTED_MODES \
+ (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK_1000) | \
+ BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_10) | \
+ BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX))
+
+#define IGC_ACTIVITY_MODES \
+ (BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX))
+
+struct igc_led_classdev {
+ struct net_device *netdev;
+ struct led_classdev led;
+ int index;
+};
+
+#define lcdev_to_igc_ldev(lcdev) \
+ container_of(lcdev, struct igc_led_classdev, led)
+
+static void igc_led_select(struct igc_adapter *adapter, int led,
+ u32 *mask, u32 *shift, u32 *blink)
+{
+ switch (led) {
+ case 0:
+ *mask = IGC_LEDCTL_LED0_MODE_MASK;
+ *shift = IGC_LEDCTL_LED0_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED0_BLINK;
+ break;
+ case 1:
+ *mask = IGC_LEDCTL_LED1_MODE_MASK;
+ *shift = IGC_LEDCTL_LED1_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED1_BLINK;
+ break;
+ case 2:
+ *mask = IGC_LEDCTL_LED2_MODE_MASK;
+ *shift = IGC_LEDCTL_LED2_MODE_SHIFT;
+ *blink = IGC_LEDCTL_LED2_BLINK;
+ break;
+ default:
+ *mask = *shift = *blink = 0;
+ netdev_err(adapter->netdev, "Unknown LED %d selected!\n", led);
+ }
+}
+
+static void igc_led_set(struct igc_adapter *adapter, int led, u32 mode,
+ bool blink)
+{
+ u32 shift, mask, blink_bit, ledctl;
+ struct igc_hw *hw = &adapter->hw;
+
+ igc_led_select(adapter, led, &mask, &shift, &blink_bit);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ mutex_lock(&adapter->led_mutex);
+
+ /* Set mode */
+ ledctl = rd32(IGC_LEDCTL);
+ ledctl &= ~mask;
+ ledctl |= mode << shift;
+
+ /* Configure blinking */
+ if (blink)
+ ledctl |= blink_bit;
+ else
+ ledctl &= ~blink_bit;
+ wr32(IGC_LEDCTL, ledctl);
+
+ mutex_unlock(&adapter->led_mutex);
+ pm_runtime_put(&adapter->pdev->dev);
+}
+
+static u32 igc_led_get(struct igc_adapter *adapter, int led)
+{
+ u32 shift, mask, blink_bit, ledctl;
+ struct igc_hw *hw = &adapter->hw;
+
+ igc_led_select(adapter, led, &mask, &shift, &blink_bit);
+
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ mutex_lock(&adapter->led_mutex);
+ ledctl = rd32(IGC_LEDCTL);
+ mutex_unlock(&adapter->led_mutex);
+ pm_runtime_put(&adapter->pdev->dev);
+
+ return (ledctl & mask) >> shift;
+}
+
+static int igc_led_brightness_set_blocking(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode;
+
+ if (brightness)
+ mode = IGC_LEDCTL_MODE_ON;
+ else
+ mode = IGC_LEDCTL_MODE_OFF;
+
+ netdev_dbg(adapter->netdev, "Set brightness for LED %d to mode %u!\n",
+ ldev->index, mode);
+
+ igc_led_set(adapter, ldev->index, mode, false);
+
+ return 0;
+}
+
+static int igc_led_hw_control_is_supported(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ if (flags & ~IGC_SUPPORTED_MODES)
+ return -EOPNOTSUPP;
+
+	/* If Tx and Rx are selected, activity can be offloaded unless some
+	 * other mode is selected as well.
+ */
+ if ((flags & BIT(TRIGGER_NETDEV_TX)) &&
+ (flags & BIT(TRIGGER_NETDEV_RX)) &&
+ !(flags & ~IGC_ACTIVITY_MODES))
+ return 0;
+
+ /* Single Rx or Tx activity is not supported. */
+ if (flags & IGC_ACTIVITY_MODES)
+ return -EOPNOTSUPP;
+
+ /* Only one mode can be active at a given time. */
+ if (flags & (flags - 1))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
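The final check above uses the classic single-bit test: flags & (flags - 1) clears the lowest set bit, so for a nonzero value the result is zero exactly when one bit is set. In isolation:

	unsigned long x = BIT(TRIGGER_NETDEV_LINK_1000);

	WARN_ON(x & (x - 1));		/* one mode set: passes */
	x |= BIT(TRIGGER_NETDEV_LINK_100);
	WARN_ON(!(x & (x - 1)));	/* two modes set: rejected above */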
+
+static int igc_led_hw_control_set(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode = IGC_LEDCTL_MODE_OFF;
+ bool blink = false;
+
+ if (flags & BIT(TRIGGER_NETDEV_LINK_10))
+ mode = IGC_LEDCTL_MODE_LINK_10;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_100))
+ mode = IGC_LEDCTL_MODE_LINK_100;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode = IGC_LEDCTL_MODE_LINK_1000;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_2500))
+ mode = IGC_LEDCTL_MODE_LINK_2500;
+ if ((flags & BIT(TRIGGER_NETDEV_TX)) &&
+ (flags & BIT(TRIGGER_NETDEV_RX)))
+ mode = IGC_LEDCTL_MODE_ACTIVITY;
+
+ netdev_dbg(adapter->netdev, "Set HW control for LED %d to mode %u!\n",
+ ldev->index, mode);
+
+ /* blink is recommended for activity */
+ if (mode == IGC_LEDCTL_MODE_ACTIVITY)
+ blink = true;
+
+ igc_led_set(adapter, ldev->index, mode, blink);
+
+ return 0;
+}
+
+static int igc_led_hw_control_get(struct led_classdev *led_cdev,
+ unsigned long *flags)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+ struct igc_adapter *adapter = netdev_priv(ldev->netdev);
+ u32 mode;
+
+ mode = igc_led_get(adapter, ldev->index);
+
+ switch (mode) {
+ case IGC_LEDCTL_MODE_ACTIVITY:
+ *flags = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+ break;
+ case IGC_LEDCTL_MODE_LINK_10:
+ *flags = BIT(TRIGGER_NETDEV_LINK_10);
+ break;
+ case IGC_LEDCTL_MODE_LINK_100:
+ *flags = BIT(TRIGGER_NETDEV_LINK_100);
+ break;
+ case IGC_LEDCTL_MODE_LINK_1000:
+ *flags = BIT(TRIGGER_NETDEV_LINK_1000);
+ break;
+ case IGC_LEDCTL_MODE_LINK_2500:
+ *flags = BIT(TRIGGER_NETDEV_LINK_2500);
+ break;
+ }
+
+ return 0;
+}
+
+static struct device *igc_led_hw_control_get_device(struct led_classdev *led_cdev)
+{
+ struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev);
+
+ return &ldev->netdev->dev;
+}
+
+static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf,
+ size_t buf_len)
+{
+ snprintf(buf, buf_len, "igc-%x%x-led%d",
+ pci_domain_nr(adapter->pdev->bus),
+ pci_dev_id(adapter->pdev), index);
+}
+
+static void igc_setup_ldev(struct igc_led_classdev *ldev,
+ struct net_device *netdev, int index)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct led_classdev *led_cdev = &ldev->led;
+ char led_name[LED_MAX_NAME_SIZE];
+
+ ldev->netdev = netdev;
+ ldev->index = index;
+
+ igc_led_get_name(adapter, index, led_name, LED_MAX_NAME_SIZE);
+ led_cdev->name = led_name;
+ led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+ led_cdev->max_brightness = 1;
+ led_cdev->brightness_set_blocking = igc_led_brightness_set_blocking;
+ led_cdev->hw_control_trigger = "netdev";
+ led_cdev->hw_control_is_supported = igc_led_hw_control_is_supported;
+ led_cdev->hw_control_set = igc_led_hw_control_set;
+ led_cdev->hw_control_get = igc_led_hw_control_get;
+ led_cdev->hw_control_get_device = igc_led_hw_control_get_device;
+
+ devm_led_classdev_register(&netdev->dev, led_cdev);
+}
+
+int igc_led_setup(struct igc_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &netdev->dev;
+ struct igc_led_classdev *leds;
+ int i;
+
+ mutex_init(&adapter->led_mutex);
+
+ leds = devm_kcalloc(dev, IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ for (i = 0; i < IGC_NUM_LEDS; i++)
+ igc_setup_ldev(leds + i, netdev, i);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index ba8d3fe186ae..2e1cfbd82f4f 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -3385,7 +3385,7 @@ static int igc_flex_filter_select(struct igc_adapter *adapter,
u32 fhftsl;
if (input->index >= MAX_FLEX_FILTER) {
- dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
+ netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n");
return -EINVAL;
}
@@ -3420,7 +3420,6 @@ static int igc_flex_filter_select(struct igc_adapter *adapter,
static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
struct igc_flex_filter *input)
{
- struct device *dev = &adapter->pdev->dev;
struct igc_hw *hw = &adapter->hw;
u8 *data = input->data;
u8 *mask = input->mask;
@@ -3434,7 +3433,7 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
* out early to avoid surprises later.
*/
if (input->length % 8 != 0) {
- dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n");
+ netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n");
return -EINVAL;
}
@@ -3504,8 +3503,8 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
}
wr32(IGC_WUFC, wufc);
- dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
- input->index);
+ netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n",
+ input->index);
return 0;
}
@@ -3577,9 +3576,9 @@ static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
static int igc_add_flex_filter(struct igc_adapter *adapter,
struct igc_nfc_rule *rule)
{
- struct igc_flex_filter flex = { };
struct igc_nfc_filter *filter = &rule->filter;
unsigned int eth_offset, user_offset;
+ struct igc_flex_filter flex = { };
int ret, index;
bool vlan;
@@ -3615,10 +3614,12 @@ static int igc_add_flex_filter(struct igc_adapter *adapter,
ETH_ALEN, NULL);
/* Add VLAN etype */
- if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
- igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
- sizeof(filter->vlan_etype),
- NULL);
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
+ __be16 vlan_etype = cpu_to_be16(filter->vlan_etype);
+
+ igc_flex_filter_add_field(&flex, &vlan_etype, 12,
+ sizeof(vlan_etype), NULL);
+ }
/* Add VLAN TCI */
if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
@@ -5276,7 +5277,7 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
@@ -5302,25 +5303,22 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
- u32 ack, tsauxc, sec, nsec, tsicr;
struct igc_hw *hw = &adapter->hw;
+ u32 tsauxc, sec, nsec, tsicr;
struct ptp_clock_event event;
struct timespec64 ts;
tsicr = rd32(IGC_TSICR);
- ack = 0;
if (tsicr & IGC_TSICR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_SYS_WRAP;
}
if (tsicr & IGC_TSICR_TXTS) {
/* retrieve hardware timestamp */
igc_ptp_tx_tstamp_event(adapter);
- ack |= IGC_TSICR_TXTS;
}
if (tsicr & IGC_TSICR_TT0) {
@@ -5334,7 +5332,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
wr32(IGC_TSAUXC, tsauxc);
adapter->perout[0].start = ts;
spin_unlock(&adapter->tmreg_lock);
- ack |= IGC_TSICR_TT0;
}
if (tsicr & IGC_TSICR_TT1) {
@@ -5348,7 +5345,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
wr32(IGC_TSAUXC, tsauxc);
adapter->perout[1].start = ts;
spin_unlock(&adapter->tmreg_lock);
- ack |= IGC_TSICR_TT1;
}
if (tsicr & IGC_TSICR_AUTT0) {
@@ -5358,7 +5354,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
event.index = 0;
event.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_AUTT0;
}
if (tsicr & IGC_TSICR_AUTT1) {
@@ -5368,11 +5363,7 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
event.index = 1;
event.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_AUTT1;
}
-
- /* acknowledge the interrupts */
- wr32(IGC_TSICR, ack);
}
/**
@@ -6487,7 +6478,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
int cpu = smp_processor_id();
struct netdev_queue *nq;
struct igc_ring *ring;
- int i, drops;
+ int i, nxmit;
if (unlikely(!netif_carrier_ok(dev)))
return -ENETDOWN;
@@ -6503,16 +6494,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
/* Avoid transmit queue timeout since we share it with the slow path */
txq_trans_cond_update(nq);
- drops = 0;
+ nxmit = 0;
for (i = 0; i < num_frames; i++) {
int err;
struct xdp_frame *xdpf = frames[i];
err = igc_xdp_init_tx_descriptor(ring, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (err)
+ break;
+ nxmit++;
}
if (flags & XDP_XMIT_FLUSH)
@@ -6520,7 +6510,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
__netif_tx_unlock(nq);
- return num_frames - drops;
+ return nxmit;
}
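For context on the reworked loop: modern ndo_xdp_xmit() implementations stop at the first descriptor that cannot be set up and return the number of frames actually consumed; freeing the leftovers is the caller's responsibility, which is why the per-frame xdp_return_frame_rx_napi() call disappears from the driver. A simplified sketch of the caller side, modeled on the core's bulk-flush logic (not code from this patch):

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, cnt, frames, flags);
	if (sent < 0)
		sent = 0;
	/* frames the driver did not consume are freed exactly once, here */
	for (i = sent; i < cnt; i++)
		xdp_return_frame(frames[i]);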
static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
@@ -6977,6 +6967,12 @@ static int igc_probe(struct pci_dev *pdev,
pm_runtime_put_noidle(&pdev->dev);
+ if (IS_ENABLED(CONFIG_IGC_LEDS)) {
+ err = igc_led_setup(adapter);
+ if (err)
+ goto err_register;
+ }
+
return 0;
err_register:
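The IS_ENABLED(CONFIG_IGC_LEDS) guard above is the usual kernel alternative to an #ifdef: the condition folds to a compile-time constant, so the dead branch (and its reference to igc_led_setup()) is eliminated when the option is off, while the code is still parsed and type-checked in every configuration. The idiom in general form, with a hypothetical option and helper:

	if (IS_ENABLED(CONFIG_FOO)) {		/* constant-folded: 1 or 0 */
		err = foo_setup(adapter);	/* compiled out when 0, but
						 * still syntax/type checked */
		if (err)
			goto err_register;
	}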
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 7cd8716d2ffa..861f37076861 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -130,11 +130,7 @@ void igc_power_down_phy_copper(struct igc_hw *hw)
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
-
- /* Temporary workaround - should be removed when PHY will implement
- * IEEE registers as properly
- */
- /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
usleep_range(1000, 2000);
}
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index d38c87d7e5e8..e5b893fc5b66 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -12,6 +12,7 @@
#define IGC_MDIC 0x00020 /* MDI Control - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define IGC_VET 0x00038 /* VLAN Ether Type - RW */
+#define IGC_LEDCTL 0x00E00 /* LED Control - RW */
#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b6f0376e42f4..559b443c409f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -949,19 +949,19 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue);
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask);
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue);
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
@@ -1059,7 +1059,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
-s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#ifdef CONFIG_IXGBE_IPSEC
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
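The s32-to-int churn that runs through the remaining ixgbe files is mechanical: these status returns carry 0 or a negative errno, and plain int is the conventional kernel type for that, so the fixed-width s32 typedef adds nothing. A minimal sketch of the calling pattern the type describes:

	int ret = ixgbe_init_hw_generic(hw);	/* 0 on success */
	if (ret)
		return ret;			/* negative errno, e.g. -EIO */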
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 6835d5f18753..283a23150a4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -15,10 +15,10 @@
#define IXGBE_82598_VFT_TBL_SIZE 128
#define IXGBE_82598_RX_PB_SIZE 512
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
/**
@@ -66,7 +66,7 @@ out:
IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}
-static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -93,12 +93,12 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
* not known. Perform the SFP init if necessary.
*
**/
-static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
u16 list_offset, data_offset;
+ int ret_val;
/* Identify the PHY */
phy->ops.identify(hw);
@@ -148,9 +148,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
* Then set pcie completion timeout
*
**/
-static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+static int ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
- s32 ret_val;
+ int ret_val;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -170,7 +170,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
*
* Determines the link capabilities by reading the AUTOC register.
**/
-static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -271,7 +271,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
*
* Enable flow control according to the current settings.
**/
-static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
+static int ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
u32 fctrl_reg;
u32 rmcs_reg;
@@ -411,13 +411,13 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
**/
-static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
+ int status = 0;
u32 autoc_reg;
u32 links_reg;
u32 i;
- s32 status = 0;
/* Restart link */
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -457,7 +457,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
* Function indicates success when phy link is available. If phy is not ready
* within 5 seconds of MAC indicating link, the function returns an error.
**/
-static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+static int ixgbe_validate_link_ready(struct ixgbe_hw *hw)
{
u32 timeout;
u16 an_reg;
@@ -493,7 +493,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
*
* Reads the links register to determine if link is up and the current speed
**/
-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *link_up,
bool link_up_wait_to_complete)
{
@@ -579,7 +579,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
*
* Set the link speed in the AUTOC register and restarts link.
**/
-static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
@@ -624,11 +624,11 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
*
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- s32 status;
+ int status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
@@ -647,15 +647,15 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
* clears all interrupts, performing a PHY reset, and performing a link (MAC)
* reset.
**/
-static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
{
- s32 status;
- s32 phy_status = 0;
- u32 ctrl;
+ int phy_status = 0;
+ u8 analog_val;
u32 gheccr;
- u32 i;
+ int status;
u32 autoc;
- u8 analog_val;
+ u32 ctrl;
+ u32 i;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -781,7 +781,7 @@ mac_reset_top:
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq set index
**/
-static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static int ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -805,7 +805,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq clear index (not used in 82598, but elsewhere)
**/
-static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+static int ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -836,7 +836,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+static int ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
{
u32 regindex;
@@ -881,7 +881,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
*
* Clears the VLAN filter table, and the VMDq index associated with the filter
**/
-static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+static int ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
u32 offset;
u32 vlanbyte;
@@ -905,7 +905,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
*
* Performs read operation to Atlas analog register specified.
**/
-static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+static int ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
u32 atlas_ctl;
@@ -927,7 +927,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
*
* Performs write operation to Atlas analog register specified.
**/
-static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+static int ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
{
u32 atlas_ctl;
@@ -948,13 +948,13 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
*
* Performs 8 byte read operation to SFP module's data over I2C interface.
**/
-static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+static int ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
u8 byte_offset, u8 *eeprom_data)
{
- s32 status = 0;
u16 sfp_addr = 0;
u16 sfp_data = 0;
u16 sfp_stat = 0;
+ int status = 0;
u16 gssr;
u32 i;
@@ -1019,7 +1019,7 @@ out:
*
* Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
**/
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
{
return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
@@ -1034,8 +1034,8 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
**/
-static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *sff8472_data)
+static int ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
{
return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
byte_offset, sff8472_data);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 339e106a5732..cdaf087b4e85 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -21,24 +21,24 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void
ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed);
-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
+static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
@@ -98,10 +98,10 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
}
}
-static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+static int ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
- s32 ret_val;
u16 list_offset, data_offset, data_value;
+ int ret_val;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
@@ -173,10 +173,10 @@ setup_sfp_err:
* prot_autoc_write_82599(). Note, that locked can only be true in cases
* where this function doesn't return an error.
**/
-static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
+static int prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
u32 *reg_val)
{
- s32 ret_val;
+ int ret_val;
*locked = false;
/* If LESM is on then we need to hold the SW/FW semaphore. */
@@ -203,9 +203,9 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
* This part (82599) may need to hold the SW/FW lock around all writes to
* AUTOC. Likewise after a write we need to do a pipeline reset.
**/
-static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+static int prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
{
- s32 ret_val = 0;
+ int ret_val = 0;
/* Blocked by MNG FW so bail */
if (ixgbe_check_reset_blocked(hw))
@@ -237,7 +237,7 @@ out:
return ret_val;
}
-static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -263,11 +263,11 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
* not known. Perform the SFP init if necessary.
*
**/
-static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
+ int ret_val;
u32 esdp;
if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
@@ -322,7 +322,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
*
* Determines the link capabilities by reading the AUTOC register.
**/
-static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -334,7 +334,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*autoneg = true;
return 0;
@@ -500,14 +502,14 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
* Configures link settings based on values in the ixgbe_hw struct.
* Restarts the link. Performs autonegotiation if needed.
**/
-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+ bool autoneg_wait_to_complete)
{
+ bool got_lock = false;
+ int status = 0;
u32 autoc_reg;
u32 links_reg;
u32 i;
- s32 status = 0;
- bool got_lock = false;
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
status = hw->mac.ops.acquire_swfw_sync(hw,
@@ -657,15 +659,15 @@ ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
*
* Implements the Intel SmartSpeed algorithm.
**/
-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- s32 status = 0;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- s32 i, j;
- bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ bool link_up = false;
+ int status = 0;
+ s32 i, j;
/* Set autoneg_advertised value based on input link speed */
hw->phy.autoneg_advertised = 0;
@@ -767,16 +769,15 @@ out:
*
* Set the link speed in the AUTOC register and restarts link.
**/
-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- bool autoneg = false;
- s32 status;
- u32 pma_pmd_1g, link_mode, links_reg, i;
- u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
- u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ u32 pma_pmd_10g_serial, pma_pmd_1g, link_mode, links_reg, i;
+ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ bool autoneg = false;
+ int status;
/* holds the value of AUTOC register at this current point in time */
u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -785,6 +786,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
/* temporary variable used for comparison purposes */
u32 autoc = current_autoc;
+ pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
&autoneg);
@@ -882,11 +885,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
*
* Restarts link on PHY and MAC based on settings passed in.
**/
-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- s32 status;
+ int status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
@@ -905,13 +908,13 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
* and clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
**/
-static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
ixgbe_link_speed link_speed;
- s32 status;
u32 ctrl, i, autoc, autoc2;
- u32 curr_lms;
bool link_up = false;
+ u32 curr_lms;
+ int status;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -1081,7 +1084,7 @@ mac_reset_top:
* @hw: pointer to hardware structure
* @fdircmd: current value of FDIRCMD register
*/
-static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+static int ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
int i;
@@ -1099,12 +1102,12 @@ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
* ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
- int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
u32 fdircmd;
- s32 err;
+ int err;
+ int i;
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
@@ -1212,7 +1215,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
* @fdirctrl: value to write to flow director control register, initially
* contains just the value of the Rx packet buffer allocation
**/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
/*
* Continue setup of fdirctrl register bits:
@@ -1236,7 +1239,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
* @fdirctrl: value to write to flow director control register, initially
* contains just the value of the Rx packet buffer allocation
**/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
/*
* Continue setup of fdirctrl register bits:
@@ -1359,7 +1362,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* Note that the tunnel bit in input must not be set when the hardware
* tunneling support does not exist.
**/
-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue)
@@ -1515,7 +1518,7 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
#define IXGBE_STORE_AS_BE16(_value) __swab16(ntohs((_value)))
-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask)
{
/* mask IPv6 since it is currently not supported */
@@ -1627,12 +1630,12 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
return 0;
}
-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue)
{
u32 fdirport, fdirvlan, fdirhash, fdircmd;
- s32 err;
+ int err;
/* currently IPv6 is not supported, must be programmed with 0 */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
@@ -1690,13 +1693,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
return 0;
}
-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id)
{
u32 fdirhash;
u32 fdircmd;
- s32 err;
+ int err;
/* configure FDIRHASH register */
fdirhash = (__force u32)input->formatted.bkt_hash;
@@ -1734,7 +1737,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
*
* Performs read operation to Omer analog register specified.
**/
-static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+static int ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
u32 core_ctl;
@@ -1756,7 +1759,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
*
* Performs write operation to Omer analog register specified.
**/
-static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+static int ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
u32 core_ctl;
@@ -1776,9 +1779,9 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
* and the generation start_hw function.
* Then performs revision-specific operations, if any.
**/
-static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+static int ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
- s32 ret_val = 0;
+ int ret_val = 0;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -1802,9 +1805,9 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
* If PHY already detected, maintains current PHY type in hw struct,
* otherwise executes the PHY detection routine.
**/
-static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
/* Detect PHY if not unknown - returns success if already detected. */
status = ixgbe_identify_phy_generic(hw);
@@ -1835,7 +1838,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
*
* Enables the Rx DMA unit for 82599
**/
-static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+static int ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
/*
* Workaround for 82599 silicon errata when enabling the Rx datapath.
@@ -1865,12 +1868,12 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
* Return: -EACCES if the FW is not present or if the FW version is
* not supported.
**/
-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
u16 fw_offset, fw_ptp_cfg_offset;
- s32 status = -EACCES;
- u16 offset;
+ int status = -EACCES;
u16 fw_version = 0;
+ u16 offset;
/* firmware check is only necessary for SFI devices */
if (hw->phy.media_type != ixgbe_media_type_fiber)
@@ -1917,7 +1920,7 @@ fw_version_err:
static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
- s32 status;
+ int status;
/* get the offset to the Firmware Module block */
status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
@@ -1956,7 +1959,7 @@ static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
*
* Retrieves 16 bit word(s) read from EEPROM
**/
-static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -1982,7 +1985,7 @@ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
*
* Reads a 16 bit word from the EEPROM
**/
-static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+static int ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
u16 offset, u16 *data)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -2006,11 +2009,11 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
* full pipeline reset. Note - We must hold the SW/FW semaphore before writing
* to AUTOC, so this function assumes the semaphore is held.
**/
-static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
- s32 ret_val;
- u32 anlp1_reg = 0;
u32 i, autoc_reg, autoc2_reg;
+ u32 anlp1_reg = 0;
+ int ret_val;
/* Enable link if disabled in NVM */
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
@@ -2061,12 +2064,12 @@ reset_pipeline_out:
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
**/
-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
- u32 esdp;
- s32 status;
s32 timeout = 200;
+ int status;
+ u32 esdp;
if (hw->phy.qsfp_shared_i2c_bus == true) {
/* Acquire I2C bus ownership. */
@@ -2115,12 +2118,12 @@ release_i2c_access:
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
**/
-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
- u32 esdp;
- s32 status;
s32 timeout = 200;
+ int status;
+ u32 esdp;
if (hw->phy.qsfp_shared_i2c_bus == true) {
/* Acquire I2C bus ownership. */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 2e6e0365154a..3be1bfb16498 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -10,10 +10,10 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+static int ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
u16 count);
@@ -22,15 +22,15 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
- u16 words, u16 *data);
-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
-static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
+static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
/* Base table for registers values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
@@ -111,12 +111,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
*
* Called at init time to set up flow control.
**/
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
+int ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
- s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
- u16 reg_cu = 0;
bool locked = false;
+ int ret_val = 0;
+ u16 reg_cu = 0;
/*
* Validate the requested mode. Strict IEEE mode does not allow
@@ -267,11 +267,11 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
* table, VLAN filter table, calls routine to set up link and flow control
* settings, and leaves transmit and receive units disabled and uninitialized
**/
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+int ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
- s32 ret_val;
- u32 ctrl_ext;
u16 device_caps;
+ u32 ctrl_ext;
+ int ret_val;
/* Set the media type */
hw->phy.media_type = hw->mac.ops.get_media_type(hw);
@@ -330,7 +330,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
* 82599
* X540
**/
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+int ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
u32 i;
@@ -354,9 +354,9 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
* up link and flow control settings, and leaves transmit and receive units
* disabled and uninitialized
**/
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+int ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
/* Reset the hardware */
status = hw->mac.ops.reset_hw(hw);
@@ -380,7 +380,7 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
* Clears all hardware statistics counters by reading them from the hardware
* Statistics counters are clear on read.
**/
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
u16 i = 0;
@@ -489,14 +489,14 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
*
* Reads the part number string from the EEPROM.
**/
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size)
{
- s32 ret_val;
- u16 data;
+ int ret_val;
u16 pba_ptr;
u16 offset;
u16 length;
+ u16 data;
if (pba_num == NULL) {
hw_dbg(hw, "PBA string buffer was null\n");
@@ -599,7 +599,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
* A reset of the adapter must be performed prior to calling this function
* in order for the MAC address to have been loaded from the EEPROM into RAR0
**/
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
u32 rar_high;
u32 rar_low;
@@ -653,7 +653,7 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
*
* Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
**/
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
u16 link_status;
@@ -709,7 +709,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
u32 reg_val;
u16 i;
@@ -759,7 +759,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
* Store the index for the link active LED. This will be used to support
* blinking the LED.
**/
-s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
+int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 led_reg, led_mode;
@@ -800,7 +800,7 @@ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @index: led number to turn on
**/
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
@@ -821,7 +821,7 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
* @hw: pointer to hardware structure
* @index: led number to turn off
**/
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
@@ -844,7 +844,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
u32 eec;
@@ -895,11 +895,11 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
*
* Writes 16 bit word(s) to EEPROM through bit-bang method
**/
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
u16 i, count;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -942,14 +942,14 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
* If ixgbe_eeprom_update_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
- u16 word;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
u16 page_size;
+ int status;
+ u16 word;
u16 i;
- u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
/* Prepare the EEPROM for writing */
status = ixgbe_acquire_eeprom(hw);
@@ -1019,7 +1019,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
* If ixgbe_eeprom_update_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
hw->eeprom.ops.init_params(hw);
@@ -1038,11 +1038,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Reads 16 bit word(s) from EEPROM through bit-bang method
**/
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
u16 i, count;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -1077,12 +1077,12 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
*
* Reads 16 bit word(s) from EEPROM through bit-bang method
**/
-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
- s32 status;
- u16 word_in;
u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ u16 word_in;
+ int status;
u16 i;
/* Prepare the EEPROM for reading */
@@ -1129,7 +1129,7 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
*
* Reads 16 bit value from EEPROM through bit-bang method
**/
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data)
{
hw->eeprom.ops.init_params(hw);
@@ -1149,11 +1149,11 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
*
* Reads 16 bit word(s) from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
+ int status;
u32 eerd;
- s32 status;
u32 i;
hw->eeprom.ops.init_params(hw);
@@ -1189,11 +1189,11 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
* This function is called only when we are writing a new large buffer
* at given offset so the data would be overwritten anyway.
**/
-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset)
{
u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
- s32 status;
+ int status;
u16 i;
for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
@@ -1229,7 +1229,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
}
@@ -1243,11 +1243,11 @@ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
*
* Writes 16 bit word(s) to the EEPROM using the EEWR register.
**/
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data)
{
+ int status;
u32 eewr;
- s32 status;
u16 i;
hw->eeprom.ops.init_params(hw);
@@ -1286,7 +1286,7 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
*
* Write a 16 bit word to the EEPROM using the EEWR register.
**/
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
}
@@ -1299,7 +1299,7 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
* Polls the status bit (bit 1) of the EERD or EEWR to determine when the
* read or write is done respectively.
**/
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
@@ -1325,7 +1325,7 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
* Prepares EEPROM for access using bit-bang method. This function should
* be called before issuing a command to the EEPROM.
**/
-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
u32 eec;
u32 i;
@@ -1371,7 +1371,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
*
* Sets the hardware semaphores so EEPROM access can occur for bit-bang method
**/
-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
u32 timeout = 2000;
u32 i;
@@ -1462,7 +1462,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
* ixgbe_ready_eeprom - Polls for EEPROM ready
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+static int ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
u16 i;
u8 spi_stat_reg;
@@ -1680,7 +1680,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
* ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
* @hw: pointer to hardware structure
**/
-s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -1728,7 +1728,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
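The arithmetic on the line above, worked through (assuming IXGBE_EEPROM_SUM is 0xBABA, its value in ixgbe_type.h): the checksum word is chosen so that all covered EEPROM words plus the checksum itself sum to the constant, modulo 2^16.

	/* If the covered words sum to 0x1234, the stored checksum is
	 * 0xBABA - 0x1234 = 0xA886, and validation recomputes
	 * 0x1234 + 0xA886 = 0xBABA as expected.
	 */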
/**
@@ -1739,12 +1739,12 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/*
* Read the first word from the EEPROM. If this times out or fails, do
@@ -1786,10 +1786,10 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
* ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
* @hw: pointer to hardware structure
**/
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum;
+ int status;
/*
* Read the first word from the EEPROM. If this times out or fails, do
@@ -1823,7 +1823,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
*
* Puts an ethernet address into a receive address register.
**/
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr)
{
u32 rar_low, rar_high;
@@ -1876,7 +1876,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
*
* Clears an ethernet address from a receive address register.
**/
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -1917,7 +1917,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
* of the receive address registers. Clears the multicast table. Assumes
* the receiver is in reset when the routine is called.
**/
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
u32 i;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -1980,7 +1980,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
**/
-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
u32 vector = 0;
@@ -2049,7 +2049,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
* registers for the first multicast addresses, and hashes the rest into the
* multicast table.
**/
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev)
{
struct netdev_hw_addr *ha;
@@ -2091,7 +2091,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
*
* Enables multicast address in RAR and the use of the multicast hash table.
**/
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+int ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
@@ -2108,7 +2108,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
*
* Disables multicast address in RAR and the use of the multicast hash table.
**/
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+int ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
@@ -2124,7 +2124,7 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
*
* Enable flow control according to the current settings.
**/
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
+int ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
u32 mflcn_reg, fccfg_reg;
u32 reg;
@@ -2252,7 +2252,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
* Find the intersection between advertised settings and link partner's
* advertised settings
**/
-s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
if ((!(adv_reg)) || (!(lp_reg)))
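The intersection this helper computes follows the IEEE 802.3 pause-resolution rules: each side advertises a symmetric-pause and an asymmetric-pause bit, and the four bits together decide the outcome. A condensed sketch of the decision, paraphrased from the generic flow-control code with the requested-mode details elided:

	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/* both ends pause in both directions: full flow control
		 * (or rx-only, if that is all the local side requested) */
	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		/* local end only sends pause frames (tx pause) */
	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		/* local end only honors pause frames (rx pause) */
	} else {
		/* no common capability: flow control stays off */
	}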
@@ -2294,10 +2294,10 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
*
* Enable flow control on 1 gig fiber.
**/
-static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
- s32 ret_val;
+ int ret_val;
/*
* On multispeed fiber at 1g, bail out if
@@ -2328,10 +2328,10 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
*
* Enable flow control according to IEEE clause 37.
**/
-static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
u32 links2, anlp1_reg, autoc_reg, links;
- s32 ret_val;
+ int ret_val;
/*
* On backplane, bail out if
@@ -2367,7 +2367,7 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
*
* Enable flow control according to IEEE clause 37.
**/
-static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
u16 technology_ability_reg = 0;
u16 lp_technology_ability_reg = 0;
@@ -2395,7 +2395,7 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
ixgbe_link_speed speed;
- s32 ret_val = -EIO;
+ int ret_val = -EIO;
bool link_up;
/*
@@ -2501,7 +2501,7 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
* bit hasn't caused the primary requests to be disabled, else 0
* is returned signifying primary requests disabled.
**/
-static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
+static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
u32 i, poll;
u16 value;
@@ -2573,7 +2573,7 @@ gio_disable_fail:
* Acquires the SWFW semaphore through the GSSR register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
+int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
u32 gssr = 0;
u32 swmask = mask;
@@ -2641,7 +2641,7 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
*
* The default case requires no protection, so just do the register read.
**/
-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
*locked = false;
*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -2655,7 +2655,7 @@ s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
* @locked: bool to indicate whether the SW/FW lock was already taken by
* previous read.
**/
-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
{
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
return 0;
@@ -2668,7 +2668,7 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
* Stops the receive data path and waits for the HW to internally
* empty the Rx security block.
**/
-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
+int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
{
#define IXGBE_MAX_SECRX_POLL 40
int i;
@@ -2700,7 +2700,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
*
* Enables the receive data path
**/
-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
+int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
{
u32 secrxreg;
@@ -2719,7 +2719,7 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
*
* Enables the Rx DMA unit
**/
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
+int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
if (regval & IXGBE_RXCTRL_RXEN)
hw->mac.ops.enable_rx(hw);
@@ -2734,14 +2734,14 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
* @hw: pointer to hardware structure
* @index: led number to blink
**/
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
- ixgbe_link_speed speed = 0;
- bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ixgbe_link_speed speed = 0;
+ bool link_up = false;
bool locked = false;
- s32 ret_val;
+ int ret_val;
if (index > 3)
return -EINVAL;
@@ -2782,12 +2782,12 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
* @hw: pointer to hardware structure
* @index: led number to stop blinking
**/
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
- u32 autoc_reg = 0;
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
bool locked = false;
- s32 ret_val;
+ u32 autoc_reg = 0;
+ int ret_val;
if (index > 3)
return -EINVAL;
@@ -2821,10 +2821,10 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
* pointer, and returns the value at that location. This is used in both
* get and set mac_addr routines.
**/
-static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
+static int ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+ u16 *san_mac_offset)
{
- s32 ret_val;
+ int ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
@@ -2849,11 +2849,11 @@ static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
* set_lan_id() is called by identify_sfp(), but this cannot be relied
* upon for non-SFP connections, so we must call it here.
**/
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
u16 san_mac_data, san_mac_offset;
+ int ret_val;
u8 i;
- s32 ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
@@ -2942,7 +2942,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
* @rar: receive address register index to disassociate
* @vmdq: VMDq pool index to remove from the rar
**/
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 mpsar_lo, mpsar_hi;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -2993,7 +2993,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @rar: receive address register index to associate with a VMDq index
* @vmdq: VMDq pool index
**/
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
u32 mpsar;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -3026,7 +3026,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* VFs advertised and not 0.
* MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
**/
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
u32 rar = hw->mac.san_mac_rar_index;
@@ -3045,7 +3045,7 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
* ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
* @hw: pointer to hardware structure
**/
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
int i;
@@ -3065,9 +3065,9 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
* return the VLVF index where this VLAN id should be placed
*
**/
-static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
+static int ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
- s32 regindex, first_empty_slot;
+ int regindex, first_empty_slot;
u32 bits;
/* short cut the special case */
@@ -3115,11 +3115,11 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
{
u32 regidx, vfta_delta, vfta, bits;
- s32 vlvf_index;
+ int vlvf_index;
if ((vlan > 4095) || (vind > 63))
return -EINVAL;
@@ -3226,7 +3226,7 @@ vfta_update:
*
* Clears the VLAN filter table, and the VMDq index associated with the filter
**/
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
u32 offset;
@@ -3276,7 +3276,7 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
*
* Reads the links register to determine if link is up and the current speed
**/
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
{
bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw);
@@ -3396,8 +3396,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* This function will read the EEPROM from the alternative SAN MAC address
* block to check for alternative WWNN/WWPN prefix support.
**/
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
+int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+ u16 *wwpn_prefix)
{
u16 offset, caps;
u16 alt_san_mac_blk_offset;
@@ -3494,7 +3494,7 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
* This function will read the EEPROM location for the device capabilities,
* and return the word through device_caps.
**/
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
+int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
@@ -3604,7 +3604,7 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
* by the caller.
**/
-s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
u32 timeout)
{
u32 hicr, i, fwsts;
@@ -3676,15 +3676,15 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
* Communicates with the manageability block. On success return 0
* else return -EIO or -EINVAL.
**/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
u32 length, u32 timeout,
bool return_data)
{
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
struct ixgbe_hic_hdr *hdr = buffer;
- u32 *u32arr = buffer;
u16 buf_len, dword_len;
- s32 status;
+ u32 *u32arr = buffer;
+ int status;
u32 bi;
if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
@@ -3753,13 +3753,13 @@ rel_out:
* else returns -EBUSY when encountering an error acquiring the
* semaphore or -EIO when the command fails.
**/
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, __always_unused u16 len,
__always_unused const char *driver_ver)
{
struct ixgbe_hic_drv_info fw_cmd;
+ int ret_val;
int i;
- s32 ret_val;
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
@@ -3875,10 +3875,10 @@ static const u8 ixgbe_emc_therm_limit[4] = {
*
* Returns error code.
**/
-static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
+static int ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
u16 *ets_offset)
{
- s32 status;
+ int status;
status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
if (status)
@@ -3903,13 +3903,13 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
*
* Returns the thermal sensor data structure
**/
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
+int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
- s32 status;
u16 ets_offset;
- u16 ets_cfg;
u16 ets_sensor;
u8 num_sensors;
+ u16 ets_cfg;
+ int status;
u8 i;
struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
@@ -3959,17 +3959,17 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
* Inits the thermal sensor thresholds according to the NVM map
* and saves off the threshold and location values into mac.thermal_sensor_data
**/
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
+int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
- s32 status;
- u16 ets_offset;
- u16 ets_cfg;
- u16 ets_sensor;
+ struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
u8 low_thresh_delta;
u8 num_sensors;
u8 therm_limit;
+ u16 ets_sensor;
+ u16 ets_offset;
+ u16 ets_cfg;
+ int status;
u8 i;
- struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
@@ -4192,16 +4192,16 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
*
* Set the link speed in the MAC and/or PHY register and restarts link.
*/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
- s32 status = 0;
+ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ bool autoneg, link_up = false;
u32 speedcnt = 0;
+ int status = 0;
u32 i = 0;
- bool autoneg, link_up = false;
/* Mask off requested but non-supported speeds */
status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg);
@@ -4340,8 +4340,8 @@ out:
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed)
{
- s32 status;
u8 rs, eeprom_data;
+ int status;
switch (speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
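The conversions in this file all follow the same kernel convention: a status-returning function uses plain int, with 0 for success and a negative errno for failure, rather than the driver-specific fixed-width s32. A minimal standalone sketch of that convention (demo_read_reg is a hypothetical helper, not a driver function):

#include <errno.h>
#include <stdio.h>

/* Hypothetical helper following the 0-or-negative-errno convention
 * that the s32 -> int conversions above standardize on. */
static int demo_read_reg(unsigned int addr, unsigned int *val)
{
	if (addr > 0xFFFF)
		return -EINVAL;		/* bad address */
	*val = 0x12345678;		/* pretend hardware read */
	return 0;
}

int main(void)
{
	unsigned int v;
	int ret = demo_read_reg(0x10, &v);

	if (ret)
		fprintf(stderr, "read failed: %d\n", ret);
	else
		printf("read 0x%x\n", v);
	return 0;
}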
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 34761e691d52..6493abf189de 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -8,89 +8,89 @@
#include "ixgbe.h"
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
-s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+int ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+int ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+int ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
+int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
-s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
-s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
-s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
+int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
-s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
-s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
-s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
-s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
-s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
-s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
+int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
struct net_device *netdev);
-s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
-s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_fc_generic(struct ixgbe_hw *);
+int ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+int ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
+int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
+int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+int ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+int ixgbe_setup_fc_generic(struct ixgbe_hw *);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
+int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
-s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
-s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
-s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
-s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
-s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
+int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on, bool vlvf_bypass);
-s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
-s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
-s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
+int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
-s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
-s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver, u16 len, const char *str);
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
+int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
u32 timeout, bool return_data);
-s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
-s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
+int ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
u32 (*data)[FW_PHY_ACT_DATA_COUNT]);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
@@ -111,8 +111,8 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT];
#define IXGBE_EMC_DIODE3_DATA 0x2A
#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30
-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
-s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
+int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
+int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
void ixgbe_get_etk_id(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
@@ -121,7 +121,7 @@ void ixgbe_get_orom_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index d26cea5b43bd..502666f28124 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -18,7 +18,7 @@
* @max: max credits by traffic class
* @max_frame: maximum frame size
*/
-static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
+static int ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
__u16 *max, int max_frame)
{
int min_percent = 100;
@@ -59,7 +59,7 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill,
* It should be called only after the rules are checked by
* ixgbe_dcb_check_config().
*/
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
+int ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config,
int max_frame, u8 direction)
{
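ixgbe_ieee_credits() and ixgbe_dcb_calculate_tc_credits() turn per-traffic-class bandwidth percentages into refill/max credit values sized against the maximum frame. A simplified sketch of that kind of scaling, with hypothetical constants and not the driver's exact arithmetic:

/* Simplified credit scaling: the class with the smallest non-zero
 * bandwidth share must still be able to refill one max-size frame.
 * DEMO_CREDIT_QUANTUM and the burst cap are assumptions. */
#define DEMO_CREDIT_QUANTUM	64	/* bytes per credit (assumed) */
#define DEMO_MAX_TCS		8

static void demo_tc_credits(const unsigned char *bw, unsigned short *refill,
			    unsigned short *max, int max_frame)
{
	int min_percent = 100;
	int min_credit, i;

	for (i = 0; i < DEMO_MAX_TCS; i++)
		if (bw[i] && bw[i] < min_percent)
			min_percent = bw[i];

	/* credits needed so the smallest class fits one frame */
	min_credit = (max_frame + DEMO_CREDIT_QUANTUM - 1) / DEMO_CREDIT_QUANTUM;

	for (i = 0; i < DEMO_MAX_TCS; i++) {
		refill[i] = (unsigned short)(bw[i] * min_credit / min_percent);
		max[i] = refill[i] * 8;	/* arbitrary burst cap for the sketch */
	}
}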
@@ -247,7 +247,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
+int ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
u8 pfc_en;
@@ -283,7 +283,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
}
/* Helper routines to abstract HW specifics from DCB netlink ops */
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+int ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -300,7 +300,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
return -EINVAL;
}
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
+int ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
{
__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
__u8 prio_type[IEEE_8021QAZ_MAX_TCS];
@@ -333,7 +333,7 @@ s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
bwg_id, prio_type, ets->prio_tc);
}
-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+int ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
u16 *refill, u16 *max, u8 *bwg_id,
u8 *prio_type, u8 *prio_tc)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 60cd5863bf5e..91788e4c4e19 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -124,15 +124,15 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
+int ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+int ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max);
+int ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
u8 *bwg_id, u8 *prio_type, u8 *tc_prio);
-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+int ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio);
+int ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 379ae747cdce..185c3e5f9837 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -15,10 +15,8 @@
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *prio_type)
+int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *prio_type)
{
u32 reg = 0;
u32 credit_refill = 0;
@@ -75,11 +73,8 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type)
+int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
u32 reg, max_credits;
u8 i;
@@ -124,11 +119,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type)
+int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
u32 reg;
u8 i;
@@ -171,7 +163,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
+int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 fcrtl, reg;
u8 i;
@@ -224,7 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
* Configure queue statistics registers; all queues belonging to the same
* traffic class use a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+static int ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -260,7 +252,7 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index fdca41abb44c..5bf3f13c6953 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -46,27 +46,19 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
+int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
/* DCB hw initialization */
-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 7948849840a5..c61bd9059541 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -17,7 +17,7 @@
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -76,7 +76,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -128,7 +128,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
u16 *refill,
u16 *max,
u8 *bwg_id,
@@ -187,7 +187,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control (PFC) for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
+int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
{
u32 i, j, fcrtl, reg;
u8 max_tc = 0;
@@ -272,7 +272,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
* Configure queue statistics registers; all queues belonging to the same
* traffic class use a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
+static int ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -330,7 +330,7 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index c6f084883cab..f6e5a87c03e3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -70,30 +70,21 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
+int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc);
/* DCB hw initialization */
-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type,
- u8 *prio_tc);
-
-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type);
-
-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
- u16 *refill,
- u16 *max,
- u8 *bwg_id,
- u8 *prio_type,
- u8 *prio_tc);
-
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
+int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type,
+ u8 *prio_tc);
+
+int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
+
+int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type,
+ u8 *prio_tc);
+
+int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type,
u8 *prio_tc);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9a63457712c7..6e6e6f1847b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -349,6 +349,8 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
case ixgbe_sfp_type_1g_lx_core1:
+ case ixgbe_sfp_type_1g_bx_core0:
+ case ixgbe_sfp_type_1g_bx_core1:
ethtool_link_ksettings_add_link_mode(cmd, supported,
FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, advertising,
@@ -459,7 +461,7 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 advertised, old;
- s32 err = 0;
+ int err = 0;
if ((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber)) {
@@ -3326,9 +3328,9 @@ static int ixgbe_get_module_info(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- s32 status;
u8 sff8472_rev, addr_mode;
bool page_swap = false;
+ int status;
if (hw->phy.type == ixgbe_phy_fw)
return -ENXIO;
@@ -3372,7 +3374,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- s32 status = -EFAULT;
+ int status = -EFAULT;
u8 databyte = 0xFF;
int i = 0;
@@ -3403,66 +3405,68 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
static const struct {
ixgbe_link_speed mac_speed;
- u32 supported;
+ u32 link_mode;
} ixgbe_ls_map[] = {
- { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
- { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
- { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
- { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
- { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
+ { IXGBE_LINK_SPEED_10_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_100_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_1GB_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
+ { IXGBE_LINK_SPEED_2_5GB_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT },
+ { IXGBE_LINK_SPEED_10GB_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
};
static const struct {
u32 lp_advertised;
- u32 mac_speed;
+ u32 link_mode;
} ixgbe_lp_map[] = {
- { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
- { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
- { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
- { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
- { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
- { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
+ { FW_PHY_ACT_UD_2_100M_TX_EEE, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_1G_T_EEE, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_T_EEE, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
+ { FW_PHY_ACT_UD_2_1G_KX_EEE, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_KX4_EEE, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT },
+ { FW_PHY_ACT_UD_2_10G_KR_EEE, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
};
static int
-ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
+ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
struct ixgbe_hw *hw = &adapter->hw;
- s32 rc;
+ int rc;
u16 i;
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
if (rc)
return rc;
- edata->lp_advertised = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
if (info[0] & ixgbe_lp_map[i].lp_advertised)
- edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
+ linkmode_set_bit(ixgbe_lp_map[i].link_mode,
+ edata->lp_advertised);
}
- edata->supported = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
- edata->supported |= ixgbe_ls_map[i].supported;
+		linkmode_set_bit(ixgbe_ls_map[i].link_mode,
+ edata->supported);
}
- edata->advertised = 0;
for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
- edata->advertised |= ixgbe_ls_map[i].supported;
+		linkmode_set_bit(ixgbe_ls_map[i].link_mode,
+ edata->advertised);
}
- edata->eee_enabled = !!edata->advertised;
+ edata->eee_enabled = !linkmode_empty(edata->advertised);
edata->tx_lpi_enabled = edata->eee_enabled;
- if (edata->advertised & edata->lp_advertised)
- edata->eee_active = true;
+
+ linkmode_and(common, edata->advertised, edata->lp_advertised);
+ edata->eee_active = !linkmode_empty(common);
return 0;
}
-static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3476,17 +3480,17 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
-static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- struct ethtool_eee eee_data;
- s32 ret_val;
+ struct ethtool_keee eee_data;
+ int ret_val;
if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
return -EOPNOTSUPP;
- memset(&eee_data, 0, sizeof(struct ethtool_eee));
+ memset(&eee_data, 0, sizeof(struct ethtool_keee));
ret_val = ixgbe_get_eee(netdev, &eee_data);
if (ret_val)
@@ -3504,7 +3508,7 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EINVAL;
}
- if (eee_data.advertised != edata->advertised) {
+ if (!linkmode_equal(eee_data.advertised, edata->advertised)) {
e_err(drv,
"Setting EEE advertised speeds is not supported\n");
return -EINVAL;
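The ethtool_eee -> ethtool_keee conversion above replaces u32 speed masks with full link-mode bitmaps, so membership tests and comparisons go through the linkmode helpers (linkmode_set_bit(), linkmode_and(), linkmode_empty(), linkmode_equal()) instead of bitwise arithmetic. A schematic kernel-side sketch of the pattern, using the real helpers from <linux/linkmode.h> around an illustrative function:

#include <linux/ethtool.h>
#include <linux/linkmode.h>

/* Illustrative only: EEE is considered active on the modes both
 * link partners advertise, exactly the linkmode_and() test above. */
static bool demo_eee_active(const struct ethtool_keee *eee)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);

	linkmode_and(common, eee->advertised, eee->lp_advertised);
	return !linkmode_empty(common);
}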
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bd541527c8c7..f985252c8c8d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -153,7 +153,7 @@ MODULE_PARM_DESC(max_vfs,
#endif /* CONFIG_PCI_IOV */
static bool allow_unsupported_sfp;
-module_param(allow_unsupported_sfp, bool, 0);
+module_param(allow_unsupported_sfp, bool, 0444);
MODULE_PARM_DESC(allow_unsupported_sfp,
"Allow unsupported and untested SFP+ modules on 82599-based adapters");
@@ -205,7 +205,7 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
return 0;
}
-static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
+static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 link_status = 0;
@@ -1106,6 +1106,44 @@ static int ixgbe_tx_maxrate(struct net_device *netdev,
}
/**
+ * ixgbe_update_tx_ring_stats - Update Tx ring specific counters
+ * @tx_ring: ring to update
+ * @q_vector: queue vector ring belongs to
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes)
+{
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += bytes;
+ tx_ring->stats.packets += pkts;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += bytes;
+ q_vector->tx.total_packets += pkts;
+}
+
+/**
+ * ixgbe_update_rx_ring_stats - Update Rx ring specific counters
+ * @rx_ring: ring to update
+ * @q_vector: queue vector ring belongs to
+ * @pkts: number of processed packets
+ * @bytes: number of processed bytes
+ */
+void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes)
+{
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.bytes += bytes;
+ rx_ring->stats.packets += pkts;
+ u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_bytes += bytes;
+ q_vector->rx.total_packets += pkts;
+}
+
+/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
@@ -1207,12 +1245,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
i += tx_ring->count;
tx_ring->next_to_clean = i;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
+ ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+ total_bytes);
adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
@@ -2429,12 +2463,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
ixgbe_xdp_ring_update_tail_locked(ring);
}
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- q_vector->rx.total_packets += total_rx_packets;
- q_vector->rx.total_bytes += total_rx_bytes;
+ ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+ total_rx_bytes);
return total_rx_packets;
}
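The new ixgbe_update_tx_ring_stats()/ixgbe_update_rx_ring_stats() helpers keep the u64_stats_update_begin()/u64_stats_update_end() writer protocol in one place; 64-bit counters protected this way must be read back with the matching fetch/retry loop so they stay consistent on 32-bit systems. A sketch of that reader side, with an illustrative ring-stats layout:

#include <linux/u64_stats_sync.h>

struct demo_ring_stats {		/* illustrative layout */
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void demo_read_stats(const struct demo_ring_stats *s,
			    u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}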
@@ -2939,8 +2969,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
- u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -7809,7 +7839,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- s32 err;
+ int err;
/* not searching for SFP so there is nothing to do here */
if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
@@ -10205,7 +10235,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
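skb_network_offset() is the canonical spelling of the pointer difference it replaces; both compute the MAC header length as the distance from the start of the packet data to the network header. A minimal equivalence sketch:

#include <linux/skbuff.h>

/* Both expressions compute the MAC header length; the accessor
 * states the intent without open-coded pointer arithmetic. */
static unsigned int demo_mac_hdr_len(const struct sk_buff *skb)
{
	return skb_network_offset(skb);	/* == skb_network_header(skb) - skb->data */
}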
@@ -10525,6 +10555,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
}
/**
+ * ixgbe_irq_disable_single - Disable single IRQ vector
+ * @adapter: adapter structure
+ * @ring: ring index
+ **/
+static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 qmask = BIT_ULL(ring);
+ u32 mask;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = qmask & IXGBE_EIMC_RTX_QUEUE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ mask = (qmask & 0xFFFFFFFF);
+ if (mask)
+			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
+ mask = (qmask >> 32);
+ if (mask)
+			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ synchronize_irq(adapter->msix_entries[ring].vector);
+ else
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
* ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
* @adapter: adapter structure
* @ring: ring index
@@ -10540,6 +10608,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
+ ixgbe_irq_disable_single(adapter, ring);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
ixgbe_disable_txr(adapter, tx_ring);
if (xdp_ring)
ixgbe_disable_txr(adapter, xdp_ring);
@@ -10548,9 +10621,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
if (xdp_ring)
synchronize_rcu();
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_disable(&rx_ring->q_vector->napi);
-
ixgbe_clean_tx_ring(tx_ring);
if (xdp_ring)
ixgbe_clean_tx_ring(xdp_ring);
@@ -10578,9 +10648,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_enable(&rx_ring->q_vector->napi);
-
ixgbe_configure_tx_ring(adapter, tx_ring);
if (xdp_ring)
ixgbe_configure_tx_ring(adapter, xdp_ring);
@@ -10589,6 +10656,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
if (xdp_ring)
clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+ ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
+ IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**
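Taken together, the hunks above make teardown the mirror image of bring-up: mask and synchronize the ring's vector, disable NAPI, then quiesce and clean the rings; on enable, configure the rings first and only then re-enable NAPI and unmask the vector. A schematic summary under those assumptions (driver function names from the patch, ring bodies elided):

/* Schematic ordering only; error paths and XDP details elided. */
static void demo_ring_disable(struct ixgbe_adapter *adapter, int ring)
{
	ixgbe_irq_disable_single(adapter, ring);	/* mask + synchronize_irq() */
	napi_disable(&adapter->rx_ring[ring]->q_vector->napi);
	/* ... disable and clean Tx/Rx/XDP Tx rings ... */
}

static void demo_ring_enable(struct ixgbe_adapter *adapter, int ring)
{
	/* ... configure Tx/Rx/XDP Tx rings ... */
	napi_enable(&adapter->rx_ring[ring]->q_vector->napi);
	ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
	IXGBE_WRITE_FLUSH(&adapter->hw);
}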
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index fe7ef5773369..d67d77e5dacc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -15,7 +15,7 @@
*
* returns SUCCESS if it successfully read message from buffer
**/
-s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+int ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -38,7 +38,7 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
*
* returns SUCCESS if it successfully copied message into the buffer
**/
-s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+int ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -58,7 +58,7 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -75,7 +75,7 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -92,7 +92,7 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
-s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+int ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
@@ -109,7 +109,7 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if it successfully received a message notification
**/
-static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+static int ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
@@ -134,7 +134,7 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
*
* returns SUCCESS if it successfully received a message acknowledgement
**/
-static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+static int ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
@@ -162,11 +162,11 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
-static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val;
+ int ret_val;
if (!mbx->ops)
return -EIO;
@@ -189,11 +189,11 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
-static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+static int ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val;
+ int ret_val;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops || !mbx->timeout)
@@ -208,7 +208,7 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
return ixgbe_poll_for_ack(hw, mbx_id);
}
-static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+static int ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
@@ -227,9 +227,9 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
{
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ int index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
@@ -248,9 +248,9 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
{
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ int index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
u32 reg_offset = (vf_number < 32) ? 0 : 1;
u32 vf_shift = vf_number % 32;
@@ -305,7 +305,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* return SUCCESS if we obtained the mailbox lock
**/
-static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+static int ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
{
u32 p2v_mailbox;
@@ -329,10 +329,10 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
*
* returns SUCCESS if it successfully copied message into the buffer
**/
-static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
- s32 ret_val;
+ int ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
@@ -368,10 +368,10 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
* memory buffer. The presumption is that the caller knows that there was
* a message due to a VF request so no polling for message is needed.
**/
-static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+static int ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
- s32 ret_val;
+ int ret_val;
u16 i;
/* lock the mailbox to prevent pf/vf race condition */
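ixgbe_poll_for_msg() and ixgbe_poll_for_ack() both implement the same bounded poll: spin on a check callback for at most mbx->timeout iterations, sleeping mbx->usec_delay microseconds between attempts. A simplified sketch of that loop, with the check parameterized for illustration (field names as in struct ixgbe_mbx_info):

#include <linux/delay.h>

/* Simplified: returns 0 once check() reports the event, -EIO on
 * timeout. The real functions call mbx->ops->check_for_msg()/ack(). */
static int demo_mbx_poll(struct ixgbe_hw *hw, u16 mbx_id,
			 int (*check)(struct ixgbe_hw *, u16))
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	int countdown = mbx->timeout;

	while (countdown && check(hw, mbx_id)) {
		countdown--;
		udelay(mbx->usec_delay);
	}

	return countdown ? 0 : -EIO;
}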
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 6434c190e7a4..bd205306934b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -96,11 +96,11 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
-s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+int ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+int ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+int ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+int ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+int ixgbe_check_for_rst(struct ixgbe_hw *, u16);
#ifdef CONFIG_PCI_IOV
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index f28140a05f09..07eaa3c3f4d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -11,19 +11,19 @@
static void ixgbe_i2c_start(struct ixgbe_hw *hw);
static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
+static int ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
+static int ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
+static int ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
+static int ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
+static int ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
+static int ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
-static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
+static int ixgbe_get_phy_id(struct ixgbe_hw *hw);
+static int ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
/**
* ixgbe_out_i2c_byte_ack - Send I2C byte with ack
@@ -32,9 +32,9 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
*
* Returns an error code on error.
**/
-static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
+static int ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
{
- s32 status;
+ int status;
status = ixgbe_clock_out_i2c_byte(hw, byte);
if (status)
@@ -49,9 +49,9 @@ static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
*
* Returns an error code on error.
**/
-static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
+static int ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
{
- s32 status;
+ int status;
status = ixgbe_clock_in_i2c_byte(hw, byte);
if (status)
@@ -85,7 +85,7 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
*
* Returns an error code on error.
*/
-s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+int ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -163,7 +163,7 @@ fail:
*
* Returns an error code on error.
*/
-s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
+int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
@@ -260,7 +260,7 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
*
* Determines the physical layer module found on the current adapter.
**/
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
u32 status = -EFAULT;
u32 phy_addr;
@@ -332,11 +332,11 @@ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
**/
-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_id(struct ixgbe_hw *hw)
{
- s32 status;
u16 phy_id_high = 0;
u16 phy_id_low = 0;
+ int status;
status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
&phy_id_high);
@@ -394,11 +394,11 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
* ixgbe_reset_phy_generic - Performs a PHY reset
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+int ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
u32 i;
u16 ctrl = 0;
- s32 status = 0;
+ int status = 0;
if (hw->phy.type == ixgbe_phy_unknown)
status = ixgbe_identify_phy_generic(hw);
@@ -470,8 +470,8 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
*
* Reads a value from a specified PHY register without the SWFW lock
**/
-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 *phy_data)
+int ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
{
u32 i, data, command;
@@ -546,11 +546,11 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* @device_type: 5 bit device type
* @phy_data: Pointer to read data from PHY register
**/
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
- s32 status;
u32 gssr = hw->phy.phy_semaphore_mask;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
@@ -571,8 +571,8 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 5 bit device type
* @phy_data: Data to write to the PHY register
**/
-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
+int ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 phy_data)
{
u32 i, command;
@@ -644,11 +644,11 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 5 bit device type
* @phy_data: Data to write to the PHY register
**/
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
- s32 status;
u32 gssr = hw->phy.phy_semaphore_mask;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
@@ -668,7 +668,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
* @hw: pointer to hardware structure
* @cmd: command register value to write
**/
-static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
+static int ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
{
IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);
@@ -684,11 +684,11 @@ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
int regnum, u32 gssr)
{
u32 hwaddr, cmd;
- s32 data;
+ int data;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -718,11 +718,11 @@ mii_bus_read_done:
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
int devad, int regnum, u32 gssr)
{
u32 hwaddr, cmd;
- s32 data;
+ int data;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -756,11 +756,11 @@ mii_bus_read_done:
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
int regnum, u16 val, u32 gssr)
{
u32 hwaddr, cmd;
- s32 err;
+ int err;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -787,12 +787,12 @@ static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
+static int ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
int devad, int regnum, u16 val,
u32 gssr)
{
u32 hwaddr, cmd;
- s32 err;
+ int err;
if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
return -EBUSY;
@@ -821,7 +821,7 @@ mii_bus_write_done:
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
+static int ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
@@ -837,7 +837,7 @@ static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
+static int ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -854,7 +854,7 @@ static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
+static int ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -872,7 +872,7 @@ static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
+static int ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -889,7 +889,7 @@ static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -907,7 +907,7 @@ static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
* @devad: device address to read
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
int devad, int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -925,7 +925,7 @@ static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -944,7 +944,7 @@ static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
+static int ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
int devad, int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
@@ -1023,13 +1023,13 @@ out:
*
* ixgbe_mii_bus_init initializes a mii_bus structure in adapter
**/
-s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
+int ixgbe_mii_bus_init(struct ixgbe_hw *hw)
{
- s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
- s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum);
- s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
+ int (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ int (*read_c22)(struct mii_bus *bus, int addr, int regnum);
+ int (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
u16 val);
- s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
+ int (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
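The function-pointer locals switch to int because struct mii_bus declares its read/write ops (and the C45 variants) as returning int; the s32 versions only matched by virtue of s32 being a typedef of int. A schematic sketch of the wiring those pointers feed into (mii_bus fields as in <linux/phy.h>; the callbacks and setup function are hypothetical):

#include <linux/phy.h>

/* Hypothetical callbacks with the int-returning signatures that
 * struct mii_bus expects; the wiring below is the point. */
static int demo_read_c22(struct mii_bus *bus, int addr, int regnum);
static int demo_write_c22(struct mii_bus *bus, int addr, int regnum, u16 val);
static int demo_read_c45(struct mii_bus *bus, int addr, int devad, int regnum);
static int demo_write_c45(struct mii_bus *bus, int addr, int devad,
			  int regnum, u16 val);

static int demo_mii_bus_setup(struct device *dev, void *priv)
{
	struct mii_bus *bus = devm_mdiobus_alloc(dev);

	if (!bus)
		return -ENOMEM;

	bus->name = "demo-mdio";
	bus->priv = priv;
	bus->read = demo_read_c22;
	bus->write = demo_write_c22;
	bus->read_c45 = demo_read_c45;
	bus->write_c45 = demo_write_c45;

	return devm_mdiobus_register(dev, bus);
}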
@@ -1095,12 +1095,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
*
* Restart autonegotiation and PHY and waits for completion.
**/
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
- s32 status = 0;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
- bool autoneg = false;
ixgbe_link_speed speed;
+ bool autoneg = false;
+ int status = 0;
ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
@@ -1173,7 +1173,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: unused
**/
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+int ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
@@ -1214,10 +1214,10 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
* Determines the supported link capabilities by reading the PHY auto
* negotiation register.
*/
-static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
+static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
{
u16 speed_ability;
- s32 status;
+ int status;
status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
&speed_ability);
@@ -1253,11 +1253,11 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
* @speed: pointer to link speed
* @autoneg: boolean auto-negotiation value
*/
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+int ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
- s32 status = 0;
+ int status = 0;
*autoneg = true;
if (!hw->phy.speeds_supported)
@@ -1276,15 +1276,15 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
* Reads the VS1 register to determine if link is up and the current speed for
* the PHY.
**/
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up)
{
- s32 status;
- u32 time_out;
u32 max_time_out = 10;
- u16 phy_link = 0;
u16 phy_speed = 0;
+ u16 phy_link = 0;
u16 phy_data = 0;
+ u32 time_out;
+ int status;
/* Initialize speed and link to default case */
*link_up = false;
@@ -1326,7 +1326,7 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* it is called via a function pointer that could call other
* functions that could return an error.
**/
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+int ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
bool autoneg = false;
@@ -1399,13 +1399,13 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
* ixgbe_reset_phy_nl - Performs a PHY reset
* @hw: pointer to hardware structure
**/
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+int ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
{
u16 phy_offset, control, eword, edata, block_crc;
- bool end_data = false;
u16 list_offset, data_offset;
+ bool end_data = false;
u16 phy_data = 0;
- s32 ret_val;
+ int ret_val;
u32 i;
/* Blocked by MNG FW so bail */
@@ -1506,7 +1506,7 @@ err_eeprom:
*
* Determines HW type and calls appropriate function.
**/
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_module_generic(struct ixgbe_hw *hw)
{
switch (hw->mac.ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
@@ -1527,19 +1527,20 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
*
* Searches for and identifies the SFP module and assigns appropriate PHY type.
**/
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
struct ixgbe_adapter *adapter = hw->back;
- s32 status;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u8 bitrate_nominal = 0;
+ u8 comp_codes_10g = 0;
+ u8 comp_codes_1g = 0;
+ u16 enforce_sfp = 0;
u32 vendor_oui = 0;
- enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
u8 identifier = 0;
- u8 comp_codes_1g = 0;
- u8 comp_codes_10g = 0;
- u8 oui_bytes[3] = {0, 0, 0};
u8 cable_tech = 0;
u8 cable_spec = 0;
- u16 enforce_sfp = 0;
+ int status;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
@@ -1576,7 +1577,12 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
+ if (status)
+ goto err_read_i2c_eeprom;
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_BITRATE_NOMINAL,
+ &bitrate_nominal);
if (status)
goto err_read_i2c_eeprom;
@@ -1659,6 +1665,18 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_lx_core1;
+ /* Support only Ethernet 1000BASE-BX10 by checking the Bit Rate
+ * Nominal value. Per SFF-8472 convention, 1.25 Gb/s is rounded
+ * up to 0Dh (13 in units of 100 MBd) for 1000BASE-BX.
+ */
+ } else if ((comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE) &&
+ (bitrate_nominal == 0xD)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
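Condensed, the new detection reduces to the following predicate, an illustrative sketch reusing the defines this patch adds to ixgbe_phy.h (sfp_is_1000base_bx is a hypothetical helper name):

static bool sfp_is_1000base_bx(u8 comp_codes_1g, u8 bitrate_nominal)
{
	/* SFF-8472 byte 12 gives the nominal bit rate in units of
	 * 100 MBd, so 1.25 GBd is encoded as 13 (0xD) by convention.
	 */
	return (comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE) &&
	       bitrate_nominal == 0xD;
}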
@@ -1747,7 +1765,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
return -EOPNOTSUPP;
}
@@ -1763,7 +1783,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel)
return 0;
@@ -1792,10 +1814,10 @@ err_read_i2c_eeprom:
*
* Searches for and identifies the QSFP module and assigns appropriate PHY type
**/
-static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+static int ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
{
struct ixgbe_adapter *adapter = hw->back;
- s32 status;
+ int status;
u32 vendor_oui = 0;
enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
u8 identifier = 0;
@@ -1975,7 +1997,7 @@ err_read_i2c_eeprom:
* Checks the MAC's EEPROM to see if it supports a given SFP+ module type;
* if so, it returns the offsets to the PHY init sequence block.
**/
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset)
{
@@ -1999,12 +2021,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core0)
+ sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core1)
+ sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
@@ -2065,7 +2089,7 @@ err_phy:
*
* Performs byte read operation to SFP module's EEPROM over I2C interface.
**/
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
@@ -2081,7 +2105,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs byte read operation to SFP module's SFF-8472 data over I2C
**/
-s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
@@ -2097,7 +2121,7 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
*
* Performs byte write operation to SFP module's EEPROM over I2C interface.
**/
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data)
{
return hw->phy.ops.write_i2c_byte(hw, byte_offset,
@@ -2131,14 +2155,14 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data, bool lock)
{
- s32 status;
- u32 max_retry = 10;
- u32 retry = 0;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 max_retry = 10;
bool nack = true;
+ u32 retry = 0;
+ int status;
if (hw->mac.type >= ixgbe_mac_X550)
max_retry = 3;
@@ -2221,7 +2245,7 @@ fail:
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2238,7 +2262,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte read operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data)
{
return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2256,13 +2280,13 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
+static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data, bool lock)
{
- s32 status;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
u32 max_retry = 1;
u32 retry = 0;
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ int status;
if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return -EBUSY;
@@ -2324,7 +2348,7 @@ fail:
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2341,7 +2365,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface at
* a specified device address.
*/
-s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data)
{
return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
@@ -2422,10 +2446,10 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
*
* Clocks in one byte data via I2C data/clock
**/
-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
+static int ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
{
- s32 i;
bool bit = false;
+ int i;
*data = 0;
for (i = 7; i >= 0; i--) {
@@ -2443,12 +2467,12 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
*
* Clocks out one byte data via I2C data/clock
**/
-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
+static int ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
{
- s32 status;
- s32 i;
- u32 i2cctl;
bool bit = false;
+ int status;
+ u32 i2cctl;
+ int i;
for (i = 7; i >= 0; i--) {
bit = (data >> i) & 0x1;
@@ -2474,14 +2498,14 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
*
* Clocks in/out one bit via I2C data/clock
**/
-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+static int ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
- u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
- s32 status = 0;
- u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
u32 timeout = 10;
bool ack = true;
+ int status = 0;
+ u32 i = 0;
if (data_oe_bit) {
i2cctl |= IXGBE_I2C_DATA_OUT(hw);
@@ -2525,7 +2549,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
*
* Clocks in one bit via I2C data/clock
**/
-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
+static int ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
@@ -2559,10 +2583,10 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
*
* Clocks out one bit via I2C data/clock
**/
-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+static int ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
{
- s32 status;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
+ int status;
status = ixgbe_set_i2c_data(hw, &i2cctl, data);
if (status == 0) {
@@ -2647,7 +2671,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
* Sets the I2C data bit
* Asserts the I2C data output enable on X550 hardware.
**/
-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+static int ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
@@ -2769,7 +2793,7 @@ bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @on: true for on, false for off
**/
-s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
+int ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
{
u32 status;
u16 reg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index ef72729d7c93..14aa2ca51f70 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -17,6 +17,7 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_BITRATE_NOMINAL 0xC
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
#define IXGBE_SFF_SFF_8472_SWAP 0x5C
#define IXGBE_SFF_SFF_8472_COMP 0x5E
@@ -39,6 +40,7 @@
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
+#define IXGBE_SFF_BASEBX10_CAPABLE 0x64
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
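A worked example for the new IXGBE_SFF_BITRATE_NOMINAL field (SFF-8472 byte 0x0C): the byte counts units of 100 MBd, so a 1.25 GBd 1000BASE-BX module encodes

	1.25 GBd = 1250 MBd;  1250 / 100 = 12.5, rounded up to 13 = 0x0D

which is the value the identification code above compares against.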
@@ -121,57 +123,57 @@
/* SFP+ SFF-8472 Compliance code */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
-s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw);
+int ixgbe_mii_bus_init(struct ixgbe_hw *hw);
-s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+int ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+int ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
-s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+int ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
-s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
-s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+int ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
-s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
+int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
-s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
+int ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
-s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
-s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
-s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+int ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+int ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
+int ixgbe_identify_module_generic(struct ixgbe_hw *hw);
+int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
-s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
-s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
-s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+int ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
-s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+int ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
u16 *val, bool lock);
-s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
+int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg,
u16 val, bool lock);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 7299a830f6e4..fcfd0a075eee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -492,7 +492,7 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf
struct net_device *dev = adapter->netdev;
int pf_max_frame = dev->mtu + ETH_HLEN;
u32 reg_offset, vf_shift, vfre;
- s32 err = 0;
+ int err = 0;
#ifdef CONFIG_FCOE
if (dev->features & NETIF_F_FCOE_MTU)
@@ -775,7 +775,7 @@ static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
- s32 retval;
+ int retval;
ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
@@ -1254,7 +1254,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
struct ixgbe_hw *hw = &adapter->hw;
- s32 retval;
+ int retval;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -1418,7 +1418,7 @@ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- s32 retval;
+ int retval;
if (vf >= adapter->num_vfs)
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index f1f69ce67420..78deea5ec536 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -46,4 +46,11 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
int ixgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
+void ixgbe_update_tx_ring_stats(struct ixgbe_ring *tx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes);
+void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
+ struct ixgbe_q_vector *q_vector, u64 pkts,
+ u64 bytes);
+
#endif /* #define _IXGBE_TXRX_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 61b9774b3d31..ed440dd0c4f9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3210,6 +3210,9 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_1g_lx_core0 = 13,
ixgbe_sfp_type_1g_lx_core1 = 14,
+ ixgbe_sfp_type_1g_bx_core0 = 15,
+ ixgbe_sfp_type_1g_bx_core1 = 16,
+
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
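The new enumerators follow the existing core0/core1 pairing, where the variant is picked by the LAN port the module sits behind. A minimal sketch (bx_sfp_type_for_port is a hypothetical helper):

static enum ixgbe_sfp_type bx_sfp_type_for_port(struct ixgbe_hw *hw)
{
	return hw->bus.lan_id == 0 ? ixgbe_sfp_type_1g_bx_core0 :
				     ixgbe_sfp_type_1g_bx_core1;
}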
@@ -3393,50 +3396,50 @@ struct ixgbe_hw;
/* Function pointer table */
struct ixgbe_eeprom_operations {
- s32 (*init_params)(struct ixgbe_hw *);
- s32 (*read)(struct ixgbe_hw *, u16, u16 *);
- s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
- s32 (*write)(struct ixgbe_hw *, u16, u16);
- s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
- s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
- s32 (*update_checksum)(struct ixgbe_hw *);
- s32 (*calc_checksum)(struct ixgbe_hw *);
+ int (*init_params)(struct ixgbe_hw *);
+ int (*read)(struct ixgbe_hw *, u16, u16 *);
+ int (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ int (*write)(struct ixgbe_hw *, u16, u16);
+ int (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+ int (*validate_checksum)(struct ixgbe_hw *, u16 *);
+ int (*update_checksum)(struct ixgbe_hw *);
+ int (*calc_checksum)(struct ixgbe_hw *);
};
struct ixgbe_mac_operations {
- s32 (*init_hw)(struct ixgbe_hw *);
- s32 (*reset_hw)(struct ixgbe_hw *);
- s32 (*start_hw)(struct ixgbe_hw *);
- s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ int (*init_hw)(struct ixgbe_hw *);
+ int (*reset_hw)(struct ixgbe_hw *);
+ int (*start_hw)(struct ixgbe_hw *);
+ int (*clear_hw_cntrs)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
- s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
- s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
- s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
- s32 (*stop_adapter)(struct ixgbe_hw *);
- s32 (*get_bus_info)(struct ixgbe_hw *);
+ int (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ int (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+ int (*get_device_caps)(struct ixgbe_hw *, u16 *);
+ int (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+ int (*stop_adapter)(struct ixgbe_hw *);
+ int (*get_bus_info)(struct ixgbe_hw *);
void (*set_lan_id)(struct ixgbe_hw *);
- s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
- s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
- s32 (*setup_sfp)(struct ixgbe_hw *);
- s32 (*disable_rx_buff)(struct ixgbe_hw *);
- s32 (*enable_rx_buff)(struct ixgbe_hw *);
- s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
- s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
+ int (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
+ int (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
+ int (*setup_sfp)(struct ixgbe_hw *);
+ int (*disable_rx_buff)(struct ixgbe_hw *);
+ int (*enable_rx_buff)(struct ixgbe_hw *);
+ int (*enable_rx_dma)(struct ixgbe_hw *, u32);
+ int (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
void (*release_swfw_sync)(struct ixgbe_hw *, u32);
void (*init_swfw_sync)(struct ixgbe_hw *);
- s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
- s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
+ int (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+ int (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
void (*stop_link_on_d3)(struct ixgbe_hw *);
- s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
- s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ int (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ int (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed);
@@ -3444,38 +3447,38 @@ struct ixgbe_mac_operations {
void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
/* LED */
- s32 (*led_on)(struct ixgbe_hw *, u32);
- s32 (*led_off)(struct ixgbe_hw *, u32);
- s32 (*blink_led_start)(struct ixgbe_hw *, u32);
- s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
- s32 (*init_led_link_act)(struct ixgbe_hw *);
+ int (*led_on)(struct ixgbe_hw *, u32);
+ int (*led_off)(struct ixgbe_hw *, u32);
+ int (*blink_led_start)(struct ixgbe_hw *, u32);
+ int (*blink_led_stop)(struct ixgbe_hw *, u32);
+ int (*init_led_link_act)(struct ixgbe_hw *);
/* RAR, Multicast, VLAN */
- s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
- s32 (*clear_rar)(struct ixgbe_hw *, u32);
- s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
- s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
- s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
- s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*enable_mc)(struct ixgbe_hw *);
- s32 (*disable_mc)(struct ixgbe_hw *);
- s32 (*clear_vfta)(struct ixgbe_hw *);
- s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
- s32 (*init_uta_tables)(struct ixgbe_hw *);
+ int (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+ int (*clear_rar)(struct ixgbe_hw *, u32);
+ int (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ int (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
+ int (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+ int (*init_rx_addrs)(struct ixgbe_hw *);
+ int (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+ int (*enable_mc)(struct ixgbe_hw *);
+ int (*disable_mc)(struct ixgbe_hw *);
+ int (*clear_vfta)(struct ixgbe_hw *);
+ int (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
+ int (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* Flow Control */
- s32 (*fc_enable)(struct ixgbe_hw *);
- s32 (*setup_fc)(struct ixgbe_hw *);
+ int (*fc_enable)(struct ixgbe_hw *);
+ int (*setup_fc)(struct ixgbe_hw *);
void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
+ int (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
const char *);
- s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
- s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
+ int (*get_thermal_sensor_data)(struct ixgbe_hw *);
+ int (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
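All of these ops are invoked indirectly, so the conversion keeps error propagation uniform across the call chain. A minimal sketch of the consuming side (example_enable_fc is a hypothetical name):

static int example_enable_fc(struct ixgbe_hw *hw)
{
	if (!hw->mac.ops.fc_enable)
		return -EOPNOTSUPP;

	return hw->mac.ops.fc_enable(hw);	/* 0 or negative errno */
}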
@@ -3484,47 +3487,47 @@ struct ixgbe_mac_operations {
void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* DMA Coalescing */
- s32 (*dmac_config)(struct ixgbe_hw *hw);
- s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
- s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
- s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
- s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+ int (*dmac_config)(struct ixgbe_hw *hw);
+ int (*dmac_update_tcs)(struct ixgbe_hw *hw);
+ int (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ int (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+ int (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
};
struct ixgbe_phy_operations {
- s32 (*identify)(struct ixgbe_hw *);
- s32 (*identify_sfp)(struct ixgbe_hw *);
- s32 (*init)(struct ixgbe_hw *);
- s32 (*reset)(struct ixgbe_hw *);
- s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
- s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
- s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
- s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
- s32 (*setup_link)(struct ixgbe_hw *);
- s32 (*setup_internal_link)(struct ixgbe_hw *);
- s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
- s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
- s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
- s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
- s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
- s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
- s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+ int (*identify)(struct ixgbe_hw *);
+ int (*identify_sfp)(struct ixgbe_hw *);
+ int (*init)(struct ixgbe_hw *);
+ int (*reset)(struct ixgbe_hw *);
+ int (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+ int (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ int (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+ int (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
+ int (*setup_link)(struct ixgbe_hw *);
+ int (*setup_internal_link)(struct ixgbe_hw *);
+ int (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
+ int (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
+ int (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+ int (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ int (*read_i2c_sff8472)(struct ixgbe_hw *, u8, u8 *);
+ int (*read_i2c_eeprom)(struct ixgbe_hw *, u8, u8 *);
+ int (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
bool (*check_overtemp)(struct ixgbe_hw *);
- s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
- s32 (*enter_lplu)(struct ixgbe_hw *);
- s32 (*handle_lasi)(struct ixgbe_hw *hw, bool *);
- s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ int (*set_phy_power)(struct ixgbe_hw *, bool on);
+ int (*enter_lplu)(struct ixgbe_hw *);
+ int (*handle_lasi)(struct ixgbe_hw *hw, bool *);
+ int (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
u8 *value);
- s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
+ int (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr,
u8 value);
};
struct ixgbe_link_operations {
- s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
- s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ int (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val);
+ int (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
u16 *val);
- s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
- s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
+ int (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
+ int (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg,
u16 val);
};
@@ -3602,14 +3605,14 @@ struct ixgbe_phy_info {
#include "ixgbe_mbx.h"
struct ixgbe_mbx_operations {
- s32 (*init_params)(struct ixgbe_hw *hw);
- s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*check_for_msg)(struct ixgbe_hw *, u16);
- s32 (*check_for_ack)(struct ixgbe_hw *, u16);
- s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+ int (*init_params)(struct ixgbe_hw *hw);
+ int (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ int (*check_for_msg)(struct ixgbe_hw *, u16);
+ int (*check_for_ack)(struct ixgbe_hw *, u16);
+ int (*check_for_rst)(struct ixgbe_hw *, u16);
};
struct ixgbe_mbx_stats {
@@ -3656,7 +3659,7 @@ struct ixgbe_hw {
struct ixgbe_info {
enum ixgbe_mac_type mac;
- s32 (*get_invariants)(struct ixgbe_hw *);
+ int (*get_invariants)(struct ixgbe_hw *);
const struct ixgbe_mac_operations *mac_ops;
const struct ixgbe_eeprom_operations *eeprom_ops;
const struct ixgbe_phy_operations *phy_ops;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 57a912e4653f..f1ffa398f6df 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -16,9 +16,9 @@
#define IXGBE_X540_VFT_TBL_SIZE 128
#define IXGBE_X540_RX_PB_SIZE 384
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static int ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static int ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static int ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
@@ -26,7 +26,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
return ixgbe_media_type_copper;
}
-s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+int ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -51,7 +51,7 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
* @speed: new link speed
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed,
@@ -66,11 +66,11 @@ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* and clears all interrupts, performs a PHY reset, and performs a link (MAC)
* reset.
**/
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+int ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
- s32 status;
- u32 ctrl, i;
u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ int status;
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -166,9 +166,9 @@ mac_reset_top:
* and the generation start_hw function.
* Then performs revision-specific operations, if any.
**/
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+int ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
- s32 ret_val;
+ int ret_val;
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val)
@@ -184,7 +184,7 @@ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -215,9 +215,9 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
-static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static int ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -237,10 +237,10 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
*
* Reads a 16 bit word(s) from the EEPROM using the EERD register.
**/
-static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+static int ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -259,9 +259,9 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
*
* Write a 16 bit word to the EEPROM using the EEWR register.
**/
-static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+static int ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -281,10 +281,10 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Write a 16 bit word(s) to the EEPROM using the EEWR register.
**/
-static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+static int ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return -EBUSY;
@@ -303,7 +303,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
*
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static int ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
@@ -368,7 +368,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
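The cast is safe because the return value is dual-purpose: a negative errno on failure, otherwise the non-negative 16-bit checksum, which always fits in an int. A caller-side sketch (example_read_checksum is a hypothetical name):

static int example_read_checksum(struct ixgbe_hw *hw, u16 *csum)
{
	int ret = hw->eeprom.ops.calc_checksum(hw);

	if (ret < 0)
		return ret;	/* I/O or semaphore error */

	*csum = (u16)ret;	/* 0 <= ret <= 0xFFFF */
	return 0;
}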
/**
@@ -379,12 +379,12 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+static int ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -439,10 +439,10 @@ out:
* checksum and updates the EEPROM and instructs the hardware to update
* the flash.
**/
-static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+static int ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -484,10 +484,10 @@ out:
* Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
* EEPROM from shadow RAM to the flash device.
**/
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+static int ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
+ int status;
u32 flup;
- s32 status;
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == -EIO) {
@@ -529,7 +529,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
* Polls the FLUDONE (bit 26) of the EEC Register to determine when the
* flash update is done.
**/
-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+static int ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
u32 i;
u32 reg;
@@ -551,7 +551,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the SW_FW_SYNC register for
* the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
+int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
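The acquire path pairs with ixgbe_release_swfw_sync_X540(); a minimal sketch of the usual locking pattern (do_phy_work is a hypothetical stand-in for the guarded access):

static int example_locked_phy_access(struct ixgbe_hw *hw)
{
	int err = ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_PHY0_SM);

	if (err)
		return err;	/* typically -EBUSY on timeout */

	err = do_phy_work(hw);	/* hypothetical guarded operation */

	ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_PHY0_SM);
	return err;
}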
@@ -660,7 +660,7 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
*
* Sets the hardware semaphores so SW/FW can gain control of shared resources
*/
-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+static int ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
u32 timeout = 2000;
u32 i;
@@ -760,7 +760,7 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
* Devices that implement the version 2 interface:
* X540
**/
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
@@ -798,7 +798,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
* Devices that implement the version 2 interface:
* X540
**/
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+int ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index e246c0d2a427..b69a680d3ab5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -3,17 +3,17 @@
#include "ixgbe_type.h"
-s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_get_invariants_X540(struct ixgbe_hw *hw);
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
-s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+int ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+int ixgbe_start_hw_X540(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
-s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
-s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
-s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+int ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
+int ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
+int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
-s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index c1adc94a5a65..2decb0710b6e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -6,13 +6,13 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
-static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
-static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
+static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
+static int ixgbe_setup_fc_x550em(struct ixgbe_hw *);
static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *);
static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *);
-static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);
+static int ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);
-static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -29,7 +29,7 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -41,7 +41,7 @@ static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
@@ -55,7 +55,7 @@ static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -91,7 +91,7 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
*
* Returns status code
*/
-static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
+static int ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
{
return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
}
@@ -104,7 +104,7 @@ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
*
* Returns status code
*/
-static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
+static int ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
{
return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
}
@@ -117,9 +117,9 @@ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
*
* Returns status code
*/
-static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
+static int ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
{
- s32 status;
+ int status;
status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value);
if (status)
@@ -135,9 +135,9 @@ static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
*
* Returns status code
*/
-static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
+static int ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
{
- s32 status;
+ int status;
status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE,
value);
@@ -153,9 +153,9 @@ static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
* This function assumes that the caller has acquired the proper semaphore.
* Returns error code
*/
-static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
+static int ixgbe_reset_cs4227(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
u32 retry;
u16 value;
u8 reg;
@@ -225,7 +225,7 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- s32 status;
+ int status;
u16 value;
u8 retry;
@@ -292,7 +292,7 @@ out:
*
* Returns error code
*/
-static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
@@ -347,13 +347,13 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
return 0;
}
-static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
return -EOPNOTSUPP;
}
-static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
return -EOPNOTSUPP;
@@ -368,7 +368,7 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
+static int ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -383,7 +383,7 @@ static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32
+static int
ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
u16 reg, u16 *val)
{
@@ -399,7 +399,7 @@ ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
*
* Returns an error code on error.
**/
-static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
+static int ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
@@ -414,7 +414,7 @@ static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
*
* Returns an error code on error.
**/
-static s32
+static int
ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
u8 addr, u16 reg, u16 val)
{
@@ -427,7 +427,7 @@ ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
* @activity: activity to perform
* @data: Pointer to 4 32-bit words of data
*/
-s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+int ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
u32 (*data)[FW_PHY_ACT_DATA_COUNT])
{
union {
@@ -435,7 +435,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
struct ixgbe_hic_phy_activity_resp rsp;
} hic;
u16 retries = FW_PHY_ACT_RETRIES;
- s32 rc;
+ int rc;
u32 i;
do {
@@ -484,12 +484,12 @@ static const struct {
*
* Returns error code
*/
-static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
{
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
u16 phy_speeds;
u16 phy_id_lo;
- s32 rc;
+ int rc;
u16 i;
if (hw->phy.id)
@@ -526,7 +526,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
*
* Returns error code
*/
-static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+static int ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
{
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
@@ -545,7 +545,7 @@ static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
*
* Returns error code
*/
-static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+static int ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
{
u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
@@ -557,10 +557,10 @@ static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
* ixgbe_setup_fw_link - Setup firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+static int ixgbe_setup_fw_link(struct ixgbe_hw *hw)
{
u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
u16 i;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
@@ -613,7 +613,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
*
* Called at init time to set up flow control.
*/
-static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+static int ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
{
if (hw->fc.requested_mode == ixgbe_fc_default)
hw->fc.requested_mode = ixgbe_fc_full;
@@ -627,7 +627,7 @@ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
-static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
+static int ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
@@ -659,7 +659,7 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
*
* Note: ctrl can be NULL if the IOSF control register value is not needed
*/
-static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+static int ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
{
u32 i, command;
@@ -690,12 +690,12 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
* @device_type: 3 bit device type
* @phy_data: Pointer to read data from the register
**/
-static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data)
{
u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
u32 command, error;
- s32 ret;
+ int ret;
ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
if (ret)
@@ -732,10 +732,10 @@ out:
* ixgbe_get_phy_token - Get the token for shared PHY access
* @hw: Pointer to hardware structure
*/
-static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+static int ixgbe_get_phy_token(struct ixgbe_hw *hw)
{
struct ixgbe_hic_phy_token_req token_cmd;
- s32 status;
+ int status;
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
@@ -761,10 +761,10 @@ static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
* ixgbe_put_phy_token - Put the token for shared PHY access
* @hw: Pointer to hardware structure
*/
-static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+static int ixgbe_put_phy_token(struct ixgbe_hw *hw)
{
struct ixgbe_hic_phy_token_req token_cmd;
- s32 status;
+ int status;
token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
@@ -790,7 +790,7 @@ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
* @device_type: 3 bit device type
* @data: Data to write to the register
**/
-static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
__always_unused u32 device_type,
u32 data)
{
@@ -816,7 +816,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* @device_type: 3 bit device type
* @data: Pointer to read data from the register
**/
-static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
__always_unused u32 device_type,
u32 *data)
{
@@ -824,7 +824,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
struct ixgbe_hic_internal_phy_req cmd;
struct ixgbe_hic_internal_phy_resp rsp;
} hic;
- s32 status;
+ int status;
memset(&hic, 0, sizeof(hic));
hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
@@ -851,14 +851,14 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
*
* Reads a 16 bit word(s) from the EEPROM using the hostif.
**/
-static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+static int ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
u32 current_word = 0;
u16 words_to_read;
- s32 status;
+ int status;
u32 i;
/* Take semaphore for the entire operation. */
@@ -923,14 +923,14 @@ out:
*
* Returns error status for any failure
**/
-static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
+static int ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
u16 size, u16 *csum, u16 *buffer,
u32 buffer_size)
{
- u16 buf[256];
- s32 status;
u16 length, bufsz, i, start;
u16 *local_buffer;
+ u16 buf[256];
+ int status;
bufsz = ARRAY_SIZE(buf);
@@ -991,14 +991,14 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
*
* Returns a negative error code on error, or the 16-bit checksum
**/
-static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
+static int ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
u32 buffer_size)
{
u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
+ u16 pointer, i, size;
u16 *local_buffer;
- s32 status;
u16 checksum = 0;
- u16 pointer, i, size;
+ int status;
hw->eeprom.ops.init_params(hw);
@@ -1060,7 +1060,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
- return (s32)checksum;
+ return (int)checksum;
}
/** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
@@ -1068,7 +1068,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
*
* Returns a negative error code on error, or the 16-bit checksum
**/
-static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static int ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
return ixgbe_calc_checksum_X550(hw, NULL, 0);
}
@@ -1080,11 +1080,11 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
*
* Reads a 16 bit word from the EEPROM using the hostif.
**/
-static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
+static int ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
struct ixgbe_hic_read_shadow_ram buffer;
- s32 status;
+ int status;
buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
buffer.hdr.req.buf_lenh = 0;
@@ -1118,12 +1118,12 @@ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
+static int ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
u16 *checksum_val)
{
- s32 status;
- u16 checksum;
u16 read_checksum = 0;
+ u16 checksum;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -1168,11 +1168,11 @@ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
*
* Write a 16 bit word to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
+static int ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
u16 data)
{
- s32 status;
struct ixgbe_hic_write_shadow_ram buffer;
+ int status;
buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
buffer.hdr.req.buf_lenh = 0;
@@ -1196,9 +1196,9 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
*
* Write a 16 bit word to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
+static int ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
{
- s32 status = 0;
+ int status = 0;
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
@@ -1216,10 +1216,10 @@ static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
*
* Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
**/
-static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
+static int ixgbe_update_flash_X550(struct ixgbe_hw *hw)
{
- s32 status = 0;
union ixgbe_hic_hdr2 buffer;
+ int status = 0;
buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
buffer.req.buf_lenh = 0;
@@ -1238,7 +1238,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
* Sets bus link width and speed to unknown because X550em is
* not a PCI device.
**/
-static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
+static int ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
{
hw->bus.type = ixgbe_bus_type_internal;
hw->bus.width = ixgbe_bus_width_unknown;
@@ -1269,9 +1269,9 @@ static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
**/
static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
{
- u32 rxctrl, pfdtxgswc;
- s32 status;
struct ixgbe_hic_disable_rxen fw_cmd;
+ u32 rxctrl, pfdtxgswc;
+ int status;
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
if (rxctrl & IXGBE_RXCTRL_RXEN) {
@@ -1311,10 +1311,10 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
* checksum and updates the EEPROM and instructs the hardware to update
* the flash.
**/
-static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
+static int ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
- s32 status;
u16 checksum = 0;
+ int status;
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
@@ -1351,11 +1351,11 @@ static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
*
* Write one or more 16-bit words to the EEPROM using the hostif.
**/
-static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+static int ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
u16 offset, u16 words,
u16 *data)
{
- s32 status = 0;
+ int status = 0;
u32 i = 0;
/* Take semaphore for the entire operation. */
@@ -1387,12 +1387,12 @@ static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
* @device_type: 3 bit device type
* @data: Data to write to the register
**/
-static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 data)
{
u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
u32 command, error;
- s32 ret;
+ int ret;
ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
if (ret)
@@ -1430,10 +1430,10 @@ out:
*
* iXFI configuration needed for ixgbe_mac_X550EM_x devices.
**/
-static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
+static int ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
{
- s32 status;
u32 reg_val;
+ int status;
/* Disable training protocol FSM. */
status = ixgbe_read_iosf_sb_reg_x550(hw,
@@ -1502,10 +1502,10 @@ static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
* internal PHY
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
+static int ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
{
- s32 status;
u32 link_ctrl;
+ int status;
/* Restart auto-negotiation. */
status = hw->mac.ops.read_iosf_sb_reg(hw,
@@ -1551,11 +1551,11 @@ static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
* Configures the integrated KR PHY to use iXFI mode. Used to connect an
* internal and external PHY at a specific speed, without autonegotiation.
**/
-static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static int ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
struct ixgbe_mac_info *mac = &hw->mac;
- s32 status;
u32 reg_val;
+ int status;
/* iXFI is only supported with X552 */
if (mac->type != ixgbe_mac_X550EM_x)
@@ -1608,7 +1608,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
* @hw: pointer to hardware structure
* @linear: true if SFP module is linear
*/
-static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
+static int ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
{
switch (hw->phy.sfp_type) {
case ixgbe_sfp_type_not_present:
@@ -1645,14 +1645,14 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
*
* Configures the external PHY and the integrated KR PHY for SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
- s32 status;
- u16 reg_slice, reg_val;
bool setup_linear = false;
+ u16 reg_slice, reg_val;
+ int status;
/* Check if SFP module is supported and linear */
status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1691,11 +1691,11 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
* Configures the integrated PHY for native SFI mode. Used to connect the
* internal PHY directly to an SFP cage, without autonegotiation.
**/
-static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+static int ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
struct ixgbe_mac_info *mac = &hw->mac;
- s32 status;
u32 reg_val;
+ int status;
/* Disable all AN and force speed to 10G Serial. */
status = mac->ops.read_iosf_sb_reg(hw,
@@ -1790,13 +1790,13 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
*
* Configure the integrated PHY for native SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
bool setup_linear = false;
u32 reg_phy_int;
- s32 ret_val;
+ int ret_val;
/* Check if SFP module is supported and linear */
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1839,14 +1839,14 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*
* Configure the integrated PHY for SFP support.
*/
-static s32
+static int
ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
u32 reg_slice, slice_offset;
bool setup_linear = false;
u16 reg_phy_ext;
- s32 ret_val;
+ int ret_val;
/* Check if SFP module is supported and linear */
ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
@@ -1918,12 +1918,12 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
*
* Returns error status for any failure
**/
-static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
+static int ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait)
{
- s32 status;
ixgbe_link_speed force_speed;
+ int status;
/* Set up the internal/external PHY link speed to iXFI (10G), unless
* only 1G is auto-advertised, in which case set up the KX link.
@@ -1954,7 +1954,7 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
*
* Check that both the MAC and X557 external PHY have link.
**/
-static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
+static int ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up,
bool link_up_wait_to_complete)
@@ -1998,13 +1998,13 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
* @speed: unused
* @autoneg_wait_to_complete: unused
*/
-static s32
+static int
ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
__always_unused bool autoneg_wait_to_complete)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
- s32 rc;
+ int rc;
rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2071,12 +2071,12 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
* @speed: the link speed to force
* @autoneg_wait: true when waiting for completion is needed
*/
-static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+static int ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
- s32 rc;
+ int rc;
rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2148,7 +2148,7 @@ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
{
u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
ixgbe_link_speed speed;
- s32 status = -EIO;
+ int status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -2276,10 +2276,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
/** ixgbe_setup_sfp_modules_X550em - Setup SFP module
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
{
- s32 status;
bool linear;
+ int status;
/* Check if SFP module is supported */
status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
@@ -2297,7 +2297,7 @@ static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
* @speed: pointer to link speed
* @autoneg: true when autoneg or autotry is enabled
**/
-static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
+static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
@@ -2375,7 +2375,7 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
* Determine if the external Base T PHY interrupt cause is a high temperature
* failure alarm or a link status change.
**/
-static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
+static int ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
bool *is_overtemp)
{
u32 status;
@@ -2463,7 +2463,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc,
*
* Returns PHY access status
**/
-static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
{
bool lsc, overtemp;
u32 status;
@@ -2555,7 +2555,7 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
* failure alarm then return error, else if link status change
* then setup internal/external PHY link
**/
-static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
+static int ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
bool *is_overtemp)
{
struct ixgbe_phy_info *phy = &hw->phy;
@@ -2579,11 +2579,11 @@ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw,
*
* Configures the integrated KR PHY.
**/
-static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
+static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed)
{
- s32 status;
u32 reg_val;
+ int status;
status = hw->mac.ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -2634,7 +2634,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
* ixgbe_setup_kr_x550em - Configure the KR PHY
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
/* leave link alone for 2.5G */
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
@@ -2652,7 +2652,7 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
*
* Returns error code if unable to get link status.
**/
-static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
+static int ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
{
u32 ret;
u16 autoneg_status;
@@ -2686,7 +2686,7 @@ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
* A return of a non-zero value indicates an error, and the base driver should
* not report link up.
**/
-static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
{
ixgbe_link_speed force_speed;
bool link_up;
@@ -2746,9 +2746,9 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
* @hw: pointer to hardware structure
**/
-static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
+static int ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
status = ixgbe_reset_phy_generic(hw);
@@ -2764,7 +2764,7 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @led_idx: led number to turn on
**/
-static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static int ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
@@ -2786,7 +2786,7 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* @hw: pointer to hardware structure
* @led_idx: led number to turn off
**/
-static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
@@ -2819,12 +2819,12 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* semaphore, -EIO when command fails or -EINVAL when incorrect
* params are passed.
**/
-static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub, u16 len,
const char *driver_ver)
{
struct ixgbe_hic_drv_info2 fw_cmd;
- s32 ret_val;
+ int ret_val;
int i;
if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
@@ -2866,12 +2866,12 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
*
* Determine lowest common link speed with link partner.
**/
-static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
+static int ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed *lcd_speed)
{
- u16 an_lp_status;
- s32 status;
u16 word = hw->eeprom.ctrl_word_3;
+ u16 an_lp_status;
+ int status;
*lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -2884,28 +2884,28 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
/* If link partner advertised 1G, return 1G */
if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
*lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
- return status;
+ return 0;
}
/* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
(word & NVM_INIT_CTRL_3_D10GMP_PORT0))
- return status;
+ return 0;
/* Link partner not capable of lower speeds, return 10G */
*lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
- return status;
+ return 0;
}
/**
* ixgbe_setup_fc_x550em - Set up flow control
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+static int ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
{
bool pause, asm_dir;
u32 reg_val;
- s32 rc = 0;
+ int rc = 0;
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
@@ -2990,7 +2990,7 @@ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
{
u32 link_s1, lp_an_page_low, an_cntl_1;
ixgbe_link_speed speed;
- s32 status = -EIO;
+ int status = -EIO;
bool link_up;
/* AN should have completed when the cable was plugged in.
@@ -3073,13 +3073,13 @@ static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
* (from D0 to non-D0). Link is required to enter LPLU so avoid resetting
* the X557 PHY immediately prior to entering LPLU.
**/
-static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
{
u16 an_10g_cntl_reg, autoneg_reg, speed;
- s32 status;
ixgbe_link_speed lcd_speed;
u32 save_autoneg;
bool link_up;
+ int status;
/* If blocked by MNG FW, then don't restart AN */
if (ixgbe_check_reset_blocked(hw))
@@ -3130,7 +3130,7 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
(lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
(lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
- return status;
+ return 0;
/* Clear AN completed indication */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
@@ -3167,10 +3167,10 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
* ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+static int ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
{
u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
return 0;
@@ -3196,7 +3196,7 @@ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
static bool ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
{
u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
- s32 rc;
+ int rc;
rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
if (rc)
@@ -3239,10 +3239,10 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
* set during init_shared_code because the PHY/SFP type was
* not known. Perform the SFP init if necessary.
**/
-static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
+static int ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_phy_info *phy = &hw->phy;
- s32 ret_val;
+ int ret_val;
hw->mac.ops.set_lan_id(hw);
@@ -3367,9 +3367,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
** @hw: pointer to hardware structure
**/
-static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
+static int ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
{
- s32 status;
+ int status;
u16 reg;
status = hw->phy.ops.read_reg(hw,
@@ -3441,14 +3441,14 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
** and clears all interrupts, performs a PHY reset, and performs a link (MAC)
** reset.
**/
-static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+static int ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
ixgbe_link_speed link_speed;
- s32 status;
+ bool link_up = false;
u32 ctrl = 0;
+ int status;
u32 i;
- bool link_up = false;
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
@@ -3609,10 +3609,10 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
*
* Called at init time to set up flow control.
**/
-static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
+static int ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
{
- s32 status = 0;
u32 an_cntl = 0;
+ int status = 0;
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
@@ -3714,9 +3714,9 @@ static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
*
* Acquires the SWFW semaphore and sets the I2C MUX
*/
-static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
+static int ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
{
- s32 status;
+ int status;
status = ixgbe_acquire_swfw_sync_X540(hw, mask);
if (status)
@@ -3750,11 +3750,11 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
*
* Acquires the SWFW semaphore and get the shared PHY token as needed
*/
-static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+static int ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
{
u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
int retries = FW_PHY_TOKEN_RETRIES;
- s32 status;
+ int status;
while (--retries) {
status = 0;
@@ -3807,11 +3807,11 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
* Token. The PHY Token is needed since the MDIO is shared between two MAC
* instances.
*/
-static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
return -EBUSY;
@@ -3833,11 +3833,11 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* Writes a value to specified PHY register using the SWFW lock and PHY Token.
* The PHY Token is needed since the MDIO is shared between two MAC instances.
*/
-static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- s32 status;
+ int status;
if (hw->mac.ops.acquire_swfw_sync(hw, mask))
return -EBUSY;
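
All the s32-to-int hunks in this file follow the same kernel convention: return 0 on success and a negative errno (-EBUSY, -EIO, -EINVAL) on failure. A minimal standalone sketch of that convention; the function, values, and checks here are illustrative, not taken from the driver:

        #include <errno.h>

        static int demo_read_word(unsigned int addr, unsigned short *out)
        {
                if (!out)
                        return -EINVAL;   /* bad argument */
                if (addr > 0xFFFF)
                        return -EIO;      /* pretend the device failed */
                *out = 0xABCD;            /* pretend a hardware read */
                return 0;                 /* success */
        }

        int main(void)
        {
                unsigned short word;

                return demo_read_word(0x10, &word) ? 1 : 0;
        }
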
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 59798bc33298..d34d715c59eb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -359,12 +359,8 @@ construct_skb:
ixgbe_xdp_ring_update_tail_locked(ring);
}
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- q_vector->rx.total_packets += total_rx_packets;
- q_vector->rx.total_bytes += total_rx_bytes;
+ ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+ total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
@@ -499,13 +495,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
}
tx_ring->next_to_clean = ntc;
-
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.bytes += total_bytes;
- tx_ring->stats.packets += total_packets;
- u64_stats_update_end(&tx_ring->syncp);
- q_vector->tx.total_bytes += total_bytes;
- q_vector->tx.total_packets += total_packets;
+ ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+ total_bytes);
if (xsk_frames)
xsk_tx_completed(pool, xsk_frames);
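
Both hunks above replace open-coded u64_stats sequences with shared helpers whose definitions are not part of this section. A sketch of what ixgbe_update_rx_ring_stats() plausibly does, reconstructed from the removed lines, with the signature inferred from the call site (parameter types are an assumption):

        static void ixgbe_update_rx_ring_stats(struct ixgbe_ring *rx_ring,
                                               struct ixgbe_q_vector *q_vector,
                                               u64 pkts, u64 bytes)
        {
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->stats.packets += pkts;
                rx_ring->stats.bytes += bytes;
                u64_stats_update_end(&rx_ring->syncp);

                q_vector->rx.total_packets += pkts;
                q_vector->rx.total_bytes += bytes;
        }

The Tx variant would mirror this with the tx_ring and q_vector->tx fields.
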
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a44e4bd56142..9c960017a6de 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4413,7 +4413,7 @@ ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
unsigned int network_hdr_len, mac_hdr_len;
/* Make certain the headers can be described by a context descriptor */
- mac_hdr_len = skb_network_header(skb) - skb->data;
+ mac_hdr_len = skb_network_offset(skb);
if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
return features & ~(NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC |
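
The one-line change is behavior-preserving: skb_network_offset() is the canonical helper for this pointer arithmetic. A sketch of the equivalence (demo name; effect matches the helper, not necessarily the header's verbatim body):

        static inline int demo_network_offset(const struct sk_buff *skb)
        {
                return skb_network_header(skb) - skb->data;
        }
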
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 884d64114bff..837295fecd17 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -180,6 +180,7 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
source "drivers/net/ethernet/marvell/octeon_ep/Kconfig"
+source "drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig"
source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index ceba4aa4f026..a399defe25fd 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -12,5 +12,6 @@ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
obj-y += octeon_ep/
+obj-y += octeon_ep_vf/
obj-y += octeontx2/
obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a641b3534ca3..40a5f1431e4e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -5097,7 +5097,7 @@ static int mvneta_ethtool_set_wol(struct net_device *dev,
}
static int mvneta_ethtool_get_eee(struct net_device *dev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
u32 lpi_ctl0;
@@ -5113,7 +5113,7 @@ static int mvneta_ethtool_get_eee(struct net_device *dev,
}
static int mvneta_ethtool_set_eee(struct net_device *dev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct mvneta_port *pp = netdev_priv(dev);
u32 lpi_ctl0;
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig b/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig
new file mode 100644
index 000000000000..e371a3ef0c49
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell's Octeon PCI Endpoint NIC VF Driver Configuration
+#
+
+config OCTEON_EP_VF
+ tristate "Marvell Octeon PCI Endpoint NIC VF Driver"
+ depends on 64BIT
+ depends on PCI
+ help
+ This driver supports the networking functionality of Marvell's
+ Octeon PCI Endpoint NIC VF.
+
+ For the list of devices supported by this driver, refer to the
+ documentation in
+ <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst>.
+
+ To compile this driver as a module, choose M here.
+ The name of the module will be octeon_ep_vf.
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile b/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile
new file mode 100644
index 000000000000..4a5f9fcb0b40
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Network driver for Marvell's Octeon PCI Endpoint NIC VF
+#
+
+obj-$(CONFIG_OCTEON_EP_VF) += octeon_ep_vf.o
+
+octeon_ep_vf-y := octep_vf_main.o octep_vf_cn9k.o octep_vf_cnxk.o \
+ octep_vf_tx.o octep_vf_rx.o octep_vf_mbox.o \
+ octep_vf_ethtool.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
new file mode 100644
index 000000000000..88937fce75f1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+#include "octep_vf_regs_cn9k.h"
+
+/* Dump useful hardware IQ/OQ CSRs for debug purposes */
+static void cn93_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_DBELL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_CONTROL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_ENABLE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_BADDR(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_CNTS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_IN_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_CONTROL(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_ENABLE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_CNTS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_VF_SDP_R_OUT_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_BYTE_CNT(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static void cn93_vf_reset_iq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
+
+ /* Disable the Tx/Instruction Ring */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no),
+ val & GENMASK_ULL(31, 0));
+}
+
+/* Reset Hardware Rx queue */
+static void cn93_vf_reset_oq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ /* Disable Output (Rx) Ring */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_vf_read_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no));
+ octep_vf_write_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0));
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_vf_reset_io_queues_cn93(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CN93 VF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cn93_vf_reset_iq(oct, q);
+ cn93_vf_reset_oq(oct, q);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_vf_init_config_cn93_vf(struct octep_vf_device *oct)
+{
+ struct octep_vf_config *conf = oct->conf;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(0));
+ conf->ring_cfg.max_io_rings = (reg_val >> CN93_VF_R_IN_CTL_RPVF_POS) &
+ CN93_VF_R_IN_CTL_RPVF_MASK;
+ conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;
+
+ conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_VF_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
+
+ conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ struct octep_vf_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for the IDLE bit to be set to 1 */
+ if (!(reg_val & CN93_VF_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CN93_VF_R_IN_CTL_IDLE));
+ }
+ reg_val |= CN93_VF_R_IN_CTL_RDSIZE;
+ reg_val |= CN93_VF_R_IN_CTL_IS_64B;
+ reg_val |= CN93_VF_R_IN_CTL_ESR;
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this queue */
+ iq->doorbell_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to the max (0xFFFFFFFF) to disable the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ struct octep_vf_oq *oq = oct->oq[oq_no];
+ u32 time_threshold = 0;
+ u64 oq_ctl = ULL(0);
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for the IDLE bit to be set to 1 */
+ if (!(reg_val & CN93_VF_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CN93_VF_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CN93_VF_R_OUT_CTL_IMODE);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ES_I);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CN93_VF_R_OUT_CTL_ES_D);
+ reg_val |= (CN93_VF_R_OUT_CTL_ES_P);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
+
+ oq_ctl = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
+ /* Clear the ISIZE and BSIZE (22-0) */
+ oq_ctl &= ~GENMASK_ULL(22, 0);
+ /* Populate the BSIZE (15-0) */
+ oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+}
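
The final write above packs two thresholds into one 64-bit CSR: the time threshold in the upper 32 bits and the packet-count threshold in the lower 32. The interrupt-enable bit (bit 62) is OR-ed in separately by the enable path further down. A standalone sketch with illustrative threshold values:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t time_threshold = 100; /* example timer threshold */
                uint64_t pkt_threshold = 64;   /* example packet-count threshold */
                uint64_t reg = (time_threshold << 32) | pkt_threshold;

                printf("OUT_INT_LEVELS value: 0x%016llx\n",
                       (unsigned long long)reg); /* 0x0000006400000040 */
                return 0;
        }
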
+
+/* Setup registers for a VF mailbox */
+static void octep_vf_setup_mbox_regs_cn93(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+
+ /* PF to VF DATA reg. VF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_INT(q_no);
+
+ /* VF to PF DATA reg. VF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cn93_handle_vf_mbox_intr(struct octep_vf_device *oct)
+{
+ if (oct->mbox)
+ schedule_work(&oct->mbox->wk.work);
+ else
+ dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_vf_ioq_intr_handler_cn93(void *data)
+{
+ struct octep_vf_ioq_vector *vector = data;
+ struct octep_vf_device *oct;
+ struct octep_vf_oq *oq;
+ u64 reg_val;
+
+ oct = vector->octep_vf_dev;
+ oq = vector->oq;
+ /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
+ if (oq->q_no == 0) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0));
+ if (reg_val & CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
+ cn93_handle_vf_mbox_intr(oct);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
+ }
+ }
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_vf_reinit_regs_cn93(struct octep_vf_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_vf_enable_interrupts_cn93(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+ /* Enable the PF to VF mbox interrupt by setting the 2nd bit */
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0),
+ CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
+}
+
+/* Disable all interrupts */
+static void octep_vf_disable_interrupts_cn93(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ /* Disable the PF to VF mbox interrupt by clearing the interrupt register */
+ if (oct->mbox)
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_vf_update_iq_read_index_cn93(struct octep_vf_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
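
The subtraction above is safe even when the hardware counter wraps: u32 arithmetic is modulo 2^32, so new-minus-old always yields the number of instructions completed since the last poll. A standalone demonstration with illustrative counter values:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t prev = 0xFFFFFFF0u; /* snapshot taken before the wrap */
                uint32_t now = 0x00000010u;  /* hardware counter after wrapping */
                uint32_t done = now - prev;  /* modulo-2^32: 0x20, i.e. 32 */

                printf("instructions completed since last poll: %u\n", done);
                return 0;
        }
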
+
+/* Enable a hardware Tx Queue */
+static void octep_vf_enable_iq_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0));
+
+ while (octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_vf_enable_oq_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0));
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_enable_io_queues_cn93(struct octep_vf_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_enable_iq_cn93(oct, q);
+ octep_vf_enable_oq_cn93(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to VF */
+static void octep_vf_disable_iq_cn93(struct octep_vf_device *oct, int iq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to VF */
+static void octep_vf_disable_oq_cn93(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_disable_io_queues_cn93(struct octep_vf_device *oct)
+{
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_disable_iq_cn93(oct, q);
+ octep_vf_disable_oq_cn93(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_vf_dump_registers_cn93(struct octep_vf_device *oct)
+{
+ u8 num_rings, q;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++)
+ cn93_vf_dump_q_regs(oct, q);
+}
+
+/**
+ * octep_vf_device_setup_cn93() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - set initial configuration and max limits.
+ */
+void octep_vf_device_setup_cn93(struct octep_vf_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cn93;
+ oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cn93;
+ oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cn93;
+
+ oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cn93;
+ oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cn93;
+
+ oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cn93;
+ oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cn93;
+
+ oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cn93;
+
+ oct->hw_ops.enable_iq = octep_vf_enable_iq_cn93;
+ oct->hw_ops.enable_oq = octep_vf_enable_oq_cn93;
+ oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cn93;
+
+ oct->hw_ops.disable_iq = octep_vf_disable_iq_cn93;
+ oct->hw_ops.disable_oq = octep_vf_disable_oq_cn93;
+ oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cn93;
+ oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cn93;
+
+ oct->hw_ops.dump_registers = octep_vf_dump_registers_cn93;
+ octep_vf_init_config_cn93_vf(oct);
+}
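
With the ops table filled in, the rest of the driver can stay silicon-agnostic and dispatch through hw_ops. An illustrative call site, not taken from this diff:

        static void demo_bring_up(struct octep_vf_device *oct)
        {
                oct->hw_ops.reset_io_queues(oct);   /* cn93-specific queue reset */
                oct->hw_ops.enable_interrupts(oct); /* ring + mailbox interrupts */
                oct->hw_ops.enable_io_queues(oct);  /* set the IQ/OQ enable bits */
        }
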
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
new file mode 100644
index 000000000000..1f79dfad42c6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+#include "octep_vf_regs_cnxk.h"
+
+/* Dump useful hardware IQ/OQ CSRs for debug purposes */
+static void cnxk_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_CONTROL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_ENABLE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_CNTS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_IN_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_CONTROL(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_ENABLE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_CNTS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_PKT_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CNXK_VF_SDP_R_ERR_TYPE(qno),
+ octep_vf_read_csr64(oct, CNXK_VF_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static void cnxk_vf_reset_iq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
+
+ /* Disable the Tx/Instruction Ring */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no), val & GENMASK_ULL(31, 0));
+}
+
+/* Reset Hardware Rx queue */
+static void cnxk_vf_reset_oq(struct octep_vf_device *oct, int q_no)
+{
+ u64 val = ULL(0);
+
+ /* Disable Output (Rx) Ring */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_vf_read_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no));
+ octep_vf_write_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0));
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_vf_reset_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CNXK VF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cnxk_vf_reset_iq(oct, q);
+ cnxk_vf_reset_oq(oct, q);
+ }
+}
+
+/* Initialize configuration limits and initial active config */
+static void octep_vf_init_config_cnxk_vf(struct octep_vf_device *oct)
+{
+ struct octep_vf_config *conf = oct->conf;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(0));
+ conf->ring_cfg.max_io_rings = (reg_val >> CNXK_VF_R_IN_CTL_RPVF_POS) &
+ CNXK_VF_R_IN_CTL_RPVF_MASK;
+ conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;
+
+ conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
+ conf->iq.db_min = OCTEP_VF_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
+ conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN;
+
+ conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
+}
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ struct octep_vf_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for the IDLE bit to be set to 1 */
+ if (!(reg_val & CNXK_VF_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CNXK_VF_R_IN_CTL_IDLE));
+ }
+ reg_val |= CNXK_VF_R_IN_CTL_RDSIZE;
+ reg_val |= CNXK_VF_R_IN_CTL_IS_64B;
+ reg_val |= CNXK_VF_R_IN_CTL_ESR;
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this queue */
+ iq->doorbell_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to the max (0xFFFFFFFF) to disable the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ struct octep_vf_oq *oq = oct->oq[oq_no];
+ u32 time_threshold = 0;
+ u64 oq_ctl = ULL(0);
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for the IDLE bit to be set to 1 */
+ if (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_I);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_D);
+ reg_val |= (CNXK_VF_R_OUT_CTL_ES_P);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
+
+ oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
+ /* Clear the ISIZE and BSIZE (22-0) */
+ oq_ctl &= ~GENMASK_ULL(22, 0);
+ /* Populate the BSIZE (15-0) */
+ oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ /* set watermark for backpressure */
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no));
+ reg_val &= ~GENMASK_ULL(31, 0);
+ reg_val |= CFG_GET_OQ_WMARK(oct->conf);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val);
+}
+
+/* Setup registers for a VF mailbox */
+static void octep_vf_setup_mbox_regs_cnxk(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+
+ /* PF to VF DATA reg. VF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_INT(q_no);
+
+ /* VF to PF DATA reg. VF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Mailbox Interrupt handler */
+static void cnxk_handle_vf_mbox_intr(struct octep_vf_device *oct)
+{
+ if (oct->mbox)
+ schedule_work(&oct->mbox->wk.work);
+ else
+ dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_vf_ioq_intr_handler_cnxk(void *data)
+{
+ struct octep_vf_ioq_vector *vector = data;
+ struct octep_vf_device *oct;
+ struct octep_vf_oq *oq;
+ u64 reg_val;
+
+ oct = vector->octep_vf_dev;
+ oq = vector->oq;
+ /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
+ if (oq->q_no == 0) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0));
+ if (reg_val & CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
+ cnxk_handle_vf_mbox_intr(oct);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
+ }
+ }
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_vf_reinit_regs_cnxk(struct octep_vf_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_vf_enable_interrupts_cnxk(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+ /* Enable the PF to VF mbox interrupt by setting the 2nd bit */
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0),
+ CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
+}
+
+/* Disable all interrupts */
+static void octep_vf_disable_interrupts_cnxk(struct octep_vf_device *oct)
+{
+ int num_rings, q;
+ u64 reg_val;
+
+ /* Disable the PF to VF mbox interrupt by clearing the interrupt register */
+ if (oct->mbox)
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++) {
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
+ reg_val &= ~BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
+ }
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_vf_update_iq_read_index_cnxk(struct octep_vf_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
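+
+/* A note on octep_vf_update_iq_read_index_cnxk() above: pkt_in_done and
+ * the hardware count are u32, so the last_done subtraction stays correct
+ * across 32-bit counter wrap-around; e.g. 0x00000002 - 0xfffffffe = 4 in
+ * unsigned 32-bit arithmetic.
+ */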
+
+/* Enable a hardware Tx Queue */
+static void octep_vf_enable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0));
+
+ while (octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
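+ /* The loop above polls until the doorbell count drains to zero or
+ * about HZ iterations elapse; with one-jiffy sleeps per iteration
+ * this bounds the wait to roughly one second.
+ */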
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_vf_enable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= BIT_ULL_MASK(62);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0));
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_enable_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_enable_iq_cnxk(oct, q);
+ octep_vf_enable_oq_cnxk(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to VF */
+static void octep_vf_disable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to VF */
+static void octep_vf_disable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
+{
+ u64 reg_val;
+
+ reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~ULL(1);
+ octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to VF */
+static void octep_vf_disable_io_queues_cnxk(struct octep_vf_device *oct)
+{
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_vf_disable_iq_cnxk(oct, q);
+ octep_vf_disable_oq_cnxk(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_vf_dump_registers_cnxk(struct octep_vf_device *oct)
+{
+ u8 num_rings, q;
+
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ for (q = 0; q < num_rings; q++)
+ cnxk_vf_dump_q_regs(oct, q);
+}
+
+/**
+ * octep_vf_device_setup_cnxk() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - set initial configuration and max limits.
+ */
+void octep_vf_device_setup_cnxk(struct octep_vf_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cnxk;
+ oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cnxk;
+ oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cnxk;
+
+ oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cnxk;
+ oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cnxk;
+
+ oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cnxk;
+ oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cnxk;
+
+ oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cnxk;
+
+ oct->hw_ops.enable_iq = octep_vf_enable_iq_cnxk;
+ oct->hw_ops.enable_oq = octep_vf_enable_oq_cnxk;
+ oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cnxk;
+
+ oct->hw_ops.disable_iq = octep_vf_disable_iq_cnxk;
+ oct->hw_ops.disable_oq = octep_vf_disable_oq_cnxk;
+ oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cnxk;
+ oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cnxk;
+
+ oct->hw_ops.dump_registers = octep_vf_dump_registers_cnxk;
+ octep_vf_init_config_cnxk_vf(oct);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h
new file mode 100644
index 000000000000..e03a647b0110
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_CONFIG_H_
+#define _OCTEP_VF_CONFIG_H_
+
+/* Tx instruction types by length */
+#define OCTEP_VF_32BYTE_INSTR 32
+#define OCTEP_VF_64BYTE_INSTR 64
+
+/* Tx Queue: maximum descriptors per ring */
+#define OCTEP_VF_IQ_MAX_DESCRIPTORS 1024
+/* Minimum input (Tx) requests to be enqueued to ring doorbell */
+#define OCTEP_VF_DB_MIN 8
+/* Packet threshold for Tx queue interrupt */
+#define OCTEP_VF_IQ_INTR_THRESHOLD 0x0
+
+/* Minimum watermark for backpressure */
+#define OCTEP_VF_OQ_WMARK_MIN 256
+
+/* Rx Queue: maximum descriptors per ring */
+#define OCTEP_VF_OQ_MAX_DESCRIPTORS 1024
+
+/* Rx buffer size: Use page size buffers.
+ * Build skb from allocated page buffer once the packet is received.
+ * When a gathered packet is received, make head page as skb head and
+ * page buffers in consecutive Rx descriptors as fragments.
+ */
+#define OCTEP_VF_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
+#define OCTEP_VF_OQ_PKTS_PER_INTR 128
+#define OCTEP_VF_OQ_REFILL_THRESHOLD (OCTEP_VF_OQ_MAX_DESCRIPTORS / 4)
+
+#define OCTEP_VF_OQ_INTR_PKT_THRESHOLD 1
+#define OCTEP_VF_OQ_INTR_TIME_THRESHOLD 10
+
+#define OCTEP_VF_MSIX_NAME_SIZE (IFNAMSIZ + 32)
+
+/* Tx Queue wake threshold
+ * Wake up a stopped Tx queue if at least 2 descriptors are available.
+ * Even an skb with fragments consumes only one Tx queue descriptor entry.
+ */
+#define OCTEP_VF_WAKE_QUEUE_THRESHOLD 2
+
+/* Minimum MTU supported by Octeon network interface */
+#define OCTEP_VF_MIN_MTU ETH_MIN_MTU
+/* Maximum MTU supported by Octeon interface */
+#define OCTEP_VF_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
+/* Default MTU */
+#define OCTEP_VF_DEFAULT_MTU 1500
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
+
+#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs)
+#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+#define CFG_GET_OQ_WMARK(cfg) ((cfg)->oq.wmark)
+
+#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->ring_cfg.active_io_rings)
+#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->ring_cfg.max_io_rings)
+
+#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
+#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
+
+#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
+
+/* Hardware Tx Queue configuration. */
+struct octep_vf_iq_config {
+ /* Size of the Input queue (number of commands) */
+ u16 num_descs;
+
+ /* Command size - 32 or 64 bytes */
+ u16 instr_type;
+
+ /* Minimum number of commands pending to be posted to Octeon before driver
+ * hits the Input queue doorbell.
+ */
+ u16 db_min;
+
+ /* Trigger the IQ interrupt when processed cmd count reaches
+ * this level.
+ */
+ u32 intr_threshold;
+};
+
+/* Hardware Rx Queue configuration. */
+struct octep_vf_oq_config {
+ /* Size of Output queue (number of descriptors) */
+ u16 num_descs;
+
+ /* Size of buffer in this Output queue. */
+ u16 buf_size;
+
+ /* The number of buffers that were consumed during packet processing
+ * by the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u16 refill_threshold;
+
+ /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u32 oq_intr_pkt;
+
+ /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
+ * if at least one packet was sent in the time interval specified by
+ * this field. The driver uses time interval interrupt coalescing by
+ * default. The time is specified in microseconds.
+ */
+ u32 oq_intr_time;
+
+ /* Water mark for backpressure.
+ * Output queue sends backpressure signal to source when
+ * free buffer count falls below wmark.
+ */
+ u32 wmark;
+};
+
+/* Tx/Rx configuration */
+struct octep_vf_ring_config {
+ /* Max number of IOQs */
+ u16 max_io_rings;
+
+ /* Number of active IOQs */
+ u16 active_io_rings;
+};
+
+/* Octeon MSI-x config. */
+struct octep_vf_msix_config {
+ /* Number of IOQ interrupts */
+ u16 ioq_msix;
+};
+
+/* Data Structure to hold configuration limits and active config */
+struct octep_vf_config {
+ /* Input Queue attributes. */
+ struct octep_vf_iq_config iq;
+
+ /* Output Queue attributes. */
+ struct octep_vf_oq_config oq;
+
+ /* MSI-X interrupt config */
+ struct octep_vf_msix_config msix_cfg;
+
+ /* NIC VF ring Configuration */
+ struct octep_vf_ring_config ring_cfg;
+};
+#endif /* _OCTEP_VF_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
new file mode 100644
index 000000000000..a1979b45e355
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+static const char octep_vf_gstrings_global_stats[][ETH_GSTRING_LEN] = {
+ "rx_alloc_errors",
+ "tx_busy_errors",
+ "tx_hw_pkts",
+ "tx_hw_octs",
+ "tx_hw_bcast",
+ "tx_hw_mcast",
+ "rx_hw_pkts",
+ "rx_hw_bytes",
+ "rx_hw_bcast",
+ "rx_dropped_bytes_fifo_full",
+};
+
+#define OCTEP_VF_GLOBAL_STATS_CNT (sizeof(octep_vf_gstrings_global_stats) / ETH_GSTRING_LEN)
+
+static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
+ "tx_packets_posted[Q-%u]",
+ "tx_packets_completed[Q-%u]",
+ "tx_bytes[Q-%u]",
+ "tx_busy[Q-%u]",
+};
+
+#define OCTEP_VF_TX_Q_STATS_CNT (sizeof(octep_vf_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+
+static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets[Q-%u]",
+ "rx_bytes[Q-%u]",
+ "rx_alloc_errors[Q-%u]",
+};
+
+#define OCTEP_VF_RX_Q_STATS_CNT (sizeof(octep_vf_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+
+static void octep_vf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ strscpy(info->driver, OCTEP_VF_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
+}
+
+static void octep_vf_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ char *strings = (char *)data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_global_stats[i]);
+ strings += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_VF_TX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_tx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_VF_RX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_vf_gstrings_rx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int octep_vf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return OCTEP_VF_GLOBAL_STATS_CNT + (num_queues *
+ (OCTEP_VF_TX_Q_STATS_CNT + OCTEP_VF_RX_Q_STATS_CNT));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
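+
+/* Worked example for octep_vf_get_sset_count(): with 8 active rings,
+ * ETH_SS_STATS reports 10 + 8 * (4 + 3) = 66 strings, matching the
+ * global, per-Tx-queue and per-Rx-queue string tables above.
+ */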
+
+static void octep_vf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_tx_stats *iface_tx_stats;
+ struct octep_vf_iface_rx_stats *iface_rx_stats;
+ u64 rx_alloc_errors, tx_busy_errors;
+ int q, i;
+
+ rx_alloc_errors = 0;
+ tx_busy_errors = 0;
+
+ octep_vf_get_if_stats(oct);
+ iface_tx_stats = &oct->iface_tx_stats;
+ iface_rx_stats = &oct->iface_rx_stats;
+
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ tx_busy_errors += iq->stats.tx_busy;
+ rx_alloc_errors += oq->stats.alloc_failures;
+ }
+ i = 0;
+ data[i++] = rx_alloc_errors;
+ data[i++] = tx_busy_errors;
+ data[i++] = iface_tx_stats->pkts;
+ data[i++] = iface_tx_stats->octs;
+ data[i++] = iface_tx_stats->bcst;
+ data[i++] = iface_tx_stats->mcst;
+ data[i++] = iface_rx_stats->pkts;
+ data[i++] = iface_rx_stats->octets;
+ data[i++] = iface_rx_stats->bcast_pkts;
+ data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+
+ /* Per Tx Queue stats */
+ for (q = 0; q < oct->num_iqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+
+ data[i++] = iq->stats.instr_posted;
+ data[i++] = iq->stats.instr_completed;
+ data[i++] = iq->stats.bytes_sent;
+ data[i++] = iq->stats.tx_busy;
+ }
+
+ /* Per Rx Queue stats */
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ data[i++] = oq->stats.packets;
+ data[i++] = oq->stats.bytes;
+ data[i++] = oq->stats.alloc_failures;
+ }
+}
+
+#define OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(octep_vf_speeds, ksettings, name) \
+{ \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_T)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_R)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \
+ if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \
+}
+
+static int octep_vf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_link_info *link_info;
+ u32 advertised_modes, supported_modes;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ octep_vf_get_link_info(oct);
+
+ advertised_modes = oct->link_info.advertised_modes;
+ supported_modes = oct->link_info.supported_modes;
+ link_info = &oct->link_info;
+
+ OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
+ OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
+
+ if (link_info->autoneg) {
+ if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = link_info->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ return 0;
+}
+
+static const struct ethtool_ops octep_vf_ethtool_ops = {
+ .get_drvinfo = octep_vf_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = octep_vf_get_strings,
+ .get_sset_count = octep_vf_get_sset_count,
+ .get_ethtool_stats = octep_vf_get_ethtool_stats,
+ .get_link_ksettings = octep_vf_get_link_ksettings,
+};
+
+void octep_vf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &octep_vf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
new file mode 100644
index 000000000000..dd49d0b8b494
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -0,0 +1,1231 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+struct workqueue_struct *octep_vf_wq;
+
+/* Supported Devices */
+static const struct pci_device_id octep_vf_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_VF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_VF)},
+ {0, },
+};
+MODULE_DEVICE_TABLE(pci, octep_vf_pci_id_tbl);
+
+MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
+MODULE_DESCRIPTION(OCTEP_VF_DRV_STRING);
+MODULE_LICENSE("GPL");
+
+/**
+ * octep_vf_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate resources to hold per Tx/Rx queue interrupt info.
+ * This is the information passed to interrupt handler, from which napi poll
+ * is scheduled and includes quick access to private data of Tx/Rx queue
+ * corresponding to the interrupt being handled.
+ *
+ * Return: 0, on successful allocation of resources for all queue interrupts.
+ * -1, if failed to allocate any resource.
+ */
+static int octep_vf_alloc_ioq_vectors(struct octep_vf_device *oct)
+{
+ struct octep_vf_ioq_vector *ioq_vector;
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
+ if (!oct->ioq_vector[i])
+ goto free_ioq_vector;
+
+ ioq_vector = oct->ioq_vector[i];
+ ioq_vector->iq = oct->iq[i];
+ ioq_vector->oq = oct->oq[i];
+ ioq_vector->octep_vf_dev = oct;
+ }
+
+ dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
+ return 0;
+
+free_ioq_vector:
+ while (i) {
+ i--;
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_free_ioq_vectors(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (oct->ioq_vector[i]) {
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ }
+ netdev_info(oct->netdev, "Freed IOQ Vectors\n");
+}
+
+/**
+ * octep_vf_enable_msix_range() - enable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate and enable all MSI-x interrupts (Tx/Rx queue interrupts)
+ * for the Octeon device; the VF has no non-queue interrupts.
+ *
+ * Return: 0, on successfully enabling all MSI-x interrupts.
+ * -1, if failed to enable any MSI-x interrupt.
+ */
+static int octep_vf_enable_msix_range(struct octep_vf_device *oct)
+{
+ int num_msix, msix_allocated;
+ int i;
+
+ /* The VF uses only Tx/Rx queue (IOQ) interrupts; no non-queue
+ * (generic) MSI-x vectors are requested.
+ */
+ num_msix = oct->num_oqs;
+ oct->msix_entries = kcalloc(num_msix, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ goto msix_alloc_err;
+
+ for (i = 0; i < num_msix; i++)
+ oct->msix_entries[i].entry = i;
+
+ msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
+ num_msix, num_msix);
+ if (msix_allocated != num_msix) {
+ dev_err(&oct->pdev->dev,
+ "Failed to enable %d msix irqs; got only %d\n",
+ num_msix, msix_allocated);
+ goto enable_msix_err;
+ }
+ oct->num_irqs = msix_allocated;
+ dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
+
+ return 0;
+
+enable_msix_err:
+ if (msix_allocated > 0)
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+msix_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_vf_disable_msix() - disable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Disable MSI-x on the Octeon device.
+ */
+static void octep_vf_disable_msix(struct octep_vf_device *oct)
+{
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
+}
+
+/**
+ * octep_vf_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data contains pointers to Tx/Rx queue private data
+ * and corresponding NAPI context.
+ *
+ * This is the common handler for all Tx/Rx queue interrupts.
+ */
+static irqreturn_t octep_vf_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_vf_ioq_vector *ioq_vector = data;
+ struct octep_vf_device *oct = ioq_vector->octep_vf_dev;
+
+ return oct->hw_ops.ioq_intr_handler(ioq_vector);
+}
+
+/**
+ * octep_vf_request_irqs() - Register interrupt handlers.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Register handlers for all Tx/Rx queue interrupts.
+ *
+ * Return: 0, on successful registration of all interrupt handlers.
+ * -1, on any error.
+ */
+static int octep_vf_request_irqs(struct octep_vf_device *oct)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_vf_ioq_vector *ioq_vector;
+ struct msix_entry *msix_entry;
+ int ret, i;
+
+ /* Request IRQs for Tx/Rx queues */
+ for (i = 0; i < oct->num_oqs; i++) {
+ ioq_vector = oct->ioq_vector[i];
+ msix_entry = &oct->msix_entries[i];
+
+ snprintf(ioq_vector->name, sizeof(ioq_vector->name),
+ "%s-q%d", netdev->name, i);
+ ret = request_irq(msix_entry->vector,
+ octep_vf_ioq_intr_handler, 0,
+ ioq_vector->name, ioq_vector);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for Q-%d; err=%d",
+ i, ret);
+ goto ioq_irq_err;
+ }
+
+ cpumask_set_cpu(i % num_online_cpus(),
+ &ioq_vector->affinity_mask);
+ irq_set_affinity_hint(msix_entry->vector,
+ &ioq_vector->affinity_mask);
+ }
+
+ return 0;
+ioq_irq_err:
+ while (i) {
+ --i;
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_irqs() - free all registered interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free all Tx/Rx queue interrupts of the Octeon device.
+ */
+static void octep_vf_free_irqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_irqs; i++) {
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
+ }
+ netdev_info(oct->netdev, "IRQs freed\n");
+}
+
+/**
+ * octep_vf_setup_irqs() - setup interrupts for the Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate data structures to hold per interrupt information, allocate/enable
+ * MSI-x interrupt and register interrupt handlers.
+ *
+ * Return: 0, on successful allocation and registration of all interrupts.
+ * -1, on any error.
+ */
+static int octep_vf_setup_irqs(struct octep_vf_device *oct)
+{
+ if (octep_vf_alloc_ioq_vectors(oct))
+ goto ioq_vector_err;
+
+ if (octep_vf_enable_msix_range(oct))
+ goto enable_msix_err;
+
+ if (octep_vf_request_irqs(oct))
+ goto request_irq_err;
+
+ return 0;
+
+request_irq_err:
+ octep_vf_disable_msix(oct);
+enable_msix_err:
+ octep_vf_free_ioq_vectors(oct);
+ioq_vector_err:
+ return -1;
+}
+
+/**
+ * octep_vf_clean_irqs() - free all interrupts and its resources.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_clean_irqs(struct octep_vf_device *oct)
+{
+ octep_vf_free_irqs(oct);
+ octep_vf_disable_msix(oct);
+ octep_vf_free_ioq_vectors(oct);
+}
+
+/**
+ * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
+{
+ u32 pkts_pend = oq->pkts_pending;
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+ if (iq->pkts_processed) {
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ }
+ if (oq->last_pkt_count - pkts_pend) {
+ writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+ oq->last_pkt_count = pkts_pend;
+ }
+
+ /* Flush the previous writes before writing to the RESEND bit */
+ smp_wmb();
+ writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+}
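+
+/* The RESEND writes above ask the hardware to re-evaluate the queue
+ * counters and re-raise the MSI-x interrupt if new work arrived while
+ * interrupts were being re-enabled, closing the race with
+ * napi_complete_done(); this reading is inferred from how the
+ * OCTEP_VF_*_INTR_RESEND_BIT writes are used here.
+ */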
+
+/**
+ * octep_vf_napi_poll() - NAPI poll function for Tx/Rx.
+ *
+ * @napi: pointer to napi context.
+ * @budget: max number of packets to be processed in single invocation.
+ */
+static int octep_vf_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octep_vf_ioq_vector *ioq_vector =
+ container_of(napi, struct octep_vf_ioq_vector, napi);
+ u32 tx_pending, rx_done;
+
+ tx_pending = octep_vf_iq_process_completions(ioq_vector->iq, 64);
+ rx_done = octep_vf_oq_process_rx(ioq_vector->oq, budget);
+
+ /* More polling is needed if Tx completion processing is still
+ * pending or if at least 'budget' Rx packets were processed.
+ */
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+ if (likely(napi_complete_done(napi, rx_done)))
+ octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+
+ return rx_done;
+}
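+
+/* NAPI contract illustrated by octep_vf_napi_poll() above: returning
+ * 'budget' (e.g. rx_done = 64 with budget = 64) keeps the instance
+ * scheduled for another poll round, while napi_complete_done() followed
+ * by octep_vf_enable_ioq_irq() hands the queue pair back to interrupt
+ * mode.
+ */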
+
+/**
+ * octep_vf_napi_add() - Add NAPI poll for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_add(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
+ netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, octep_vf_napi_poll);
+ oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
+ }
+}
+
+/**
+ * octep_vf_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_delete(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
+ netif_napi_del(&oct->ioq_vector[i]->napi);
+ oct->oq[i]->napi = NULL;
+ }
+}
+
+/**
+ * octep_vf_napi_enable() - enable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_enable(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
+ napi_enable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+/**
+ * octep_vf_napi_disable() - disable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_vf_napi_disable(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
+ napi_disable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+static void octep_vf_link_up(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+}
+
+static void octep_vf_set_rx_state(struct octep_vf_device *oct, bool up)
+{
+ int err;
+
+ err = octep_vf_mbox_set_rx_state(oct, up);
+ if (err)
+ netdev_err(oct->netdev, "Set Rx state to %d failed with err:%d\n", up, err);
+}
+
+static int octep_vf_get_link_status(struct octep_vf_device *oct)
+{
+ int err;
+
+ err = octep_vf_mbox_get_link_status(oct, &oct->link_info.oper_up);
+ if (err)
+ netdev_err(oct->netdev, "Get link status failed with err:%d\n", err);
+ return oct->link_info.oper_up;
+}
+
+static void octep_vf_set_link_status(struct octep_vf_device *oct, bool up)
+{
+ int err;
+
+ err = octep_vf_mbox_set_link_status(oct, up);
+ if (err) {
+ netdev_err(oct->netdev, "Set link status to %d failed with err:%d\n", up, err);
+ return;
+ }
+ oct->link_info.oper_up = up;
+}
+
+/**
+ * octep_vf_open() - start the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Set up Tx/Rx queues and interrupts, and enable hardware operation of the
+ * Tx/Rx queues and interrupts.
+ *
+ * Return: 0, on successfully setting up the device and bringing it up.
+ * -1, on any error.
+ */
+static int octep_vf_open(struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ int err, ret;
+
+ netdev_info(netdev, "Starting netdev ...\n");
+ netif_carrier_off(netdev);
+
+ oct->hw_ops.reset_io_queues(oct);
+
+ if (octep_vf_setup_iqs(oct))
+ goto setup_iq_err;
+ if (octep_vf_setup_oqs(oct))
+ goto setup_oq_err;
+ if (octep_vf_setup_irqs(oct))
+ goto setup_irq_err;
+
+ err = netif_set_real_num_tx_queues(netdev, oct->num_iqs);
+ if (err)
+ goto set_queues_err;
+ err = netif_set_real_num_rx_queues(netdev, oct->num_oqs);
+ if (err)
+ goto set_queues_err;
+
+ octep_vf_napi_add(oct);
+ octep_vf_napi_enable(oct);
+
+ oct->link_info.admin_up = 1;
+ octep_vf_set_rx_state(oct, true);
+
+ ret = octep_vf_get_link_status(oct);
+ if (!ret)
+ octep_vf_set_link_status(oct, true);
+
+ /* Enable the input and output queues for this Octeon device */
+ oct->hw_ops.enable_io_queues(oct);
+
+ /* Enable Octeon device interrupts */
+ oct->hw_ops.enable_interrupts(oct);
+
+ octep_vf_oq_dbell_init(oct);
+
+ ret = octep_vf_get_link_status(oct);
+ if (ret)
+ octep_vf_link_up(netdev);
+
+ return 0;
+
+set_queues_err:
+ octep_vf_napi_disable(oct);
+ octep_vf_napi_delete(oct);
+ octep_vf_clean_irqs(oct);
+setup_irq_err:
+ octep_vf_free_oqs(oct);
+setup_oq_err:
+ octep_vf_free_iqs(oct);
+setup_iq_err:
+ return -1;
+}
+
+/**
+ * octep_vf_stop() - stop the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * stop the device Tx/Rx operations, bring down the link and
+ * free up all resources allocated for Tx/Rx queues and interrupts.
+ */
+static int octep_vf_stop(struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ netdev_info(netdev, "Stopping the device ...\n");
+
+ /* Stop Tx from stack */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ octep_vf_set_link_status(oct, false);
+ octep_vf_set_rx_state(oct, false);
+
+ oct->link_info.admin_up = 0;
+ oct->link_info.oper_up = 0;
+
+ oct->hw_ops.disable_interrupts(oct);
+ octep_vf_napi_disable(oct);
+ octep_vf_napi_delete(oct);
+
+ octep_vf_clean_irqs(oct);
+ octep_vf_clean_iqs(oct);
+
+ oct->hw_ops.disable_io_queues(oct);
+ oct->hw_ops.reset_io_queues(oct);
+ octep_vf_free_oqs(oct);
+ octep_vf_free_iqs(oct);
+ netdev_info(netdev, "Device stopped !!\n");
+ return 0;
+}
+
+/**
+ * octep_vf_iq_full_check() - check if a Tx queue is full.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Return: 0, if the Tx queue is not full.
+ * 1, if the Tx queue is full.
+ */
+static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
+{
+ int ret;
+
+ ret = netif_subqueue_maybe_stop(iq->netdev, iq->q_no, IQ_INSTR_SPACE(iq),
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD,
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD);
+ switch (ret) {
+ case 0: /* Stopped the queue, since IQ is full */
+ return 1;
+ case -1:
+ /* Pending write-index updates from Tx completion
+ * processing on other CPUs re-enabled the queue
+ * after it was stopped.
+ */
+ iq->stats.restart_cnt++;
+ fallthrough;
+ case 1: /* Queue left enabled, since IQ is not yet full */
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * octep_vf_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
+ *
+ * @skb: packet skbuff pointer.
+ * @netdev: kernel network device.
+ *
+ * Return: NETDEV_TX_BUSY, if Tx Queue is full.
+ * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
+ */
+static netdev_tx_t octep_vf_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ netdev_features_t feat = netdev->features;
+ struct octep_vf_tx_sglist_desc *sglist;
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct octep_vf_tx_desc_hw *hw_desc;
+ struct skb_shared_info *shinfo;
+ struct octep_vf_instr_hdr *ih;
+ struct octep_vf_iq *iq;
+ skb_frag_t *frag;
+ u16 nr_frags, si;
+ int xmit_more;
+ u16 q_no, wi;
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
+ q_no = skb_get_queue_mapping(skb);
+ if (q_no >= oct->num_iqs) {
+ netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
+ q_no = q_no % oct->num_iqs;
+ }
+
+ iq = oct->iq[q_no];
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+
+ wi = iq->host_write_index;
+ hw_desc = &iq->desc_ring[wi];
+ hw_desc->ih64 = 0;
+
+ tx_buffer = iq->buff_info + wi;
+ tx_buffer->skb = skb;
+
+ ih = &hw_desc->ih;
+ ih->pkind = oct->fw_info.pkind;
+ ih->fsz = oct->fw_info.fsz;
+ ih->tlen = skb->len + ih->fsz;
+
+ if (!nr_frags) {
+ tx_buffer->gather = 0;
+ tx_buffer->dma = dma_map_single(iq->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, tx_buffer->dma))
+ goto dma_map_err;
+ hw_desc->dptr = tx_buffer->dma;
+ } else {
+ /* Scatter/Gather */
+ dma_addr_t dma;
+ u16 len;
+
+ sglist = tx_buffer->sglist;
+
+ ih->gsz = nr_frags + 1;
+ ih->gather = 1;
+ tx_buffer->gather = 1;
+
+ len = skb_headlen(skb);
+ dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_err;
+
+ memset(sglist, 0, OCTEP_VF_SGLIST_SIZE_PER_PKT);
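+ /* Each sglist entry appears to describe up to four buffers: four
+ * 16-bit lengths packed into one 64-bit word plus four DMA
+ * pointers. The len[3 - (si & 3)] indexing below stores lengths
+ * most-significant-first within that word; this layout is inferred
+ * from the code, not from a hardware spec.
+ */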
+ sglist[0].len[3] = len;
+ sglist[0].dma_ptr[0] = dma;
+
+ si = 1; /* entry 0 is main skb, mapped above */
+ frag = &shinfo->frags[0];
+ while (nr_frags--) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(iq->dev, frag, 0,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_sg_err;
+
+ sglist[si >> 2].len[3 - (si & 3)] = len;
+ sglist[si >> 2].dma_ptr[si & 3] = dma;
+
+ frag++;
+ si++;
+ }
+ hw_desc->dptr = tx_buffer->sglist_dma;
+ }
+ if (oct->fw_info.tx_ol_flags) {
+ if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
+ hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
+ hw_desc->txm.ol_flags |= OCTEP_VF_TX_OFFLOAD_TSO;
+ hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
+ hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
+ } else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
+ }
+ /* Due to ESR, txm will be byte-swapped by hardware */
+ hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
+ }
+
+ xmit_more = netdev_xmit_more();
+
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+
+ skb_tx_timestamp(skb);
+ iq->fill_cnt++;
+ wi++;
+ iq->host_write_index = wi & iq->ring_size_mask;
+
+ /* octep_vf_iq_full_check() stops the queue if it became full after
+ * adding this packet. Defer ringing the doorbell only when the
+ * queue is still enabled, the stack has more packets to send
+ * (xmit_more) and fewer than fill_threshold descriptors have
+ * accumulated.
+ */
+ if (!octep_vf_iq_full_check(iq) && xmit_more &&
+ iq->fill_cnt < iq->fill_threshold)
+ return NETDEV_TX_OK;
+
+ goto ring_dbell;
+
+dma_map_sg_err:
+ /* Unmap the gather entries mapped so far. Entry si itself failed
+ * to map, and lengths were stored with 3 - (si & 3) indexing, so
+ * decrement first and use the same index on unmap.
+ */
+ while (si > 1) {
+ si--;
+ dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+ sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+ sglist[si >> 2].len[3 - (si & 3)] = 0;
+ }
+ /* Unmap the linear part of the skb stored at entry 0 */
+ dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+ sglist[0].len[3], DMA_TO_DEVICE);
+ sglist[0].len[3] = 0;
+ tx_buffer->gather = 0;
+dma_map_err:
+ dev_kfree_skb_any(skb);
+ring_dbell:
+ /* Flush the hw descriptors before writing to doorbell */
+ smp_wmb();
+ writel(iq->fill_cnt, iq->doorbell_reg);
+ iq->stats.instr_posted += iq->fill_cnt;
+ iq->fill_cnt = 0;
+ return NETDEV_TX_OK;
+}
+
+int octep_vf_get_if_stats(struct octep_vf_device *oct)
+{
+ struct octep_vf_iface_rxtx_stats vf_stats;
+ int ret, size;
+
+ memset(&vf_stats, 0, sizeof(struct octep_vf_iface_rxtx_stats));
+ ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ (u8 *)&vf_stats, &size);
+
+ if (ret)
+ return ret;
+
+ memcpy(&oct->iface_rx_stats, &vf_stats.iface_rx_stats,
+ sizeof(struct octep_vf_iface_rx_stats));
+ memcpy(&oct->iface_tx_stats, &vf_stats.iface_tx_stats,
+ sizeof(struct octep_vf_iface_tx_stats));
+
+ return 0;
+}
+
+int octep_vf_get_link_info(struct octep_vf_device *oct)
+{
+ int ret, size;
+
+ ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ (u8 *)&oct->link_info, &size);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get VF link info failed via VF Mbox\n");
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * octep_vf_get_stats64() - Get Octeon network device statistics.
+ *
+ * @netdev: kernel network device.
+ * @stats: pointer to stats structure to be filled in.
+ */
+static void octep_vf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ int q;
+
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_packets = 0;
+ rx_bytes = 0;
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_vf_iq *iq = oct->iq[q];
+ struct octep_vf_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ }
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->rx_bytes = rx_bytes;
+ if (!octep_vf_get_if_stats(oct)) {
+ stats->multicast = oct->iface_rx_stats.mcast_pkts;
+ stats->rx_errors = oct->iface_rx_stats.err_pkts;
+ stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full +
+ oct->iface_rx_stats.err_pkts;
+ stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full;
+ stats->tx_dropped = oct->iface_tx_stats.dropped;
+ }
+}
+
+/**
+ * octep_vf_tx_timeout_task - work queue task to handle Tx queue timeout.
+ *
+ * @work: pointer to Tx queue timeout work_struct
+ *
+ * Stop and start the device so that it frees up all queue resources
+ * and restarts the queues, that potentially clears a Tx queue timeout
+ * condition.
+ **/
+static void octep_vf_tx_timeout_task(struct work_struct *work)
+{
+ struct octep_vf_device *oct = container_of(work, struct octep_vf_device,
+ tx_timeout_task);
+ struct net_device *netdev = oct->netdev;
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ octep_vf_stop(netdev);
+ octep_vf_open(netdev);
+ }
+ rtnl_unlock();
+ netdev_put(netdev, NULL);
+}
+
+/**
+ * octep_vf_tx_timeout() - Handle Tx Queue timeout.
+ *
+ * @netdev: pointer to kernel network device.
+ * @txqueue: Timed out Tx queue number.
+ *
+ * Schedule a work to handle Tx queue timeout.
+ */
+static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+
+ netdev_hold(netdev, NULL, GFP_ATOMIC);
+ schedule_work(&oct->tx_timeout_task);
+}
+
+static int octep_vf_set_mac(struct net_device *netdev, void *p)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = octep_vf_mbox_set_mac_addr(oct, addr->sa_data);
+ if (err)
+ return err;
+
+ memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+
+static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ struct octep_vf_iface_link_info *link_info;
+ int err;
+
+ link_info = &oct->link_info;
+ if (link_info->mtu == new_mtu)
+ return 0;
+
+ err = octep_vf_mbox_set_mtu(oct, new_mtu);
+ if (!err) {
+ oct->link_info.mtu = new_mtu;
+ netdev->mtu = new_mtu;
+ }
+ return err;
+}
+
+static int octep_vf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct octep_vf_device *oct = netdev_priv(netdev);
+ u16 rx_offloads = 0, tx_offloads = 0;
+ int err;
+
+ /* We only support features received from firmware */
+ if ((features & netdev->hw_features) != features)
+ return -EINVAL;
+
+ if (features & NETIF_F_TSO)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_TSO6)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
+
+ if (features & NETIF_F_IP_CSUM)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_IPV6_CSUM)
+ tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
+
+ if (features & NETIF_F_RXCSUM)
+ rx_offloads |= OCTEP_VF_RX_OFFLOAD_CKSUM;
+
+ err = octep_vf_mbox_set_offloads(oct, tx_offloads, rx_offloads);
+ if (!err)
+ netdev->features = features;
+
+ return err;
+}
+
+static const struct net_device_ops octep_vf_netdev_ops = {
+ .ndo_open = octep_vf_open,
+ .ndo_stop = octep_vf_stop,
+ .ndo_start_xmit = octep_vf_start_xmit,
+ .ndo_get_stats64 = octep_vf_get_stats64,
+ .ndo_tx_timeout = octep_vf_tx_timeout,
+ .ndo_set_mac_address = octep_vf_set_mac,
+ .ndo_change_mtu = octep_vf_change_mtu,
+ .ndo_set_features = octep_vf_set_features,
+};
+
+static const char *octep_vf_devid_to_str(struct octep_vf_device *oct)
+{
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_VF:
+ return "CN93XX";
+ case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
+ return "CNF95N";
+ case OCTEP_PCI_DEVICE_ID_CN98_VF:
+ return "CN98XX";
+ case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
+ return "CN10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
+ return "CNF10KA";
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
+ return "CNF10KB";
+ case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
+ return "CN10KB";
+ default:
+ return "Unsupported";
+ }
+}
+
+/**
+ * octep_vf_device_setup() - Setup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Setup Octeon device hardware operations, configuration, etc ...
+ */
+int octep_vf_device_setup(struct octep_vf_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+
+ /* allocate memory for oct->conf */
+ oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
+ if (!oct->conf)
+ return -ENOMEM;
+
+ /* Map BAR region 0 */
+ oct->mmio.hw_addr = ioremap(pci_resource_start(oct->pdev, 0),
+ pci_resource_len(oct->pdev, 0));
+ if (!oct->mmio.hw_addr) {
+ dev_err(&pdev->dev,
+ "Failed to remap BAR0; start=0x%llx len=0x%llx\n",
+ pci_resource_start(oct->pdev, 0),
+ pci_resource_len(oct->pdev, 0));
+ goto ioremap_err;
+ }
+ oct->mmio.mapped = 1;
+
+ oct->chip_id = pdev->device;
+ oct->rev_id = pdev->revision;
+ dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
+
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_VF:
+ case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
+ case OCTEP_PCI_DEVICE_ID_CN98_VF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
+ octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
+ OCTEP_VF_MINOR_REV(oct));
+ octep_vf_device_setup_cn93(oct);
+ break;
+ case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
+ case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
+ case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
+ case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
+ octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
+ OCTEP_VF_MINOR_REV(oct));
+ octep_vf_device_setup_cnxk(oct);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported device\n");
+ goto unsupported_dev;
+ }
+
+ return 0;
+
+unsupported_dev:
+ iounmap(oct->mmio.hw_addr);
+ioremap_err:
+ kfree(oct->conf);
+ return -EOPNOTSUPP;
+}
+
+/**
+ * octep_vf_device_cleanup() - Cleanup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Cleanup Octeon device allocated resources.
+ */
+static void octep_vf_device_cleanup(struct octep_vf_device *oct)
+{
+ dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+ if (oct->mmio.mapped)
+ iounmap(oct->mmio.hw_addr);
+
+ kfree(oct->conf);
+ oct->conf = NULL;
+}
+
+static int octep_vf_get_mac_addr(struct octep_vf_device *oct, u8 *addr)
+{
+ return octep_vf_mbox_get_mac_addr(oct, addr);
+}
+
+/**
+ * octep_vf_probe() - Octeon PCI device probe handler.
+ *
+ * @pdev: PCI device structure.
+ * @ent: entry in Octeon PCI device ID table.
+ *
+ * Initializes and enables the Octeon PCI device for network operations.
+ * Initializes Octeon private data structure and registers a network device.
+ */
+static int octep_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
+ goto disable_pci_device;
+ }
+
+ err = pci_request_mem_regions(pdev, OCTEP_VF_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
+ goto disable_pci_device;
+ }
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev_mq(sizeof(struct octep_vf_device),
+ OCTEP_VF_MAX_QUEUES);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto mem_regions_release;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ octep_vf_dev = netdev_priv(netdev);
+ octep_vf_dev->netdev = netdev;
+ octep_vf_dev->pdev = pdev;
+ octep_vf_dev->dev = &pdev->dev;
+ pci_set_drvdata(pdev, octep_vf_dev);
+
+ err = octep_vf_device_setup(octep_vf_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Device setup failed\n");
+ goto netdevice_free;
+ }
+ INIT_WORK(&octep_vf_dev->tx_timeout_task, octep_vf_tx_timeout_task);
+
+ netdev->netdev_ops = &octep_vf_netdev_ops;
+ octep_vf_set_ethtool_ops(netdev);
+ netif_carrier_off(netdev);
+
+ if (octep_vf_setup_mbox(octep_vf_dev)) {
+ dev_err(&pdev->dev, "VF Mailbox setup failed\n");
+ err = -ENOMEM;
+ goto device_cleanup;
+ }
+
+ if (octep_vf_mbox_version_check(octep_vf_dev)) {
+ dev_err(&pdev->dev, "PF VF Mailbox version mismatch\n");
+ err = -EINVAL;
+ goto delete_mbox;
+ }
+
+ if (octep_vf_mbox_get_fw_info(octep_vf_dev)) {
+ dev_err(&pdev->dev, "unable to get fw info\n");
+ err = -EINVAL;
+ goto delete_mbox;
+ }
+
+ netdev->hw_features = NETIF_F_SG;
+ if (OCTEP_VF_TX_IP_CSUM(octep_vf_dev->fw_info.tx_ol_flags))
+ netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ if (OCTEP_VF_RX_IP_CSUM(octep_vf_dev->fw_info.rx_ol_flags))
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ netdev->min_mtu = OCTEP_VF_MIN_MTU;
+ netdev->max_mtu = OCTEP_VF_MAX_MTU;
+ netdev->mtu = OCTEP_VF_DEFAULT_MTU;
+
+ if (OCTEP_VF_TX_TSO(octep_vf_dev->fw_info.tx_ol_flags)) {
+ netdev->hw_features |= NETIF_F_TSO;
+ netif_set_tso_max_size(netdev, netdev->max_mtu);
+ }
+
+ netdev->features |= netdev->hw_features;
+ octep_vf_get_mac_addr(octep_vf_dev, octep_vf_dev->mac_addr);
+ eth_hw_addr_set(netdev, octep_vf_dev->mac_addr);
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto delete_mbox;
+ }
+ dev_info(&pdev->dev, "Device probe successful\n");
+ return 0;
+
+delete_mbox:
+ octep_vf_delete_mbox(octep_vf_dev);
+device_cleanup:
+ octep_vf_device_cleanup(octep_vf_dev);
+netdevice_free:
+ free_netdev(netdev);
+mem_regions_release:
+ pci_release_mem_regions(pdev);
+disable_pci_device:
+ pci_disable_device(pdev);
+ dev_err(&pdev->dev, "Device probe failed\n");
+ return err;
+}
+
+/**
+ * octep_vf_remove() - Remove Octeon PCI device from driver control.
+ *
+ * @pdev: PCI device structure of the Octeon device.
+ *
+ * Cleanup all resources allocated for the Octeon device.
+ * Unregister from network device and disable the PCI device.
+ */
+static void octep_vf_remove(struct pci_dev *pdev)
+{
+ struct octep_vf_device *oct = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ if (!oct)
+ return;
+
+ octep_vf_mbox_dev_remove(oct);
+ cancel_work_sync(&oct->tx_timeout_task);
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+ octep_vf_delete_mbox(oct);
+ octep_vf_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver octep_vf_driver = {
+ .name = OCTEP_VF_DRV_NAME,
+ .id_table = octep_vf_pci_id_tbl,
+ .probe = octep_vf_probe,
+ .remove = octep_vf_remove,
+};
+
+/**
+ * octep_vf_init_module() - Module initialization.
+ *
+ * Create common resources for the driver and register the PCI driver.
+ */
+static int __init octep_vf_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: Loading %s ...\n", OCTEP_VF_DRV_NAME, OCTEP_VF_DRV_STRING);
+
+ ret = pci_register_driver(&octep_vf_driver);
+ if (ret < 0) {
+ pr_err("%s: Failed to register PCI driver; err=%d\n",
+ OCTEP_VF_DRV_NAME, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * octep_vf_exit_module() - Module exit routine.
+ *
+ * Unregister the driver from the PCI subsystem and clean up common resources.
+ */
+static void __exit octep_vf_exit_module(void)
+{
+ pr_info("%s: Unloading ...\n", OCTEP_VF_DRV_NAME);
+
+ pci_unregister_driver(&octep_vf_driver);
+
+ pr_info("%s: Unloading complete\n", OCTEP_VF_DRV_NAME);
+}
+
+module_init(octep_vf_init_module);
+module_exit(octep_vf_exit_module);
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
new file mode 100644
index 000000000000..5769f62545cd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_MAIN_H_
+#define _OCTEP_VF_MAIN_H_
+
+#include "octep_vf_tx.h"
+#include "octep_vf_rx.h"
+#include "octep_vf_mbox.h"
+
+#define OCTEP_VF_DRV_NAME "octeon_ep_vf"
+#define OCTEP_VF_DRV_STRING "Marvell Octeon EndPoint NIC VF Driver"
+
+#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203 //93xx VF
+#define OCTEP_PCI_DEVICE_ID_CNF95N_VF 0xB403 //95N VF
+#define OCTEP_PCI_DEVICE_ID_CN98_VF 0xB103
+#define OCTEP_PCI_DEVICE_ID_CN10KA_VF 0xB903
+#define OCTEP_PCI_DEVICE_ID_CNF10KA_VF 0xBA03
+#define OCTEP_PCI_DEVICE_ID_CNF10KB_VF 0xBC03
+#define OCTEP_PCI_DEVICE_ID_CN10KB_VF 0xBD03
+
+#define OCTEP_VF_MAX_QUEUES 63
+#define OCTEP_VF_MAX_IQ OCTEP_VF_MAX_QUEUES
+#define OCTEP_VF_MAX_OQ OCTEP_VF_MAX_QUEUES
+
+#define OCTEP_VF_MAX_MSIX_VECTORS OCTEP_VF_MAX_OQ
+
+#define OCTEP_VF_IQ_INTR_RESEND_BIT 59
+#define OCTEP_VF_OQ_INTR_RESEND_BIT 59
+
+#define IQ_INSTR_PENDING(iq) ({ typeof(iq) iq__ = (iq); \
+ ((iq__)->host_write_index - (iq__)->flush_index) & \
+ (iq__)->ring_size_mask; \
+ })
+#define IQ_INSTR_SPACE(iq) ({ typeof(iq) iq_ = (iq); \
+ (iq_)->max_count - IQ_INSTR_PENDING(iq_); \
+ })
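+
+/* Worked example, assuming ring_size_mask == max_count - 1: with
+ * max_count = 1024, host_write_index = 10 and flush_index = 1020,
+ * IQ_INSTR_PENDING = (10 - 1020) & 1023 = 14 and
+ * IQ_INSTR_SPACE = 1024 - 14 = 1010.
+ */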
+
+/* PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to a different range of kernel virtual
+ * addresses.
+ */
+struct octep_vf_mmio {
+ /* Kernel virtual address at which the PCI address space is mapped. */
+ u8 __iomem *hw_addr;
+
+ /* Flag indicating the mapping was successful. */
+ int mapped;
+};
+
+struct octep_vf_hw_ops {
+ void (*setup_iq_regs)(struct octep_vf_device *oct, int q);
+ void (*setup_oq_regs)(struct octep_vf_device *oct, int q);
+ void (*setup_mbox_regs)(struct octep_vf_device *oct, int mbox);
+
+ irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
+ void (*reinit_regs)(struct octep_vf_device *oct);
+ u32 (*update_iq_read_idx)(struct octep_vf_iq *iq);
+
+ void (*enable_interrupts)(struct octep_vf_device *oct);
+ void (*disable_interrupts)(struct octep_vf_device *oct);
+
+ void (*enable_io_queues)(struct octep_vf_device *oct);
+ void (*disable_io_queues)(struct octep_vf_device *oct);
+ void (*enable_iq)(struct octep_vf_device *oct, int q);
+ void (*disable_iq)(struct octep_vf_device *oct, int q);
+ void (*enable_oq)(struct octep_vf_device *oct, int q);
+ void (*disable_oq)(struct octep_vf_device *oct, int q);
+ void (*reset_io_queues)(struct octep_vf_device *oct);
+ void (*dump_registers)(struct octep_vf_device *oct);
+};
+
+/* Octeon mailbox data */
+struct octep_vf_mbox_data {
+ /* Holds the offset of received data via mailbox. */
+ u32 data_index;
+
+ /* Holds the received data via mailbox. */
+ u8 recv_data[OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE];
+};
+
+/* wrappers around work structs */
+struct octep_vf_mbox_wk {
+ struct work_struct work;
+ void *ctxptr;
+};
+
+/* Octeon device mailbox */
+struct octep_vf_mbox {
+ /* A mutex to protect access to this q_mbox. */
+ struct mutex lock;
+
+ u32 state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ u8 __iomem *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ u8 __iomem *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ u8 __iomem *mbox_read_reg;
+
+ /* Octeon mailbox data */
+ struct octep_vf_mbox_data mbox_data;
+
+ /* Octeon mailbox work handler to process Mbox messages */
+ struct octep_vf_mbox_wk wk;
+};
+
+/* Tx/Rx queue vector per interrupt. */
+struct octep_vf_ioq_vector {
+ char name[OCTEP_VF_MSIX_NAME_SIZE];
+ struct napi_struct napi;
+ struct octep_vf_device *octep_vf_dev;
+ struct octep_vf_iq *iq;
+ struct octep_vf_oq *oq;
+ cpumask_t affinity_mask;
+};
+
+/* Octeon hardware/firmware offload capability flags. */
+#define OCTEP_VF_CAP_TX_CHECKSUM BIT(0)
+#define OCTEP_VF_CAP_RX_CHECKSUM BIT(1)
+#define OCTEP_VF_CAP_TSO BIT(2)
+
+/* Link modes */
+enum octep_vf_link_mode_bit_indices {
+ OCTEP_VF_LINK_MODE_10GBASE_T = 0,
+ OCTEP_VF_LINK_MODE_10GBASE_R,
+ OCTEP_VF_LINK_MODE_10GBASE_CR,
+ OCTEP_VF_LINK_MODE_10GBASE_KR,
+ OCTEP_VF_LINK_MODE_10GBASE_LR,
+ OCTEP_VF_LINK_MODE_10GBASE_SR,
+ OCTEP_VF_LINK_MODE_25GBASE_CR,
+ OCTEP_VF_LINK_MODE_25GBASE_KR,
+ OCTEP_VF_LINK_MODE_25GBASE_SR,
+ OCTEP_VF_LINK_MODE_40GBASE_CR4,
+ OCTEP_VF_LINK_MODE_40GBASE_KR4,
+ OCTEP_VF_LINK_MODE_40GBASE_LR4,
+ OCTEP_VF_LINK_MODE_40GBASE_SR4,
+ OCTEP_VF_LINK_MODE_50GBASE_CR2,
+ OCTEP_VF_LINK_MODE_50GBASE_KR2,
+ OCTEP_VF_LINK_MODE_50GBASE_SR2,
+ OCTEP_VF_LINK_MODE_50GBASE_CR,
+ OCTEP_VF_LINK_MODE_50GBASE_KR,
+ OCTEP_VF_LINK_MODE_50GBASE_LR,
+ OCTEP_VF_LINK_MODE_50GBASE_SR,
+ OCTEP_VF_LINK_MODE_100GBASE_CR4,
+ OCTEP_VF_LINK_MODE_100GBASE_KR4,
+ OCTEP_VF_LINK_MODE_100GBASE_LR4,
+ OCTEP_VF_LINK_MODE_100GBASE_SR4,
+ OCTEP_VF_LINK_MODE_NBITS
+};
+
+/* Hardware interface link state information. */
+struct octep_vf_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ u64 supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ u64 advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ u32 speed;
+
+ /* MTU */
+ u16 mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ u8 autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ u8 pause;
+
+ /* Admin state of the link (ifconfig <iface> up/down). */
+ u8 admin_up;
+
+ /* Operational state of the link: whether the physical link is up or down. */
+ u8 oper_up;
+};
+
+/* Hardware interface stats information. */
+struct octep_vf_iface_rxtx_stats {
+ /* Hardware Interface Rx statistics */
+ struct octep_vf_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Tx statistics */
+ struct octep_vf_iface_tx_stats iface_tx_stats;
+};
+
+struct octep_vf_fw_info {
+ /* pkind value to be used in every Tx hardware descriptor */
+ u8 pkind;
+ /* front size data */
+ u8 fsz;
+ /* supported rx offloads OCTEP_VF_RX_OFFLOAD_* */
+ u16 rx_ol_flags;
+ /* supported tx offloads OCTEP_VF_TX_OFFLOAD_* */
+ u16 tx_ol_flags;
+};
+
+/* The Octeon device specific private data structure.
+ * Each Octeon device has this structure to represent all its components.
+ */
+struct octep_vf_device {
+ struct octep_vf_config *conf;
+
+ /* Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /* Device capabilities enabled */
+ u64 caps_enabled;
+ /* Device capabilities supported */
+ u64 caps_supported;
+
+ /* Pointer to basic Linux device */
+ struct device *dev;
+ /* Linux PCI device pointer */
+ struct pci_dev *pdev;
+ /* Netdev corresponding to the Octeon device */
+ struct net_device *netdev;
+
+ /* memory mapped io range */
+ struct octep_vf_mmio mmio;
+
+ /* MAC address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* Tx queues (IQ: Instruction Queue) */
+ u16 num_iqs;
+ /* Pointers to Octeon Tx queues */
+ struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
+
+ /* Rx queues (OQ: Output Queue) */
+ u16 num_oqs;
+ /* Pointers to Octeon Rx queues */
+ struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
+
+ /* Hardware port number of the PCIe interface */
+ u16 pcie_port;
+
+ /* Hardware operations */
+ struct octep_vf_hw_ops hw_ops;
+
+ /* IRQ info */
+ u16 num_irqs;
+ u16 num_non_ioq_irqs;
+ char *non_ioq_irq_names;
+ struct msix_entry *msix_entries;
+ /* IOq information of its corresponding MSI-X interrupt. */
+ struct octep_vf_ioq_vector *ioq_vector[OCTEP_VF_MAX_QUEUES];
+
+ /* Hardware Interface Tx statistics */
+ struct octep_vf_iface_tx_stats iface_tx_stats;
+ /* Hardware Interface Rx statistics */
+ struct octep_vf_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Link info like supported modes, aneg support */
+ struct octep_vf_iface_link_info link_info;
+
+ /* Mailbox to talk to VFs */
+ struct octep_vf_mbox *mbox;
+
+ /* Work entry to handle Tx timeout */
+ struct work_struct tx_timeout_task;
+
+ /* offset for iface stats */
+ u32 ctrl_mbox_ifstats_offset;
+
+ /* Negotiated Mbox version */
+ u32 mbox_neg_ver;
+
+ /* firmware info */
+ struct octep_vf_fw_info fw_info;
+};
+
+static inline u16 OCTEP_VF_MAJOR_REV(struct octep_vf_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct)
+{
+ return (oct->rev_id & 0x3);
+}
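+
+/* Example (illustration only): rev_id = 0x6 decodes to major rev
+ * (0x6 & 0xC) >> 2 = 1 and minor rev 0x6 & 0x3 = 2; a major field
+ * of 0 is reported as 1.
+ */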
+
+/* Octeon CSR read/write access APIs */
+#define octep_vf_write_csr(octep_vf_dev, reg_off, value) \
+ writel(value, (octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_write_csr64(octep_vf_dev, reg_off, val64) \
+ writeq(val64, (octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_read_csr(octep_vf_dev, reg_off) \
+ readl((octep_vf_dev)->mmio.hw_addr + (reg_off))
+
+#define octep_vf_read_csr64(octep_vf_dev, reg_off) \
+ readq((octep_vf_dev)->mmio.hw_addr + (reg_off))
+
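+/* Usage sketch (illustration only): read a 64-bit CSR relative to the
+ * mapped BAR and write the value back to clear a hardware counter,
+ * e.g. with CNXK_VF_SDP_R_OUT_CNTS() from the chip regs header:
+ *
+ *	u64 cnts = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CNTS(0));
+ *
+ *	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CNTS(0), cnts);
+ */
+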
+extern struct workqueue_struct *octep_vf_wq;
+
+int octep_vf_device_setup(struct octep_vf_device *oct);
+int octep_vf_setup_iqs(struct octep_vf_device *oct);
+void octep_vf_free_iqs(struct octep_vf_device *oct);
+void octep_vf_clean_iqs(struct octep_vf_device *oct);
+int octep_vf_setup_oqs(struct octep_vf_device *oct);
+void octep_vf_free_oqs(struct octep_vf_device *oct);
+void octep_vf_oq_dbell_init(struct octep_vf_device *oct);
+void octep_vf_device_setup_cn93(struct octep_vf_device *oct);
+void octep_vf_device_setup_cnxk(struct octep_vf_device *oct);
+int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget);
+int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget);
+void octep_vf_set_ethtool_ops(struct net_device *netdev);
+int octep_vf_get_link_info(struct octep_vf_device *oct);
+int octep_vf_get_if_stats(struct octep_vf_device *oct);
+void octep_vf_mbox_work(struct work_struct *work);
+#endif /* _OCTEP_VF_MAIN_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
new file mode 100644
index 000000000000..2eab21e43048
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+/* When a new command is implemented, the table below should be updated
+ * with the new command and its version info.
+ */
+static u32 pfvf_cmd_versions[OCTEP_PFVF_MBOX_CMD_MAX] = {
+ [0 ... OCTEP_PFVF_MBOX_CMD_DEV_REMOVE] = OCTEP_PFVF_MBOX_VERSION_V1,
+ [OCTEP_PFVF_MBOX_CMD_GET_FW_INFO ... OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS] =
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
+
+int octep_vf_setup_mbox(struct octep_vf_device *oct)
+{
+ int ring = 0;
+
+ oct->mbox = vzalloc(sizeof(*oct->mbox));
+ if (!oct->mbox)
+ return -ENOMEM;
+
+ mutex_init(&oct->mbox->lock);
+
+ oct->hw_ops.setup_mbox_regs(oct, ring);
+ INIT_WORK(&oct->mbox->wk.work, octep_vf_mbox_work);
+ oct->mbox->wk.ctxptr = oct;
+ oct->mbox_neg_ver = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+ dev_info(&oct->pdev->dev, "setup vf mbox successfully\n");
+ return 0;
+}
+
+void octep_vf_delete_mbox(struct octep_vf_device *oct)
+{
+ if (oct->mbox) {
+ cancel_work_sync(&oct->mbox->wk.work);
+
+ mutex_destroy(&oct->mbox->lock);
+ vfree(oct->mbox);
+ oct->mbox = NULL;
+ dev_info(&oct->pdev->dev, "Deleted vf mbox successfully\n");
+ }
+}
+
+int octep_vf_mbox_version_check(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_version.opcode = OCTEP_PFVF_MBOX_CMD_VERSION;
+ cmd.s_version.version = OCTEP_PFVF_MBOX_VERSION_CURRENT;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret == OCTEP_PFVF_MBOX_CMD_STATUS_NACK) {
+ dev_err(&oct->pdev->dev,
+ "VF Mbox version is incompatible with PF\n");
+ return -EINVAL;
+ }
+ oct->mbox_neg_ver = (u32)rsp.s_version.version;
+ dev_dbg(&oct->pdev->dev,
+ "VF Mbox version:%u Negotiated VF version with PF:%u\n",
+ (u32)cmd.s_version.version,
+ (u32)rsp.s_version.version);
+ return 0;
+}
+
+void octep_vf_mbox_work(struct work_struct *work)
+{
+ struct octep_vf_mbox_wk *wk = container_of(work, struct octep_vf_mbox_wk, work);
+ struct octep_vf_iface_link_info *link_info;
+ struct octep_vf_device *oct = NULL;
+ struct octep_vf_mbox *mbox = NULL;
+ union octep_pfvf_mbox_word *notif;
+ u64 pf_vf_data;
+
+ oct = (struct octep_vf_device *)wk->ctxptr;
+ link_info = &oct->link_info;
+ mbox = oct->mbox;
+ pf_vf_data = readq(mbox->mbox_read_reg);
+
+ notif = (union octep_pfvf_mbox_word *)&pf_vf_data;
+
+ switch (notif->s.opcode) {
+ case OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS:
+ if (notif->s_link_status.status) {
+ link_info->oper_up = OCTEP_PFVF_LINK_STATUS_UP;
+ netif_carrier_on(oct->netdev);
+ dev_info(&oct->pdev->dev, "netif_carrier_on\n");
+ } else {
+ link_info->oper_up = OCTEP_PFVF_LINK_STATUS_DOWN;
+ netif_carrier_off(oct->netdev);
+ dev_info(&oct->pdev->dev, "netif_carrier_off\n");
+ }
+ break;
+ default:
+ dev_err(&oct->pdev->dev,
+ "Received unsupported notif %d\n", notif->s.opcode);
+ break;
+ }
+}
+
+static int __octep_vf_mbox_send_cmd(struct octep_vf_device *oct,
+ union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ u64 reg_val = 0ull;
+ int count;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+
+ cmd.s.type = OCTEP_PFVF_MBOX_TYPE_CMD;
+ writeq(cmd.u64, mbox->mbox_write_reg);
+
+ /* No response for notification messages */
+ if (!rsp)
+ return 0;
+
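+ /* The PF writes its response into the same register the command
+ * was written to; poll until the register value changes from the
+ * command word we wrote.
+ */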
+ for (count = 0; count < OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT; count++) {
+ usleep_range(1000, 1500);
+ reg_val = readq(mbox->mbox_write_reg);
+ if (reg_val != cmd.u64) {
+ rsp->u64 = reg_val;
+ break;
+ }
+ }
+ if (count == OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT) {
+ dev_err(&oct->pdev->dev, "mbox send command timed out\n");
+ return OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT;
+ }
+ if (rsp->s.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "mbox_send: Received NACK\n");
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NACK;
+ }
+ rsp->u64 = reg_val;
+ return 0;
+}
+
+int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ int ret;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+ mutex_lock(&mbox->lock);
+ if (pfvf_cmd_versions[cmd.s.opcode] > oct->mbox_neg_ver) {
+ dev_dbg(&oct->pdev->dev, "CMD:%d not supported in Version:%d\n",
+ cmd.s.opcode, oct->mbox_neg_ver);
+ mutex_unlock(&mbox->lock);
+ return -EOPNOTSUPP;
+ }
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, rsp);
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode,
+ u8 *data, int *size)
+{
+ struct octep_vf_mbox *mbox = oct->mbox;
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int data_len = 0, tmp_len = 0;
+ int read_cnt, i = 0, ret;
+
+ if (!mbox)
+ return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
+
+ mutex_lock(&mbox->lock);
+ cmd.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 0;
+ /* Send cmd to read data from PF */
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n");
+ mutex_unlock(&mbox->lock);
+ return ret;
+ }
+ /* The PF returns the total data length of the requested CMD in the ACK */
+ data_len = *((int32_t *)rsp.s_data.data);
+ tmp_len = data_len;
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
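+ /* Each ACK word carries up to OCTEP_PFVF_MBOX_MAX_DATA_SIZE (6)
+ * bytes of payload; keep requesting frag = 1 words until data_len
+ * bytes have been accumulated in recv_data.
+ */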
+ while (data_len) {
+ ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n");
+ mutex_unlock(&mbox->lock);
+ mbox->mbox_data.data_index = 0;
+ memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE);
+ return ret;
+ }
+ if (data_len > OCTEP_PFVF_MBOX_MAX_DATA_SIZE) {
+ data_len -= OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ read_cnt = OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
+ } else {
+ read_cnt = data_len;
+ data_len = 0;
+ }
+ for (i = 0; i < read_cnt; i++) {
+ mbox->mbox_data.recv_data[mbox->mbox_data.data_index] =
+ rsp.s_data.data[i];
+ mbox->mbox_data.data_index++;
+ }
+ cmd.u64 = 0;
+ rsp.u64 = 0;
+ cmd.s_data.opcode = opcode;
+ cmd.s_data.frag = 1;
+ }
+ memcpy(data, mbox->mbox_data.recv_data, tmp_len);
+ *size = tmp_len;
+ mbox->mbox_data.data_index = 0;
+ memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE);
+ mutex_unlock(&mbox->lock);
+ return 0;
+}
+
+int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu)
+{
+ int frame_size = mtu + ETH_HLEN + ETH_FCS_LEN;
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret = 0;
+
+ if (mtu < ETH_MIN_MTU || frame_size > ETH_MAX_MTU) {
+ dev_err(&oct->pdev->dev,
+ "Failed to set MTU to %d MIN MTU:%d MAX MTU:%d\n",
+ mtu, ETH_MIN_MTU, ETH_MAX_MTU);
+ return -EINVAL;
+ }
+
+ cmd.u64 = 0;
+ cmd.s_set_mtu.opcode = OCTEP_PFVF_MBOX_CMD_SET_MTU;
+ cmd.s_set_mtu.mtu = mtu;
+
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Mbox send failed; err=%d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mtu.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Received Mbox NACK from PF for MTU:%d\n", mtu);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR;
+ for (i = 0; i < ETH_ALEN; i++)
+ cmd.s_set_mac.mac_addr[i] = mac_addr[i];
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Mbox send failed; err = %d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int i, ret;
+
+ cmd.u64 = 0;
+ cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "get_mac: mbox send failed; err = %d\n", ret);
+ return ret;
+ }
+ if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "get_mac: received NACK\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < ETH_ALEN; i++)
+ mac_addr[i] = rsp.s_set_mac.mac_addr[i];
+ return 0;
+}
+
+int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_state.opcode = OCTEP_PFVF_MBOX_CMD_SET_RX_STATE;
+ cmd.s_link_state.state = state;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set Rx state via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_state.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set Rx state received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS;
+ cmd.s_link_status.status = status;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set link status received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Get link status received NACK\n");
+ return -EINVAL;
+ }
+ *oper_up = rsp.s_link_status.status;
+ return 0;
+}
+
+int octep_vf_mbox_dev_remove(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s.opcode = OCTEP_PFVF_MBOX_CMD_DEV_REMOVE;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, NULL);
+ return ret;
+}
+
+int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_fw_info.opcode = OCTEP_PFVF_MBOX_CMD_GET_FW_INFO;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_fw_info.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Get link status received NACK\n");
+ return -EINVAL;
+ }
+ oct->fw_info.pkind = rsp.s_fw_info.pkind;
+ oct->fw_info.fsz = rsp.s_fw_info.fsz;
+ oct->fw_info.rx_ol_flags = rsp.s_fw_info.rx_ol_flags;
+ oct->fw_info.tx_ol_flags = rsp.s_fw_info.tx_ol_flags;
+
+ return 0;
+}
+
+int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads,
+ u16 rx_offloads)
+{
+ union octep_pfvf_mbox_word cmd;
+ union octep_pfvf_mbox_word rsp;
+ int ret;
+
+ cmd.u64 = 0;
+ cmd.s_offloads.opcode = OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS;
+ cmd.s_offloads.rx_ol_flags = rx_offloads;
+ cmd.s_offloads.tx_ol_flags = tx_offloads;
+ ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
+ if (ret) {
+ dev_err(&oct->pdev->dev, "Set offloads via VF Mbox send failed\n");
+ return ret;
+ }
+ if (rsp.s_offloads.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
+ dev_err(&oct->pdev->dev, "Set offloads received NACK\n");
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h
new file mode 100644
index 000000000000..9b5efad37eab
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_MBOX_H_
+#define _OCTEP_VF_MBOX_H_
+
+/* When a new command is implemented, the VF Mbox version should be bumped. */
+enum octep_pfvf_mbox_version {
+ OCTEP_PFVF_MBOX_VERSION_V0,
+ OCTEP_PFVF_MBOX_VERSION_V1,
+ OCTEP_PFVF_MBOX_VERSION_V2
+};
+
+#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
+
+enum octep_pfvf_mbox_opcode {
+ OCTEP_PFVF_MBOX_CMD_VERSION,
+ OCTEP_PFVF_MBOX_CMD_SET_MTU,
+ OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
+ OCTEP_PFVF_MBOX_CMD_GET_STATS,
+ OCTEP_PFVF_MBOX_CMD_SET_RX_STATE,
+ OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_GET_MTU,
+ OCTEP_PFVF_MBOX_CMD_DEV_REMOVE,
+ OCTEP_PFVF_MBOX_CMD_GET_FW_INFO,
+ OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS,
+ OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS,
+ OCTEP_PFVF_MBOX_CMD_MAX,
+};
+
+enum octep_pfvf_mbox_word_type {
+ OCTEP_PFVF_MBOX_TYPE_CMD,
+ OCTEP_PFVF_MBOX_TYPE_RSP_ACK,
+ OCTEP_PFVF_MBOX_TYPE_RSP_NACK,
+};
+
+enum octep_pfvf_mbox_cmd_status {
+ OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP = 1,
+ OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT = 2,
+ OCTEP_PFVF_MBOX_CMD_STATUS_NACK = 3,
+ OCTEP_PFVF_MBOX_CMD_STATUS_BUSY = 4,
+ OCTEP_PFVF_MBOX_CMD_STATUS_ERR = 5
+};
+
+enum octep_pfvf_link_status {
+ OCTEP_PFVF_LINK_STATUS_DOWN,
+ OCTEP_PFVF_LINK_STATUS_UP,
+};
+
+enum octep_pfvf_link_speed {
+ OCTEP_PFVF_LINK_SPEED_NONE,
+ OCTEP_PFVF_LINK_SPEED_1000,
+ OCTEP_PFVF_LINK_SPEED_10000,
+ OCTEP_PFVF_LINK_SPEED_25000,
+ OCTEP_PFVF_LINK_SPEED_40000,
+ OCTEP_PFVF_LINK_SPEED_50000,
+ OCTEP_PFVF_LINK_SPEED_100000,
+ OCTEP_PFVF_LINK_SPEED_LAST,
+};
+
+enum octep_pfvf_link_duplex {
+ OCTEP_PFVF_LINK_HALF_DUPLEX,
+ OCTEP_PFVF_LINK_FULL_DUPLEX,
+};
+
+enum octep_pfvf_link_autoneg {
+ OCTEP_PFVF_LINK_AUTONEG,
+ OCTEP_PFVF_LINK_FIXED,
+};
+
+#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT 8000
+#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_UDELAY 1000
+#define OCTEP_PFVF_MBOX_MAX_RETRIES 2
+#define OCTEP_PFVF_MBOX_VERSION 0
+#define OCTEP_PFVF_MBOX_MAX_DATA_SIZE 6
+#define OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE 320
+#define OCTEP_PFVF_MBOX_MORE_FRAG_FLAG 1
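+
+/* Illustration: a maximal bulk read of OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE
+ * (320) bytes needs DIV_ROUND_UP(320, 6) = 54 fragment exchanges, each
+ * carrying OCTEP_PFVF_MBOX_MAX_DATA_SIZE (6) bytes of payload.
+ */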
+
+union octep_pfvf_mbox_word {
+ u64 u64;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 data:48;
+ } s;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 frag:1;
+ u64 rsvd:5;
+ u8 data[6];
+ } s_data;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 version:48;
+ } s_version;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u8 mac_addr[6];
+ } s_set_mac;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:6;
+ u64 mtu:48;
+ } s_set_mtu;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 state:1;
+ u64 rsvd:53;
+ } s_link_state;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 status:1;
+ u64 rsvd:53;
+ } s_link_status;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 pkind:8;
+ u64 fsz:8;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ u64 rsvd:6;
+ } s_fw_info;
+ struct {
+ u64 opcode:8;
+ u64 type:2;
+ u64 rsvd:22;
+ u64 rx_ol_flags:16;
+ u64 tx_ol_flags:16;
+ } s_offloads;
+} __packed;
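+
+/* Example (illustration only): building a SET_MTU command word.
+ *
+ *	union octep_pfvf_mbox_word cmd;
+ *
+ *	cmd.u64 = 0;
+ *	cmd.s_set_mtu.opcode = OCTEP_PFVF_MBOX_CMD_SET_MTU; (bits 0..7)
+ *	cmd.s_set_mtu.mtu = new_mtu;                        (bits 16..63)
+ *
+ * The PF decodes the same 64-bit word through the matching bit-field
+ * view; the type field (bits 8..9) is filled in by the send path.
+ */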
+
+int octep_vf_setup_mbox(struct octep_vf_device *oct);
+void octep_vf_delete_mbox(struct octep_vf_device *oct);
+int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd,
+ union octep_pfvf_mbox_word *rsp);
+int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode,
+ u8 *data, int *size);
+int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu);
+int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr);
+int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr);
+int octep_vf_mbox_version_check(struct octep_vf_device *oct);
+int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state);
+int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status);
+int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up);
+int octep_vf_mbox_dev_remove(struct octep_vf_device *oct);
+int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct);
+int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads, u16 rx_offloads);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h
new file mode 100644
index 000000000000..25e2a876ebba
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_REGS_CN9K_H_
+#define _OCTEP_VF_REGS_CN9K_H_
+
+/*############################ RST #########################*/
+#define CN93_VF_CONFIG_XPANSION_BAR 0x38
+#define CN93_VF_CONFIG_PCIE_CAP 0x70
+#define CN93_VF_CONFIG_PCIE_DEVCAP 0x74
+#define CN93_VF_CONFIG_PCIE_DEVCTL 0x78
+#define CN93_VF_CONFIG_PCIE_LINKCAP 0x7C
+#define CN93_VF_CONFIG_PCIE_LINKCTL 0x80
+#define CN93_VF_CONFIG_PCIE_SLOTCAP 0x84
+#define CN93_VF_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN93_VF_RING_OFFSET BIT_ULL(17)
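+
+/* Each ring's register block is spaced BIT_ULL(17) = 0x20000 bytes
+ * apart; e.g. ring 2's R_IN_CONTROL is at 0x10000 + 2 * 0x20000 =
+ * 0x50000 (illustration only).
+ */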
+
+/*###################### RING IN REGISTERS #########################*/
+#define CN93_VF_SDP_R_IN_CONTROL_START 0x10000
+#define CN93_VF_SDP_R_IN_ENABLE_START 0x10010
+#define CN93_VF_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CN93_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CN93_VF_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CN93_VF_SDP_R_IN_CNTS_START 0x10050
+#define CN93_VF_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CN93_VF_SDP_R_IN_PKT_CNT_START 0x10080
+#define CN93_VF_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CN93_VF_SDP_R_IN_CONTROL(ring) \
+ (CN93_VF_SDP_R_IN_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_ENABLE(ring) \
+ (CN93_VF_SDP_R_IN_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_BADDR(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INSTR_DBELL(ring) \
+ (CN93_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_CNTS(ring) \
+ (CN93_VF_SDP_R_IN_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_INT_LEVELS(ring) \
+ (CN93_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_PKT_CNT(ring) \
+ (CN93_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_IN_BYTE_CNT(ring) \
+ (CN93_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+/*------------------ R_IN Masks ----------------*/
+
+/* Rings per Virtual Function */
+#define CN93_VF_R_IN_CTL_RPVF_MASK (0xF)
+#define CN93_VF_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the max value (4).
+ */
+#define CN93_VF_R_IN_CTL_IDLE BIT_ULL(28)
+#define CN93_VF_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CN93_VF_R_IN_CTL_IS_64B BIT_ULL(24)
+#define CN93_VF_R_IN_CTL_D_NSR BIT_ULL(8)
+#define CN93_VF_R_IN_CTL_D_ESR BIT_ULL(6)
+#define CN93_VF_R_IN_CTL_D_ROR BIT_ULL(5)
+#define CN93_VF_R_IN_CTL_NSR BIT_ULL(3)
+#define CN93_VF_R_IN_CTL_ESR BIT_ULL(1)
+#define CN93_VF_R_IN_CTL_ROR BIT_ULL(0)
+
+#define CN93_VF_R_IN_CTL_MASK (CN93_VF_R_IN_CTL_RDSIZE | CN93_VF_R_IN_CTL_IS_64B)
+
+/*###################### RING OUT REGISTERS #########################*/
+#define CN93_VF_SDP_R_OUT_CNTS_START 0x10100
+#define CN93_VF_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CN93_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CN93_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CN93_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CN93_VF_SDP_R_OUT_CONTROL_START 0x10150
+#define CN93_VF_SDP_R_OUT_ENABLE_START 0x10160
+#define CN93_VF_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CN93_VF_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CN93_VF_SDP_R_OUT_CONTROL(ring) \
+ (CN93_VF_SDP_R_OUT_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_ENABLE(ring) \
+ (CN93_VF_SDP_R_OUT_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CN93_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_CNTS(ring) \
+ (CN93_VF_SDP_R_OUT_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_INT_LEVELS(ring) \
+ (CN93_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_PKT_CNT(ring) \
+ (CN93_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_OUT_BYTE_CNT(ring) \
+ (CN93_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CN93_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CN93_VF_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CN93_VF_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CN93_VF_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CN93_VF_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CN93_VF_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CN93_VF_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CN93_VF_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CN93_VF_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CN93_VF_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CN93_VF_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CN93_VF_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CN93_VF_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ##################### Mail Box Registers ########################## */
+/* SDP PF to VF Mailbox Data Register */
+#define CN93_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+/* SDP Packet PF to VF Mailbox Interrupt Register */
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220
+/* SDP VF to PF Mailbox Data Register */
+#define CN93_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1)
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0)
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CN93_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CN93_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_VF_RING_OFFSET))
+
+#define CN93_VF_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CN93_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
+#endif /* _OCTEP_VF_REGS_CN9K_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h
new file mode 100644
index 000000000000..2e156745ef64
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef _OCTEP_VF_REGS_CNXK_H_
+#define _OCTEP_VF_REGS_CNXK_H_
+
+/*############################ RST #########################*/
+#define CNXK_VF_CONFIG_XPANSION_BAR 0x38
+#define CNXK_VF_CONFIG_PCIE_CAP 0x70
+#define CNXK_VF_CONFIG_PCIE_DEVCAP 0x74
+#define CNXK_VF_CONFIG_PCIE_DEVCTL 0x78
+#define CNXK_VF_CONFIG_PCIE_LINKCAP 0x7C
+#define CNXK_VF_CONFIG_PCIE_LINKCTL 0x80
+#define CNXK_VF_CONFIG_PCIE_SLOTCAP 0x84
+#define CNXK_VF_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CNXK_VF_RING_OFFSET BIT_ULL(17)
+
+/*###################### RING IN REGISTERS #########################*/
+#define CNXK_VF_SDP_R_IN_CONTROL_START 0x10000
+#define CNXK_VF_SDP_R_IN_ENABLE_START 0x10010
+#define CNXK_VF_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CNXK_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CNXK_VF_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CNXK_VF_SDP_R_IN_CNTS_START 0x10050
+#define CNXK_VF_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CNXK_VF_SDP_R_IN_PKT_CNT_START 0x10080
+#define CNXK_VF_SDP_R_IN_BYTE_CNT_START 0x10090
+#define CNXK_VF_SDP_R_ERR_TYPE_START 0x10400
+
+#define CNXK_VF_SDP_R_ERR_TYPE(ring) \
+ (CNXK_VF_SDP_R_ERR_TYPE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_CONTROL(ring) \
+ (CNXK_VF_SDP_R_IN_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_ENABLE(ring) \
+ (CNXK_VF_SDP_R_IN_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_BADDR(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INSTR_DBELL(ring) \
+ (CNXK_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_CNTS(ring) \
+ (CNXK_VF_SDP_R_IN_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_INT_LEVELS(ring) \
+ (CNXK_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_PKT_CNT(ring) \
+ (CNXK_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_IN_BYTE_CNT(ring) \
+ (CNXK_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+/*------------------ R_IN Masks ----------------*/
+
+/* Rings per Virtual Function */
+#define CNXK_VF_R_IN_CTL_RPVF_MASK (0xF)
+#define CNXK_VF_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the max value (4).
+ */
+#define CNXK_VF_R_IN_CTL_IDLE BIT_ULL(28)
+#define CNXK_VF_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CNXK_VF_R_IN_CTL_IS_64B BIT_ULL(24)
+#define CNXK_VF_R_IN_CTL_D_NSR BIT_ULL(8)
+#define CNXK_VF_R_IN_CTL_D_ESR BIT_ULL(6)
+#define CNXK_VF_R_IN_CTL_D_ROR BIT_ULL(5)
+#define CNXK_VF_R_IN_CTL_NSR BIT_ULL(3)
+#define CNXK_VF_R_IN_CTL_ESR BIT_ULL(1)
+#define CNXK_VF_R_IN_CTL_ROR BIT_ULL(0)
+
+#define CNXK_VF_R_IN_CTL_MASK (CNXK_VF_R_IN_CTL_RDSIZE | CNXK_VF_R_IN_CTL_IS_64B)
+
+/*###################### RING OUT REGISTERS #########################*/
+#define CNXK_VF_SDP_R_OUT_CNTS_START 0x10100
+#define CNXK_VF_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CNXK_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CNXK_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CNXK_VF_SDP_R_OUT_CONTROL_START 0x10150
+#define CNXK_VF_SDP_R_OUT_WMARK_START 0x10160
+#define CNXK_VF_SDP_R_OUT_ENABLE_START 0x10170
+#define CNXK_VF_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CNXK_VF_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CNXK_VF_SDP_R_OUT_CONTROL(ring) \
+ (CNXK_VF_SDP_R_OUT_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_ENABLE(ring) \
+ (CNXK_VF_SDP_R_OUT_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CNXK_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_WMARK(ring) \
+ (CNXK_VF_SDP_R_OUT_WMARK_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_CNTS(ring) \
+ (CNXK_VF_SDP_R_OUT_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_INT_LEVELS(ring) \
+ (CNXK_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_PKT_CNT(ring) \
+ (CNXK_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_OUT_BYTE_CNT(ring) \
+ (CNXK_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CNXK_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CNXK_VF_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CNXK_VF_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CNXK_VF_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CNXK_VF_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CNXK_VF_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CNXK_VF_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CNXK_VF_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CNXK_VF_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CNXK_VF_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CNXK_VF_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CNXK_VF_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CNXK_VF_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ##################### Mail Box Registers ########################## */
+/* SDP PF to VF Mailbox Data Register */
+#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+/* SDP Packet PF to VF Mailbox Interrupt Register */
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220
+/* SDP VF to PF Mailbox Data Register */
+#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1)
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0)
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CNXK_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_VF_RING_OFFSET))
+
+#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
+#endif /* _OCTEP_VF_REGS_CNXK_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
new file mode 100644
index 000000000000..82821bc28634
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq)
+{
+ oq->host_read_idx = 0;
+ oq->host_refill_idx = 0;
+ oq->refill_count = 0;
+ oq->last_pkt_count = 0;
+ oq->pkts_pending = 0;
+}
+
+/**
+ * octep_vf_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: 0, if successfully filled receive buffers for all descriptors.
+ * -ENOMEM, if failed to allocate a buffer or failed to map for DMA.
+ */
+static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < oq->max_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "Rx buffer alloc failed\n");
+ goto rx_buf_alloc_err;
+ }
+ desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer alloc: DMA mapping error!\n",
+ oq->q_no);
+ goto dma_map_err;
+ }
+ oq->buff_info[i].page = page;
+ }
+
+ return 0;
+
+dma_map_err:
+ put_page(page);
+rx_buf_alloc_err:
+ while (i) {
+ i--;
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * octep_vf_oq_refill() - refill buffers for used Rx ring descriptors.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of descriptors successfully refilled with receive buffers.
+ */
+static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 refill_idx, i;
+
+ refill_idx = oq->host_refill_idx;
+ for (i = 0; i < oq->refill_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+ oq->stats.alloc_failures++;
+ break;
+ }
+
+ desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer refill: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ oq->stats.alloc_failures++;
+ break;
+ }
+ oq->buff_info[refill_idx].page = page;
+ refill_idx++;
+ if (refill_idx == oq->max_count)
+ refill_idx = 0;
+ }
+ oq->host_refill_idx = refill_idx;
+ oq->refill_count -= i;
+
+ return i;
+}
+
+/**
+ * octep_vf_setup_oq() - Setup a Rx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Rx queue number to be setup.
+ *
+ * Allocate resources for a Rx queue.
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
+{
+ struct octep_vf_oq *oq;
+ u32 desc_ring_size;
+
+ oq = vzalloc(sizeof(*oq));
+ if (!oq)
+ goto create_oq_fail;
+ oct->oq[q_no] = oq;
+
+ oq->octep_vf_dev = oct;
+ oq->netdev = oct->netdev;
+ oq->dev = &oct->pdev->dev;
+ oq->q_no = q_no;
+ oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
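+ /* the ring size is expected to be a power of two, so max_count - 1
+ * can serve as the index wrap mask
+ */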
+ oq->ring_size_mask = oq->max_count - 1;
+ oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+ oq->max_single_buffer_size = oq->buffer_size - OCTEP_VF_OQ_RESP_HW_SIZE;
+
+ /* When the hardware/firmware supports additional capabilities,
+ * an extended header is filled in by Octeon after the length field
+ * in Rx packets. This header carries additional packet metadata.
+ */
+ if (oct->fw_info.rx_ol_flags)
+ oq->max_single_buffer_size -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+
+ oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
+
+ desc_ring_size = oq->max_count * OCTEP_VF_OQ_DESC_SIZE;
+ oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
+ &oq->desc_ring_dma, GFP_KERNEL);
+
+ if (unlikely(!oq->desc_ring)) {
+ dev_err(oq->dev,
+ "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE);
+
+ if (unlikely(!oq->buff_info)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to allocate buffer info for OQ-%d\n", q_no);
+ goto buf_list_err;
+ }
+
+ if (octep_vf_oq_fill_ring_buffers(oq))
+ goto oq_fill_buff_err;
+
+ octep_vf_oq_reset_indices(oq);
+ oct->hw_ops.setup_oq_regs(oct, q_no);
+ oct->num_oqs++;
+
+ return 0;
+
+oq_fill_buff_err:
+ vfree(oq->buff_info);
+ oq->buff_info = NULL;
+buf_list_err:
+ dma_free_coherent(oq->dev, desc_ring_size,
+ oq->desc_ring, oq->desc_ring_dma);
+ oq->desc_ring = NULL;
+desc_dma_alloc_err:
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+create_oq_fail:
+ return -ENOMEM;
+}
+
+/**
+ * octep_vf_oq_free_ring_buffers() - Free ring buffers.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free the receive buffers still attached to the Rx queue descriptors.
+ */
+static void octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq)
+{
+ struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
+ int i;
+
+ if (!oq->desc_ring || !oq->buff_info)
+ return;
+
+ for (i = 0; i < oq->max_count; i++) {
+ if (oq->buff_info[i].page) {
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ desc_ring[i].buffer_ptr = 0;
+ }
+ }
+ octep_vf_oq_reset_indices(oq);
+}
+
+/**
+ * octep_vf_free_oq() - Free Rx queue resources.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free all resources of a Rx queue.
+ */
+static int octep_vf_free_oq(struct octep_vf_oq *oq)
+{
+ struct octep_vf_device *oct = oq->octep_vf_dev;
+ int q_no = oq->q_no;
+
+ octep_vf_oq_free_ring_buffers(oq);
+
+ vfree(oq->buff_info);
+
+ if (oq->desc_ring)
+ dma_free_coherent(oq->dev,
+ oq->max_count * OCTEP_VF_OQ_DESC_SIZE,
+ oq->desc_ring, oq->desc_ring_dma);
+
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+ oct->num_oqs--;
+ return 0;
+}
+
+/**
+ * octep_vf_setup_oqs() - setup resources for all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_vf_setup_oqs(struct octep_vf_device *oct)
+{
+ int i, retval = 0;
+
+ oct->num_oqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ retval = octep_vf_setup_oq(oct, i);
+ if (retval) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup OQ(RxQ)-%d.\n", i);
+ goto oq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+oq_setup_err:
+ while (i) {
+ i--;
+ octep_vf_free_oq(oct->oq[i]);
+ }
+ return retval;
+}
+
+/**
+ * octep_vf_oq_dbell_init() - Initialize Rx queue doorbell.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Write number of descriptors to Rx queue doorbell register.
+ */
+void octep_vf_oq_dbell_init(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/**
+ * octep_vf_free_oqs() - Free resources of all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_vf_free_oqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (!oct->oq[i])
+ continue;
+ octep_vf_free_oq(oct->oq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully freed OQ(RxQ)-%d.\n", i);
+ }
+}
+
+/**
+ * octep_vf_oq_check_hw_for_pkts() - Check for new Rx packets.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: packets received after previous check.
+ */
+static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq)
+{
+ u32 pkt_count, new_pkts;
+
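+ /* u32 arithmetic makes the delta below correct even if the
+ * hardware counter wrapped past last_pkt_count.
+ */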
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts = pkt_count - oq->last_pkt_count;
+
+ /* Clear the hardware packet counter register if the Rx queue is
+ * being processed continuously within a single interrupt and the
+ * counter is approaching its max value.
+ * The counter is not cleared on every read, to save write cycles.
+ */
+ if (unlikely(pkt_count > 0xF0000000U)) {
+ writel(pkt_count, oq->pkts_sent_reg);
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+ oq->last_pkt_count = pkt_count;
+ oq->pkts_pending += new_pkts;
+ return new_pkts;
+}
+
+/**
+ * __octep_vf_oq_process_rx() - Process hardware Rx queue and push to stack.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ * @pkts_to_process: number of packets to be processed.
+ *
+ * Process the new packets in Rx queue.
+ * Packets larger than a single Rx buffer arrive in consecutive descriptors.
+ * The count returned by the hardware only accounts for full packets,
+ * not fragments.
+ *
+ * Return: number of packets processed and pushed to stack.
+ */
+static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq, u16 pkts_to_process)
+{
+ struct octep_vf_oq_resp_hw_ext *resp_hw_ext = NULL;
+ netdev_features_t feat = oq->netdev->features;
+ struct octep_vf_rx_buffer *buff_info;
+ struct octep_vf_oq_resp_hw *resp_hw;
+ u32 pkt, rx_bytes, desc_used;
+ u16 data_offset, rx_ol_flags;
+ struct sk_buff *skb;
+ u32 read_idx;
+
+ read_idx = oq->host_read_idx;
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ buff_info = (struct octep_vf_rx_buffer *)&oq->buff_info[read_idx];
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ resp_hw = page_address(buff_info->page);
+ buff_info->page = NULL;
+
+ /* Convert the length field from big-endian to CPU byte order */
+ buff_info->len = be64_to_cpu(resp_hw->length);
+ if (oct->fw_info.rx_ol_flags) {
+ /* Extended response header is immediately after
+ * response header (resp_hw)
+ */
+ resp_hw_ext = (struct octep_vf_oq_resp_hw_ext *)
+ (resp_hw + 1);
+ buff_info->len -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+ /* Packet Data is immediately after
+ * extended response header.
+ */
+ data_offset = OCTEP_VF_OQ_RESP_HW_SIZE +
+ OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
+ rx_ol_flags = resp_hw_ext->rx_ol_flags;
+ } else {
+ /* Data is immediately after
+ * Hardware Rx response header.
+ */
+ data_offset = OCTEP_VF_OQ_RESP_HW_SIZE;
+ rx_ol_flags = 0;
+ }
+ rx_bytes += buff_info->len;
+
+ if (buff_info->len <= oq->max_single_buffer_size) {
+ skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ skb_put(skb, buff_info->len);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ } else {
+ struct skb_shared_info *shinfo;
+ u16 data_len;
+
+ skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ /* Head fragment includes response header(s);
+ * subsequent fragments contain only data.
+ */
+ skb_put(skb, oq->max_single_buffer_size);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+
+ shinfo = skb_shinfo(skb);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = (struct octep_vf_rx_buffer *)
+ &oq->buff_info[read_idx];
+ if (data_len < oq->buffer_size) {
+ buff_info->len = data_len;
+ data_len = 0;
+ } else {
+ buff_info->len = oq->buffer_size;
+ data_len -= oq->buffer_size;
+ }
+
+ skb_add_rx_frag(skb, shinfo->nr_frags,
+ buff_info->page, 0,
+ buff_info->len,
+ buff_info->len);
+ buff_info->page = NULL;
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ }
+ }
+
+ skb->dev = oq->netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (feat & NETIF_F_RXCSUM &&
+ OCTEP_VF_RX_CSUM_VERIFIED(rx_ol_flags))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ napi_gro_receive(oq->napi, skb);
+ }
+
+ oq->host_read_idx = read_idx;
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+
+ return pkt;
+}
+
+/**
+ * octep_vf_oq_process_rx() - Process Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @budget: max number of packets can be processed in one invocation.
+ *
+ * Check for newly received packets and process them.
+ * Keeps checking for new packets until budget is used or no new packets seen.
+ *
+ * Return: number of packets processed.
+ */
+int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget)
+{
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_vf_device *oct = oq->octep_vf_dev;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+ if (oq->pkts_pending == 0)
+ octep_vf_oq_check_hw_for_pkts(oct, oq);
+ pkts_available = min(budget - total_pkts_processed,
+ oq->pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_vf_oq_process_rx(oct, oq,
+ pkts_available);
+ oq->pkts_pending -= pkts_processed;
+ total_pkts_processed += pkts_processed;
+ }
+
+ if (oq->refill_count >= oq->refill_threshold) {
+ u32 desc_refilled = octep_vf_oq_refill(oct, oq);
+
+ /* flush pending writes before updating credits */
+ smp_wmb();
+ writel(desc_refilled, oq->pkts_credit_reg);
+ }
+
+ return total_pkts_processed;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
new file mode 100644
index 000000000000..fe46838b5200
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_RX_H_
+#define _OCTEP_VF_RX_H_
+
+/* struct octep_vf_oq_desc_hw - Octeon Hardware OQ descriptor format.
+ *
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ *
+ * @buffer_ptr: DMA address of the receive buffer page.
+ * @info_ptr: DMA address of host memory, used to update pkt count by hw.
+ * This is currently unused to save pci writes.
+ */
+struct octep_vf_oq_desc_hw {
+ dma_addr_t buffer_ptr;
+ u64 info_ptr;
+};
+
+static_assert(sizeof(struct octep_vf_oq_desc_hw) == 16);
+
+#define OCTEP_VF_OQ_DESC_SIZE (sizeof(struct octep_vf_oq_desc_hw))
+
+/* Rx offload flags */
+#define OCTEP_VF_RX_OFFLOAD_VLAN_STRIP BIT(0)
+#define OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_VF_RX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_VF_RX_OFFLOAD_TCP_CKSUM BIT(3)
+
+#define OCTEP_VF_RX_OFFLOAD_CKSUM (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_VF_RX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_VF_RX_OFFLOAD_UDP_CKSUM))
+
+/* bit 0 is vlan strip */
+#define OCTEP_VF_RX_CSUM_IP_VERIFIED BIT(1)
+#define OCTEP_VF_RX_CSUM_L4_VERIFIED BIT(2)
+
+#define OCTEP_VF_RX_CSUM_VERIFIED(flags) ((flags) & \
+ (OCTEP_VF_RX_CSUM_L4_VERIFIED | \
+ OCTEP_VF_RX_CSUM_IP_VERIFIED))
+
+/* Extended Response Header in packet data received from Hardware.
+ * Includes metadata like checksum status.
+ * This is valid only if the hardware/firmware has published support for it.
+ * This is at offset 0 of packet data (skb->data).
+ */
+struct octep_vf_oq_resp_hw_ext {
+ /* Reserved. */
+ u64 rsvd:48;
+
+ /* rx offload flags */
+ u16 rx_ol_flags;
+};
+
+static_assert(sizeof(struct octep_vf_oq_resp_hw_ext) == 8);
+
+#define OCTEP_VF_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_vf_oq_resp_hw_ext))
+
+/* Length of Rx packet DMA'ed by Octeon to Host.
+ * This is in big-endian and must be converted to CPU byte order.
+ * Octeon writes this at the beginning of Rx buffer (skb->data).
+ */
+struct octep_vf_oq_resp_hw {
+ /* The Length of the packet. */
+ __be64 length;
+};
+
+static_assert(sizeof(struct octep_vf_oq_resp_hw) == 8);
+
+#define OCTEP_VF_OQ_RESP_HW_SIZE (sizeof(struct octep_vf_oq_resp_hw))
+
+/* Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers. The fields are operated by
+ * OS-dependent routines.
+ */
+struct octep_vf_rx_buffer {
+ struct page *page;
+
+ /* length from rx hardware descriptor after converting to cpu endian */
+ u64 len;
+};
+
+#define OCTEP_VF_OQ_RECVBUF_SIZE (sizeof(struct octep_vf_rx_buffer))
+
+/* Output Queue statistics. Each output queue has four stats fields. */
+struct octep_vf_oq_stats {
+ /* Number of packets received from the Device. */
+ u64 packets;
+
+ /* Number of bytes received from the Device. */
+ u64 bytes;
+
+ /* Number of times failed to allocate buffers. */
+ u64 alloc_failures;
+};
+
+#define OCTEP_VF_OQ_STATS_SIZE (sizeof(struct octep_vf_oq_stats))
+
+/* Hardware interface Rx statistics */
+struct octep_vf_iface_rx_stats {
+ /* Received packets */
+ u64 pkts;
+
+ /* Octets of received packets */
+ u64 octets;
+
+ /* Received PAUSE and Control packets */
+ u64 pause_pkts;
+
+ /* Received PAUSE and Control octets */
+ u64 pause_octets;
+
+ /* Filtered DMAC0 packets */
+ u64 dmac0_pkts;
+
+ /* Filtered DMAC0 octets */
+ u64 dmac0_octets;
+
+ /* Packets dropped due to RX FIFO full */
+ u64 dropped_pkts_fifo_full;
+
+ /* Octets dropped due to RX FIFO full */
+ u64 dropped_octets_fifo_full;
+
+ /* Error packets */
+ u64 err_pkts;
+
+ /* Filtered DMAC1 packets */
+ u64 dmac1_pkts;
+
+ /* Filtered DMAC1 octets */
+ u64 dmac1_octets;
+
+ /* NCSI-bound packets dropped */
+ u64 ncsi_dropped_pkts;
+
+ /* NCSI-bound octets dropped */
+ u64 ncsi_dropped_octets;
+
+ /* Multicast packets received. */
+ u64 mcast_pkts;
+
+ /* Broadcast packets received. */
+ u64 bcast_pkts;
+};
+
+/* The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement a
+ * Octeon OQ.
+ */
+struct octep_vf_oq {
+ u32 q_no;
+
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct napi_struct *napi;
+
+ /* The receive buffer list. This list has the virtual addresses
+ * of the buffers.
+ */
+ struct octep_vf_rx_buffer *buff_info;
+
+ /* Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ u8 __iomem *pkts_credit_reg;
+
+ /* Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ u8 __iomem *pkts_sent_reg;
+
+ /* Statistics for this OQ. */
+ struct octep_vf_oq_stats stats;
+
+ /* Packets pending to be processed */
+ u32 pkts_pending;
+ u32 last_pkt_count;
+
+ /* Index in the ring where the driver should read the next packet */
+ u32 host_read_idx;
+
+ /* Number of descriptors in this ring. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ /* The number of descriptors pending refill. */
+ u32 refill_count;
+
+ /* Index in the ring where the driver will refill the
+ * descriptor's buffer
+ */
+ u32 host_refill_idx;
+ u32 refill_threshold;
+
+ /* The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+ u32 max_single_buffer_size;
+
+ /* The 8B aligned descriptor ring starts at this address. */
+ struct octep_vf_oq_desc_hw *desc_ring;
+
+ /* DMA mapped address of the OQ descriptor ring. */
+ dma_addr_t desc_ring_dma;
+};
+
+#define OCTEP_VF_OQ_SIZE (sizeof(struct octep_vf_oq))
+#endif /* _OCTEP_VF_RX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
new file mode 100644
index 000000000000..47a5c054fdb6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+
+#include "octep_vf_config.h"
+#include "octep_vf_main.h"
+
+/* Reset various index of Tx queue data structure. */
+static void octep_vf_iq_reset_indices(struct octep_vf_iq *iq)
+{
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octep_vf_read_index = 0;
+ iq->flush_index = 0;
+ iq->pkts_processed = 0;
+ iq->pkt_in_done = 0;
+}
+
+/**
+ * octep_vf_iq_process_completions() - Process Tx queue completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @budget: max number of completions to be processed in one invocation.
+ */
+int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
+{
+ u32 compl_pkts, compl_bytes, compl_sg;
+ struct octep_vf_device *oct = iq->octep_vf_dev;
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ compl_pkts = 0;
+ compl_sg = 0;
+ compl_bytes = 0;
+ iq->octep_vf_read_index = oct->hw_ops.update_iq_read_idx(iq);
+
+ while (likely(budget && (fi != iq->octep_vf_read_index))) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+ compl_bytes += skb->len;
+ compl_pkts++;
+ budget--;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+ compl_sg++;
+
+ dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->pkts_processed += compl_pkts;
+ iq->stats.instr_completed += compl_pkts;
+ iq->stats.bytes_sent += compl_bytes;
+ iq->stats.sgentry_sent += compl_sg;
+ iq->flush_index = fi;
+
+ netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts,
+ compl_bytes, IQ_INSTR_SPACE(iq),
+ OCTEP_VF_WAKE_QUEUE_THRESHOLD);
+
+ return !budget;
+}
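The sglist indexing in the loop above looks asymmetric because each 40-byte sglist element stores its four lengths in a single 64-bit word with Len 0 in bits 63:48 (see the format comment in octep_vf_tx.h), so on a little-endian host entry i's length lives at len[3 - (i & 3)] while its pointer sits at dma_ptr[i & 3]. A standalone illustration of that mapping:

#include <stdio.h>

/* Gather entry i maps to sglist element i >> 2; its pointer occupies
 * slot i & 3 while its length, stored byte-swapped per the hardware
 * format, occupies slot 3 - (i & 3).
 */
int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("entry %d -> sglist[%d].dma_ptr[%d], sglist[%d].len[%d]\n",
		       i, i >> 2, i & 3, i >> 2, 3 - (i & 3));
	return 0;
}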
+
+/**
+ * octep_vf_iq_free_pending() - Free Tx buffers for pending completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ */
+static void octep_vf_iq_free_pending(struct octep_vf_iq *iq)
+{
+ struct octep_vf_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ while (fi != iq->host_write_index) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+
+ dma_unmap_single(iq->dev,
+ tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[3],
+ DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->flush_index = fi;
+ netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
+}
+
+/**
+ * octep_vf_clean_iqs() - Clean Tx queues to shutdown the device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free the buffers in Tx queue descriptors pending completion and
+ * reset queue indices
+ */
+void octep_vf_clean_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ octep_vf_iq_free_pending(oct->iq[i]);
+ octep_vf_iq_reset_indices(oct->iq[i]);
+ }
+}
+
+/**
+ * octep_vf_setup_iq() - Setup a Tx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Tx queue number to be setup.
+ *
+ * Allocate resources for a Tx queue.
+ */
+static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
+{
+ u32 desc_ring_size, buff_info_size, sglist_size;
+ struct octep_vf_iq *iq;
+ int i;
+
+ iq = vzalloc(sizeof(*iq));
+ if (!iq)
+ goto iq_alloc_err;
+ oct->iq[q_no] = iq;
+
+ iq->octep_vf_dev = oct;
+ iq->netdev = oct->netdev;
+ iq->dev = &oct->pdev->dev;
+ iq->q_no = q_no;
+ iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->ring_size_mask = iq->max_count - 1;
+ iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+ iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
+
+ /* Allocate memory for hardware queue descriptors */
+ desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
+ &iq->desc_ring_dma, GFP_KERNEL);
+ if (unlikely(!iq->desc_ring)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ /* Allocate memory for hardware SGLIST descriptors */
+ sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
+ &iq->sglist_dma, GFP_KERNEL);
+ if (unlikely(!iq->sglist)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d SGLIST\n",
+ q_no);
+ goto sglist_alloc_err;
+ }
+
+ /* allocate memory to manage Tx packets pending completion */
+ buff_info_size = OCTEP_VF_IQ_TXBUFF_INFO_SIZE * iq->max_count;
+ iq->buff_info = vzalloc(buff_info_size);
+ if (!iq->buff_info) {
+ dev_err(iq->dev,
+ "Failed to allocate buff info for IQ-%d\n", q_no);
+ goto buff_info_err;
+ }
+
+ /* Setup sglist addresses in tx_buffer entries */
+ for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
+ struct octep_vf_tx_buffer *tx_buffer;
+
+ tx_buffer = &iq->buff_info[i];
+ tx_buffer->sglist =
+ &iq->sglist[i * OCTEP_VF_SGLIST_ENTRIES_PER_PKT];
+ tx_buffer->sglist_dma =
+ iq->sglist_dma + (i * OCTEP_VF_SGLIST_SIZE_PER_PKT);
+ }
+
+ octep_vf_iq_reset_indices(iq);
+ oct->hw_ops.setup_iq_regs(oct, q_no);
+
+ oct->num_iqs++;
+ return 0;
+
+buff_info_err:
+ dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
+sglist_alloc_err:
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+desc_dma_alloc_err:
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+iq_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_vf_free_iq() - Free Tx queue resources.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Free all the resources allocated for a Tx queue.
+ */
+static void octep_vf_free_iq(struct octep_vf_iq *iq)
+{
+ struct octep_vf_device *oct = iq->octep_vf_dev;
+ u64 desc_ring_size, sglist_size;
+ int q_no = iq->q_no;
+
+ desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+
+ vfree(iq->buff_info);
+
+ if (iq->desc_ring)
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+
+ sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ if (iq->sglist)
+ dma_free_coherent(iq->dev, sglist_size,
+ iq->sglist, iq->sglist_dma);
+
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+ oct->num_iqs--;
+}
+
+/**
+ * octep_vf_setup_iqs() - setup resources for all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_vf_setup_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ oct->num_iqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (octep_vf_setup_iq(oct, i)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup IQ(TxQ)-%d.\n", i);
+ goto iq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+iq_setup_err:
+ while (i) {
+ i--;
+ octep_vf_free_iq(oct->iq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_vf_free_iqs() - Free resources of all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_vf_free_iqs(struct octep_vf_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ octep_vf_free_iq(oct->iq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully destroyed IQ(TxQ)-%d.\n", i);
+ }
+ oct->num_iqs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
new file mode 100644
index 000000000000..f338b975103c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_VF_TX_H_
+#define _OCTEP_VF_TX_H_
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+#define TX_BUFTYPE_NONE 0
+#define TX_BUFTYPE_NET 1
+#define TX_BUFTYPE_NET_SG 2
+#define NUM_TX_BUFTYPES 3
+
+/* Hardware format for Scatter/Gather list
+ *
+ * 63 48|47 32|31 16|15 0
+ * -----------------------------------------
+ * | Len 0 | Len 1 | Len 2 | Len 3 |
+ * -----------------------------------------
+ * | Ptr 0 |
+ * -----------------------------------------
+ * | Ptr 1 |
+ * -----------------------------------------
+ * | Ptr 2 |
+ * -----------------------------------------
+ * | Ptr 3 |
+ * -----------------------------------------
+ */
+struct octep_vf_tx_sglist_desc {
+ u16 len[4];
+ dma_addr_t dma_ptr[4];
+};
+
+static_assert(sizeof(struct octep_vf_tx_sglist_desc) == 40);
+
+/* Each Scatter/Gather entry sent to hardware holds four pointers.
+ * So, the number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1'
+ * is for the main skb which also goes as a gather buffer to Octeon hardware.
+ * To allocate sufficient SGLIST entries for a packet with max fragments,
+ * align by adding 3 before calculating max SGLIST entries per packet.
+ */
+#define OCTEP_VF_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
+#define OCTEP_VF_SGLIST_SIZE_PER_PKT \
+ (OCTEP_VF_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_vf_tx_sglist_desc))
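With the kernel's usual default of MAX_SKB_FRAGS = 17 (an assumption here; it is configurable on recent kernels), the rounding above yields (17 + 1 + 3) / 4 = 5 sglist entries, i.e. 200 bytes of sglist space per packet. A compile-time check of that arithmetic, using local stand-in macros:

#include <assert.h>

#define TOY_MAX_SKB_FRAGS	17	/* common kernel default (assumption) */
#define TOY_SGLIST_DESC_SIZE	40	/* sizeof(struct octep_vf_tx_sglist_desc) */
#define TOY_ENTRIES_PER_PKT	((TOY_MAX_SKB_FRAGS + 1 + 3) / 4)

static_assert(TOY_ENTRIES_PER_PKT == 5, "5 sglist entries per packet");
static_assert(TOY_ENTRIES_PER_PKT * TOY_SGLIST_DESC_SIZE == 200,
	      "200 bytes of sglist space per packet");

int main(void)
{
	return 0;
}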
+
+struct octep_vf_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct octep_vf_tx_sglist_desc *sglist;
+ dma_addr_t sglist_dma;
+ u8 gather;
+};
+
+#define OCTEP_VF_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_vf_tx_buffer))
+
+/* VF Hardware interface Tx statistics */
+struct octep_vf_iface_tx_stats {
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
+ /* Packets dropped */
+ u64 dropped;
+
+ /* Reserved */
+ u64 reserved[13];
+};
+
+/* VF Input Queue statistics */
+struct octep_vf_iq_stats {
+ /* Instructions posted to this queue. */
+ u64 instr_posted;
+
+ /* Instructions copied by hardware for processing. */
+ u64 instr_completed;
+
+ /* Instructions that could not be processed. */
+ u64 instr_dropped;
+
+ /* Bytes sent through this queue. */
+ u64 bytes_sent;
+
+ /* Gather entries sent through this queue. */
+ u64 sgentry_sent;
+
+ /* Number of transmit failures due to TX_BUSY */
+ u64 tx_busy;
+
+ /* Number of times the queue is restarted */
+ u64 restart_cnt;
+};
+
+/* The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4)
+ * for an Octeon device has one such structure to represent it.
+ */
+struct octep_vf_iq {
+ u32 q_no;
+
+ struct octep_vf_device *octep_vf_dev;
+ struct net_device *netdev;
+ struct device *dev;
+ struct netdev_queue *netdev_q;
+
+ /* Index in input ring where driver should write the next packet */
+ u16 host_write_index;
+
+ /* Index in input ring where Octeon is expected to read next packet */
+ u16 octep_vf_read_index;
+
+ /* This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u16 flush_index;
+
+ /* Statistics for this input queue. */
+ struct octep_vf_iq_stats stats;
+
+ /* Pointer to the Virtual Base addr of the input ring. */
+ struct octep_vf_tx_desc_hw *desc_ring;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ dma_addr_t desc_ring_dma;
+
+ /* Info of Tx buffers pending completion. */
+ struct octep_vf_tx_buffer *buff_info;
+
+ /* Base pointer to Scatter/Gather lists for all ring descriptors. */
+ struct octep_vf_tx_sglist_desc *sglist;
+
+ /* DMA mapped addr of Scatter Gather Lists */
+ dma_addr_t sglist_dma;
+
+ /* Octeon doorbell register for the ring. */
+ u8 __iomem *doorbell_reg;
+
+ /* Octeon instruction count register for this ring. */
+ u8 __iomem *inst_cnt_reg;
+
+ /* interrupt level register for this ring */
+ u8 __iomem *intr_lvl_reg;
+
+ /* Maximum no. of instructions in this queue. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ u32 pkt_in_done;
+ u32 pkts_processed;
+
+ u32 status;
+
+ /* Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /* The max. number of instructions that can be held pending by the
+ * driver before ringing doorbell.
+ */
+ u32 fill_threshold;
+};
+
+/* Hardware Tx Instruction Header */
+struct octep_vf_instr_hdr {
+ /* Data Len */
+ u64 tlen:16;
+
+ /* Reserved */
+ u64 rsvd:20;
+
+ /* PKIND for SDP */
+ u64 pkind:6;
+
+ /* Front Data size */
+ u64 fsz:6;
+
+ /* No. of entries in gather list */
+ u64 gsz:14;
+
+ /* Gather indicator, 1 = gather */
+ u64 gather:1;
+
+ /* Reserved3 */
+ u64 reserved3:1;
+};
+
+static_assert(sizeof(struct octep_vf_instr_hdr) == 8);
+
+/* Tx offload flags */
+#define OCTEP_VF_TX_OFFLOAD_VLAN_INSERT BIT(0)
+#define OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM BIT(1)
+#define OCTEP_VF_TX_OFFLOAD_UDP_CKSUM BIT(2)
+#define OCTEP_VF_TX_OFFLOAD_TCP_CKSUM BIT(3)
+#define OCTEP_VF_TX_OFFLOAD_SCTP_CKSUM BIT(4)
+#define OCTEP_VF_TX_OFFLOAD_TCP_TSO BIT(5)
+#define OCTEP_VF_TX_OFFLOAD_UDP_TSO BIT(6)
+
+#define OCTEP_VF_TX_OFFLOAD_CKSUM (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_UDP_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_TCP_CKSUM)
+
+#define OCTEP_VF_TX_OFFLOAD_TSO (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_VF_TX_OFFLOAD_UDP_TSO)
+
+#define OCTEP_VF_TX_IP_CSUM(flags) ((flags) & \
+ (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_TCP_CKSUM | \
+ OCTEP_VF_TX_OFFLOAD_UDP_CKSUM))
+
+#define OCTEP_VF_TX_TSO(flags) ((flags) & \
+ (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
+ OCTEP_VF_TX_OFFLOAD_UDP_TSO))
+
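The composite checksum/TSO masks above are meant to be OR-ed into tx_mdata.ol_flags. A hedged sketch of how a transmit path might derive them; toy_tx_ol_flags and its inputs are stand-ins for the real skb state (skb->ip_summed, skb_shinfo(skb)->gso_*), not driver code, and the flag values are local mirrors of the macros above:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

/* Local mirrors of the flag values defined above. */
#define TX_OFFLOAD_IPV4_CKSUM	BIT(1)
#define TX_OFFLOAD_UDP_CKSUM	BIT(2)
#define TX_OFFLOAD_TCP_CKSUM	BIT(3)
#define TX_OFFLOAD_TCP_TSO	BIT(5)
#define TX_OFFLOAD_UDP_TSO	BIT(6)
#define TX_OFFLOAD_CKSUM	(TX_OFFLOAD_IPV4_CKSUM | \
				 TX_OFFLOAD_UDP_CKSUM | TX_OFFLOAD_TCP_CKSUM)

/* Hypothetical helper: 'wants_csum', 'gso_size' and 'is_tcp' stand in
 * for the skb checksum/GSO state a real driver would inspect.
 */
static uint16_t toy_tx_ol_flags(int wants_csum, int gso_size, int is_tcp)
{
	uint16_t flags = 0;

	if (wants_csum)
		flags |= TX_OFFLOAD_CKSUM;
	if (gso_size)
		flags |= is_tcp ? TX_OFFLOAD_TCP_TSO : TX_OFFLOAD_UDP_TSO;
	return flags;
}

int main(void)
{
	printf("TCP TSO skb with csum: ol_flags = 0x%x\n",
	       toy_tx_ol_flags(1, 1448, 1));
	return 0;
}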
+struct tx_mdata {
+ /* offload flags */
+ u16 ol_flags;
+
+ /* gso size */
+ u16 gso_size;
+
+ /* number of gso segments */
+ u16 gso_segs;
+
+ /* reserved */
+ u16 rsvd1;
+
+ /* reserved */
+ u64 rsvd2;
+};
+
+static_assert(sizeof(struct tx_mdata) == 16);
+
+/* 64-byte Tx instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ *
+ * Only the first 16 bytes (dptr and ih) are mandatory; the rest are
+ * optional, filled by the driver based on firmware/hardware capabilities.
+ * These optional headers are together called Front Data, and their size
+ * is described by ih->fsz.
+ */
+struct octep_vf_tx_desc_hw {
+ /* Pointer where the input data is available. */
+ u64 dptr;
+
+ /* Instruction Header. */
+ union {
+ struct octep_vf_instr_hdr ih;
+ u64 ih64;
+ };
+
+ union {
+ u64 txm64[2];
+ struct tx_mdata txm;
+ };
+
+ /* Additional headers available in a 64-byte instruction. */
+ u64 exhdr[4];
+};
+
+static_assert(sizeof(struct octep_vf_tx_desc_hw) == 64);
+
+#define OCTEP_VF_IQ_DESC_SIZE (sizeof(struct octep_vf_tx_desc_hw))
+#endif /* _OCTEP_VF_TX_H_ */
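Putting the pieces together, filling one 64-byte instruction might look as follows. This is a userspace mock using local mirrors of the structures above; the bitfield packing is compiler/ABI-dependent in general, and the dptr value is a stand-in, not a real DMA address:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Local mirrors of struct octep_vf_instr_hdr / octep_vf_tx_desc_hw. */
struct toy_instr_hdr {
	uint64_t tlen:16, rsvd:20, pkind:6, fsz:6, gsz:14, gather:1, rsvd3:1;
};

struct toy_tx_desc {
	uint64_t dptr;
	union {
		struct toy_instr_hdr ih;
		uint64_t ih64;
	};
	uint64_t txm64[2];
	uint64_t exhdr[4];
};

_Static_assert(sizeof(struct toy_tx_desc) == 64, "64-byte instruction");

int main(void)
{
	struct toy_tx_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.dptr = 0x1000;	/* stand-in, not a real DMA address */
	desc.ih.tlen = 1500;	/* total data length */
	desc.ih.gather = 1;	/* dptr points at a gather list */
	desc.ih.gsz = 2;	/* two gather entries */
	desc.ih.fsz = 0;	/* no front data in this sketch */

	printf("ih64 = 0x%016llx\n", (unsigned long long)desc.ih64);
	return 0;
}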
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index edeb0f737312..61ab7f66f053 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -837,6 +837,8 @@ enum nix_af_status {
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
NIX_AF_ERR_LINK_CREDITS = -431,
+ NIX_AF_ERR_INVALID_BPID = -434,
+ NIX_AF_ERR_INVALID_BPID_REQ = -435,
NIX_AF_ERR_INVALID_MCAST_GRP = -436,
NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437,
NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438,
@@ -1114,6 +1116,7 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_CUSTOM0 BIT(19)
#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
#define NIX_FLOW_KEY_TYPE_AH BIT(22)
@@ -1553,6 +1556,7 @@ struct flow_msg {
u32 mpls_lse[4];
u8 icmp_type;
u8 icmp_code;
+ __be16 tcp_flags;
};
struct npc_install_flow_req {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index b0b4dea548e1..d883157393ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -85,8 +85,7 @@ enum npc_kpu_lc_ltype {
enum npc_kpu_ld_ltype {
NPC_LT_LD_TCP = 1,
NPC_LT_LD_UDP,
- NPC_LT_LD_ICMP,
- NPC_LT_LD_SCTP,
+ NPC_LT_LD_SCTP = 4,
NPC_LT_LD_ICMP6,
NPC_LT_LD_CUSTOM0,
NPC_LT_LD_CUSTOM1,
@@ -97,6 +96,7 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_NSH,
NPC_LT_LD_TU_MPLS_IN_NSH,
NPC_LT_LD_TU_MPLS_IN_IP,
+ NPC_LT_LD_ICMP,
};
enum npc_kpu_le_ltype {
@@ -140,14 +140,14 @@ enum npc_kpu_lg_ltype {
enum npc_kpu_lh_ltype {
NPC_LT_LH_TU_TCP = 1,
NPC_LT_LH_TU_UDP,
- NPC_LT_LH_TU_ICMP,
- NPC_LT_LH_TU_SCTP,
+ NPC_LT_LH_TU_SCTP = 4,
NPC_LT_LH_TU_ICMP6,
+ NPC_LT_LH_CUSTOM0,
+ NPC_LT_LH_CUSTOM1,
NPC_LT_LH_TU_IGMP = 8,
NPC_LT_LH_TU_ESP,
NPC_LT_LH_TU_AH,
- NPC_LT_LH_CUSTOM0 = 0xE,
- NPC_LT_LH_CUSTOM1 = 0xF,
+ NPC_LT_LH_TU_ICMP = 0xF,
};
/* NPC port kind defines how the incoming or outgoing packets
@@ -155,10 +155,11 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
-#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CPT_HDR_PTP_PKIND
enum npc_pkind_type {
NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CPT_HDR_PTP_PKIND = 54ULL,
NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
@@ -216,6 +217,7 @@ enum key_fields {
NPC_MPLS4_TTL,
NPC_TYPE_ICMP,
NPC_CODE_ICMP,
+ NPC_TCP_FLAGS,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index a820bad3abb2..41de72c8607f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -35,6 +35,7 @@
#define NPC_ETYPE_NSH 0x894f
#define NPC_ETYPE_DSA 0xdada
#define NPC_ETYPE_PPPOE 0x8864
+#define NPC_ETYPE_ERSPA 0x88be
#define NPC_PPP_IP 0x0021
#define NPC_PPP_IP6 0x0057
@@ -59,6 +60,9 @@
#define NPC_IPNH_MPLS 137
#define NPC_IPNH_HOSTID 139
#define NPC_IPNH_SHIM6 140
+#define NPC_IPNH_CUSTOM 253
+
+#define NPC_IP6_ROUTE_TYPE 4
#define NPC_UDP_PORT_PTP_E 319
#define NPC_UDP_PORT_PTP_G 320
@@ -187,6 +191,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_EXDSA,
NPC_S_KPU2_CPT_CTAG,
NPC_S_KPU2_CPT_QINQ,
+ NPC_S_KPU2_MT,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
@@ -231,6 +236,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU8_ICMP6,
NPC_S_KPU8_GRE,
NPC_S_KPU8_AH,
+ NPC_S_KPU8_CUSTOM,
NPC_S_KPU9_TU_MPLS_IN_GRE,
NPC_S_KPU9_TU_MPLS_IN_NSH,
NPC_S_KPU9_TU_MPLS_IN_IP,
@@ -242,6 +248,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU9_GTPC,
NPC_S_KPU9_GTPU,
NPC_S_KPU9_ESP,
+ NPC_S_KPU9_CUSTOM,
NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
NPC_S_KPU10_TU_MPLS_PL,
NPC_S_KPU10_TU_MPLS,
@@ -318,10 +325,10 @@ enum npc_kpu_lc_uflag {
NPC_F_LC_U_UNK_PROTO = 0x10,
NPC_F_LC_U_IP_FRAG = 0x20,
NPC_F_LC_U_IP6_FRAG = 0x40,
+ NPC_F_LC_L_6TO4 = 0x80,
};
enum npc_kpu_lc_lflag {
NPC_F_LC_L_IP_IN_IP = 1,
- NPC_F_LC_L_6TO4,
NPC_F_LC_L_MPLS_IN_IP,
NPC_F_LC_L_IP6_TUN_IP6,
NPC_F_LC_L_IP6_MPLS_IN_IP,
@@ -334,6 +341,8 @@ enum npc_kpu_lc_lflag {
NPC_F_LC_L_EXT_MOBILITY,
NPC_F_LC_L_EXT_HOSTID,
NPC_F_LC_L_EXT_SHIM6,
+ NPC_F_LC_L_IP6_SRH_SEG_1,
+ NPC_F_LC_L_IP6_SRH_SEG_2,
};
enum npc_kpu_ld_lflag {
@@ -970,10 +979,10 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 48, 0,
NPC_LID_LA, NPC_LT_NA,
0,
- 0, 0, 0, 0,
+ 0, 7, 0, 0,
},
{
@@ -2786,6 +2795,24 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_MT, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_MT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4501,6 +4528,24 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0xff00,
NPC_IP_VER_6,
NPC_IP_VER_MASK,
+ (NPC_IP6_ROUTE_TYPE << 8) | 1,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ (NPC_IP6_ROUTE_TYPE << 8) | 2,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU5_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
0x0000,
0x0000,
},
@@ -4776,6 +4821,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_CUSTOM,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
0x0000,
0x0000,
NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
@@ -4884,6 +4938,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_CUSTOM,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
0x0000,
0x0000,
NPC_IP_VER_4,
@@ -5064,6 +5127,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
{
NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
0x0000,
0x0000,
NPC_IP_VER_6,
@@ -5208,6 +5280,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5325,6 +5406,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_HOP_DEST, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_HOP_DEST, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5433,6 +5523,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5532,6 +5631,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5649,6 +5757,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5757,6 +5874,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
{
NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5883,6 +6009,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_IP6_ROUT, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_ROUT, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5982,6 +6117,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ NPC_IP6_FRAG_FRAGOFF,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -6081,6 +6225,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
{
NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_CUSTOM << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -6310,6 +6463,15 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0xffff,
0x0000,
0x0000,
+ 0x0009,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
0x0000,
0x0000,
},
@@ -6756,6 +6918,78 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
{
NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
+ NPC_ETYPE_ERSPA,
+ 0xffff,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU8_GRE, 0xff,
0x0000,
0xffff,
NPC_GRE_F_ROUTE,
@@ -6836,6 +7070,15 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU8_CUSTOM, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -7304,6 +7547,24 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU9_CUSTOM, 0xff,
+ 0x4000,
+ 0xf000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU9_CUSTOM, 0xff,
+ 0x6000,
+ 0xf000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -8384,7 +8645,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
0,
@@ -8536,7 +8797,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
NPC_F_LA_U_HAS_IH_NIX,
@@ -8693,7 +8954,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 30, 1,
NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
NPC_F_LA_U_HAS_HIGIG2,
@@ -8818,7 +9079,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 38, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2,
@@ -8947,7 +9208,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
+ 6, 0, 42, 3, 0,
NPC_S_KPU5_IP6, 14, 0,
NPC_LID_LA, NPC_LT_NA,
0,
@@ -9124,7 +9385,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
0,
@@ -9204,7 +9465,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
@@ -9213,7 +9474,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ NPC_S_NA, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
NPC_F_LB_U_UNK_ETYPE,
0, 0, 0, 0,
@@ -9228,7 +9489,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
@@ -9324,7 +9585,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
@@ -9428,7 +9689,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
@@ -9532,7 +9793,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
0,
@@ -9628,7 +9889,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
@@ -9684,7 +9945,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -9757,7 +10018,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ NPC_S_NA, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
NPC_F_LB_U_UNK_ETYPE,
0, 0, 0, 0,
@@ -9772,7 +10033,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 18, 1,
NPC_LID_LB, NPC_LT_LB_EDSA,
NPC_F_LB_L_EDSA,
@@ -9836,7 +10097,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
+ 6, 0, 42, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_EXDSA,
NPC_F_LB_L_EXDSA,
@@ -9923,6 +10184,22 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU3_CTAG_C, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -9949,7 +10226,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10029,7 +10306,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10101,7 +10378,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10165,7 +10442,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10237,7 +10514,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 0,
NPC_LID_LB, NPC_LT_NA,
0,
@@ -10310,80 +10587,80 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_IP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ 6, 0, 42, 1, 0,
+ NPC_S_KPU5_IP6, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_ARP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_RARP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_RARP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_PTP, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_PTP, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_FCOE, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU5_FCOE, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_MPLS, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_MPLS, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 0, 0,
- NPC_S_KPU4_NSH, 4, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_KPU4_NSH, 2, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LB, NPC_LT_LB_CTAG,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
},
@@ -10397,7 +10674,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10469,7 +10746,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10533,7 +10810,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 8, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10605,7 +10882,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 4, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -10685,7 +10962,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_DSA,
NPC_F_LB_L_DSA,
@@ -10733,7 +11010,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
+ 6, 0, 42, 1, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_DSA_VLAN,
NPC_F_LB_L_DSA_VLAN,
@@ -10894,7 +11171,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 6, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
NPC_F_LB_L_FDSA,
@@ -10942,7 +11219,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
NPC_F_LB_L_FDSA,
@@ -10990,7 +11267,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
@@ -11014,7 +11291,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
+ 6, 0, 42, 0, 0,
NPC_S_KPU5_IP6, 2, 0,
NPC_LID_LC, NPC_LT_NA,
0,
@@ -11063,15 +11340,15 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
- NPC_S_KPU5_IP, 10, 0,
+ NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 0, 0,
- NPC_S_KPU5_IP6, 10, 0,
+ 6, 0, 42, 0, 0,
+ NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
0,
0, 0, 0, 0,
@@ -11119,7 +11396,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 2, 0,
+ 2, 0, 4, 2, 0,
NPC_S_KPU8_UDP, 20, 1,
NPC_LID_LC, NPC_LT_LC_IP,
0,
@@ -11223,7 +11500,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 8, 10, 2, 0,
+ 2, 8, 4, 2, 0,
NPC_S_KPU8_UDP, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
0,
@@ -11450,6 +11727,22 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0, 0,
NPC_S_KPU6_IP6_ROUT, 40, 1,
NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_IP6_SRH_SEG_1,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_IP6_SRH_SEG_2,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
NPC_F_LC_L_EXT_ROUT,
0, 0, 0, 0,
},
@@ -11695,6 +11988,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP,
@@ -11791,6 +12092,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
@@ -11951,6 +12260,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_CUSTOM, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP6,
@@ -12080,6 +12397,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12184,6 +12509,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12280,6 +12613,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12368,6 +12709,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12472,6 +12821,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12568,6 +12925,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12681,6 +13046,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12769,6 +13142,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -12857,6 +13238,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_CUSTOM, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
@@ -13058,6 +13447,14 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
NPC_S_KPU9_ESP, 8, 1,
NPC_LID_LD, NPC_LT_LD_UDP,
@@ -13458,6 +13855,70 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 12, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 16, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 20, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 16, 20, 2, 0,
+ NPC_S_KPU11_TU_ETHER, 24, 1,
+ NPC_LID_LD, NPC_LT_LD_GRE,
+ NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LD, NPC_LT_LD_GRE,
@@ -13529,6 +13990,14 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_LD, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_CUSTOM, 0, 1,
+ NPC_LID_LF, NPC_LT_LF_CUSTOM0,
+ 0,
+ 0, 0xff, 0, 0,
+ },
+ {
NPC_ERRLEV_LD, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -13946,6 +14415,22 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU12_TU_IP6, 0, 0,
+ NPC_LID_LE, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LE, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -15105,7 +15590,9 @@ static struct npc_lt_def_cfg npc_lt_defaults = {
},
.rx_et = {
{
- .lid = NPC_LID_LB,
+ .offset = -2,
+ .valid = 1,
+ .lid = NPC_LID_LC,
.ltype_match = NPC_LT_NA,
.ltype_mask = 0x0,
},
@@ -15139,6 +15626,12 @@ static struct npc_mcam_kex npc_mkex_default = {
/* Ethertype: 2 bytes, KW0[55:40] */
KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
},
+ [NPC_LT_LA_CPT_HDR] = {
+ /* DMAC: 6 bytes, KW1[55:8] */
+ KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
+ },
/* Layer A: HiGig2: */
[NPC_LT_LA_HIGIG2_ETHER] = {
/* Classification: 2 bytes, KW1[23:8] */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 5c1d04a3c559..07d4859de53a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -817,6 +817,8 @@ static int rvu_fwdata_init(struct rvu *rvu)
err = cgx_get_fwdata_base(&fwdbase);
if (err)
goto fail;
+
+ BUILD_BUG_ON(offsetof(struct rvu_fwdata, cgx_fw_data) > FWDATA_CGX_LMAC_OFFSET);
rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata));
if (!rvu->fwdata)
goto fail;
@@ -1484,7 +1486,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
/* All CGX mapped PFs are set with assigned NIX block during init */
if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
blkaddr = pf->nix_blkaddr;
- } else if (is_afvf(pcifunc)) {
+ } else if (is_lbk_vf(rvu, pcifunc)) {
vf = pcifunc - 1;
/* Assign NIX based on VF number. All even numbered VFs get
* NIX0 and odd numbered gets NIX1
@@ -2034,7 +2036,7 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
u16 target;
/* Only PF can add VF permissions */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_lbk_vf(rvu, pcifunc))
return -EOPNOTSUPP;
target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
@@ -2618,6 +2620,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 3. Cleanup pools (NPA)
*/
+ /* Free allocated BPIDs */
+ rvu_nix_flr_free_bpids(rvu, pcifunc);
+
/* Free multicast/mirror node associated with the 'pcifunc' */
rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
@@ -3151,6 +3156,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
int err, chans, vfs;
+ int pos = 0;
if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
dev_warn(&pdev->dev,
@@ -3158,6 +3164,12 @@ static int rvu_enable_sriov(struct rvu *rvu)
return 0;
}
+ /* Get the RVU VF device ID */
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &rvu->vf_devid);
+
chans = rvu_get_num_lbk_chans();
if (chans < 0)
return chans;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 43be37dd1f32..f390525a6217 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -288,6 +288,16 @@ enum rvu_pfvf_flags {
#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
+struct nix_bp {
+ struct rsrc_bmap bpids; /* free bpids bitmap */
+ u16 cgx_bpid_cnt;
+ u16 sdp_bpid_cnt;
+ u16 free_pool_base;
+ u16 *fn_map; /* pcifunc mapping */
+ u8 *intf_map; /* interface type map */
+ u8 *ref_cnt;
+};
+
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
@@ -363,6 +373,7 @@ struct nix_hw {
struct nix_lso lso;
struct nix_txvlan txvlan;
struct nix_ipolicer *ipolicer;
+ struct nix_bp bp;
u64 *tx_credits;
u8 cc_mcs_cnt;
};
@@ -432,6 +443,13 @@ struct mbox_wq_info {
struct workqueue_struct *mbox_wq;
};
+struct channel_fwdata {
+ struct sdp_node_info info;
+ u8 valid;
+#define RVU_CHANL_INFO_RESERVED 379
+ u8 reserved[RVU_CHANL_INFO_RESERVED];
+};
+
struct rvu_fwdata {
#define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/
#define RVU_FWDATA_VERSION 0x0001
@@ -450,11 +468,13 @@ struct rvu_fwdata {
u64 msixtr_base;
u32 ptp_ext_clk_rate;
u32 ptp_ext_tstamp;
-#define FWDATA_RESERVED_MEM 1022
+ struct channel_fwdata channel_data;
+#define FWDATA_RESERVED_MEM 958
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 9
#define CGX_LMACS_MAX 4
#define CGX_LMACS_USX 8
+#define FWDATA_CGX_LMAC_OFFSET 10536
union {
struct cgx_lmac_fwdata_s
cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
@@ -503,6 +523,7 @@ struct rvu {
struct mutex rsrc_lock; /* Serialize resource alloc/free */
struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
+ u16 vf_devid; /* VF device id */
int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
@@ -732,9 +753,11 @@ static inline bool is_rvu_supports_nix1(struct rvu *rvu)
/* Function Prototypes
* RVU
*/
-static inline bool is_afvf(u16 pcifunc)
+#define RVU_LBK_VF_DEVID 0xA0F8
+static inline bool is_lbk_vf(struct rvu *rvu, u16 pcifunc)
{
- return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+ return (!(pcifunc & ~RVU_PFVF_FUNC_MASK) &&
+ (rvu->vf_devid == RVU_LBK_VF_DEVID));
}
static inline bool is_vf(u16 pcifunc)
@@ -794,7 +817,7 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
int rvu_sdp_init(struct rvu *rvu);
bool is_sdp_pfvf(u16 pcifunc);
bool is_sdp_pf(u16 pcifunc);
-bool is_sdp_vf(u16 pcifunc);
+bool is_sdp_vf(struct rvu *rvu, u16 pcifunc);
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
@@ -873,6 +896,7 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx);
int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx, u16 mcam_index);
+void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
/* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index e6d7914ce61c..2500f5ba4f5a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2870,6 +2870,10 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "%d ", ntohs(rule->packet.dport));
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
break;
+ case NPC_TCP_FLAGS:
+ seq_printf(s, "%d ", rule->packet.tcp_flags);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags);
+ break;
case NPC_IPSEC_SPI:
seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 1e6fbd98423d..96c04f7d93f8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1235,8 +1235,8 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
- RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
};
@@ -1434,15 +1434,6 @@ static const struct devlink_param rvu_af_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
rvu_af_dl_dwrr_mtu_validate),
-};
-
-static const struct devlink_param rvu_af_dl_param_exact_match[] = {
- DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
- "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- rvu_af_npc_exact_feature_get,
- rvu_af_npc_exact_feature_disable,
- rvu_af_npc_exact_feature_validate),
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
"npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
@@ -1457,6 +1448,15 @@ static const struct devlink_param rvu_af_dl_param_exact_match[] = {
rvu_af_dl_nix_maxlf_validate),
};
+static const struct devlink_param rvu_af_dl_param_exact_match[] = {
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_npc_exact_feature_get,
+ rvu_af_npc_exact_feature_disable,
+ rvu_af_npc_exact_feature_validate),
+};
+
/* Devlink switch mode */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 66203a90f052..d39001cdc707 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -499,29 +499,115 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
+#define NIX_BPIDS_PER_LMAC 8
+#define NIX_BPIDS_PER_CPT 1
+static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
+{
+ struct nix_bp *bp = &hw->bp;
+ int err, max_bpids;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);
+
+ /* Reserve the BPIds for CGX and SDP */
+ bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
+ bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
+ bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
+ NIX_BPIDS_PER_CPT;
+ bp->bpids.max = max_bpids - bp->free_pool_base;
+
+ err = rvu_alloc_bitmap(&bp->bpids);
+ if (err)
+ return err;
+
+ bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!bp->fn_map)
+ return -ENOMEM;
+
+ bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!bp->intf_map)
+ return -ENOMEM;
+
+ bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
+ sizeof(u8), GFP_KERNEL);
+ if (!bp->ref_cnt)
+ return -ENOMEM;
+
+ return 0;
+}
+
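The arithmetic above partitions the BPID space into CGX BPIDs first, then SDP, then one CPT BPID, with the remainder handed to the bitmap allocator for LBK VFs. A worked example with illustrative counts (the real values come from NIX_AF_CONST1 and the probed CGX/SDP link counts, so these numbers are assumptions):

#include <stdio.h>

#define NIX_BPIDS_PER_LMAC	8
#define NIX_BPIDS_PER_CPT	1

int main(void)
{
	/* Illustrative values only. */
	int cgx_links = 12, sdp_links = 1, sdp_chans = 256, max_bpids = 512;

	int cgx_cnt = cgx_links * NIX_BPIDS_PER_LMAC;		/* 96 */
	int sdp_cnt = sdp_links * sdp_chans;			/* 256 */
	int free_base = cgx_cnt + sdp_cnt + NIX_BPIDS_PER_CPT;	/* 353 */

	printf("CGX BPIDs : 0..%d\n", cgx_cnt - 1);
	printf("SDP BPIDs : %d..%d\n", cgx_cnt, cgx_cnt + sdp_cnt - 1);
	printf("free pool : %d..%d (%d BPIDs for LBK VFs)\n",
	       free_base, max_bpids - 1, max_bpids - free_base);
	return 0;
}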
+void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, bpid, err;
+ struct nix_hw *nix_hw;
+ struct nix_bp *bp;
+
+ if (!is_lbk_vf(rvu, pcifunc))
+ return;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return;
+
+ bp = &nix_hw->bp;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+ if (bp->fn_map[bpid] == pcifunc) {
+ bp->ref_cnt[bpid]--;
+ if (bp->ref_cnt[bpid])
+ continue;
+ rvu_free_rsrc(&bp->bpids, bpid);
+ bp->fn_map[bpid] = 0;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+}
+
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct nix_bp_cfg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, pf, type, err;
+ u16 chan_base, chan, bpid;
struct rvu_pfvf *pfvf;
- int blkaddr, pf, type;
- u16 chan_base, chan;
+ struct nix_hw *nix_hw;
+ struct nix_bp *bp;
u64 cfg;
pf = rvu_get_pf(pcifunc);
- type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+ bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
cfg & ~BIT_ULL(16));
+
+ if (type == NIX_INTF_TYPE_LBK) {
+ bpid = cfg & GENMASK(8, 0);
+ mutex_lock(&rvu->rsrc_lock);
+ rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
+ for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+ if (bp->fn_map[bpid] == pcifunc) {
+ bp->fn_map[bpid] = 0;
+ bp->ref_cnt[bpid] = 0;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ }
}
return 0;
}
@@ -529,25 +615,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
- int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
- u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
+ int bpid, blkaddr, sdp_chan_base, err;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_pfvf *pfvf;
+ struct nix_hw *nix_hw;
u8 cgx_id, lmac_id;
- u64 cfg;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
- lmac_chan_cnt = cfg & 0xFF;
+ struct nix_bp *bp;
- cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
- lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
- sdp_chan_cnt = cfg & 0xFFF;
- sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
+ err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
- pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ bp = &nix_hw->bp;
/* Backpressure IDs range division
* CGX channles are mapped to (0 - 191) BPIDs
@@ -561,38 +642,48 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/
switch (type) {
case NIX_INTF_TYPE_CGX:
- if ((req->chan_base + req->chan_cnt) > 16)
- return -EINVAL;
+ if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
+ return NIX_AF_ERR_INVALID_BPID_REQ;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
- bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
- (lmac_id * lmac_chan_cnt) + req->chan_base;
+ bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
+ (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
- if (bpid > cgx_bpid_cnt)
- return -EINVAL;
+ if (bpid > bp->cgx_bpid_cnt)
+ return NIX_AF_ERR_INVALID_BPID;
break;
case NIX_INTF_TYPE_LBK:
- if ((req->chan_base + req->chan_cnt) > 63)
- return -EINVAL;
- bpid = cgx_bpid_cnt + req->chan_base;
- if (req->bpid_per_chan)
- bpid += chan_id;
- if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
- return -EINVAL;
+ /* Alloc bpid from the free pool */
+ mutex_lock(&rvu->rsrc_lock);
+ bpid = rvu_alloc_rsrc(&bp->bpids);
+ if (bpid < 0) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return NIX_AF_ERR_INVALID_BPID;
+ }
+ bp->fn_map[bpid] = req->hdr.pcifunc;
+ bp->ref_cnt[bpid]++;
+ bpid += bp->free_pool_base;
+ mutex_unlock(&rvu->rsrc_lock);
break;
case NIX_INTF_TYPE_SDP:
- if ((req->chan_base + req->chan_cnt) > 255)
- return -EINVAL;
+ if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
+ return NIX_AF_ERR_INVALID_BPID_REQ;
- bpid = sdp_bpid_cnt + req->chan_base;
+ /* Handle the use case of 2 SDP blocks */
+ if (!hw->cap.programmable_chans)
+ sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
+ else
+ sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;
+
+ bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
- if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
- return -EINVAL;
+ if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
+ return NIX_AF_ERR_INVALID_BPID;
break;
default:
return -EINVAL;
@@ -612,7 +703,7 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
u64 cfg;
pf = rvu_get_pf(pcifunc);
- type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
@@ -1523,7 +1614,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
cfg = NPC_TX_DEF_PKIND;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
- intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
intf = NIX_INTF_TYPE_SDP;
@@ -1899,7 +1990,7 @@ static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
- if (is_afvf(pcifunc)) {/* LBK links */
+ if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */
return hw->cgx_links;
} else if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -1916,7 +2007,7 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
struct rvu_hwinfo *hw = rvu->hw;
int pf = rvu_get_pf(pcifunc);
- if (is_afvf(pcifunc)) { /* LBK links */
+ if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */
*start = hw->cap.nix_txsch_per_cgx_lmac * link;
*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
@@ -3356,7 +3447,7 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
int pf;
/* skip multicast pkt replication for AF's VFs & SDP links */
- if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc))
return 0;
if (!hw->cap.nix_rx_multicast)
@@ -3703,7 +3794,7 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
@@ -4039,6 +4130,13 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_match = NPC_LT_LE_GTPU;
field->ltype_mask = 0xF;
break;
+ case NIX_FLOW_KEY_TYPE_CUSTOM0:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 6;
+ field->bytesm1 = 1; /* 2 Bytes */
+ field->ltype_match = NPC_LT_LC_CUSTOM0;
+ field->ltype_mask = 0xF;
+ break;
case NIX_FLOW_KEY_TYPE_VLAN:
field->lid = NPC_LID_LB;
field->hdr_offset = 2; /* Skip TPID (2-bytes) */
@@ -4420,7 +4518,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
rvu_get_lbk_link_max_frs(rvu, &max_mtu);
else
rvu_get_lmac_link_max_frs(rvu, &max_mtu);
@@ -4784,6 +4882,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err)
return err;
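+ /* Carve out the dynamic backpressure ID free pool before any
+ * PF/VF can request BP enable on its channels.
+ */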
+ err = nix_setup_bpids(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
/* Configure segmentation offload formats */
nix_setup_lso(rvu, nix_hw, blkaddr);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 167145bdcb75..e350242bbafb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -61,28 +61,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
return 0;
}
-static int npc_mcam_verify_pf_func(struct rvu *rvu,
- struct mcam_entry *entry_data, u8 intf,
- u16 pcifunc)
-{
- u16 pf_func, pf_func_mask;
-
- if (is_npc_intf_rx(intf))
- return 0;
-
- pf_func_mask = (entry_data->kw_mask[0] >> 32) &
- NPC_KEX_PF_FUNC_MASK;
- pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
-
- pf_func = be16_to_cpu((__force __be16)pf_func);
- if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
- ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
- (pcifunc & ~RVU_PFVF_FUNC_MASK)))
- return -EINVAL;
-
- return 0;
-}
-
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@@ -417,7 +395,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
owner = mcam->entry2pfvf_map[index];
target_func = (entry->action >> 4) & 0xffff;
/* do nothing when target is LBK/PF or owner is not PF */
- if (is_pffunc_af(owner) || is_afvf(target_func) ||
+ if (is_pffunc_af(owner) || is_lbk_vf(rvu, target_func) ||
(owner & RVU_PFVF_FUNC_MASK) ||
!(target_func & RVU_PFVF_FUNC_MASK))
return;
@@ -437,6 +415,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
return;
}
+ /* AF modifies the given action iff the PF/VF has requested it */
+ if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT)
+ return;
+
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@@ -626,7 +608,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int blkaddr, index;
/* AF's and SDP VFs work in promiscuous mode */
- if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) || is_sdp_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -791,7 +773,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
return;
/* Skip LBK VFs */
- if (is_afvf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc))
return;
/* If pkt replication is not supported,
@@ -871,7 +853,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u16 vf_func;
/* Only CGX PF/VF can add allmulticast entry */
- if (is_afvf(pcifunc) && is_sdp_vf(pcifunc))
+ if (is_lbk_vf(rvu, pcifunc) && is_sdp_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -1850,8 +1832,8 @@ void npc_mcam_rsrcs_deinit(struct rvu *rvu)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
- kfree(mcam->bmap);
- kfree(mcam->bmap_reverse);
+ bitmap_free(mcam->bmap);
+ bitmap_free(mcam->bmap_reverse);
kfree(mcam->entry2pfvf_map);
kfree(mcam->cntr2pfvf_map);
kfree(mcam->entry2cntr_map);
@@ -1904,21 +1886,20 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
/* Allocate bitmaps for managing MCAM entries */
- mcam->bmap = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries),
- sizeof(long), GFP_KERNEL);
+ mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
if (!mcam->bmap)
return -ENOMEM;
- mcam->bmap_reverse = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries),
- sizeof(long), GFP_KERNEL);
+ mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL);
if (!mcam->bmap_reverse)
goto free_bmap;
mcam->bmap_fcnt = mcam->bmap_entries;
/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
- mcam->entry2pfvf_map = kmalloc_array(mcam->bmap_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ GFP_KERNEL);
+
if (!mcam->entry2pfvf_map)
goto free_bmap_reverse;
@@ -1941,21 +1922,21 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
if (err)
goto free_entry_map;
- mcam->cntr2pfvf_map = kmalloc_array(mcam->counters.max,
- sizeof(u16), GFP_KERNEL);
+ mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->cntr2pfvf_map)
goto free_cntr_bmap;
/* Alloc memory for MCAM entry to counter mapping and for tracking
* counter's reference count.
*/
- mcam->entry2cntr_map = kmalloc_array(mcam->bmap_entries,
- sizeof(u16), GFP_KERNEL);
+ mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->entry2cntr_map)
goto free_cntr_map;
- mcam->cntr_refcnt = kmalloc_array(mcam->counters.max,
- sizeof(u16), GFP_KERNEL);
+ mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16),
+ GFP_KERNEL);
if (!mcam->cntr_refcnt)
goto free_entry_cntr_map;
@@ -1988,9 +1969,9 @@ free_cntr_bmap:
free_entry_map:
kfree(mcam->entry2pfvf_map);
free_bmap_reverse:
- kfree(mcam->bmap_reverse);
+ bitmap_free(mcam->bmap_reverse);
free_bmap:
- kfree(mcam->bmap);
+ bitmap_free(mcam->bmap);
return -ENOMEM;
}
@@ -2852,12 +2833,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
else
nix_intf = pfvf->nix_rx_intf;
- if (!is_pffunc_af(pcifunc) &&
- npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) {
- rc = NPC_MCAM_INVALID_REQ;
- goto exit;
- }
-
/* For AF installed rules, the nix_intf should be set to target NIX */
if (is_pffunc_af(req->hdr.pcifunc))
nix_intf = req->intf;
@@ -3209,10 +3184,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
if (!is_npc_interface_valid(rvu, req->intf))
return NPC_MCAM_INVALID_REQ;
- if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
- req->hdr.pcifunc))
- return NPC_MCAM_INVALID_REQ;
-
/* Try to allocate a MCAM entry */
entry_req.hdr.pcifunc = req->hdr.pcifunc;
entry_req.contig = true;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index c75669c8fde7..c181e7aa9eb6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -53,6 +53,7 @@ static const char * const npc_flow_names[] = {
[NPC_MPLS4_TTL] = "lse depth 4",
[NPC_TYPE_ICMP] = "icmp type",
[NPC_CODE_ICMP] = "icmp code",
+ [NPC_TCP_FLAGS] = "tcp flags",
[NPC_UNKNOWN] = "unknown",
};
@@ -530,6 +531,7 @@ do { \
NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1);
NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1);
+ NPC_SCAN_HDR(NPC_TCP_FLAGS, NPC_LID_LD, NPC_LT_LD_TCP, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
@@ -574,7 +576,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) |
- BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP);
+ BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP) |
+ BIT_ULL(NPC_TCP_FLAGS);
/* for tcp/udp/sctp corresponding layer type should be in the key */
if (*features & proto_flags) {
@@ -982,7 +985,8 @@ do { \
mask->icmp_type, 0);
NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0,
mask->icmp_code, 0);
-
+ NPC_WRITE_FLOW(NPC_TCP_FLAGS, tcp_flags, ntohs(pkt->tcp_flags), 0,
+ ntohs(mask->tcp_flags), 0);
NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0,
ntohl(mask->spi), 0);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 6f73ad9807f0..086f05c0376f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -439,6 +439,9 @@
#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32)
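+/* Constant-register fields used to size the backpressure ID free pool */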
+#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12)
+#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0)
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index ae50d56258ec..38cfe148f4b7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -40,8 +40,12 @@ bool is_sdp_pf(u16 pcifunc)
!(pcifunc & RVU_PFVF_FUNC_MASK));
}
-bool is_sdp_vf(u16 pcifunc)
+#define RVU_SDP_VF_DEVID 0xA0F7
+bool is_sdp_vf(struct rvu *rvu, u16 pcifunc)
{
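+ /* A pcifunc with no PF bits set belongs to an AF-owned VF; its
+ * interface type can only be told apart by the probed VF device id.
+ */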
+ if (!(pcifunc & ~RVU_PFVF_FUNC_MASK))
+ return (rvu->vf_devid == RVU_SDP_VF_DEVID);
+
return (is_sdp_pfvf(pcifunc) &&
!!(pcifunc & RVU_PFVF_FUNC_MASK));
}
@@ -52,6 +56,14 @@ int rvu_sdp_init(struct rvu *rvu)
struct rvu_pfvf *pfvf;
u32 i = 0;
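+ /* Prefer SDP channel info handed over by firmware; fall back to
+ * probing the SDP PF PCI device only when it is absent.
+ */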
+ if (rvu->fwdata->channel_data.valid) {
+ sdp_pf_num[0] = 0;
+ pfvf = &rvu->pf[sdp_pf_num[0]];
+ pfvf->sdp_info = &rvu->fwdata->channel_data.info;
+
+ return 0;
+ }
+
while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OTX2_SDP_PF,
pdev)) != NULL) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 7ca6941ea0b9..02d0b707aea5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -951,8 +951,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
- if (err)
+ if (err) {
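+ /* Free the sg list allocated earlier in this function
+ * so the error path does not leak it.
+ */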
+ kfree(sq->sg);
+ sq->sg = NULL;
return err;
+ }
}
sq->head = 0;
@@ -968,7 +971,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->stats.bytes = 0;
sq->stats.pkts = 0;
- return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+ err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
+ if (err) {
+ kfree(sq->sg);
+ sq->sg = NULL;
+ return err;
+ }
+
+ return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 4fd44b6eecea..87bdb93cb066 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -638,6 +638,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
BIT(FLOW_DISSECTOR_KEY_IPSEC) |
BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
dissector->used_keys);
@@ -857,6 +858,16 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match;
+
+ flow_rule_match_tcp(rule, &match);
+
+ flow_spec->tcp_flags = match.key->flags;
+ flow_mask->tcp_flags = match.mask->flags;
+ req->features |= BIT_ULL(NPC_TCP_FLAGS);
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
struct flow_match_mpls match;
u8 bit;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index d58b07e7e123..7063c78bd35f 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -286,7 +286,6 @@ mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
- struct page *page;
int i;
for (i = 0; i < q->n_desc; i++) {
@@ -301,19 +300,12 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
entry->buf = NULL;
}
- if (!q->cache.va)
- return;
-
- page = virt_to_page(q->cache.va);
- __page_frag_cache_drain(page, q->cache.pagecnt_bias);
- memset(&q->cache, 0, sizeof(q->cache));
+ page_frag_cache_drain(&q->cache);
}
static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
- struct page *page;
-
for (;;) {
void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
@@ -323,12 +315,7 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
skb_free_frag(buf);
}
- if (!q->cache.va)
- return;
-
- page = virt_to_page(q->cache.va);
- __page_frag_cache_drain(page, q->cache.pagecnt_bias);
- memset(&q->cache, 0, sizeof(q->cache));
+ page_frag_cache_drain(&q->cache);
}
static void
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index f5b1f8c7834f..7f20813456e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2199,8 +2199,9 @@ reset_slave:
if (cmd != MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
slave, cmd);
- /* Turn on internal error letting slave reset itself immeditaly,
- * otherwise it might take till timeout on command is passed
+ /* Turn on internal error letting slave reset itself
+ * immediately, otherwise it might take till timeout on
+ * command is passed
*/
reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
}
@@ -2954,7 +2955,7 @@ static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
dummy_admin.default_vlan = vlan;
/* VF wants to move to other VST state which is valid with current
- * rate limit. Either differnt default vlan in VST or other
+ * rate limit. Either different default vlan in VST or other
* supported QoS priority. Otherwise we don't allow this change when
* the TX rate is still configured.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 4d4f9cf9facb..e130e7259275 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -115,7 +115,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
return;
}
- /* Acessing the CQ outside of rcu_read_lock is safe, because
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
++cq->arm_sn;
@@ -137,7 +137,7 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
return;
}
- /* Acessing the CQ outside of rcu_read_lock is safe, because
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
cq->event(cq, event_type);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 9e3b76182088..cd754cd76bde 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -96,8 +96,8 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
#define MLX4_EN_WRAP_AROUND_SEC 10UL
/* By scheduling the overflow check every 5 seconds, we have a reasonably
- * good chance we wont miss a wrap around.
- * TOTO: Use a timer instead of a work queue to increase the guarantee.
+ * good chance we won't miss a wrap around.
+ * TODO: Use a timer instead of a work queue to increase the guarantee.
*/
#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 33bbcced8105..5d3fde63b273 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -42,6 +42,7 @@
#include <net/ip.h>
#include <net/vxlan.h>
#include <net/devlink.h>
+#include <net/rps.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -1072,7 +1073,8 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
1, MLX4_MCAST_CONFIG);
/* Update multicast list - we cache all addresses so they won't
- * change while HW is updated holding the command semaphor */
+ * change while HW is updated holding the command semaphore
+ */
netif_addr_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
@@ -1817,7 +1819,7 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_set_rss_steer_rules(priv))
mlx4_warn(mdev, "Failed setting steering rules\n");
- /* Attach rx QP to bradcast address */
+ /* Attach rx QP to broadcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index a09b6e05337d..eac49657bd07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -762,7 +762,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
- en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
+ en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n",
((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
((struct mlx4_err_cqe *)cqe)->syndrome);
goto next;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 65cb63f6c465..1ddb11cb25f9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -992,7 +992,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->ts_requested = 1;
}
- /* Prepare ctrl segement apart opcode+ownership, which depends on
+ /* Prepare ctrl segment apart from opcode+ownership, which depends on
* whether LSO is used */
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 6598b10a9ff4..9572a45f6143 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -210,7 +210,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
s_eqe->slave_id = slave;
- /* ensure all information is written before setting the ownersip bit */
+ /* ensure all information is written before setting the ownership bit */
dma_wmb();
s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
++slave_eq->prod;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
index 954b86faac29..40ca29bb928c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
@@ -44,7 +44,7 @@
/* Default supported priorities for VPP allocation */
#define MLX4_DEFAULT_QOS_PRIO (0)
-/* Derived from FW feature definition, 0 is the default vport fo all QPs */
+/* Derived from FW feature definition, 0 is the default vport for all QPs */
#define MLX4_VPP_DEFAULT_VPORT (0)
struct mlx4_vport_qos_param {
@@ -98,7 +98,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
u16 *available_vpp, u8 *vpp_p_up);
/**
- * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among differnt priorities.
+ * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among different priorities.
* The total number of VPPs assigned to all priorities of a port must not exceed
* the value reported by available_vpp in mlx4_ALLOCATE_VPP_get.
* VPP allocation is allowed only after the port type has been set,
@@ -113,7 +113,7 @@ int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up);
/**
- * mlx4_SET_VPORT_QOS_get - Query QoS proporties of a Vport.
+ * mlx4_SET_VPORT_QOS_get - Query QoS properties of a Vport.
* Each priority allowed for the Vport is assigned with a share of the BW,
* and a BW limitation. This command queries the current QoS values.
*
@@ -128,7 +128,7 @@ int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
struct mlx4_vport_qos_param *out_param);
/**
- * mlx4_SET_VPORT_QOS_set - Set QoS proporties of a Vport.
+ * mlx4_SET_VPORT_QOS_set - Set QoS properties of a Vport.
* QoS parameters can be modified at any time, but must be initialized
* before any QP is associated with the VPort.
*
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2581226836b5..7b02ff61126d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -129,7 +129,7 @@ static const struct mlx4_profile default_profile = {
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
.num_mpt = 1 << 19,
- .num_mtt = 1 << 20, /* It is really num mtt segements */
+ .num_mtt = 1 << 20, /* It is really num mtt segments */
};
static const struct mlx4_profile low_mem_profile = {
@@ -1508,7 +1508,7 @@ static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
priv->v2p.port1 = port1;
priv->v2p.port2 = port2;
} else {
- mlx4_err(dev, "Failed to change port mape: %d\n", err);
+ mlx4_err(dev, "Failed to change port map: %d\n", err);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index e9cd4bb6f83d..d3d9ec042d2c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -112,7 +112,7 @@ struct mlx4_en_stat_out_flow_control_mbox {
__be64 tx_pause_duration;
/* Number of transmitter transitions from XOFF state to XON state */
__be64 tx_pause_transition;
- /* Reserverd */
+ /* Reserved */
__be64 reserved[2];
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 256a06b3c096..4e43f4a7d246 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -2118,7 +2118,7 @@ static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
* @data: output buffer to put the requested data into.
*
* Reads cable module eeprom data, puts the outcome data into
- * data pointer paramer.
+ * data pointer parameter.
* Returns num of read bytes on success or a negative error
* code.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index c44870b175f9..76dc5a9b9648 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -29,7 +29,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
- lib/crypto.o
+ lib/crypto.o lib/sd.o
#
# Netdev extra
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index cf0477f53dc4..47e7c2639774 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -210,7 +210,7 @@ static bool is_dpll_supported(struct mlx5_core_dev *dev)
return false;
if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) {
- mlx5_core_warn(dev, "Missing SyncE capability\n");
+ mlx5_core_dbg(dev, "Missing SyncE capability\n");
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3e064234f6fe..98d4306929f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -157,6 +157,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE &&
+ !dev->priv.fw_reset) {
+ NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function");
+ return -EOPNOTSUPP;
+ }
+
if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
index 18fed2b34fb1..904e08de852e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -41,6 +41,7 @@ struct mlx5_dpll_synce_status {
enum mlx5_msees_oper_status oper_status;
bool ho_acq;
bool oper_freq_measure;
+ enum mlx5_msees_failure_reason failure_reason;
s32 frequency_diff;
};
@@ -60,6 +61,7 @@ mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
synce_status->oper_status = MLX5_GET(msees_reg, out, oper_status);
synce_status->ho_acq = MLX5_GET(msees_reg, out, ho_acq);
synce_status->oper_freq_measure = MLX5_GET(msees_reg, out, oper_freq_measure);
+ synce_status->failure_reason = MLX5_GET(msees_reg, out, failure_reason);
synce_status->frequency_diff = MLX5_GET(msees_reg, out, frequency_diff);
return 0;
}
@@ -99,6 +101,26 @@ mlx5_dpll_lock_status_get(struct mlx5_dpll_synce_status *synce_status)
}
}
+static enum dpll_lock_status_error
+mlx5_dpll_lock_status_error_get(struct mlx5_dpll_synce_status *synce_status)
+{
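+ /* Only the two failure states carry a failure reason; map it to
+ * the dpll core error codes, all other states report no error.
+ */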
+ switch (synce_status->oper_status) {
+ case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
+ fallthrough;
+ case MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING:
+ switch (synce_status->failure_reason) {
+ case MLX5_MSEES_FAILURE_REASON_PORT_DOWN:
+ return DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN;
+ case MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF:
+ return DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH;
+ default:
+ return DPLL_LOCK_STATUS_ERROR_UNDEFINED;
+ }
+ default:
+ return DPLL_LOCK_STATUS_ERROR_NONE;
+ }
+}
+
static enum dpll_pin_state
mlx5_dpll_pin_state_get(struct mlx5_dpll_synce_status *synce_status)
{
@@ -118,10 +140,11 @@ mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status,
return 0;
}
-static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
- void *priv,
- enum dpll_lock_status *status,
- struct netlink_ext_ack *extack)
+static int
+mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, void *priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack)
{
struct mlx5_dpll_synce_status synce_status;
struct mlx5_dpll *mdpll = priv;
@@ -131,6 +154,7 @@ static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
if (err)
return err;
*status = mlx5_dpll_lock_status_get(&synce_status);
+ *status_error = mlx5_dpll_lock_status_error_get(&synce_status);
return 0;
}
@@ -261,7 +285,7 @@ static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
{
if (mdpll->tracking_netdev)
return;
- netdev_dpll_pin_set(netdev, mdpll->dpll_pin);
+ dpll_netdev_pin_set(netdev, mdpll->dpll_pin);
mdpll->tracking_netdev = netdev;
}
@@ -269,7 +293,7 @@ static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
{
if (!mdpll->tracking_netdev)
return;
- netdev_dpll_pin_clear(mdpll->tracking_netdev);
+ dpll_netdev_pin_clear(mdpll->tracking_netdev);
mdpll->tracking_netdev = NULL;
}
@@ -389,7 +413,7 @@ static void mlx5_dpll_remove(struct auxiliary_device *adev)
struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev);
struct mlx5_core_dev *mdev = mdpll->mdev;
- cancel_delayed_work(&mdpll->work);
+ cancel_delayed_work_sync(&mdpll->work);
mlx5_dpll_mdev_netdev_untrack(mdpll, mdev);
destroy_workqueue(mdpll->wq);
dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 55c6ace0acd5..84db05fb9389 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -60,6 +60,7 @@
#include "lib/clock.h"
#include "en/rx_res.h"
#include "en/selq.h"
+#include "lib/sd.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -791,6 +792,8 @@ struct mlx5e_channel {
struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
+ int vec_ix;
+ int sd_ix;
int cpu;
/* Sync between icosq recovery and XSK enable/disable. */
struct mutex icosq_recovery_lock;
@@ -914,7 +917,7 @@ struct mlx5e_priv {
bool tx_ptp_opened;
bool rx_ptp_opened;
struct hwtstamp_config tstamp;
- u16 q_counter;
+ u16 q_counter[MLX5_SD_MAX_GROUP_SZ];
u16 drop_rq_q_counter;
struct notifier_block events_nb;
struct notifier_block blocking_events_nb;
@@ -1029,12 +1032,12 @@ struct mlx5e_xsk_param;
struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
- struct mlx5e_xsk_param *xsk, int node,
+ struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
struct mlx5e_rq *rq);
#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_close_rq(struct mlx5e_rq *rq);
-int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
struct mlx5e_sq_param;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
index 48581ea3adcb..874a1016623c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -23,20 +23,26 @@ bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix)
return test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
}
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id)
{
struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
*rqn = c->rq.rqn;
+ if (vhca_id)
+ *vhca_id = MLX5_CAP_GEN(c->mdev, vhca_id);
}
-void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id)
{
struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
WARN_ON_ONCE(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state));
*rqn = c->xskrq.rqn;
+ if (vhca_id)
+ *vhca_id = MLX5_CAP_GEN(c->mdev, vhca_id);
}
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
index 637ca90daaa8..6715aa9383b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -10,8 +10,10 @@ struct mlx5e_channels;
unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs);
bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix);
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
-void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id);
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
+ u32 *vhca_id);
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
#endif /* __MLX5_EN_CHANNELS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 40c8df111754..e2d8d2754be0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -20,10 +20,8 @@
#define NUM_REQ_PPCNT_COUNTER_S1 MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1
#define NUM_REQ_Q_COUNTERS_S1 MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1
-int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
+static int mlx5e_monitor_counter_cap(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = priv->mdev;
-
if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters))
return false;
if (MLX5_CAP_PCAM_REG(mdev, ppcnt) &&
@@ -36,24 +34,38 @@ int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
return true;
}
-static void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
+int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *pos;
+ int i;
+
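+ /* Counters are armed on every device of a socket-direct group,
+ * so each of them must support the feature.
+ */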
+ mlx5_sd_for_each_dev(i, priv->mdev, pos)
+ if (!mlx5e_monitor_counter_cap(pos))
+ return false;
+ return true;
+}
+
+static void mlx5e_monitor_counter_arm(struct mlx5_core_dev *mdev)
{
u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
MLX5_SET(arm_monitor_counter_in, in, opcode,
MLX5_CMD_OP_ARM_MONITOR_COUNTER);
- mlx5_cmd_exec_in(priv->mdev, arm_monitor_counter, in);
+ mlx5_cmd_exec_in(mdev, arm_monitor_counter, in);
}
static void mlx5e_monitor_counters_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
monitor_counters_work);
+ struct mlx5_core_dev *pos;
+ int i;
mutex_lock(&priv->state_lock);
mlx5e_stats_update_ndo_stats(priv);
mutex_unlock(&priv->state_lock);
- mlx5e_monitor_counter_arm(priv);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos)
+ mlx5e_monitor_counter_arm(pos);
}
static int mlx5e_monitor_event_handler(struct notifier_block *nb,
@@ -97,15 +109,13 @@ static int fill_monitor_counter_q_counter_set1(int cnt, int q_counter, u32 *in)
}
/* check mlx5e_monitor_counter_supported() before calling this function */
-static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
+static void mlx5e_set_monitor_counter(struct mlx5_core_dev *mdev, int q_counter)
{
- struct mlx5_core_dev *mdev = priv->mdev;
int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters);
int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
int num_ppcnt_counters = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
- int q_counter = priv->q_counter;
int cnt = 0;
if (num_ppcnt_counters >= NUM_REQ_PPCNT_COUNTER_S1 &&
@@ -127,13 +137,17 @@ static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
/* check mlx5e_monitor_counter_supported() before calling this function */
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *pos;
+ int i;
+
INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work);
MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
MONITOR_COUNTER);
- mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
-
- mlx5e_set_monitor_counter(priv);
- mlx5e_monitor_counter_arm(priv);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ mlx5_eq_notifier_register(pos, &priv->monitor_counters_nb);
+ mlx5e_set_monitor_counter(pos, priv->q_counter[i]);
+ mlx5e_monitor_counter_arm(pos);
+ }
queue_work(priv->wq, &priv->update_stats_work);
}
@@ -141,11 +155,15 @@ void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
+ struct mlx5_core_dev *pos;
+ int i;
MLX5_SET(set_monitor_counter_in, in, opcode,
MLX5_CMD_OP_SET_MONITOR_COUNTER);
- mlx5_cmd_exec_in(priv->mdev, set_monitor_counter, in);
- mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ mlx5_cmd_exec_in(pos, set_monitor_counter, in);
+ mlx5_eq_notifier_unregister(pos, &priv->monitor_counters_nb);
+ }
cancel_work_sync(&priv->monitor_counters_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 5d213a9886f1..a3f31d9d527e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -240,11 +240,14 @@ static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
return xsk->headroom + hw_mtu;
}
-static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
+static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
{
- /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
- u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 headroom;
+
+ if (no_head_tail_room)
+ return SKB_DATA_ALIGN(hw_mtu);
+ headroom = mlx5e_get_linear_rq_headroom(params, NULL);
return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}
@@ -254,6 +257,7 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk,
bool mpwqe)
{
+ bool no_head_tail_room;
u32 sz;
/* XSK frames are mapped as individual pages, because frames may come in
@@ -262,7 +266,13 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
if (xsk)
return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;
- sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
+ no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);
+
+ /* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
+ * no_head_tail_room should be set in the case of XDP with Striding RQ
+ * when SKB is not linear. This is because another page is allocated for the linear part.
+ */
+ sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, no_head_tail_room));
/* XDP in mlx5e doesn't support multiple packets per page.
* Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.
@@ -289,7 +299,11 @@ bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
return false;
- /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
+ /* Call mlx5e_rx_get_linear_sz_skb with the no_head_tail_room parameter set
+ * to exclude headroom and tailroom from calculations.
+ * no_head_tail_room is true when SKB is built on XDP_PASS on XSK RQs
+ * since packet data buffers don't have headroom and tailroom reserved for the SKB.
+ * Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
* must fit into a CPU page.
*/
if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
@@ -674,7 +688,7 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
.napi = &c->napi,
.ch_stats = c->stats,
.node = cpu_to_node(c->cpu),
- .ix = c->ix,
+ .ix = c->vec_ix,
};
}
@@ -945,7 +959,6 @@ static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *param
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- u16 q_counter,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
@@ -1007,7 +1020,6 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
- MLX5_SET(rqc, rqc, counter_set_id, q_counter);
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
@@ -1018,7 +1030,6 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
}
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
- u16 q_counter,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
@@ -1027,7 +1038,6 @@ void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, log_wq_stride,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
- MLX5_SET(rqc, rqc, counter_set_id, q_counter);
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}
@@ -1292,13 +1302,12 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- u16 q_counter,
struct mlx5e_channel_param *cparam)
{
u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
int err;
- err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
+ err = mlx5e_build_rq_param(mdev, params, NULL, &cparam->rq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 6800949dafbc..9a781f18b57f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -130,10 +130,8 @@ void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- u16 q_counter,
struct mlx5e_rq_param *param);
void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
- u16 q_counter,
struct mlx5e_rq_param *param);
void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param);
@@ -149,7 +147,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param);
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
- u16 q_counter,
struct mlx5e_channel_param *cparam);
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 078f56a3cbb2..d0af7271da34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -42,9 +42,9 @@ mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metad
WARN_ON_ONCE(tracker->inuse);
tracker->inuse = true;
- spin_lock(&list->tracker_list_lock);
+ spin_lock_bh(&list->tracker_list_lock);
list_add_tail(&tracker->entry, &list->tracker_list_head);
- spin_unlock(&list->tracker_list_lock);
+ spin_unlock_bh(&list->tracker_list_lock);
}
static void
@@ -54,9 +54,9 @@ mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 me
WARN_ON_ONCE(!tracker->inuse);
tracker->inuse = false;
- spin_lock(&list->tracker_list_lock);
+ spin_lock_bh(&list->tracker_list_lock);
list_del(&tracker->entry);
- spin_unlock(&list->tracker_list_lock);
+ spin_unlock_bh(&list->tracker_list_lock);
}
void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
@@ -155,7 +155,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
- spin_lock(&cqe_list->tracker_list_lock);
+ spin_lock_bh(&cqe_list->tracker_list_lock);
list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
struct sk_buff *skb =
mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
@@ -170,7 +170,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
pos->inuse = false;
list_del(&pos->entry);
}
- spin_unlock(&cqe_list->tracker_list_lock);
+ spin_unlock_bh(&cqe_list->tracker_list_lock);
}
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
@@ -646,7 +646,6 @@ static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
struct net_device *netdev,
- u16 q_counter,
struct mlx5e_ptp_params *ptp_params)
{
struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
@@ -655,7 +654,7 @@ static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
mlx5e_init_rq_type_params(mdev, params);
params->sw_mtu = netdev->max_mtu;
- mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
+ mlx5e_build_rq_param(mdev, params, NULL, rq_params);
}
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
@@ -681,7 +680,7 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
/* RQ */
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
params->vlan_strip_disable = orig->vlan_strip_disable;
- mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
+ mlx5e_ptp_build_rq_param(c->mdev, c->netdev, cparams);
}
}
@@ -714,13 +713,16 @@ static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
int node = dev_to_node(c->mdev->device);
- int err;
+ int err, sd_ix;
+ u16 q_counter;
err = mlx5e_init_ptp_rq(c, params, &c->rq);
if (err)
return err;
- return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
+ sd_ix = mlx5_sd_ch_ix_get_dev_ix(c->mdev, MLX5E_PTP_CHANNEL_IX);
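+ /* With socket-direct, each device owns a q_counter; use the one
+ * of the device backing the PTP channel.
+ */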
+ q_counter = c->priv->q_counter[sd_ix];
+ return mlx5e_open_rq(params, rq_param, NULL, node, q_counter, &c->rq);
}
static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
@@ -935,6 +937,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
mlx5e_ptp_rx_set_fs(c->priv);
mlx5e_activate_rq(&c->rq);
+ netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
}
mlx5e_trigger_napi_sched(&c->napi);
}
@@ -943,8 +946,10 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
int tc;
- if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+ if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+ netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, NULL);
mlx5e_deactivate_rq(&c->rq);
+ }
if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
for (tc = 0; tc < c->num_tc; tc++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 34adf8c3f81a..e87e26f2c669 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -122,8 +122,8 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
memset(&param_sq, 0, sizeof(param_sq));
memset(&param_cq, 0, sizeof(param_cq));
- mlx5e_build_sq_param(priv->mdev, params, &param_sq);
- mlx5e_build_tx_cq_param(priv->mdev, params, &param_cq);
+ mlx5e_build_sq_param(c->mdev, params, &param_sq);
+ mlx5e_build_tx_cq_param(c->mdev, params, &param_cq);
err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
if (err)
goto err_free_sq;
@@ -176,7 +176,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
*/
smp_wmb();
- qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node_qid);
+ qos_dbg(sq->mdev, "Activate QoS SQ qid %u\n", node_qid);
mlx5e_activate_txqsq(sq);
return 0;
@@ -190,7 +190,7 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
if (!sq) /* Handle the case when the SQ failed to open. */
return;
- qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
+ qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 4358798d6ce1..25d751eba99b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -294,8 +294,8 @@ static void mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
params = &priv->channels.params;
rq_sz = mlx5e_rqwq_get_size(rq);
- real_time = mlx5_is_real_time_rq(priv->mdev);
- rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
+ real_time = mlx5_is_real_time_rq(rq->mdev);
+ rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(rq->mdev, params, NULL));
mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 6b44ddce14e9..0ab9db319530 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -219,7 +219,6 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *sq, int tc)
{
bool stopped = netif_xmit_stopped(sq->txq);
- struct mlx5e_priv *priv = sq->priv;
u8 state;
int err;
@@ -227,7 +226,7 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
- err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
+ err = mlx5_core_query_sq_state(sq->mdev, sq->sqn, &state);
if (!err)
devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
index 7b8ff7a71003..bcafb4bf9415 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -4,6 +4,33 @@
#include "rqt.h"
#include <linux/mlx5/transobj.h>
+static bool verify_num_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids,
+ unsigned int size)
+{
+ unsigned int max_num_vhca_id = MLX5_CAP_GEN_2(mdev, max_rqt_vhca_id);
+ int i;
+
+ /* Verify that all vhca_ids are in range [0, max_num_vhca_ids - 1] */
+ for (i = 0; i < size; i++)
+ if (vhca_ids[i] >= max_num_vhca_id)
+ return false;
+ return true;
+}
+
+static bool rqt_verify_vhca_ids(struct mlx5_core_dev *mdev, u32 *vhca_ids,
+ unsigned int size)
+{
+ if (!vhca_ids)
+ return true;
+
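+ /* Entries referencing another vhca's RQs need the cross_vhca_rqt capability */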
+ if (!MLX5_CAP_GEN(mdev, cross_vhca_rqt))
+ return false;
+ if (!verify_num_vhca_ids(mdev, vhca_ids, size))
+ return false;
+
+ return true;
+}
+
void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
unsigned int num_channels)
{
@@ -13,19 +40,38 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
indir->table[i] = i % num_channels;
}
+static void fill_rqn_list(void *rqtc, u32 *rqns, u32 *vhca_ids, unsigned int size)
+{
+ unsigned int i;
+
+ if (vhca_ids) {
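+ /* Each entry carries both the RQ number and the vhca that owns it */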
+ MLX5_SET(rqtc, rqtc, rq_vhca_id_format, 1);
+ for (i = 0; i < size; i++) {
+ MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_num, rqns[i]);
+ MLX5_SET(rqtc, rqtc, rq_vhca[i].rq_vhca_id, vhca_ids[i]);
+ }
+ } else {
+ for (i = 0; i < size; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);
+ }
+}
static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- u16 max_size, u32 *init_rqns, u16 init_size)
+ u16 max_size, u32 *init_rqns, u32 *init_vhca_ids, u16 init_size)
{
+ int entry_sz;
void *rqtc;
int inlen;
int err;
u32 *in;
- int i;
+
+ if (!rqt_verify_vhca_ids(mdev, init_vhca_ids, init_size))
+ return -EOPNOTSUPP;
rqt->mdev = mdev;
rqt->size = max_size;
- inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * init_size;
+ entry_sz = init_vhca_ids ? MLX5_ST_SZ_BYTES(rq_vhca) : MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + entry_sz * init_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -33,10 +79,9 @@ static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, rqt_max_size, rqt->size);
-
MLX5_SET(rqtc, rqtc, rqt_actual_size, init_size);
- for (i = 0; i < init_size; i++)
- MLX5_SET(rqtc, rqtc, rq_num[i], init_rqns[i]);
+
+ fill_rqn_list(rqtc, init_rqns, init_vhca_ids, init_size);
err = mlx5_core_create_rqt(rqt->mdev, in, inlen, &rqt->rqtn);
@@ -49,7 +94,7 @@ int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
{
u16 max_size = indir_enabled ? indir_table_size : 1;
- return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1);
+ return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, NULL, 1);
}
static int mlx5e_bits_invert(unsigned long a, int size)
@@ -63,7 +108,8 @@ static int mlx5e_bits_invert(unsigned long a, int size)
return inv;
}
-static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns,
+static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, u32 *rss_vhca_ids, u32 *vhca_ids,
+ unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
unsigned int i;
@@ -82,30 +128,42 @@ static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns
*/
return -EINVAL;
rss_rqns[i] = rqns[ix];
+ if (vhca_ids)
+ rss_vhca_ids[i] = vhca_ids[ix];
}
return 0;
}
int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- u32 *rqns, unsigned int num_rqns,
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
- u32 *rss_rqns;
+ u32 *rss_rqns, *rss_vhca_ids = NULL;
int err;
rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);
if (!rss_rqns)
return -ENOMEM;
- err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (vhca_ids) {
+ rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids),
+ GFP_KERNEL);
+ if (!rss_vhca_ids) {
+ kvfree(rss_rqns);
+ return -ENOMEM;
+ }
+ }
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, rss_vhca_ids, vhca_ids, num_rqns, hfunc, indir);
if (err)
goto out;
- err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns,
+ err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns, rss_vhca_ids,
indir->actual_table_size);
out:
+ kvfree(rss_vhca_ids);
kvfree(rss_rqns);
return err;
}
@@ -126,15 +184,20 @@ void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
}
-static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int size)
+static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
+ unsigned int size)
{
- unsigned int i;
+ int entry_sz;
void *rqtc;
int inlen;
u32 *in;
int err;
- inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * size;
+ if (!rqt_verify_vhca_ids(rqt->mdev, vhca_ids, size))
+ return -EINVAL;
+
+ entry_sz = vhca_ids ? MLX5_ST_SZ_BYTES(rq_vhca) : MLX5_ST_SZ_BYTES(rq_num);
+ inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + entry_sz * size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -143,8 +206,8 @@ static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int siz
MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
MLX5_SET(rqtc, rqtc, rqt_actual_size, size);
- for (i = 0; i < size; i++)
- MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]);
+
+ fill_rqn_list(rqtc, rqns, vhca_ids, size);
err = mlx5_core_modify_rqt(rqt->mdev, rqt->rqtn, in, inlen);
@@ -152,17 +215,21 @@ static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int siz
return err;
}
-int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn)
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id)
{
- return mlx5e_rqt_redirect(rqt, &rqn, 1);
+ return mlx5e_rqt_redirect(rqt, &rqn, vhca_id, 1);
}
-int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
+ unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir)
{
- u32 *rss_rqns;
+ u32 *rss_rqns, *rss_vhca_ids = NULL;
int err;
+ if (!rqt_verify_vhca_ids(rqt->mdev, vhca_ids, num_rqns))
+ return -EINVAL;
+
if (WARN_ON(rqt->size != indir->max_table_size))
return -EINVAL;
@@ -170,13 +237,23 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_
if (!rss_rqns)
return -ENOMEM;
- err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir);
+ if (vhca_ids) {
+ rss_vhca_ids = kvmalloc_array(indir->actual_table_size, sizeof(*rss_vhca_ids),
+ GFP_KERNEL);
+ if (!rss_vhca_ids) {
+ kvfree(rss_rqns);
+ return -ENOMEM;
+ }
+ }
+
+ err = mlx5e_calc_indir_rqns(rss_rqns, rqns, rss_vhca_ids, vhca_ids, num_rqns, hfunc, indir);
if (err)
goto out;
- err = mlx5e_rqt_redirect(rqt, rss_rqns, indir->actual_table_size);
+ err = mlx5e_rqt_redirect(rqt, rss_rqns, rss_vhca_ids, indir->actual_table_size);
out:
+ kvfree(rss_vhca_ids);
kvfree(rss_rqns);
return err;
}
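The rqt.c changes above funnel every table write through one fill helper and size the command inbox by entry type up front: with VHCA ids present each slot is an (RQN, VHCA id) pair, otherwise a bare RQ number. A standalone model of that layout choice, using simplified stand-in structs rather than the real mlx5 ifc layouts (a sketch, not the driver code):

    /* Dual-layout fill: the entry size picked up front keeps the buffer
     * length consistent with what the fill loop writes. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct rq_num  { uint32_t rqn; };
    struct rq_vhca { uint32_t rqn; uint16_t vhca_id; };

    static size_t rqt_entry_size(int multi_vhca)
    {
        return multi_vhca ? sizeof(struct rq_vhca) : sizeof(struct rq_num);
    }

    static void fill_rqn_list(void *tbl, const uint32_t *rqns,
                              const uint32_t *vhca_ids, unsigned int n)
    {
        unsigned int i;

        if (vhca_ids) {
            struct rq_vhca *e = tbl;

            for (i = 0; i < n; i++) {
                e[i].rqn = rqns[i];
                e[i].vhca_id = (uint16_t)vhca_ids[i];
            }
        } else {
            struct rq_num *e = tbl;

            for (i = 0; i < n; i++)
                e[i].rqn = rqns[i];
        }
    }

    int main(void)
    {
        uint32_t rqns[2] = { 0x10, 0x11 }, vhca[2] = { 3, 4 };
        void *tbl = calloc(2, rqt_entry_size(1));

        fill_rqn_list(tbl, rqns, vhca, 2);
        printf("entry1 rqn=%#x vhca=%u\n",
               (unsigned)((struct rq_vhca *)tbl)[1].rqn,
               ((struct rq_vhca *)tbl)[1].vhca_id);
        free(tbl);
        return 0;
    }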
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
index 77fba3ebd18d..e0bc30308c77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -20,7 +20,7 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
unsigned int num_channels);
struct mlx5e_rqt {
- struct mlx5_core_dev *mdev;
+ struct mlx5_core_dev *mdev; /* primary */
u32 rqtn;
u16 size;
};
@@ -28,7 +28,7 @@ struct mlx5e_rqt {
int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
bool indir_enabled, u32 init_rqn, u32 indir_table_size);
int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- u32 *rqns, unsigned int num_rqns,
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir);
void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt);
@@ -38,8 +38,9 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
}
u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels);
-int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn);
-int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
+int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn, u32 *vhca_id);
+int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, u32 *vhca_ids,
+ unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir);
#endif /* __MLX5_EN_RQT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index c1545a2e8d6d..5f742f896600 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -74,7 +74,7 @@ struct mlx5e_rss {
struct mlx5e_tir *tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_rqt rqt;
- struct mlx5_core_dev *mdev;
+ struct mlx5_core_dev *mdev; /* primary */
u32 drop_rqn;
bool inner_ft_support;
bool enabled;
@@ -473,21 +473,22 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
return 0;
}
-static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
int err;
- err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, num_rqns, rss->hash.hfunc, &rss->indir);
+ err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, vhca_ids, num_rqns, rss->hash.hfunc,
+ &rss->indir);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), err);
return err;
}
-void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
rss->enabled = true;
- mlx5e_rss_apply(rss, rqns, num_rqns);
+ mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
}
void mlx5e_rss_disable(struct mlx5e_rss *rss)
@@ -495,7 +496,7 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss)
int err;
rss->enabled = false;
- err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn);
+ err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn, NULL);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
@@ -568,7 +569,7 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc,
- u32 *rqns, unsigned int num_rqns)
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns)
{
bool changed_indir = false;
bool changed_hash = false;
@@ -608,7 +609,7 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
}
if (changed_indir && rss->enabled) {
- err = mlx5e_rss_apply(rss, rqns, num_rqns);
+ err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns);
if (err) {
mlx5e_rss_copy(rss, old_rss);
goto out;
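The rss.c plumbing ends in mlx5e_rss_set_rxfh, which keeps the old parameters around and restores them if applying the new indirection fails, so the device never ends up half-configured. A compilable model of that apply-or-rollback step, with illustrative names:

    #include <stdio.h>

    struct rss_state { unsigned int indir[4]; };

    /* Stands in for the RQT modify command. */
    static int apply(const struct rss_state *s, int fail)
    {
        (void)s;
        return fail ? -1 : 0;
    }

    static int set_rxfh(struct rss_state *cur, const struct rss_state *next,
                        int fail)
    {
        struct rss_state old = *cur;

        *cur = *next;
        if (apply(cur, fail)) {
            *cur = old; /* device still runs the old table */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct rss_state cur = { { 0, 1, 2, 3 } }, next = { { 3, 2, 1, 0 } };

        set_rxfh(&cur, &next, 1);
        printf("after failed apply: %u\n", cur.indir[0]); /* 0: rolled back */
        return 0;
    }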
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index d1d0bc350e92..d0df98963c8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -39,7 +39,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn);
-void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns);
+void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
void mlx5e_rss_disable(struct mlx5e_rss *rss);
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
@@ -47,7 +47,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc,
- u32 *rqns, unsigned int num_rqns);
+ u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss);
u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt);
int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index b23e224e3763..a86eade9a9e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -8,7 +8,7 @@
#define MLX5E_MAX_NUM_RSS 16
struct mlx5e_rx_res {
- struct mlx5_core_dev *mdev;
+ struct mlx5_core_dev *mdev; /* primary */
enum mlx5e_rx_res_features features;
unsigned int max_nch;
u32 drop_rqn;
@@ -19,6 +19,7 @@ struct mlx5e_rx_res {
struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
bool rss_active;
u32 *rss_rqns;
+ u32 *rss_vhca_ids;
unsigned int rss_nch;
struct {
@@ -34,6 +35,13 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */
+static u32 *get_vhca_ids(struct mlx5e_rx_res *res, int offset)
+{
+ bool multi_vhca = res->features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
+
+ return multi_vhca ? res->rss_vhca_ids + offset : NULL;
+}
+
void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch)
{
int i;
@@ -85,8 +93,11 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i
return PTR_ERR(rss);
mlx5e_rss_set_indir_uniform(rss, init_nch);
- if (res->rss_active)
- mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
+ if (res->rss_active) {
+ u32 *vhca_ids = get_vhca_ids(res, 0);
+
+ mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
+ }
res->rss[i] = rss;
*rss_idx = i;
@@ -153,10 +164,12 @@ static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res)
for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
struct mlx5e_rss *rss = res->rss[i];
+ u32 *vhca_ids;
if (!rss)
continue;
- mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch);
+ vhca_ids = get_vhca_ids(res, 0);
+ mlx5e_rss_enable(rss, res->rss_rqns, vhca_ids, res->rss_nch);
}
}
@@ -200,6 +213,7 @@ int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
const u32 *indir, const u8 *key, const u8 *hfunc)
{
+ u32 *vhca_ids = get_vhca_ids(res, 0);
struct mlx5e_rss *rss;
if (rss_idx >= MLX5E_MAX_NUM_RSS)
@@ -209,7 +223,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
if (!rss)
return -ENOENT;
- return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch);
+ return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids,
+ res->rss_nch);
}
int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx,
@@ -280,11 +295,13 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
static void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
+ kvfree(res->rss_vhca_ids);
kvfree(res->rss_rqns);
kvfree(res);
}
-static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch)
+static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch,
+ bool multi_vhca)
{
struct mlx5e_rx_res *rx_res;
@@ -298,6 +315,15 @@ static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsig
return NULL;
}
+ if (multi_vhca) {
+ rx_res->rss_vhca_ids = kvcalloc(max_nch, sizeof(*rx_res->rss_vhca_ids), GFP_KERNEL);
+ if (!rx_res->rss_vhca_ids) {
+ kvfree(rx_res->rss_rqns);
+ kvfree(rx_res);
+ return NULL;
+ }
+ }
+
return rx_res;
}
@@ -424,10 +450,11 @@ mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features featu
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
+ bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
struct mlx5e_rx_res *res;
int err;
- res = mlx5e_rx_res_alloc(mdev, max_nch);
+ res = mlx5e_rx_res_alloc(mdev, max_nch, multi_vhca);
if (!res)
return ERR_PTR(-ENOMEM);
@@ -504,10 +531,11 @@ static void mlx5e_rx_res_channel_activate_direct(struct mlx5e_rx_res *res,
struct mlx5e_channels *chs,
unsigned int ix)
{
+ u32 *vhca_id = get_vhca_ids(res, ix);
u32 rqn = res->rss_rqns[ix];
int err;
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn);
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn, vhca_id);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
@@ -519,7 +547,7 @@ static void mlx5e_rx_res_channel_deactivate_direct(struct mlx5e_rx_res *res,
{
int err;
- err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn);
+ err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn, NULL);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n",
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
@@ -534,10 +562,12 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
nch = mlx5e_channels_get_num(chs);
for (ix = 0; ix < chs->num; ix++) {
+ u32 *vhca_id = get_vhca_ids(res, ix);
+
if (mlx5e_channels_is_xsk(chs, ix))
- mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
else
- mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
}
res->rss_nch = chs->num;
@@ -554,7 +584,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
rqn = res->drop_rqn;
- err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
+ err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn, NULL);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n",
mlx5e_rqt_get_rqtn(&res->ptp.rqt),
@@ -573,7 +603,7 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
mlx5e_rx_res_channel_deactivate_direct(res, ix);
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
- err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn);
+ err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn, NULL);
if (err)
mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n",
mlx5e_rqt_get_rqtn(&res->ptp.rqt),
@@ -584,10 +614,12 @@ void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res)
void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *chs,
unsigned int ix, bool xsk)
{
+ u32 *vhca_id = get_vhca_ids(res, ix);
+
if (xsk)
- mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_xsk_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
else
- mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]);
+ mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix], vhca_id);
mlx5e_rx_res_rss_enable(res);
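get_vhca_ids is the only place the MULTI_VHCA feature bit is consulted: callers always ask for the array and simply receive NULL when the feature is off, which keeps every call site branch-free. A small model, names illustrative:

    #include <stddef.h>
    #include <stdio.h>

    #define FEATURE_MULTI_VHCA (1u << 2)

    struct rx_res {
        unsigned int features;
        unsigned int vhca_ids[8];
    };

    /* NULL means single-VHCA; a pointer means per-channel ids from 'offset'. */
    static unsigned int *get_vhca_ids(struct rx_res *res, int offset)
    {
        return (res->features & FEATURE_MULTI_VHCA) ?
               res->vhca_ids + offset : NULL;
    }

    int main(void)
    {
        struct rx_res res = { .features = 0 };

        printf("%s\n", get_vhca_ids(&res, 3) ? "per-channel ids" : "NULL");
        res.features |= FEATURE_MULTI_VHCA;
        printf("%s\n", get_vhca_ids(&res, 3) ? "per-channel ids" : "NULL");
        return 0;
    }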
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 82aaba8a82b3..7b1a9f0f1874 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -18,6 +18,7 @@ struct mlx5e_rss_params_hash;
enum mlx5e_rx_res_features {
MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0),
MLX5E_RX_RES_FEATURE_PTP = BIT(1),
+ MLX5E_RX_RES_FEATURE_MULTI_VHCA = BIT(2),
};
/* Setup */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 86bf007fd05b..b500cc2c9689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -37,7 +37,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
- mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
+ mlx5_core_dbg(priv->mdev, "firmware flow level support is missing\n");
err = -EOPNOTSUPP;
goto err_check;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index ac458a8d10e0..53ca16cb9c41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -63,10 +63,12 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
struct mlx5e_create_cq_param ccp = {};
struct dim_cq_moder trap_moder = {};
struct mlx5e_rq *rq = &t->rq;
+ u16 q_counter;
int node;
int err;
node = dev_to_node(mdev->device);
+ q_counter = priv->q_counter[0];
ccp.netdev = priv->netdev;
ccp.wq = priv->wq;
@@ -79,7 +81,7 @@ static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct mlx5e_trap *t)
return err;
mlx5e_init_trap_rq(t, &t->params, rq);
- err = mlx5e_open_rq(&t->params, rq_param, NULL, node, rq);
+ err = mlx5e_open_rq(&t->params, rq_param, NULL, node, q_counter, rq);
if (err)
goto err_destroy_cq;
@@ -116,15 +118,14 @@ static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct ml
}
static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev,
- int max_mtu, u16 q_counter,
- struct mlx5e_trap *t)
+ int max_mtu, struct mlx5e_trap *t)
{
struct mlx5e_params *params = &t->params;
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
mlx5e_init_rq_type_params(mdev, params);
params->sw_mtu = max_mtu;
- mlx5e_build_rq_param(mdev, params, NULL, q_counter, &t->rq_param);
+ mlx5e_build_rq_param(mdev, params, NULL, &t->rq_param);
}
static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
@@ -138,7 +139,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
if (!t)
return ERR_PTR(-ENOMEM);
- mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, priv->q_counter, t);
+ mlx5e_build_trap_params(priv->mdev, netdev->max_mtu, t);
t->priv = priv;
t->mdev = priv->mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index ebada0c5af3c..db776e515b6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -6,10 +6,10 @@
#include "setup.h"
#include "en/params.h"
-static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
+static int mlx5e_xsk_map_pool(struct mlx5_core_dev *mdev,
struct xsk_buff_pool *pool)
{
- struct device *dev = mlx5_core_dma_dev(priv->mdev);
+ struct device *dev = mlx5_core_dma_dev(mdev);
return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
}
@@ -89,7 +89,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
if (unlikely(!mlx5e_xsk_is_pool_sane(pool)))
return -EINVAL;
- err = mlx5e_xsk_map_pool(priv, pool);
+ err = mlx5e_xsk_map_pool(mlx5_sd_ch_ix_get_dev(priv->mdev, ix), pool);
if (unlikely(err))
return err;
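Mapping the pool against mlx5_sd_ch_ix_get_dev(priv->mdev, ix) means the DMA device is the group member that owns channel ix, not always the primary. The exact index split is defined elsewhere in the series; a plausible model, assuming each device owns an equal contiguous block of channels (an assumption, not the confirmed layout):

    #include <stdio.h>

    /* Hypothetical split of a global channel index into (device, vector),
     * assuming equal contiguous blocks per group member. */
    static int sd_dev_ix(int ch_ix, int ch_per_dev) { return ch_ix / ch_per_dev; }
    static int sd_vec_ix(int ch_ix, int ch_per_dev) { return ch_ix % ch_per_dev; }

    int main(void)
    {
        int ch_per_dev = 4;

        for (int ix = 0; ix < 8; ix++)
            printf("channel %d -> dev %d vec %d\n",
                   ix, sd_dev_ix(ix, ch_per_dev), sd_vec_ix(ix, ch_per_dev));
        return 0;
    }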
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 82e6abbc1734..06592b9f0424 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -49,10 +49,9 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
static void mlx5e_build_xsk_cparam(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- u16 q_counter,
struct mlx5e_channel_param *cparam)
{
- mlx5e_build_rq_param(mdev, params, xsk, q_counter, &cparam->rq);
+ mlx5e_build_rq_param(mdev, params, xsk, &cparam->rq);
mlx5e_build_xdpsq_param(mdev, params, xsk, &cparam->xdp_sq);
}
@@ -93,6 +92,7 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *param
struct mlx5e_rq_param *rq_params, struct xsk_buff_pool *pool,
struct mlx5e_xsk_param *xsk)
{
+ u16 q_counter = c->priv->q_counter[c->sd_ix];
struct mlx5e_rq *xskrq = &c->xskrq;
int err;
@@ -100,7 +100,7 @@ static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *param
if (err)
return err;
- err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), xskrq);
+ err = mlx5e_open_rq(params, rq_params, xsk, cpu_to_node(c->cpu), q_counter, xskrq);
if (err)
return err;
@@ -125,7 +125,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (!cparam)
return -ENOMEM;
- mlx5e_build_xsk_cparam(priv->mdev, params, xsk, priv->q_counter, cparam);
+ mlx5e_build_xsk_cparam(priv->mdev, params, xsk, cparam);
err = mlx5e_open_cq(c->mdev, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->xskrq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 05612d9c6080..c54fd01ea635 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -984,21 +984,41 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
queue_work(sa_entry->ipsec->wq, &work->work);
}
-static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct net *net = dev_net(x->xso.dev);
u64 packets, bytes, lastuse;
lockdep_assert(lockdep_is_held(&x->lock) ||
- lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
+ lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
+ x->stats.integrity_failed += packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
+
+ mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
+ }
+
+ if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ return;
+
mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
x->curlft.packets += packets;
x->curlft.bytes += bytes;
+
+ if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+ mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
+ x->stats.replay += packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
+ }
}
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
@@ -1156,7 +1176,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
- .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
+ .xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
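The renamed mlx5e_xfrm_update_stats reads cached hardware flow counters and folds the packet counts into the xfrm state and the global SNMP-style buckets. A minimal model of that accumulation, with the firmware query faked out:

    #include <stdint.h>
    #include <stdio.h>

    struct fc { uint64_t packets, bytes; };

    /* Stands in for mlx5_fc_query_cached: counters are monotonic totals. */
    static void fc_query_cached(const struct fc *c, uint64_t *bytes,
                                uint64_t *pkts)
    {
        *bytes = c->bytes;
        *pkts = c->packets;
    }

    int main(void)
    {
        struct fc auth = { .packets = 2 }, replay = { .packets = 1 };
        uint64_t integrity_failed = 0, replay_errs = 0, bytes, pkts;

        fc_query_cached(&auth, &bytes, &pkts);
        integrity_failed += pkts;   /* -> LINUX_MIB_XFRMINSTATEPROTOERROR */
        fc_query_cached(&replay, &bytes, &pkts);
        replay_errs += pkts;        /* -> LINUX_MIB_XFRMINSTATESEQERROR */

        printf("integrity=%llu replay=%llu\n",
               (unsigned long long)integrity_failed,
               (unsigned long long)replay_errs);
        return 0;
    }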
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index adaea3493193..7d943e93cf6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -137,7 +137,6 @@ struct mlx5e_ipsec_hw_stats {
struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_rx_drop_sp_alloc;
atomic64_t ipsec_rx_drop_sadb_miss;
- atomic64_t ipsec_rx_drop_syndrome;
atomic64_t ipsec_tx_drop_bundle;
atomic64_t ipsec_tx_drop_no_state;
atomic64_t ipsec_tx_drop_not_ip;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 51a144246ea6..727fa7c18523 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -304,12 +304,6 @@ drop:
return false;
}
-enum {
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
-};
-
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
u32 ipsec_meta_data)
@@ -343,20 +337,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
-
- switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
- xo->status = CRYPTO_SUCCESS;
- break;
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
- xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
- break;
- case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
- xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
- break;
- default:
- atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
- }
+ xo->status = CRYPTO_SUCCESS;
}
int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
@@ -374,8 +355,6 @@ int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metada
return err;
}
- *metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
- MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
-
+ *metadata = ipsec_obj_id;
return 0;
}
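With the syndrome switch gone, RX metadata carries only the IPsec object id; the extractor macros kept in ipsec_rxtx.h (handle in bits 0..23, syndrome formerly in bits 24..29) still describe the old layout. A quick check of both encodings against those masks:

    #include <stdint.h>
    #include <stdio.h>

    #define METADATA_SYNDROM(m) (((m) >> 24) & 0x3f)   /* GENMASK(5, 0) */
    #define METADATA_HANDLE(m)  ((m) & 0xffffff)       /* GENMASK(23, 0) */

    int main(void)
    {
        uint32_t old_md = (2u << 24) | 0x1234; /* syndrome 2, handle 0x1234 */
        uint32_t new_md = 0x1234;              /* object id only */

        printf("old: syn=%u handle=%#x\n",
               (unsigned)METADATA_SYNDROM(old_md),
               (unsigned)METADATA_HANDLE(old_md));
        printf("new: handle=%#x\n", (unsigned)METADATA_HANDLE(new_md));
        return 0;
    }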
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 2ed99772f168..82064614846f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -43,7 +43,6 @@
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
-#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))
struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index e0e36a09721c..dd36b04e30a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -51,7 +51,6 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
- { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_syndrome) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_bundle) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 984fa04bd331..e3e57c849436 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -96,7 +96,7 @@ bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
{
u8 max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
- if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx))
+ if (is_kdump_kernel() || !MLX5_CAP_GEN(mdev, tls_rx) || mlx5_get_sd(mdev))
return false;
/* Check the possibility to post the required ICOSQ WQEs. */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index f11075e67658..adc6d8ea0960 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -11,6 +11,7 @@
#ifdef CONFIG_MLX5_EN_TLS
#include "lib/crypto.h"
+#include "lib/mlx5.h"
struct mlx5_crypto_dek *mlx5_ktls_create_key(struct mlx5_crypto_dek_pool *dek_pool,
struct tls_crypto_info *crypto_info);
@@ -61,7 +62,8 @@ void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_
static inline bool mlx5e_is_ktls_tx(struct mlx5_core_dev *mdev)
{
- return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx);
+ return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx) &&
+ !mlx5_get_sd(mdev);
}
bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 9b597cb24598..65ccb33edafb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -267,7 +267,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
goto err_out;
}
- pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
+ pdev = mlx5_core_dma_dev(sq->channel->mdev);
buf->dma_addr = dma_map_single(pdev, &buf->progress,
PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
@@ -425,14 +425,12 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
{
struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
struct mlx5e_ktls_offload_context_rx *priv_rx;
- struct mlx5e_ktls_rx_resync_ctx *resync;
u8 tracker_state, auth_state, *ctx;
struct device *dev;
u32 hw_seq;
priv_rx = buf->priv_rx;
- resync = &priv_rx->resync;
- dev = mlx5_core_dma_dev(resync->priv->mdev);
+ dev = mlx5_core_dma_dev(sq->channel->mdev);
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index d4ebd8743114..b2cabd6ab86c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -310,9 +310,9 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
-static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
- struct mlx5e_macsec_sa *sa,
- bool is_tx, struct net_device *netdev, u32 fs_id)
+static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa, bool is_tx,
+ struct net_device *netdev, u32 fs_id)
{
int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
@@ -322,20 +322,49 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
fs_id);
- mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
sa->macsec_rule = NULL;
}
+static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa, bool is_tx,
+ struct net_device *netdev, u32 fs_id)
+{
+ mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id);
+ mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+}
+
+static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *sa, bool encrypt,
+ bool is_tx, u32 *fs_id)
+{
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
+ struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+ struct mlx5_macsec_rule_attrs rule_attrs;
+ union mlx5_macsec_rule *macsec_rule;
+
+ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ rule_attrs.sci = sa->sci;
+ rule_attrs.assoc_num = sa->assoc_num;
+ rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ macsec_rule = mlx5_macsec_fs_add_rule(macsec_fs, ctx, &rule_attrs, fs_id);
+ if (!macsec_rule)
+ return -ENOMEM;
+
+ sa->macsec_rule = macsec_rule;
+
+ return 0;
+}
+
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5e_macsec_sa *sa,
bool encrypt, bool is_tx, u32 *fs_id)
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec *macsec = priv->macsec;
- struct mlx5_macsec_rule_attrs rule_attrs;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_macsec_obj_attrs obj_attrs;
- union mlx5_macsec_rule *macsec_rule;
int err;
obj_attrs.next_pn = sa->next_pn;
@@ -357,20 +386,12 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
if (err)
return err;
- rule_attrs.macsec_obj_id = sa->macsec_obj_id;
- rule_attrs.sci = sa->sci;
- rule_attrs.assoc_num = sa->assoc_num;
- rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
- MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
-
- macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
- if (!macsec_rule) {
- err = -ENOMEM;
- goto destroy_macsec_object;
+ if (sa->active) {
+ err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id);
+ if (err)
+ goto destroy_macsec_object;
}
- sa->macsec_rule = macsec_rule;
-
return 0;
destroy_macsec_object:
@@ -526,9 +547,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
goto destroy_sa;
macsec_device->tx_sa[assoc_num] = tx_sa;
- if (!secy->operational ||
- assoc_num != tx_sc->encoding_sa ||
- !tx_sa->active)
+ if (!secy->operational)
goto out;
err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
@@ -595,7 +614,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
if (ctx_tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
} else {
@@ -604,7 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
out:
mutex_unlock(&macsec->lock);
@@ -1030,8 +1049,9 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
- rx_sc->sc_xarray_element->fs_id);
+ if (rx_sa->active)
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
rx_sc->rx_sa[assoc_num] = NULL;
@@ -1112,8 +1132,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
if (!rx_sa || !rx_sa->macsec_rule)
continue;
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
- rx_sc->sc_xarray_element->fs_id);
+ mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
}
}
@@ -1124,8 +1144,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
continue;
if (rx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
- &rx_sc->sc_xarray_element->fs_id);
+ err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false,
+ &rx_sc->sc_xarray_element->fs_id);
if (err)
goto out;
}
@@ -1178,7 +1198,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
if (!tx_sa)
continue;
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
for (i = 0; i < MACSEC_NUM_AN; ++i) {
@@ -1187,7 +1207,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
continue;
if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
}
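The macsec.c refactor splits SA setup into the long-lived hardware object and the flow-steering rule, so activity toggles only add or remove the rule while the object survives. A lifecycle sketch with stubbed firmware calls, illustrative names only:

    #include <stdbool.h>
    #include <stdio.h>

    struct sa { bool obj; bool rule; };

    static int init_sa_fs(struct sa *sa)     { sa->rule = true; return 0; }
    static void cleanup_sa_fs(struct sa *sa) { sa->rule = false; }

    static int init_sa(struct sa *sa, bool active)
    {
        sa->obj = true;                 /* create hardware object */
        return active ? init_sa_fs(sa) : 0;
    }

    static void cleanup_sa(struct sa *sa)
    {
        cleanup_sa_fs(sa);
        sa->obj = false;                /* destroy hardware object */
    }

    int main(void)
    {
        struct sa sa = { 0 };

        init_sa(&sa, false);            /* add_txsa with inactive SA */
        init_sa_fs(&sa);                /* upd_txsa: active -> install rule */
        cleanup_sa_fs(&sa);             /* upd_txsa: inactive -> rule only */
        cleanup_sa(&sa);                /* delete: rule (if any) + object */
        printf("obj=%d rule=%d\n", sa.obj, sa.rule);
        return 0;
    }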
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index e66f486faafe..c7f542d0b8f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/rps.h>
#include "en.h"
#define ARFS_HASH_SHIFT BITS_PER_BYTE
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index c8e8f512803e..91848eae4565 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -70,6 +70,7 @@
#include "qos.h"
#include "en/trap.h"
#include "lib/devcom.h"
+#include "lib/sd.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
@@ -1024,7 +1025,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5_wq_destroy(&rq->wq_ctrl);
}
-int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)
{
struct mlx5_core_dev *mdev = rq->mdev;
u8 ts_format;
@@ -1051,6 +1052,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, ts_format, ts_format);
+ MLX5_SET(rqc, rqc, counter_set_id, q_counter);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -1274,7 +1276,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
- struct mlx5e_xsk_param *xsk, int node,
+ struct mlx5e_xsk_param *xsk, int node, u16 q_counter,
struct mlx5e_rq *rq)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -1287,7 +1289,7 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (err)
return err;
- err = mlx5e_create_rq(rq, param);
+ err = mlx5e_create_rq(rq, param, q_counter);
if (err)
goto err_free_rq;
@@ -1806,6 +1808,7 @@ void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
+ netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, sq->cq.napi);
}
void mlx5e_tx_disable_queue(struct netdev_queue *txq)
@@ -1819,6 +1822,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
+ netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, NULL);
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
@@ -2333,13 +2337,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_rq_param *rq_params)
{
+ u16 q_counter = c->priv->q_counter[c->sd_ix];
int err;
err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq);
if (err)
return err;
- return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), &c->rq);
+ return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), q_counter, &c->rq);
}
static int mlx5e_open_queues(struct mlx5e_channel *c,
@@ -2526,14 +2531,20 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
- int cpu = mlx5_comp_vector_get_cpu(priv->mdev, ix);
struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
unsigned int irq;
+ int vec_ix;
+ int cpu;
int err;
- err = mlx5_comp_irqn_get(priv->mdev, ix, &irq);
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
+ vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
+ cpu = mlx5_comp_vector_get_cpu(mdev, vec_ix);
+
+ err = mlx5_comp_irqn_get(mdev, vec_ix, &irq);
if (err)
return err;
@@ -2546,20 +2557,23 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
return -ENOMEM;
c->priv = priv;
- c->mdev = priv->mdev;
+ c->mdev = mdev;
c->tstamp = &priv->tstamp;
c->ix = ix;
+ c->vec_ix = vec_ix;
+ c->sd_ix = mlx5_sd_ch_ix_get_dev_ix(mdev, ix);
c->cpu = cpu;
- c->pdev = mlx5_core_dma_dev(priv->mdev);
+ c->pdev = mlx5_core_dma_dev(mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
+ c->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix]->ch;
c->aff_mask = irq_get_effective_affinity_mask(irq);
- c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
+ c->lag_port = mlx5e_enumerate_lag_port(mdev, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
+ netif_napi_set_irq(&c->napi, irq);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
@@ -2602,12 +2616,16 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_xsk(c);
else
mlx5e_activate_rq(&c->rq);
+
+ netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, &c->napi);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
int tc;
+ netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, NULL);
+
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_deactivate_xsk(c);
else
@@ -2647,7 +2665,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
if (!chs->c || !cparam)
goto err_free;
- err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
+ err = mlx5e_build_channel_param(priv->mdev, &chs->params, cparam);
if (err)
goto err_free;
@@ -2935,15 +2953,18 @@ static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
struct mlx5e_params *params)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- int num_comp_vectors, ix, irq;
-
- num_comp_vectors = mlx5_comp_vectors_max(mdev);
+ int ix;
for (ix = 0; ix < params->num_channels; ix++) {
+ int num_comp_vectors, irq, vec_ix;
+ struct mlx5_core_dev *mdev;
+
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
+ num_comp_vectors = mlx5_comp_vectors_max(mdev);
cpumask_clear(priv->scratchpad.cpumask);
+ vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
- for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
+ for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) {
int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
@@ -3335,7 +3356,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_cq *cq = &drop_rq->cq;
int err;
- mlx5e_build_drop_rq_param(mdev, priv->drop_rq_q_counter, &rq_param);
+ mlx5e_build_drop_rq_param(mdev, &rq_param);
err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
if (err)
@@ -3349,7 +3370,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
if (err)
goto err_destroy_cq;
- err = mlx5e_create_rq(drop_rq, &rq_param);
+ err = mlx5e_create_rq(drop_rq, &rq_param, priv->drop_rq_q_counter);
if (err)
goto err_free_rq;
@@ -5264,13 +5285,17 @@ void mlx5e_create_q_counters(struct mlx5e_priv *priv)
u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
- int err;
+ struct mlx5_core_dev *pos;
+ int err, i;
MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
- err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
- if (!err)
- priv->q_counter =
- MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+
+ mlx5_sd_for_each_dev(i, mdev, pos) {
+ err = mlx5_cmd_exec_inout(pos, alloc_q_counter, in, out);
+ if (!err)
+ priv->q_counter[i] =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ }
err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
if (!err)
@@ -5281,13 +5306,17 @@ void mlx5e_create_q_counters(struct mlx5e_priv *priv)
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
+ struct mlx5_core_dev *pos;
+ int i;
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
- if (priv->q_counter) {
- MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
- priv->q_counter);
- mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ if (priv->q_counter[i]) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ priv->q_counter[i]);
+ mlx5_cmd_exec_in(pos, dealloc_q_counter, in);
+ }
}
if (priv->drop_rq_q_counter) {
@@ -5371,6 +5400,8 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
features = MLX5E_RX_RES_FEATURE_PTP;
if (mlx5_tunnel_inner_ft_supported(mdev))
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
+ if (mlx5_get_sd(priv->mdev))
+ features |= MLX5E_RX_RES_FEATURE_MULTI_VHCA;
priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, priv->drop_rq.rqn,
&priv->channels.params.packet_merge,
@@ -5980,28 +6011,52 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
free_netdev(netdev);
}
-static int mlx5e_resume(struct auxiliary_device *adev)
+static int _mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
- int err;
+ struct mlx5_core_dev *pos, *to;
+ int err, i;
if (netif_device_present(netdev))
return 0;
- err = mlx5e_create_mdev_resources(mdev, true);
- if (err)
- return err;
+ mlx5_sd_for_each_dev(i, mdev, pos) {
+ err = mlx5e_create_mdev_resources(pos, true);
+ if (err)
+ goto err_destroy_mdev_res;
+ }
err = mlx5e_attach_netdev(priv);
- if (err) {
- mlx5e_destroy_mdev_resources(mdev);
+ if (err)
+ goto err_destroy_mdev_res;
+
+ return 0;
+
+err_destroy_mdev_res:
+ to = pos;
+ mlx5_sd_for_each_dev_to(i, mdev, to, pos)
+ mlx5e_destroy_mdev_resources(pos);
+ return err;
+}
+
+static int mlx5e_resume(struct auxiliary_device *adev)
+{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct auxiliary_device *actual_adev;
+ int err;
+
+ err = mlx5_sd_init(mdev);
+ if (err)
return err;
- }
+ actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ if (actual_adev)
+ return _mlx5e_resume(actual_adev);
return 0;
}
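_mlx5e_resume above unwinds only the group members that were actually set up: the cursor left by the failed loop becomes the unwind bound, and a NULL cursor (loop completed, then attach failed) makes the unwind cover the whole group. A standalone model of that bounded unwind:

    #include <stdio.h>

    #define GROUP_SZ 4

    static int create(int i, int fail_at) { return i == fail_at ? -1 : 0; }
    static void destroy(int i)            { printf("destroy %d\n", i); }

    static int resume(int fail_at)
    {
        int i, err = 0;

        for (i = 0; i < GROUP_SZ; i++) {
            err = create(i, fail_at);
            if (err)
                break;
        }
        if (!err)
            return 0;

        for (int j = 0; j < i; j++) /* unwind devices before the cursor */
            destroy(j);
        return err;
    }

    int main(void)
    {
        resume(2); /* creates 0 and 1, fails on 2, destroys 0 and 1 only */
        return 0;
    }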
@@ -6011,21 +6066,36 @@ static int _mlx5e_suspend(struct auxiliary_device *adev)
struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_dev *pos;
+ int i;
if (!netif_device_present(netdev)) {
if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
- mlx5e_destroy_mdev_resources(mdev);
+ mlx5_sd_for_each_dev(i, mdev, pos)
+ mlx5e_destroy_mdev_resources(pos);
return -ENODEV;
}
mlx5e_detach_netdev(priv);
- mlx5e_destroy_mdev_resources(mdev);
+ mlx5_sd_for_each_dev(i, mdev, pos)
+ mlx5e_destroy_mdev_resources(pos);
+
return 0;
}
static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- return _mlx5e_suspend(adev);
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct auxiliary_device *actual_adev;
+ int err = 0;
+
+ actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ if (actual_adev)
+ err = _mlx5e_suspend(actual_adev);
+
+ mlx5_sd_cleanup(mdev);
+ return err;
}
static int _mlx5e_probe(struct auxiliary_device *adev)
@@ -6071,9 +6141,9 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
goto err_destroy_netdev;
}
- err = mlx5e_resume(adev);
+ err = _mlx5e_resume(adev);
if (err) {
- mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
+ mlx5_core_err(mdev, "_mlx5e_resume failed, %d\n", err);
goto err_profile_cleanup;
}
@@ -6104,15 +6174,29 @@ err_devlink_unregister:
static int mlx5e_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
- return _mlx5e_probe(adev);
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct auxiliary_device *actual_adev;
+ int err;
+
+ err = mlx5_sd_init(mdev);
+ if (err)
+ return err;
+
+ actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ if (actual_adev)
+ return _mlx5e_probe(actual_adev);
+ return 0;
}
-static void mlx5e_remove(struct auxiliary_device *adev)
+static void _mlx5e_remove(struct auxiliary_device *adev)
{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
+ struct mlx5_core_dev *mdev = edev->mdev;
- mlx5_core_uplink_netdev_set(priv->mdev, NULL);
+ mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
_mlx5e_suspend(adev);
@@ -6122,6 +6206,19 @@ static void mlx5e_remove(struct auxiliary_device *adev)
mlx5e_destroy_devlink(mlx5e_dev);
}
+static void mlx5e_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct auxiliary_device *actual_adev;
+
+ actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
+ if (actual_adev)
+ _mlx5e_remove(actual_adev);
+
+ mlx5_sd_cleanup(mdev);
+}
+
static const struct auxiliary_device_id mlx5e_id_table[] = {
{ .name = MLX5_ADEV_NAME ".eth", },
{},
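probe/remove and suspend/resume all follow the same socket-direct dispatch shape: run sd setup on every auxiliary device, then continue with the real work only on the device mapped to the group primary. A model of that wrapper pattern, names illustrative:

    #include <stddef.h>
    #include <stdio.h>

    struct adev { int idx; };

    /* Stands in for mlx5_sd_get_adev: NULL on secondaries. */
    static struct adev *sd_get_adev(struct adev *self, int primary_idx)
    {
        return self->idx == primary_idx ? self : NULL;
    }

    static int real_probe(struct adev *adev)
    {
        printf("probing on adev %d\n", adev->idx);
        return 0;
    }

    static int probe(struct adev *self, int primary_idx)
    {
        struct adev *actual = sd_get_adev(self, primary_idx);

        return actual ? real_probe(actual) : 0; /* secondary: no-op */
    }

    int main(void)
    {
        struct adev a = { .idx = 0 }, b = { .idx = 1 };

        probe(&a, 0); /* primary: runs the real probe */
        probe(&b, 0); /* secondary: returns 0 without probing */
        return 0;
    }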
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 4b96ad657145..f3d0898bdbc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -561,11 +561,23 @@ static const struct counter_desc drop_rq_stats_desc[] = {
#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
+static bool q_counter_any(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *pos;
+ int i;
+
+ mlx5_sd_for_each_dev(i, priv->mdev, pos)
+ if (priv->q_counter[i])
+ return true;
+
+ return false;
+}
+
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
int num_stats = 0;
- if (priv->q_counter)
+ if (q_counter_any(priv))
num_stats += NUM_Q_COUNTERS;
if (priv->drop_rq_q_counter)
@@ -578,7 +590,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
int i;
- for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
q_stats_desc[i].format);
@@ -593,7 +605,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
int i;
- for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
q_stats_desc, i);
for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
@@ -607,18 +619,23 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
- int ret;
+ struct mlx5_core_dev *pos;
+ u32 rx_out_of_buffer = 0;
+ int ret, i;
MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
- if (priv->q_counter) {
- MLX5_SET(query_q_counter_in, in, counter_set_id,
- priv->q_counter);
- ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
- if (!ret)
- qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
- out, out_of_buffer);
+ mlx5_sd_for_each_dev(i, priv->mdev, pos) {
+ if (priv->q_counter[i]) {
+ MLX5_SET(query_q_counter_in, in, counter_set_id,
+ priv->q_counter[i]);
+ ret = mlx5_cmd_exec_inout(pos, query_q_counter, in, out);
+ if (!ret)
+ rx_out_of_buffer += MLX5_GET(query_q_counter_out,
+ out, out_of_buffer);
+ }
}
+ qcnt->rx_out_of_buffer = rx_out_of_buffer;
if (priv->drop_rq_q_counter) {
MLX5_SET(query_q_counter_in, in, counter_set_id,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9fb2c057bd78..31ed26cac9bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -766,7 +766,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
return err;
mlx5e_rss_params_indir_init_uniform(&indir, hp->num_channels);
- err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
+ err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, NULL, hp->num_channels,
mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
&indir);
@@ -1169,7 +1169,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
- params.q_counter = priv->q_counter;
+ params.q_counter = priv->q_counter[0];
err = devl_param_driverinit_value_get(
devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, &val);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 5c166d9d2dca..2fa076b23fbe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -401,6 +401,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
+ /* ensure skb is put on metadata_map before tracking the index */
+ wmb();
mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
if (!netif_tx_queue_stopped(sq->txq) &&
mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 190f10aba170..5a0047bdcb51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -152,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
xa_for_each(&esw->offloads.vport_reps, i, rep) {
rpriv = rep->rep_data[REP_ETH].priv;
- if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems))
+ if (!rpriv || !rpriv->netdev)
continue;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b0455134c98e..baaae628b0a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -535,21 +535,26 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
}
static bool
-esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
+esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
{
- bool vf_dest = false, pf_dest = false;
+ bool internal_dest = false, external_dest = false;
int i;
for (i = 0; i < max_dest; i++) {
- if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK)
continue;
- if (dests[i].vport.num == MLX5_VPORT_UPLINK)
- pf_dest = true;
+ /* Uplink dest is external, but considered as internal
+ * if there is reformat because firmware uses LB+hairpin to support it.
+ */
+ if (dests[i].vport.num == MLX5_VPORT_UPLINK &&
+ !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
+ external_dest = true;
else
- vf_dest = true;
+ internal_dest = true;
- if (vf_dest && pf_dest)
+ if (internal_dest && external_dest)
return true;
}
@@ -695,9 +700,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
/* Header rewrite with combined wire+loopback in FDB is not allowed */
if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
- esw_dests_to_vf_pf_vports(dest, i)) {
+ esw_dests_to_int_external(dest, i)) {
esw_warn(esw->dev,
- "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
+ "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n");
rule = ERR_PTR(-EINVAL);
goto err_esw_get;
}
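esw_dests_to_int_external generalizes the old VF/PF check: a destination list is rejected (for header rewrite) when it mixes internal and external targets, and an uplink destination with a reformat id counts as internal because firmware implements it via loopback plus hairpin. A simplified model of the classification:

    #include <stdbool.h>
    #include <stdio.h>

    struct dest { bool is_vport; bool is_uplink; bool reformat; };

    static bool dests_to_int_external(const struct dest *d, int n)
    {
        bool internal = false, external = false;

        for (int i = 0; i < n; i++) {
            if (!d[i].is_vport)
                continue;
            if (d[i].is_uplink && !d[i].reformat)
                external = true;    /* plain wire destination */
            else
                internal = true;    /* vport, or uplink via LB+hairpin */
            if (internal && external)
                return true;
        }
        return false;
    }

    int main(void)
    {
        struct dest mix[2] = {
            { .is_vport = true, .is_uplink = true },  /* wire */
            { .is_vport = true },                     /* VF vport */
        };

        printf("mixed=%d\n", dests_to_int_external(mix, 2)); /* 1 */
        return 0;
    }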
@@ -3658,22 +3663,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- struct net *devl_net, *netdev_net;
- bool ret = false;
-
- mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
- if (dev->mlx5e_res.uplink_netdev) {
- netdev_net = dev_net(dev->mlx5e_res.uplink_netdev);
- devl_net = devlink_net(devlink);
- ret = net_eq(devl_net, netdev_net);
- }
- mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
- return ret;
-}
-
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -3718,13 +3707,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
- !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
- return -EPERM;
- }
-
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 58f4c0d0fafa..e7faf7e73ca4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -366,18 +366,18 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
return -EIO;
}
- mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
+ mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED);
/* Loop until device state turns to disable */
end = jiffies + msecs_to_jiffies(delay_ms);
do {
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
cond_resched();
} while (!time_after(jiffies, end));
- if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
return -EIO;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index f27eab6e4929..2911aa34a5be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -703,19 +703,30 @@ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT);
mlx5_eq_notifier_register(dev, &fw_reset->nb);
}
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
{
- mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ if (!fw_reset)
+ return;
+
+ mlx5_eq_notifier_unregister(dev, &fw_reset->nb);
}
void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
cancel_work_sync(&fw_reset->fw_live_patch_work);
cancel_work_sync(&fw_reset->reset_request_work);
@@ -733,9 +744,13 @@ static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
- struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+ struct mlx5_fw_reset *fw_reset;
int err;
+ if (!MLX5_CAP_MCAM_REG(dev, mfrl))
+ return 0;
+
+ fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
if (!fw_reset)
return -ENOMEM;
fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");
@@ -771,6 +786,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
devl_params_unregister(priv_to_devlink(dev),
mlx5_fw_reset_devlink_params,
ARRAY_SIZE(mlx5_fw_reset_devlink_params));
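With mlx5_fw_reset_init() now gated on the MFRL register capability, dev->priv.fw_reset can legitimately stay NULL, and every entry point above bails out early on it. A reduced sketch of the pattern, with feature_init()/feature_op() as hypothetical names:

/* Init returns 0 (not an error) when the capability is absent; every
 * later entry point treats a NULL private pointer as "not present".
 */
static int feature_init(struct mlx5_core_dev *dev)
{
	if (!MLX5_CAP_MCAM_REG(dev, mfrl))
		return 0;	/* feature absent, but not a failure */

	dev->priv.fw_reset = kzalloc(sizeof(*dev->priv.fw_reset), GFP_KERNEL);
	return dev->priv.fw_reset ? 0 : -ENOMEM;
}

static void feature_op(struct mlx5_core_dev *dev)
{
	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

	if (!fw_reset)	/* capability was missing at init time */
		return;
	/* ... real work ... */
}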
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 8ff6dc9bc803..ad38e31822df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -116,9 +116,9 @@ u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev)
return MLX5_SENSOR_PCI_COMM_ERR;
if (pci_channel_offline(dev->pdev))
return MLX5_SENSOR_PCI_ERR;
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
return MLX5_SENSOR_NIC_DISABLED;
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET)
return MLX5_SENSOR_NIC_SW_RESET;
if (sensor_fw_synd_rfr(dev))
return MLX5_SENSOR_FW_SYND_RFR;
@@ -185,7 +185,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
/* Write the NIC interface field to initiate the reset, the command
* interface address also resides here, don't overwrite it.
*/
- mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);
+ mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET);
return true;
}
@@ -246,13 +246,13 @@ recover_from_sw_reset:
/* Recover from SW reset */
end = jiffies + msecs_to_jiffies(delay_ms);
do {
- if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+ if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
msleep(20);
} while (!time_after(jiffies, end));
- if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+ if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
}
@@ -272,26 +272,26 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
u8 nic_interface = mlx5_get_nic_state(dev);
switch (nic_interface) {
- case MLX5_NIC_IFC_FULL:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
break;
- case MLX5_NIC_IFC_DISABLED:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED:
mlx5_core_warn(dev, "starting teardown\n");
break;
- case MLX5_NIC_IFC_NO_DRAM_NIC:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC:
mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
break;
- case MLX5_NIC_IFC_SW_RESET:
+ case MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET:
/* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases:
* 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded
* and this is a VF), this is not recoverable by SW reset.
* Logging of this is handled elsewhere.
* 2. FW reset has been issued by another function, driver can
* be reloaded to recover after the mode switches to
- * MLX5_NIC_IFC_DISABLED.
+ * MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED.
*/
if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
mlx5_core_warn(dev, "NIC SW reset in progress\n");
@@ -452,10 +452,10 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct health_buffer __iomem *h = health->health;
u8 synd = ioread8(&h->synd);
+ devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
if (!synd)
return 0;
- devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
return 0;
@@ -555,12 +555,17 @@ static void mlx5_fw_reporter_err_work(struct work_struct *work)
&fw_reporter_ctx);
}
-static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+static const struct devlink_health_reporter_ops mlx5_fw_reporter_pf_ops = {
.name = "fw",
.diagnose = mlx5_fw_reporter_diagnose,
.dump = mlx5_fw_reporter_dump,
};
+static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = mlx5_fw_reporter_diagnose,
+};
+
static int
mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
void *priv_ctx,
@@ -646,12 +651,17 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
}
}
-static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_pf_ops = {
.name = "fw_fatal",
.recover = mlx5_fw_fatal_reporter_recover,
.dump = mlx5_fw_fatal_reporter_dump,
};
+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = mlx5_fw_fatal_reporter_recover,
+};
+
#define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
#define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
@@ -659,10 +669,14 @@ static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
+ const struct devlink_health_reporter_ops *fw_fatal_ops;
struct mlx5_core_health *health = &dev->priv.health;
+ const struct devlink_health_reporter_ops *fw_ops;
struct devlink *devlink = priv_to_devlink(dev);
u64 grace_period;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
+ fw_ops = &mlx5_fw_reporter_pf_ops;
if (mlx5_core_is_ecpf(dev)) {
grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
} else if (mlx5_core_is_pf(dev)) {
@@ -670,18 +684,19 @@ void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
} else {
/* VF or SF */
grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
+ fw_fatal_ops = &mlx5_fw_fatal_reporter_ops;
+ fw_ops = &mlx5_fw_reporter_ops;
}
health->fw_reporter =
- devl_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
- 0, dev);
+ devl_health_reporter_create(devlink, fw_ops, 0, dev);
if (IS_ERR(health->fw_reporter))
mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(health->fw_reporter));
health->fw_fatal_reporter =
devl_health_reporter_create(devlink,
- &mlx5_fw_fatal_reporter_ops,
+ fw_fatal_ops,
grace_period,
dev);
if (IS_ERR(health->fw_fatal_reporter))
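The split above keeps the .dump (and .recover plus .dump for fw_fatal) callbacks in the PF ops tables while VFs and SFs register the reduced tables. A hedged sketch of the selection; pick_fw_ops() is an illustrative helper that does not exist in the driver:

static const struct devlink_health_reporter_ops *
pick_fw_ops(struct mlx5_core_dev *dev)
{
	if (mlx5_core_is_ecpf(dev) || mlx5_core_is_pf(dev))
		return &mlx5_fw_reporter_pf_ops;	/* .diagnose + .dump */
	return &mlx5_fw_reporter_ops;			/* VF/SF: .diagnose only */
}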
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index ec32b686f586..d58032dd0df7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -10,6 +10,7 @@ enum mlx5_devcom_component {
MLX5_DEVCOM_ESW_OFFLOADS,
MLX5_DEVCOM_MPV,
MLX5_DEVCOM_HCA_PORTS,
+ MLX5_DEVCOM_SD_GROUP,
MLX5_DEVCOM_NUM_COMPONENTS,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 2b5826a785c4..37d5f445598c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -54,4 +54,16 @@ static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *md
{
return mdev->mlx5e_res.uplink_netdev;
}
+
+struct mlx5_sd;
+
+static inline struct mlx5_sd *mlx5_get_sd(struct mlx5_core_dev *dev)
+{
+ return dev->sd;
+}
+
+static inline void mlx5_set_sd(struct mlx5_core_dev *dev, struct mlx5_sd *sd)
+{
+ dev->sd = sd;
+}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
new file mode 100644
index 000000000000..5b28084e8a03
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -0,0 +1,524 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "lib/sd.h"
+#include "mlx5_core.h"
+#include "lib/mlx5.h"
+#include "fs_cmd.h"
+#include <linux/mlx5/vport.h>
+#include <linux/debugfs.h>
+
+#define sd_info(__dev, format, ...) \
+ dev_info((__dev)->device, "Socket-Direct: " format, ##__VA_ARGS__)
+#define sd_warn(__dev, format, ...) \
+ dev_warn((__dev)->device, "Socket-Direct: " format, ##__VA_ARGS__)
+
+struct mlx5_sd {
+ u32 group_id;
+ u8 host_buses;
+ struct mlx5_devcom_comp_dev *devcom;
+ struct dentry *dfs;
+ bool primary;
+ union {
+ struct { /* primary */
+ struct mlx5_core_dev *secondaries[MLX5_SD_MAX_GROUP_SZ - 1];
+ struct mlx5_flow_table *tx_ft;
+ };
+ struct { /* secondary */
+ struct mlx5_core_dev *primary_dev;
+ u32 alias_obj_id;
+ };
+ };
+};
+
+static int mlx5_sd_get_host_buses(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+
+ if (!sd)
+ return 1;
+
+ return sd->host_buses;
+}
+
+static struct mlx5_core_dev *mlx5_sd_get_primary(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+
+ if (!sd)
+ return dev;
+
+ return sd->primary ? dev : sd->primary_dev;
+}
+
+struct mlx5_core_dev *
+mlx5_sd_primary_get_peer(struct mlx5_core_dev *primary, int idx)
+{
+ struct mlx5_sd *sd;
+
+ if (idx == 0)
+ return primary;
+
+ if (idx >= mlx5_sd_get_host_buses(primary))
+ return NULL;
+
+ sd = mlx5_get_sd(primary);
+ return sd->secondaries[idx - 1];
+}
+
+int mlx5_sd_ch_ix_get_dev_ix(struct mlx5_core_dev *dev, int ch_ix)
+{
+ return ch_ix % mlx5_sd_get_host_buses(dev);
+}
+
+int mlx5_sd_ch_ix_get_vec_ix(struct mlx5_core_dev *dev, int ch_ix)
+{
+ return ch_ix / mlx5_sd_get_host_buses(dev);
+}
+
+struct mlx5_core_dev *mlx5_sd_ch_ix_get_dev(struct mlx5_core_dev *primary, int ch_ix)
+{
+ int mdev_idx = mlx5_sd_ch_ix_get_dev_ix(primary, ch_ix);
+
+ return mlx5_sd_primary_get_peer(primary, mdev_idx);
+}
+
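The two helpers above stripe channels across the group: channel i lands on device i % host_buses and uses that device's local vector i / host_buses. A standalone demo of the arithmetic (plain C, no kernel dependencies; host_buses = 2 is an assumed example value):

#include <stdio.h>

int main(void)
{
	int host_buses = 2;

	for (int ch = 0; ch < 6; ch++)
		printf("channel %d -> dev %d, vec %d\n",
		       ch, ch % host_buses, ch / host_buses);
	return 0;	/* prints dev 0,1,0,1,... and vec 0,0,1,1,... */
}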
+static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
+{
+ u64 obj_allowed = MLX5_CAP_GEN_2_64(dev, allowed_object_for_other_vhca_access);
+ u32 obj_supp = MLX5_CAP_GEN_2(dev, cross_vhca_object_to_object_supported);
+
+ if (!(obj_supp &
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_ROOT_TO_REMOTE_FLOW_TABLE))
+ return false;
+
+ if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE))
+ return false;
+
+ return true;
+}
+
+static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
+{
+ /* Feature is currently implemented for PFs only */
+ if (!mlx5_core_is_pf(dev))
+ return false;
+
+ /* Honor the SW implementation limit */
+ if (host_buses > MLX5_SD_MAX_GROUP_SZ)
+ return false;
+
+ /* Disconnect secondaries from the network */
+ if (!MLX5_CAP_GEN(dev, eswitch_manager))
+ return false;
+ if (!MLX5_CAP_GEN(dev, silent_mode))
+ return false;
+
+ /* RX steering from primary to secondaries */
+ if (!MLX5_CAP_GEN(dev, cross_vhca_rqt))
+ return false;
+ if (host_buses > MLX5_CAP_GEN_2(dev, max_rqt_vhca_id))
+ return false;
+
+ /* TX steering from secondaries to primary */
+ if (!ft_create_alias_supported(dev))
+ return false;
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default))
+ return false;
+
+ return true;
+}
+
+static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
+ u8 *host_buses, u8 *sd_group)
+{
+ u32 out[MLX5_ST_SZ_DW(mpir_reg)];
+ int err;
+
+ err = mlx5_query_mpir_reg(dev, out);
+ if (err)
+ return err;
+
+ err = mlx5_query_nic_vport_sd_group(dev, sd_group);
+ if (err)
+ return err;
+
+ *sdm = MLX5_GET(mpir_reg, out, sdm);
+ *host_buses = MLX5_GET(mpir_reg, out, host_buses);
+
+ return 0;
+}
+
+static u32 mlx5_sd_group_id(struct mlx5_core_dev *dev, u8 sd_group)
+{
+ return (u32)((MLX5_CAP_GEN(dev, native_port_num) << 8) | sd_group);
+}
+
+static int sd_init(struct mlx5_core_dev *dev)
+{
+ u8 host_buses, sd_group;
+ struct mlx5_sd *sd;
+ u32 group_id;
+ bool sdm;
+ int err;
+
+ if (!MLX5_CAP_MCAM_REG(dev, mpir))
+ return 0;
+
+ err = mlx5_query_sd(dev, &sdm, &host_buses, &sd_group);
+ if (err)
+ return err;
+
+ if (!sdm)
+ return 0;
+
+ if (!sd_group)
+ return 0;
+
+ group_id = mlx5_sd_group_id(dev, sd_group);
+
+ if (!mlx5_sd_is_supported(dev, host_buses)) {
+ sd_warn(dev, "can't support requested netdev combining for group id 0x%x, skipping\n",

+ group_id);
+ return 0;
+ }
+
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return -ENOMEM;
+
+ sd->host_buses = host_buses;
+ sd->group_id = group_id;
+
+ mlx5_set_sd(dev, sd);
+
+ return 0;
+}
+
+static void sd_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+
+ mlx5_set_sd(dev, NULL);
+ kfree(sd);
+}
+
+static int sd_register(struct mlx5_core_dev *dev)
+{
+ struct mlx5_devcom_comp_dev *devcom, *pos;
+ struct mlx5_core_dev *peer, *primary;
+ struct mlx5_sd *sd, *primary_sd;
+ int err, i;
+
+ sd = mlx5_get_sd(dev);
+ devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP,
+ sd->group_id, NULL, dev);
+ if (!devcom)
+ return -ENOMEM;
+
+ sd->devcom = devcom;
+
+ if (mlx5_devcom_comp_get_size(devcom) != sd->host_buses)
+ return 0;
+
+ mlx5_devcom_comp_lock(devcom);
+ mlx5_devcom_comp_set_ready(devcom, true);
+ mlx5_devcom_comp_unlock(devcom);
+
+ if (!mlx5_devcom_for_each_peer_begin(devcom)) {
+ err = -ENODEV;
+ goto err_devcom_unreg;
+ }
+
+ primary = dev;
+ mlx5_devcom_for_each_peer_entry(devcom, peer, pos)
+ if (peer->pdev->bus->number < primary->pdev->bus->number)
+ primary = peer;
+
+ primary_sd = mlx5_get_sd(primary);
+ primary_sd->primary = true;
+ i = 0;
+ /* loop the secondaries */
+ mlx5_devcom_for_each_peer_entry(primary_sd->devcom, peer, pos) {
+ struct mlx5_sd *peer_sd = mlx5_get_sd(peer);
+
+ primary_sd->secondaries[i++] = peer;
+ peer_sd->primary = false;
+ peer_sd->primary_dev = primary;
+ }
+
+ mlx5_devcom_for_each_peer_end(devcom);
+ return 0;
+
+err_devcom_unreg:
+ mlx5_devcom_comp_lock(sd->devcom);
+ mlx5_devcom_comp_set_ready(sd->devcom, false);
+ mlx5_devcom_comp_unlock(sd->devcom);
+ mlx5_devcom_unregister_component(sd->devcom);
+ return err;
+}
+
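sd_register() above elects the group primary as the member with the lowest PCI bus number, so every member independently computes the same answer with no extra coordination. An illustrative extraction; elect_primary() is not a real function in this file, and the devcom peer begin/end bracketing is omitted for brevity:

static struct mlx5_core_dev *
elect_primary(struct mlx5_devcom_comp_dev *devcom, struct mlx5_core_dev *self)
{
	struct mlx5_devcom_comp_dev *pos;
	struct mlx5_core_dev *peer, *primary = self;

	mlx5_devcom_for_each_peer_entry(devcom, peer, pos)
		if (peer->pdev->bus->number < primary->pdev->bus->number)
			primary = peer;
	return primary;
}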
+static void sd_unregister(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+
+ mlx5_devcom_comp_lock(sd->devcom);
+ mlx5_devcom_comp_set_ready(sd->devcom, false);
+ mlx5_devcom_comp_unlock(sd->devcom);
+ mlx5_devcom_unregister_component(sd->devcom);
+}
+
+static int sd_cmd_set_primary(struct mlx5_core_dev *primary, u8 *alias_key)
+{
+ struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {};
+ struct mlx5_sd *sd = mlx5_get_sd(primary);
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *nic_ns;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ nic_ns = mlx5_get_flow_namespace(primary, MLX5_FLOW_NAMESPACE_EGRESS);
+ if (!nic_ns)
+ return -EOPNOTSUPP;
+
+ ft = mlx5_create_flow_table(nic_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ return err;
+ }
+ sd->tx_ft = ft;
+ memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN);
+ allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
+ allow_attr.obj_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id;
+
+ err = mlx5_cmd_allow_other_vhca_access(primary, &allow_attr);
+ if (err) {
+ mlx5_core_err(primary, "Failed to allow other vhca access err=%d\n",
+ err);
+ mlx5_destroy_flow_table(ft);
+ return err;
+ }
+
+ return 0;
+}
+
+static void sd_cmd_unset_primary(struct mlx5_core_dev *primary)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(primary);
+
+ mlx5_destroy_flow_table(sd->tx_ft);
+}
+
+static int sd_secondary_create_alias_ft(struct mlx5_core_dev *secondary,
+ struct mlx5_core_dev *primary,
+ struct mlx5_flow_table *ft,
+ u32 *obj_id, u8 *alias_key)
+{
+ u32 aliased_object_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id;
+ u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(primary, vhca_id);
+ struct mlx5_cmd_alias_obj_create_attr alias_attr = {};
+ int ret;
+
+ memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN);
+ alias_attr.obj_id = aliased_object_id;
+ alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
+ alias_attr.vhca_id = vhca_id_to_be_accessed;
+ ret = mlx5_cmd_alias_obj_create(secondary, &alias_attr, obj_id);
+ if (ret) {
+ mlx5_core_err(secondary, "Failed to create alias object err=%d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sd_secondary_destroy_alias_ft(struct mlx5_core_dev *secondary)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(secondary);
+
+ mlx5_cmd_alias_obj_destroy(secondary, sd->alias_obj_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+}
+
+static int sd_cmd_set_secondary(struct mlx5_core_dev *secondary,
+ struct mlx5_core_dev *primary,
+ u8 *alias_key)
+{
+ struct mlx5_sd *primary_sd = mlx5_get_sd(primary);
+ struct mlx5_sd *sd = mlx5_get_sd(secondary);
+ int err;
+
+ err = mlx5_fs_cmd_set_l2table_entry_silent(secondary, 1);
+ if (err)
+ return err;
+
+ err = sd_secondary_create_alias_ft(secondary, primary, primary_sd->tx_ft,
+ &sd->alias_obj_id, alias_key);
+ if (err)
+ goto err_unset_silent;
+
+ err = mlx5_fs_cmd_set_tx_flow_table_root(secondary, sd->alias_obj_id, false);
+ if (err)
+ goto err_destroy_alias_ft;
+
+ return 0;
+
+err_destroy_alias_ft:
+ sd_secondary_destroy_alias_ft(secondary);
+err_unset_silent:
+ mlx5_fs_cmd_set_l2table_entry_silent(secondary, 0);
+ return err;
+}
+
+static void sd_cmd_unset_secondary(struct mlx5_core_dev *secondary)
+{
+ mlx5_fs_cmd_set_tx_flow_table_root(secondary, 0, true);
+ sd_secondary_destroy_alias_ft(secondary);
+ mlx5_fs_cmd_set_l2table_entry_silent(secondary, 0);
+}
+
+static void sd_print_group(struct mlx5_core_dev *primary)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(primary);
+ struct mlx5_core_dev *pos;
+ int i;
+
+ sd_info(primary, "group id %#x, primary %s, vhca %#x\n",
+ sd->group_id, pci_name(primary->pdev),
+ MLX5_CAP_GEN(primary, vhca_id));
+ mlx5_sd_for_each_secondary(i, primary, pos)
+ sd_info(primary, "group id %#x, secondary_%d %s, vhca %#x\n",
+ sd->group_id, i - 1, pci_name(pos->pdev),
+ MLX5_CAP_GEN(pos, vhca_id));
+}
+
+static ssize_t dev_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_core_dev *dev;
+ char tbuf[32];
+ int ret;
+
+ dev = filp->private_data;
+ ret = snprintf(tbuf, sizeof(tbuf), "%s vhca %#x\n", pci_name(dev->pdev),
+ MLX5_CAP_GEN(dev, vhca_id));
+
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static const struct file_operations dev_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = dev_read,
+};
+
+int mlx5_sd_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_dev *primary, *pos, *to;
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+ u8 alias_key[ACCESS_KEY_LEN];
+ int err, i;
+
+ err = sd_init(dev);
+ if (err)
+ return err;
+
+ sd = mlx5_get_sd(dev);
+ if (!sd)
+ return 0;
+
+ err = sd_register(dev);
+ if (err)
+ goto err_sd_cleanup;
+
+ if (!mlx5_devcom_comp_is_ready(sd->devcom))
+ return 0;
+
+ primary = mlx5_sd_get_primary(dev);
+
+ for (i = 0; i < ACCESS_KEY_LEN; i++)
+ alias_key[i] = get_random_u8();
+
+ err = sd_cmd_set_primary(primary, alias_key);
+ if (err)
+ goto err_sd_unregister;
+
+ sd->dfs = debugfs_create_dir("multi-pf", mlx5_debugfs_get_dev_root(primary));
+ debugfs_create_x32("group_id", 0400, sd->dfs, &sd->group_id);
+ debugfs_create_file("primary", 0400, sd->dfs, primary, &dev_fops);
+
+ mlx5_sd_for_each_secondary(i, primary, pos) {
+ char name[32];
+
+ err = sd_cmd_set_secondary(pos, primary, alias_key);
+ if (err)
+ goto err_unset_secondaries;
+
+ snprintf(name, sizeof(name), "secondary_%d", i - 1);
+ debugfs_create_file(name, 0400, sd->dfs, pos, &dev_fops);
+ }
+
+ sd_info(primary, "group id %#x, size %d, combined\n",
+ sd->group_id, mlx5_devcom_comp_get_size(sd->devcom));
+ sd_print_group(primary);
+
+ return 0;
+
+err_unset_secondaries:
+ to = pos;
+ mlx5_sd_for_each_secondary_to(i, primary, to, pos)
+ sd_cmd_unset_secondary(pos);
+ sd_cmd_unset_primary(primary);
+ debugfs_remove_recursive(sd->dfs);
+err_sd_unregister:
+ sd_unregister(dev);
+err_sd_cleanup:
+ sd_cleanup(dev);
+ return err;
+}
+
+void mlx5_sd_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+ struct mlx5_core_dev *primary, *pos;
+ int i;
+
+ if (!sd)
+ return;
+
+ if (!mlx5_devcom_comp_is_ready(sd->devcom))
+ goto out;
+
+ primary = mlx5_sd_get_primary(dev);
+ mlx5_sd_for_each_secondary(i, primary, pos)
+ sd_cmd_unset_secondary(pos);
+ sd_cmd_unset_primary(primary);
+ debugfs_remove_recursive(sd->dfs);
+
+ sd_info(primary, "group id %#x, uncombined\n", sd->group_id);
+out:
+ sd_unregister(dev);
+ sd_cleanup(dev);
+}
+
+struct auxiliary_device *mlx5_sd_get_adev(struct mlx5_core_dev *dev,
+ struct auxiliary_device *adev,
+ int idx)
+{
+ struct mlx5_sd *sd = mlx5_get_sd(dev);
+ struct mlx5_core_dev *primary;
+
+ if (!sd)
+ return adev;
+
+ if (!mlx5_devcom_comp_is_ready(sd->devcom))
+ return NULL;
+
+ primary = mlx5_sd_get_primary(dev);
+ if (dev == primary)
+ return adev;
+
+ return &primary->priv.adev[idx]->adev;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h
new file mode 100644
index 000000000000..137efaf9aabc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_SD_H__
+#define __MLX5_LIB_SD_H__
+
+#define MLX5_SD_MAX_GROUP_SZ 2
+
+struct mlx5_sd;
+
+struct mlx5_core_dev *mlx5_sd_primary_get_peer(struct mlx5_core_dev *primary, int idx);
+int mlx5_sd_ch_ix_get_dev_ix(struct mlx5_core_dev *dev, int ch_ix);
+int mlx5_sd_ch_ix_get_vec_ix(struct mlx5_core_dev *dev, int ch_ix);
+struct mlx5_core_dev *mlx5_sd_ch_ix_get_dev(struct mlx5_core_dev *primary, int ch_ix);
+struct auxiliary_device *mlx5_sd_get_adev(struct mlx5_core_dev *dev,
+ struct auxiliary_device *adev,
+ int idx);
+
+int mlx5_sd_init(struct mlx5_core_dev *dev);
+void mlx5_sd_cleanup(struct mlx5_core_dev *dev);
+
+#define mlx5_sd_for_each_dev_from_to(i, primary, ix_from, to, pos) \
+ for (i = ix_from; \
+ (pos = mlx5_sd_primary_get_peer(primary, i)) && pos != (to); i++)
+
+#define mlx5_sd_for_each_dev(i, primary, pos) \
+ mlx5_sd_for_each_dev_from_to(i, primary, 0, NULL, pos)
+
+#define mlx5_sd_for_each_dev_to(i, primary, to, pos) \
+ mlx5_sd_for_each_dev_from_to(i, primary, 0, to, pos)
+
+#define mlx5_sd_for_each_secondary(i, primary, pos) \
+ mlx5_sd_for_each_dev_from_to(i, primary, 1, NULL, pos)
+
+#define mlx5_sd_for_each_secondary_to(i, primary, to, pos) \
+ mlx5_sd_for_each_dev_from_to(i, primary, 1, to, pos)
+
+#endif /* __MLX5_LIB_SD_H__ */
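A usage sketch of the iteration macros, mirroring the unwind in mlx5_sd_init() above: on a mid-loop failure, a second walk bounded by the failing entry undoes only what was already configured. configure()/unconfigure() are hypothetical callbacks:

static inline void sd_example_walk(struct mlx5_core_dev *primary)
{
	struct mlx5_core_dev *pos, *failed = NULL;
	int i;

	mlx5_sd_for_each_secondary(i, primary, pos)
		if (configure(pos)) {		/* configure() is hypothetical */
			failed = pos;
			break;
		}

	if (failed)
		mlx5_sd_for_each_secondary_to(i, primary, failed, pos)
			unconfigure(pos);	/* unconfigure() is hypothetical */
}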
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index bccf6e53556c..c2593625c09a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -187,31 +187,36 @@ static struct mlx5_profile profile[] = {
};
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
- u32 warn_time_mili)
+ u32 warn_time_mili, const char *init_state)
{
unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
u32 fw_initializing;
- int err = 0;
do {
fw_initializing = ioread32be(&dev->iseg->initializing);
if (!(fw_initializing >> 31))
break;
- if (time_after(jiffies, end) ||
- test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
- err = -EBUSY;
- break;
+ if (time_after(jiffies, end)) {
+ mlx5_core_err(dev, "Firmware over %u MS in %s state, aborting\n",
+ max_wait_mili, init_state);
+ return -ETIMEDOUT;
+ }
+ if (test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
+ mlx5_core_warn(dev, "device is being removed, stop waiting for FW %s\n",
+ init_state);
+ return -ENODEV;
}
if (warn_time_mili && time_after(jiffies, warn)) {
- mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n",
- jiffies_to_msecs(end - warn) / 1000, fw_initializing);
+ mlx5_core_warn(dev, "Waiting for FW %s, timeout abort in %ds (0x%x)\n",
+ init_state, jiffies_to_msecs(end - warn) / 1000,
+ fw_initializing);
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
} while (true);
- return err;
+ return 0;
}
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
@@ -1151,12 +1156,10 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
/* wait for firmware to accept initialization segments configurations
*/
err = wait_fw_init(dev, timeout,
- mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
- if (err) {
- mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
- timeout);
+ mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL),
+ "pre-initializing");
+ if (err)
return err;
- }
err = mlx5_cmd_enable(dev);
if (err) {
@@ -1166,12 +1169,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou
mlx5_tout_query_iseg(dev);
- err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
- if (err) {
- mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
- mlx5_tout_ms(dev, FW_INIT));
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0, "initializing");
+ if (err)
goto err_cmd_cleanup;
- }
dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
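wait_fw_init() now logs internally and returns distinct errors: -ETIMEDOUT when firmware stays in the named state past the deadline, -ENODEV when MLX5_BREAK_FW_WAIT signals that the device is being removed. A hedged caller sketch; example_enable() is illustrative only:

static int example_enable(struct mlx5_core_dev *dev, u32 timeout)
{
	int err;

	err = wait_fw_init(dev, timeout, 0, "initializing");
	if (err == -ENODEV)
		return err;	/* removal in progress, no point retrying */
	if (err == -ETIMEDOUT)
		return err;	/* firmware stuck in the named init state */
	return 0;
}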
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a79b7959361b..58732f44940f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -312,13 +312,6 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
return ret;
}
-enum {
- MLX5_NIC_IFC_FULL = 0,
- MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2,
- MLX5_NIC_IFC_SW_RESET = 7
-};
-
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index c93492b67788..99219ea52c4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -74,7 +74,8 @@ static void mlx5_sf_dev_release(struct device *device)
kfree(sf_dev);
}
-static void mlx5_sf_dev_remove(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev)
+static void mlx5_sf_dev_remove_aux(struct mlx5_core_dev *dev,
+ struct mlx5_sf_dev *sf_dev)
{
int id;
@@ -138,7 +139,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
return;
xa_err:
- mlx5_sf_dev_remove(dev, sf_dev);
+ mlx5_sf_dev_remove_aux(dev, sf_dev);
add_err:
mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
sf_index, sfnum, err);
@@ -149,7 +150,7 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
xa_erase(&table->devices, sf_index);
- mlx5_sf_dev_remove(dev, sf_dev);
+ mlx5_sf_dev_remove_aux(dev, sf_dev);
}
static int
@@ -367,7 +368,7 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
xa_for_each(&table->devices, index, sf_dev) {
xa_erase(&table->devices, index);
- mlx5_sf_dev_remove(table->dev, sf_dev);
+ mlx5_sf_dev_remove_aux(table->dev, sf_dev);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 169c2c68ed5c..bc863e1f062e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -95,24 +95,29 @@ mdev_err:
static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
+ struct devlink *devlink;
- mlx5_drain_health_wq(sf_dev->mdev);
+ devlink = priv_to_devlink(mdev);
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_drain_health_wq(mdev);
devlink_unregister(devlink);
- if (mlx5_dev_is_lightweight(sf_dev->mdev))
- mlx5_uninit_one_light(sf_dev->mdev);
+ if (mlx5_dev_is_lightweight(mdev))
+ mlx5_uninit_one_light(mdev);
else
- mlx5_uninit_one(sf_dev->mdev);
- iounmap(sf_dev->mdev->iseg);
- mlx5_mdev_uninit(sf_dev->mdev);
+ mlx5_uninit_one(mdev);
+ iounmap(mdev->iseg);
+ mlx5_mdev_uninit(mdev);
mlx5_devlink_free(devlink);
}
static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ struct mlx5_core_dev *mdev = sf_dev->mdev;
- mlx5_unload_one(sf_dev->mdev, false);
+ set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_unload_one(mdev, false);
}
static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
index 7e36e1062139..64f4cc284aea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
@@ -54,6 +54,107 @@ enum dr_dump_rec_type {
DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE = 3425,
};
+static struct mlx5dr_dbg_dump_buff *
+mlx5dr_dbg_dump_data_init_new_buff(struct mlx5dr_dbg_dump_data *dump_data)
+{
+ struct mlx5dr_dbg_dump_buff *new_buff;
+
+ new_buff = kzalloc(sizeof(*new_buff), GFP_KERNEL);
+ if (!new_buff)
+ return NULL;
+
+ new_buff->buff = kvzalloc(MLX5DR_DEBUG_DUMP_BUFF_SIZE, GFP_KERNEL);
+ if (!new_buff->buff) {
+ kfree(new_buff);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&new_buff->node);
+ list_add_tail(&new_buff->node, &dump_data->buff_list);
+
+ return new_buff;
+}
+
+static struct mlx5dr_dbg_dump_data *
+mlx5dr_dbg_create_dump_data(void)
+{
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ dump_data = kzalloc(sizeof(*dump_data), GFP_KERNEL);
+ if (!dump_data)
+ return NULL;
+
+ INIT_LIST_HEAD(&dump_data->buff_list);
+
+ if (!mlx5dr_dbg_dump_data_init_new_buff(dump_data)) {
+ kfree(dump_data);
+ return NULL;
+ }
+
+ return dump_data;
+}
+
+static void
+mlx5dr_dbg_destroy_dump_data(struct mlx5dr_dbg_dump_data *dump_data)
+{
+ struct mlx5dr_dbg_dump_buff *dump_buff, *tmp_buff;
+
+ if (!dump_data)
+ return;
+
+ list_for_each_entry_safe(dump_buff, tmp_buff, &dump_data->buff_list, node) {
+ kvfree(dump_buff->buff);
+ list_del(&dump_buff->node);
+ kfree(dump_buff);
+ }
+
+ kfree(dump_data);
+}
+
+static int
+mlx5dr_dbg_dump_data_print(struct seq_file *file, char *str, u32 size)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+ struct mlx5dr_dbg_dump_buff *buff;
+ u32 buff_capacity, write_size;
+ int remain_size, ret;
+
+ if (size >= MLX5DR_DEBUG_DUMP_BUFF_SIZE)
+ return -EINVAL;
+
+ dump_data = dmn->dump_info.dump_data;
+ buff = list_last_entry(&dump_data->buff_list,
+ struct mlx5dr_dbg_dump_buff, node);
+
+ buff_capacity = (MLX5DR_DEBUG_DUMP_BUFF_SIZE - 1) - buff->index;
+ remain_size = buff_capacity - size;
+ write_size = (remain_size > 0) ? size : buff_capacity;
+
+ if (likely(write_size)) {
+ ret = snprintf(buff->buff + buff->index, write_size + 1, "%s", str);
+ if (ret < 0)
+ return ret;
+
+ buff->index += write_size;
+ }
+
+ if (remain_size < 0) {
+ remain_size *= -1;
+ buff = mlx5dr_dbg_dump_data_init_new_buff(dump_data);
+ if (!buff)
+ return -ENOMEM;
+
+ ret = snprintf(buff->buff, remain_size + 1, "%s", str + write_size);
+ if (ret < 0)
+ return ret;
+
+ buff->index += remain_size;
+ }
+
+ return 0;
+}
+
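mlx5dr_dbg_dump_data_print() above fills the current buffer to its last usable byte and spills the tail of an oversized record into a freshly allocated buffer, so records are split rather than dropped. A standalone plain-C illustration of the split arithmetic; CAP and the 3-byte prefix are toy values, not the driver's:

#include <stdio.h>
#include <string.h>

#define CAP 8	/* stand-in for MLX5DR_DEBUG_DUMP_BUFF_SIZE - 1 */

int main(void)
{
	char a[CAP + 1] = "abc", b[CAP + 1] = "";	/* a[] already holds 3 bytes */
	const char *rec = "0123456789";			/* 10 bytes, won't fit in a[] */
	int idx = 3;
	int capacity = CAP - idx;
	int remain = capacity - (int)strlen(rec);
	int head = remain > 0 ? (int)strlen(rec) : capacity;

	memcpy(a + idx, rec, head);	/* fill a[] up to the cap */
	if (remain < 0)
		memcpy(b, rec + head, -remain);	/* tail starts a new buffer */
	printf("first:'%s' second:'%s'\n", a, b);
	return 0;	/* prints first:'abc01234' second:'56789' */
}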
void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
{
mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
@@ -109,36 +210,68 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
{
struct mlx5dr_action *action = action_mem->action;
const u64 action_id = DR_DBG_PTR_TO_ID(action);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 hit_tbl_ptr, miss_tbl_ptr;
u32 hit_tbl_id, miss_tbl_id;
+ int ret;
switch (action->action_type) {
case DR_ACTION_TYP_DROP:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_DROP, action_id, rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DROP, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_FT:
if (action->dest_tbl->is_fw_tbl)
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->fw_tbl.id,
- -1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->fw_tbl.id,
+ -1);
else
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_FT, action_id,
- rule_id, action->dest_tbl->tbl->table_id,
- DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+ rule_id, action->dest_tbl->tbl->table_id,
+ DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
+
+ if (ret < 0)
+ return ret;
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_CTR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
- action->ctr->ctr_id + action->ctr->offset);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
+ action->ctr->ctr_id + action->ctr->offset);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TAG:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
- action->flow_tag->flow_tag);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
+ action->flow_tag->flow_tag);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_MODIFY_HDR:
{
@@ -150,83 +283,171 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
ptrn_arg = !action->rewrite->single_action_opt && ptrn && arg;
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
- DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
- rule_id, action->rewrite->index,
- action->rewrite->single_action_opt,
- ptrn_arg ? action->rewrite->num_of_actions : 0,
- ptrn_arg ? ptrn->index : 0,
- ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
+ DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
+ rule_id, action->rewrite->index,
+ action->rewrite->single_action_opt,
+ ptrn_arg ? action->rewrite->num_of_actions : 0,
+ ptrn_arg ? ptrn->index : 0,
+ ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (ptrn_arg) {
for (i = 0; i < action->rewrite->num_of_actions; i++) {
- seq_printf(file, ",0x%016llx",
- be64_to_cpu(((__be64 *)rewrite_data)[i]));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ ",0x%016llx",
+ be64_to_cpu(((__be64 *)rewrite_data)[i]));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
}
}
- seq_puts(file, "\n");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "\n");
+ if (ret < 0)
+ return ret;
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
}
case DR_ACTION_TYP_VPORT:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
- action->vport->caps->num);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
+ action->vport->caps->num);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TNL_L2_TO_L2:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
- rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_TNL_L3_TO_L2:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
- rule_id,
- (action->rewrite->ptrn && action->rewrite->arg) ?
- mlx5dr_arg_get_obj_id(action->rewrite->arg) :
- action->rewrite->index);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
+ rule_id,
+ (action->rewrite->ptrn && action->rewrite->arg) ?
+ mlx5dr_arg_get_obj_id(action->rewrite->arg) :
+ action->rewrite->index);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
- rule_id, action->reformat->id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
+ rule_id, action->reformat->id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_L2_TO_TNL_L3:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
- rule_id, action->reformat->id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
+ rule_id, action->reformat->id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_POP_VLAN:
- seq_printf(file, "%d,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
- rule_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
+ rule_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_PUSH_VLAN:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
- rule_id, action->push_vlan->vlan_hdr);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
+ rule_id, action->push_vlan->vlan_hdr);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_INSERT_HDR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
- rule_id, action->reformat->id,
- action->reformat->param_0,
- action->reformat->param_1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_REMOVE_HDR:
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
- rule_id, action->reformat->id,
- action->reformat->param_0,
- action->reformat->param_1);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
+ rule_id, action->reformat->id,
+ action->reformat->param_0,
+ action->reformat->param_1);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_SAMPLER:
- seq_printf(file,
- "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id, rule_id,
- 0, 0, action->sampler->sampler_id,
- action->sampler->rx_icm_addr,
- action->sampler->tx_icm_addr);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id,
+ rule_id, 0, 0, action->sampler->sampler_id,
+ action->sampler->rx_icm_addr,
+ action->sampler->tx_icm_addr);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
case DR_ACTION_TYP_RANGE:
if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) {
@@ -247,10 +468,17 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id,
DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl);
}
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
- DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id, rule_id,
- hit_tbl_id, hit_tbl_ptr, miss_tbl_id, miss_tbl_ptr,
- action->range->definer_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
+ DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id,
+ rule_id, hit_tbl_id, hit_tbl_ptr, miss_tbl_id,
+ miss_tbl_ptr, action->range->definer_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
break;
default:
return 0;
@@ -263,8 +491,10 @@ static int
dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
bool is_rx, const u64 rule_id, u8 format_ver)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char hw_ste_dump[DR_HEX_SIZE];
u32 mem_rec_type;
+ int ret;
if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5) {
mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 :
@@ -277,9 +507,16 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste,
dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
DR_STE_SIZE_REDUCED);
- seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
- dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id,
- hw_ste_dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
+ dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)),
+ rule_id, hw_ste_dump);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -309,6 +546,7 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
{
struct mlx5dr_rule_action_member *action_mem;
const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_rule_rx_tx *rx = &rule->rx;
struct mlx5dr_rule_rx_tx *tx = &rule->tx;
u8 format_ver;
@@ -316,8 +554,15 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver;
- seq_printf(file, "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE, rule_id,
- DR_DBG_PTR_TO_ID(rule->matcher));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE,
+ rule_id, DR_DBG_PTR_TO_ID(rule->matcher));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (rx->nic_matcher) {
ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver);
@@ -344,46 +589,94 @@ static int
dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask,
u8 criteria, const u64 matcher_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
char dump[DR_HEX_SIZE];
+ int ret;
- seq_printf(file, "%d,0x%llx,", DR_DUMP_REC_TYPE_MATCHER_MASK,
- matcher_id);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "%d,0x%llx,",
+ DR_DUMP_REC_TYPE_MATCHER_MASK, matcher_id);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (criteria & DR_MATCHER_CRITERIA_OUTER) {
dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_INNER) {
dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC) {
dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC2) {
dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2));
- seq_printf(file, "%s,", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s,", dump);
} else {
- seq_puts(file, ",");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
if (criteria & DR_MATCHER_CRITERIA_MISC3) {
dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3));
- seq_printf(file, "%s\n", dump);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%s\n", dump);
} else {
- seq_puts(file, ",\n");
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",\n");
}
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -391,9 +684,19 @@ static int
dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder,
u32 index, bool is_rx, const u64 matcher_id)
{
- seq_printf(file, "%d,0x%llx,%d,%d,0x%x\n",
- DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index, is_rx,
- builder->lu_type);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%d,%d,0x%x\n",
+ DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index,
+ is_rx, builder->lu_type);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -403,6 +706,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
const u64 matcher_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr, e_icm_addr;
int i, ret;
@@ -412,11 +716,19 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx,
s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
- seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
- rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
- matcher_id, matcher_rx_tx->num_of_builders,
- dr_dump_icm_to_idx(s_icm_addr),
- dr_dump_icm_to_idx(e_icm_addr));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
+ rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
+ matcher_id, matcher_rx_tx->num_of_builders,
+ dr_dump_icm_to_idx(s_icm_addr),
+ dr_dump_icm_to_idx(e_icm_addr));
+
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
ret = dr_dump_matcher_builder(file,
@@ -434,13 +746,22 @@ dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
{
struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
struct mlx5dr_matcher_rx_tx *tx = &matcher->tx;
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 matcher_id;
int ret;
matcher_id = DR_DBG_PTR_TO_ID(matcher);
- seq_printf(file, "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
- matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl), matcher->prio);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
+ matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl),
+ matcher->prio);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
ret = dr_dump_matcher_mask(file, &matcher->mask,
matcher->match_criteria, matcher_id);
@@ -486,15 +807,24 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx,
struct mlx5dr_table_rx_tx *table_rx_tx,
const u64 table_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
enum dr_dump_rec_type rec_type;
u64 s_icm_addr;
+ int ret;
rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
DR_DUMP_REC_TYPE_TABLE_TX;
s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
- seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id,
- dr_dump_icm_to_idx(s_icm_addr));
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx\n", rec_type, table_id,
+ dr_dump_icm_to_idx(s_icm_addr));
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
return 0;
}
@@ -503,11 +833,19 @@ static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
{
struct mlx5dr_table_rx_tx *rx = &table->rx;
struct mlx5dr_table_rx_tx *tx = &table->tx;
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
int ret;
- seq_printf(file, "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
- DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
- table->table_type, table->level);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
+ DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
+ table->table_type, table->level);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
if (rx->nic_dmn) {
ret = dr_dump_table_rx_tx(file, true, rx,
@@ -546,46 +884,86 @@ static int
dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring,
const u64 domain_id)
{
- seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n",
- DR_DUMP_REC_TYPE_DOMAIN_SEND_RING, DR_DBG_PTR_TO_ID(ring),
- domain_id, ring->cq->mcq.cqn, ring->qp->qpn);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_SEND_RING,
+ DR_DBG_PTR_TO_ID(ring), domain_id,
+ ring->cq->mcq.cqn, ring->qp->qpn);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain_info_flex_parser(struct seq_file *file,
const char *flex_parser_name,
const u8 flex_parser_value,
const u64 domain_id)
{
- seq_printf(file, "%d,0x%llx,%s,0x%x\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
- flex_parser_name, flex_parser_value);
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+ int ret;
+
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%s,0x%x\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
+ flex_parser_name, flex_parser_value);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
+
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps,
const u64 domain_id)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
struct mlx5dr_cmd_vport_cap *vport_caps;
unsigned long i, vports_num;
+ int ret;
xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps)
; /* count the number of vports in xarray */
- seq_printf(file, "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
- caps->nic_rx_drop_address, caps->nic_tx_drop_address,
- caps->flex_protocols, vports_num, caps->eswitch_manager);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
+ caps->nic_rx_drop_address, caps->nic_tx_drop_address,
+ caps->flex_protocols, vports_num, caps->eswitch_manager);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) {
vport_caps = xa_load(&caps->vports.vports_caps_xa, i);
- seq_printf(file, "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
- DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT, domain_id, i,
- vport_caps->vport_gvmi, vport_caps->icm_address_rx,
- vport_caps->icm_address_tx);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
+ DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT,
+ domain_id, i, vport_caps->vport_gvmi,
+ vport_caps->icm_address_rx,
+ vport_caps->icm_address_tx);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
}
return 0;
}
@@ -627,24 +1005,32 @@ dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info,
return 0;
}
-static int
+static noinline_for_stack int
dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
{
+ char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
u64 domain_id = DR_DBG_PTR_TO_ID(dmn);
int ret;
- seq_printf(file, "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
- DR_DUMP_REC_TYPE_DOMAIN,
- domain_id, dmn->type, dmn->info.caps.gvmi,
- dmn->info.supp_sw_steering,
- /* package version */
- LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
- LINUX_VERSION_SUBLEVEL,
- pci_name(dmn->mdev->pdev),
- 0, /* domain flags */
- dmn->num_buddies[DR_ICM_TYPE_STE],
- dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
- dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+ "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
+ DR_DUMP_REC_TYPE_DOMAIN,
+ domain_id, dmn->type, dmn->info.caps.gvmi,
+ dmn->info.supp_sw_steering,
+ /* package version */
+ LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
+ LINUX_VERSION_SUBLEVEL,
+ pci_name(dmn->mdev->pdev),
+ 0, /* domain flags */
+ dmn->num_buddies[DR_ICM_TYPE_STE],
+ dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
+ dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
+ if (ret < 0)
+ return ret;
+
+ ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+ if (ret)
+ return ret;
ret = dr_dump_domain_info(file, &dmn->info, domain_id);
if (ret < 0)
@@ -683,11 +1069,91 @@ unlock_mutex:
return ret;
}
-static int dr_dump_show(struct seq_file *file, void *priv)
+static void *
+dr_dump_start(struct seq_file *file, loff_t *pos)
{
- return dr_dump_domain_all(file, file->private);
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ if (atomic_read(&dmn->dump_info.state) != MLX5DR_DEBUG_DUMP_STATE_FREE) {
+ mlx5_core_warn(dmn->mdev, "Dump already in progress\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS);
+ dump_data = dmn->dump_info.dump_data;
+
+ if (dump_data) {
+ return seq_list_start(&dump_data->buff_list, *pos);
+ } else if (*pos == 0) {
+ dump_data = mlx5dr_dbg_create_dump_data();
+ if (!dump_data)
+ goto exit;
+
+ dmn->dump_info.dump_data = dump_data;
+ if (dr_dump_domain_all(file, dmn)) {
+ mlx5dr_dbg_destroy_dump_data(dump_data);
+ dmn->dump_info.dump_data = NULL;
+ goto exit;
+ }
+
+ return seq_list_start(&dump_data->buff_list, *pos);
+ }
+
+exit:
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
+ return NULL;
}
-DEFINE_SHOW_ATTRIBUTE(dr_dump);
+
+static void *
+dr_dump_next(struct seq_file *file, void *v, loff_t *pos)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ dump_data = dmn->dump_info.dump_data;
+
+ return seq_list_next(v, &dump_data->buff_list, pos);
+}
+
+static void
+dr_dump_stop(struct seq_file *file, void *v)
+{
+ struct mlx5dr_domain *dmn = file->private;
+ struct mlx5dr_dbg_dump_data *dump_data;
+
+ if (v && IS_ERR(v))
+ return;
+
+ if (!v) {
+ dump_data = dmn->dump_info.dump_data;
+ if (dump_data) {
+ mlx5dr_dbg_destroy_dump_data(dump_data);
+ dmn->dump_info.dump_data = NULL;
+ }
+ }
+
+ atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
+}
+
+static int
+dr_dump_show(struct seq_file *file, void *v)
+{
+ struct mlx5dr_dbg_dump_buff *entry;
+
+ entry = list_entry(v, struct mlx5dr_dbg_dump_buff, node);
+ seq_printf(file, "%s", entry->buff);
+
+ return 0;
+}
+
+static const struct seq_operations dr_dump_sops = {
+ .start = dr_dump_start,
+ .next = dr_dump_next,
+ .stop = dr_dump_stop,
+ .show = dr_dump_show,
+};
+DEFINE_SEQ_ATTRIBUTE(dr_dump);
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
{
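The hunk above swaps the one-shot DEFINE_SHOW_ATTRIBUTE() for a full seq_operations iterator: the dump is rendered once into a list of buffers, then streamed out chunk by chunk across successive reads. A minimal sketch of the same pattern follows; my_buf, my_state and the ops names are illustrative stand-ins, not the mlx5 names, and the real driver additionally guards against concurrent dumps and frees the buffer list in its stop() callback.

#include <linux/list.h>
#include <linux/seq_file.h>

struct my_buf {
	char *data;			/* one pre-rendered chunk */
	struct list_head node;
};

struct my_state {
	struct list_head buff_list;	/* filled once, before reading */
};

static void *my_start(struct seq_file *s, loff_t *pos)
{
	struct my_state *st = s->private;

	return seq_list_start(&st->buff_list, *pos);
}

static void *my_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct my_state *st = s->private;

	return seq_list_next(v, &st->buff_list, pos);
}

static void my_stop(struct seq_file *s, void *v)
{
	/* the real driver releases the buffer list here once the dump ends */
}

static int my_show(struct seq_file *s, void *v)
{
	struct my_buf *b = list_entry(v, struct my_buf, node);

	seq_puts(s, b->data);
	return 0;
}

static const struct seq_operations my_sops = {
	.start = my_start,
	.next  = my_next,
	.stop  = my_stop,
	.show  = my_show,
};
DEFINE_SEQ_ATTRIBUTE(my);	/* generates my_open() and my_fops from my_sops */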
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
index def6cf853eea..57c6b363b870 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
@@ -1,10 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+#define MLX5DR_DEBUG_DUMP_BUFF_SIZE (64 * 1024 * 1024)
+#define MLX5DR_DEBUG_DUMP_BUFF_LENGTH 512
+
+enum {
+ MLX5DR_DEBUG_DUMP_STATE_FREE,
+ MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS,
+};
+
+struct mlx5dr_dbg_dump_buff {
+ char *buff;
+ u32 index;
+ struct list_head node;
+};
+
+struct mlx5dr_dbg_dump_data {
+ struct list_head buff_list;
+};
+
struct mlx5dr_dbg_dump_info {
struct mutex dbg_mutex; /* protect dbg lists */
struct dentry *steering_debugfs;
struct dentry *fdb_debugfs;
+ struct mlx5dr_dbg_dump_data *dump_data;
+ atomic_t state;
};
void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
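The new header models the dump as a list of record buffers hanging off mlx5dr_dbg_dump_data, each formatted into a MLX5DR_DEBUG_DUMP_BUFF_LENGTH-byte scratch buffer first. A hypothetical append helper, shown only to illustrate how such a list grows; it is not the driver's mlx5dr_dbg_dump_data_print(), whose body is outside this diff:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

static int my_dump_append(struct mlx5dr_dbg_dump_data *dump, const char *rec)
{
	struct mlx5dr_dbg_dump_buff *buff;

	buff = kzalloc(sizeof(*buff), GFP_KERNEL);
	if (!buff)
		return -ENOMEM;

	buff->buff = kstrdup(rec, GFP_KERNEL);	/* one formatted record */
	if (!buff->buff) {
		kfree(buff);
		return -ENOMEM;
	}

	list_add_tail(&buff->node, &dump->buff_list);
	return 0;
}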
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
index 253d7ad9b809..8b63968bbee9 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
@@ -124,6 +124,41 @@ static void mlxbf_gige_get_pauseparam(struct net_device *netdev,
pause->tx_pause = 1;
}
+static bool mlxbf_gige_llu_counters_enabled(struct mlxbf_gige *priv)
+{
+ u32 data;
+
+ if (priv->hw_version == MLXBF_GIGE_VERSION_BF2) {
+ data = readl(priv->llu_base + MLXBF_GIGE_BF2_LLU_GENERAL_CONFIG);
+ if (data & MLXBF_GIGE_BF2_LLU_COUNTERS_EN)
+ return true;
+ } else {
+ data = readl(priv->llu_base + MLXBF_GIGE_BF3_LLU_GENERAL_CONFIG);
+ if (data & MLXBF_GIGE_BF3_LLU_COUNTERS_EN)
+ return true;
+ }
+
+ return false;
+}
+
+static void mlxbf_gige_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+ u64 data_lo, data_hi;
+
+ /* Read LLU counters to provide stats only if counters are enabled */
+ if (mlxbf_gige_llu_counters_enabled(priv)) {
+ data_lo = readl(priv->llu_base + MLXBF_GIGE_TX_PAUSE_CNT_LO);
+ data_hi = readl(priv->llu_base + MLXBF_GIGE_TX_PAUSE_CNT_HI);
+ pause_stats->tx_pause_frames = (data_hi << 32) | data_lo;
+
+ data_lo = readl(priv->llu_base + MLXBF_GIGE_RX_PAUSE_CNT_LO);
+ data_hi = readl(priv->llu_base + MLXBF_GIGE_RX_PAUSE_CNT_HI);
+ pause_stats->rx_pause_frames = (data_hi << 32) | data_lo;
+ }
+}
+
const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = mlxbf_gige_get_ringparam,
@@ -134,6 +169,7 @@ const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.get_ethtool_stats = mlxbf_gige_get_ethtool_stats,
.nway_reset = phy_ethtool_nway_reset,
.get_pauseparam = mlxbf_gige_get_pauseparam,
+ .get_pause_stats = mlxbf_gige_get_pause_stats,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index cd0973229c9b..98a8681c21b9 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -99,4 +99,34 @@
#define MLXBF_GIGE_100M_IPG_SIZE 119
#define MLXBF_GIGE_10M_IPG_SIZE 1199
+/* Offsets into OOB LLU block for pause frame counters */
+#define MLXBF_GIGE_BF2_TX_PAUSE_CNT_HI 0x33d8
+#define MLXBF_GIGE_BF2_TX_PAUSE_CNT_LO 0x33dc
+#define MLXBF_GIGE_BF2_RX_PAUSE_CNT_HI 0x3210
+#define MLXBF_GIGE_BF2_RX_PAUSE_CNT_LO 0x3214
+
+#define MLXBF_GIGE_BF3_TX_PAUSE_CNT_HI 0x3a88
+#define MLXBF_GIGE_BF3_TX_PAUSE_CNT_LO 0x3a8c
+#define MLXBF_GIGE_BF3_RX_PAUSE_CNT_HI 0x38c0
+#define MLXBF_GIGE_BF3_RX_PAUSE_CNT_LO 0x38c4
+
+#define MLXBF_GIGE_TX_PAUSE_CNT_HI ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_TX_PAUSE_CNT_HI : \
+ MLXBF_GIGE_BF3_TX_PAUSE_CNT_HI)
+#define MLXBF_GIGE_TX_PAUSE_CNT_LO ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_TX_PAUSE_CNT_LO : \
+ MLXBF_GIGE_BF3_TX_PAUSE_CNT_LO)
+#define MLXBF_GIGE_RX_PAUSE_CNT_HI ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_RX_PAUSE_CNT_HI : \
+ MLXBF_GIGE_BF3_RX_PAUSE_CNT_HI)
+#define MLXBF_GIGE_RX_PAUSE_CNT_LO ((priv->hw_version == MLXBF_GIGE_VERSION_BF2) ? \
+ MLXBF_GIGE_BF2_RX_PAUSE_CNT_LO : \
+ MLXBF_GIGE_BF3_RX_PAUSE_CNT_LO)
+
+#define MLXBF_GIGE_BF2_LLU_GENERAL_CONFIG 0x2110
+#define MLXBF_GIGE_BF3_LLU_GENERAL_CONFIG 0x2030
+
+#define MLXBF_GIGE_BF2_LLU_COUNTERS_EN BIT(0)
+#define MLXBF_GIGE_BF3_LLU_COUNTERS_EN BIT(4)
+
#endif /* !defined(__MLXBF_GIGE_REGS_H__) */
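The pause counters above are exposed as 32-bit HI/LO halves and composed into a u64 in mlxbf_gige_get_pause_stats(). The driver reads each half once, which assumes the hardware either latches the pair or that skew is tolerable for statistics; when that assumption does not hold, the usual guard is to re-read the high half, as in this generic sketch:

#include <linux/io.h>
#include <linux/types.h>

static u64 read_counter64(void __iomem *hi, void __iomem *lo)
{
	u32 h1, h2, l;

	do {
		h1 = readl(hi);
		l  = readl(lo);
		h2 = readl(hi);	/* re-read: detect a low-word wrap mid-read */
	} while (h1 != h2);

	return ((u64)h1 << 32) | l;
}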
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index faa63ea9b83e..1915fa41c622 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -95,7 +95,7 @@ struct mlxsw_afa_set {
*/
has_trap:1,
has_police:1;
- unsigned int ref_count;
+ refcount_t ref_count;
struct mlxsw_afa_set *next; /* Pointer to the next set. */
struct mlxsw_afa_set *prev; /* Pointer to the previous set,
* note that set may have multiple
@@ -120,7 +120,7 @@ struct mlxsw_afa_fwd_entry {
struct rhash_head ht_node;
struct mlxsw_afa_fwd_entry_ht_key ht_key;
u32 kvdl_index;
- unsigned int ref_count;
+ refcount_t ref_count;
};
static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
@@ -282,7 +282,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
/* Need to initialize the set to pass by default */
mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
set->ht_key.is_first = is_first;
- set->ref_count = 1;
+ refcount_set(&set->ref_count, 1);
return set;
}
@@ -330,7 +330,7 @@ static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
struct mlxsw_afa_set *set)
{
- if (--set->ref_count)
+ if (!refcount_dec_and_test(&set->ref_count))
return;
if (set->shared)
mlxsw_afa_set_unshare(mlxsw_afa, set);
@@ -350,7 +350,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
mlxsw_afa_set_ht_params);
if (set) {
- set->ref_count++;
+ refcount_inc(&set->ref_count);
mlxsw_afa_set_put(mlxsw_afa, orig_set);
} else {
set = orig_set;
@@ -564,7 +564,7 @@ mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port)
if (!fwd_entry)
return ERR_PTR(-ENOMEM);
fwd_entry->ht_key.local_port = local_port;
- fwd_entry->ref_count = 1;
+ refcount_set(&fwd_entry->ref_count, 1);
err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
&fwd_entry->ht_node,
@@ -607,7 +607,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
mlxsw_afa_fwd_entry_ht_params);
if (fwd_entry) {
- fwd_entry->ref_count++;
+ refcount_inc(&fwd_entry->ref_count);
return fwd_entry;
}
return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
@@ -616,7 +616,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port)
static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
struct mlxsw_afa_fwd_entry *fwd_entry)
{
- if (--fwd_entry->ref_count)
+ if (!refcount_dec_and_test(&fwd_entry->ref_count))
return;
mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
}
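This and the following mlxsw hunks convert open-coded "unsigned int ref_count" fields to refcount_t, which saturates instead of wrapping and WARNs on increment-from-zero and on underflow. The generic shape of such a conversion, as a sketch built around an illustrative my_obj type:

#include <linux/refcount.h>
#include <linux/slab.h>

struct my_obj {
	refcount_t ref_count;
	/* ... payload ... */
};

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->ref_count, 1);	/* creator's reference */
	return obj;
}

static void my_obj_get(struct my_obj *obj)
{
	refcount_inc(&obj->ref_count);	/* WARNs if the count was already 0 */
}

static void my_obj_put(struct my_obj *obj)
{
	if (!refcount_dec_and_test(&obj->ref_count))
		return;
	kfree(obj);			/* last reference dropped */
}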
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 0d5e6f9b466e..947500f8ed71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/errno.h>
+#include <linux/refcount.h>
#include "item.h"
#include "core_acl_flex_keys.h"
@@ -107,7 +108,7 @@ EXPORT_SYMBOL(mlxsw_afk_destroy);
struct mlxsw_afk_key_info {
struct list_head list;
- unsigned int ref_count;
+ refcount_t ref_count;
unsigned int blocks_count;
int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value
* is index inside "blocks"
@@ -334,7 +335,7 @@ mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk,
if (err)
goto err_picker;
list_add(&key_info->list, &mlxsw_afk->key_info_list);
- key_info->ref_count = 1;
+ refcount_set(&key_info->ref_count, 1);
return key_info;
err_picker:
@@ -356,7 +357,7 @@ mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage);
if (key_info) {
- key_info->ref_count++;
+ refcount_inc(&key_info->ref_count);
return key_info;
}
return mlxsw_afk_key_info_create(mlxsw_afk, elusage);
@@ -365,7 +366,7 @@ EXPORT_SYMBOL(mlxsw_afk_key_info_get);
void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info)
{
- if (--key_info->ref_count)
+ if (!refcount_dec_and_test(&key_info->ref_count))
return;
mlxsw_afk_key_info_destroy(key_info);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 6b98c3287b49..f0ceb196a6ce 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -708,7 +708,6 @@ static const struct i2c_device_id mlxsw_m_i2c_id[] = {
static struct i2c_driver mlxsw_m_i2c_driver = {
.driver.name = "mlxsw_minimal",
- .class = I2C_CLASS_HWMON,
.id_table = mlxsw_m_i2c_id,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 5d3413636a62..bb642e9bb6cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -176,13 +176,15 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
- unsigned int counter_index, u64 *packets,
- u64 *bytes)
+ unsigned int counter_index, bool clear,
+ u64 *packets, u64 *bytes)
{
+ enum mlxsw_reg_mgpc_opcode op = clear ? MLXSW_REG_MGPC_OPCODE_CLEAR :
+ MLXSW_REG_MGPC_OPCODE_NOP;
char mgpc_pl[MLXSW_REG_MGPC_LEN];
int err;
- mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
+ mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, op,
MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
if (err)
@@ -2695,23 +2697,18 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
{
char sgcr_pl[MLXSW_REG_SGCR_LEN];
- u16 max_lag;
int err;
if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
return 0;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
- if (err)
- return err;
-
/* In DDD mode, which we use by default, each LAG entry is 8 PGT
* entries. The LAG table address needs to be 8-aligned, but that ought
* to be the case, since the LAG table is allocated first.
*/
err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
- max_lag * 8);
+ mlxsw_sp->max_lag * 8);
if (err)
return err;
if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
@@ -2728,33 +2725,31 @@ static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
err_mid_alloc_range:
mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
- max_lag * 8);
+ mlxsw_sp->max_lag * 8);
return err;
}
static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
{
- u16 max_lag;
- int err;
-
if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
return;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
- if (err)
- return;
-
mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
- max_lag * 8);
+ mlxsw_sp->max_lag * 8);
}
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
+struct mlxsw_sp_lag {
+ struct net_device *dev;
+ refcount_t ref_count;
+ u16 lag_id;
+};
+
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
char slcr_pl[MLXSW_REG_SLCR_LEN];
- u16 max_lag;
u32 seed;
int err;
@@ -2773,7 +2768,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &mlxsw_sp->max_lag);
if (err)
return err;
@@ -2784,7 +2779,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
+ mlxsw_sp->lags = kcalloc(mlxsw_sp->max_lag, sizeof(struct mlxsw_sp_lag),
GFP_KERNEL);
if (!mlxsw_sp->lags) {
err = -ENOMEM;
@@ -4269,19 +4264,48 @@ mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
-static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+static struct mlxsw_sp_lag *
+mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
+ struct mlxsw_sp_lag *lag;
+ u16 lag_id;
+ int i, err;
+
+ for (i = 0; i < mlxsw_sp->max_lag; i++) {
+ if (!mlxsw_sp->lags[i].dev)
+ break;
+ }
+
+ if (i == mlxsw_sp->max_lag) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Exceeded number of supported LAG devices");
+ return ERR_PTR(-EBUSY);
+ }
+ lag_id = i;
mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+ if (err)
+ return ERR_PTR(err);
+
+ lag = &mlxsw_sp->lags[lag_id];
+ lag->lag_id = lag_id;
+ lag->dev = lag_dev;
+ refcount_set(&lag->ref_count, 1);
+
+ return lag;
}
-static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+static int
+mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
- mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
+ lag->dev = NULL;
+
+ mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag->lag_id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
@@ -4329,34 +4353,44 @@ static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
-static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
- struct net_device *lag_dev,
- u16 *p_lag_id)
+static struct mlxsw_sp_lag *
+mlxsw_sp_lag_find(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev)
{
- struct mlxsw_sp_upper *lag;
- int free_lag_id = -1;
- u16 max_lag;
- int err, i;
+ int i;
- err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
- if (err)
- return err;
+ for (i = 0; i < mlxsw_sp->max_lag; i++) {
+ if (!mlxsw_sp->lags[i].dev)
+ continue;
- for (i = 0; i < max_lag; i++) {
- lag = mlxsw_sp_lag_get(mlxsw_sp, i);
- if (lag->ref_count) {
- if (lag->dev == lag_dev) {
- *p_lag_id = i;
- return 0;
- }
- } else if (free_lag_id < 0) {
- free_lag_id = i;
- }
+ if (mlxsw_sp->lags[i].dev == lag_dev)
+ return &mlxsw_sp->lags[i];
}
- if (free_lag_id < 0)
- return -EBUSY;
- *p_lag_id = free_lag_id;
- return 0;
+
+ return NULL;
+}
+
+static struct mlxsw_sp_lag *
+mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_lag *lag;
+
+ lag = mlxsw_sp_lag_find(mlxsw_sp, lag_dev);
+ if (lag) {
+ refcount_inc(&lag->ref_count);
+ return lag;
+ }
+
+ return mlxsw_sp_lag_create(mlxsw_sp, lag_dev, extack);
+}
+
+static void
+mlxsw_sp_lag_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
+{
+ if (!refcount_dec_and_test(&lag->ref_count))
+ return;
+
+ mlxsw_sp_lag_destroy(mlxsw_sp, lag);
}
static bool
@@ -4365,12 +4399,6 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
struct netdev_lag_upper_info *lag_upper_info,
struct netlink_ext_ack *extack)
{
- u16 lag_id;
-
- if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
- NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
- return false;
- }
if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
return false;
@@ -4482,22 +4510,16 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_upper *lag;
+ struct mlxsw_sp_lag *lag;
u16 lag_id;
u8 port_index;
int err;
- err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
- if (err)
- return err;
- lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
- if (!lag->ref_count) {
- err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
- if (err)
- return err;
- lag->dev = lag_dev;
- }
+ lag = mlxsw_sp_lag_get(mlxsw_sp, lag_dev, extack);
+ if (IS_ERR(lag))
+ return PTR_ERR(lag);
+ lag_id = lag->lag_id;
err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
if (err)
return err;
@@ -4515,7 +4537,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->local_port);
mlxsw_sp_port->lag_id = lag_id;
mlxsw_sp_port->lagged = 1;
- lag->ref_count++;
err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
if (err)
@@ -4542,7 +4563,6 @@ err_replay:
err_router_join:
mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
err_fid_port_join_lag:
- lag->ref_count--;
mlxsw_sp_port->lagged = 0;
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
mlxsw_sp_port->local_port);
@@ -4550,8 +4570,7 @@ err_fid_port_join_lag:
err_col_port_add:
mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
err_lag_uppers_bridge_join:
- if (!lag->ref_count)
- mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+ mlxsw_sp_lag_put(mlxsw_sp, lag);
return err;
}
@@ -4560,12 +4579,11 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 lag_id = mlxsw_sp_port->lag_id;
- struct mlxsw_sp_upper *lag;
+ struct mlxsw_sp_lag *lag;
if (!mlxsw_sp_port->lagged)
return;
- lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
- WARN_ON(lag->ref_count == 0);
+ lag = &mlxsw_sp->lags[lag_id];
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
@@ -4579,13 +4597,11 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
- if (lag->ref_count == 1)
- mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+ mlxsw_sp_lag_put(mlxsw_sp, lag);
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
mlxsw_sp_port->local_port);
mlxsw_sp_port->lagged = 0;
- lag->ref_count--;
/* Make sure untagged frames are allowed to ingress */
mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
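The LAG rework above replaces the index-plus-mlxsw_sp_upper bookkeeping with a table of struct mlxsw_sp_lag slots (dev == NULL marks a free slot) and a find-or-create helper that hands out counted references. A condensed sketch of that shape, with illustrative my_* names and the hardware programming omitted:

#include <linux/netdevice.h>
#include <linux/refcount.h>

struct my_lag {
	struct net_device *dev;		/* NULL == slot free */
	refcount_t ref_count;
	u16 lag_id;
};

static struct my_lag *my_lag_get(struct my_lag *lags, u16 max,
				 struct net_device *lag_dev)
{
	u16 i;

	for (i = 0; i < max; i++)
		if (lags[i].dev == lag_dev) {
			refcount_inc(&lags[i].ref_count);	/* reuse */
			return &lags[i];
		}

	for (i = 0; i < max; i++)
		if (!lags[i].dev) {				/* first free */
			lags[i].dev = lag_dev;
			lags[i].lag_id = i;
			refcount_set(&lags[i].ref_count, 1);
			return &lags[i];
		}

	return NULL;	/* table full: the driver reports -EBUSY via extack */
}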
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a0c9775fa955..3beb5d0847ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -78,11 +78,6 @@ struct mlxsw_sp_span_entry;
enum mlxsw_sp_l3proto;
union mlxsw_sp_l3addr;
-struct mlxsw_sp_upper {
- struct net_device *dev;
- unsigned int ref_count;
-};
-
enum mlxsw_sp_rif_type {
MLXSW_SP_RIF_TYPE_SUBPORT,
MLXSW_SP_RIF_TYPE_VLAN,
@@ -136,6 +131,7 @@ struct mlxsw_sp_span_ops;
struct mlxsw_sp_qdisc_state;
struct mlxsw_sp_mall_entry;
struct mlxsw_sp_pgt;
+struct mlxsw_sp_lag;
struct mlxsw_sp_port_mapping {
u8 module;
@@ -164,7 +160,8 @@ struct mlxsw_sp {
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
const unsigned char *mac_mask;
- struct mlxsw_sp_upper *lags;
+ struct mlxsw_sp_lag *lags;
+ u16 max_lag;
struct mlxsw_sp_port_mapping *port_mapping;
struct mlxsw_sp_port_mapping_events port_mapping_events;
struct rhashtable sample_trigger_ht;
@@ -257,12 +254,6 @@ struct mlxsw_sp_fid_core_ops {
void (*fini)(struct mlxsw_sp *mlxsw_sp);
};
-static inline struct mlxsw_sp_upper *
-mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
-{
- return &mlxsw_sp->lags[lag_id];
-}
-
struct mlxsw_sp_port_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
@@ -715,8 +706,8 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
- unsigned int counter_index, u64 *packets,
- u64 *bytes);
+ unsigned int counter_index, bool clear,
+ u64 *packets, u64 *bytes);
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 7c59c8a13584..3e70cee4d2f3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <net/net_namespace.h>
#include <net/tc_act/tc_vlan.h>
@@ -55,7 +56,7 @@ struct mlxsw_sp_acl_ruleset {
struct rhash_head ht_node; /* Member of acl HT */
struct mlxsw_sp_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
- unsigned int ref_count;
+ refcount_t ref_count;
unsigned int min_prio;
unsigned int max_prio;
unsigned long priv[];
@@ -99,7 +100,7 @@ static bool
mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset)
{
/* We hold a reference on ruleset ourselves */
- return ruleset->ref_count == 2;
+ return refcount_read(&ruleset->ref_count) == 2;
}
int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
@@ -176,7 +177,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
ruleset = kzalloc(alloc_size, GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
- ruleset->ref_count = 1;
+ refcount_set(&ruleset->ref_count, 1);
ruleset->ht_key.block = block;
ruleset->ht_key.chain_index = chain_index;
ruleset->ht_key.ops = ops;
@@ -222,13 +223,13 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
- ruleset->ref_count++;
+ refcount_inc(&ruleset->ref_count);
}
static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset)
{
- if (--ruleset->ref_count)
+ if (!refcount_dec_and_test(&ruleset->ref_count))
return;
mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
@@ -1023,7 +1024,7 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
rulei = mlxsw_sp_acl_rule_rulei(rule);
if (rulei->counter_valid) {
err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
- &current_packets,
+ false, &current_packets,
&current_bytes);
if (err)
return err;
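mlxsw_sp_flow_counter_get() now takes a clear flag mapped to MLXSW_REG_MGPC_OPCODE_CLEAR, so a query can atomically read and zero the hardware counter. Nexthop statistics (further down) pass clear=true and treat each read as a delta, while ACL rule, multicast-route and neighbour statistics keep clear=false. A small consumer sketch built on the new signature; my_report_delta() and its accumulator are illustrative:

static int my_report_delta(struct mlxsw_sp *mlxsw_sp,
			   unsigned int counter_index, u64 *total_packets)
{
	u64 packets, bytes;
	int err;

	err = mlxsw_sp_flow_counter_get(mlxsw_sp, counter_index, true,
					&packets, &bytes);
	if (err)
		return err;

	*total_packets += packets;	/* delta since the previous read */
	return 0;
}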
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 50ea1eff02b2..f20052776b3f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>
@@ -155,7 +156,7 @@ struct mlxsw_sp_acl_tcam_vregion {
struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
} rehash;
struct mlxsw_sp *mlxsw_sp;
- unsigned int ref_count;
+ refcount_t ref_count;
};
struct mlxsw_sp_acl_tcam_vchunk;
@@ -176,7 +177,7 @@ struct mlxsw_sp_acl_tcam_vchunk {
unsigned int priority; /* Priority within the vregion and group */
struct mlxsw_sp_acl_tcam_vgroup *vgroup;
struct mlxsw_sp_acl_tcam_vregion *vregion;
- unsigned int ref_count;
+ refcount_t ref_count;
};
struct mlxsw_sp_acl_tcam_entry {
@@ -769,7 +770,7 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
vregion->tcam = tcam;
vregion->mlxsw_sp = mlxsw_sp;
vregion->vgroup = vgroup;
- vregion->ref_count = 1;
+ refcount_set(&vregion->ref_count, 1);
vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
if (IS_ERR(vregion->key_info)) {
@@ -856,7 +857,7 @@ mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
*/
return ERR_PTR(-EOPNOTSUPP);
}
- vregion->ref_count++;
+ refcount_inc(&vregion->ref_count);
return vregion;
}
@@ -871,7 +872,7 @@ static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vregion *vregion)
{
- if (--vregion->ref_count)
+ if (!refcount_dec_and_test(&vregion->ref_count))
return;
mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}
@@ -924,7 +925,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
INIT_LIST_HEAD(&vchunk->ventry_list);
vchunk->priority = priority;
vchunk->vgroup = vgroup;
- vchunk->ref_count = 1;
+ refcount_set(&vchunk->ref_count, 1);
vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
priority, elusage);
@@ -1008,7 +1009,7 @@ mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
elusage)))
return ERR_PTR(-EINVAL);
- vchunk->ref_count++;
+ refcount_inc(&vchunk->ref_count);
return vchunk;
}
return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
@@ -1019,7 +1020,7 @@ static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
- if (--vchunk->ref_count)
+ if (!refcount_dec_and_test(&vchunk->ref_count))
return;
mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index c8a356accdf8..ca80af06465f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -1181,9 +1181,11 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
char ratr_pl[MLXSW_REG_RATR_LEN];
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_nexthop *nh;
+ unsigned int n_done = 0;
u32 adj_hash_index = 0;
u32 adj_index = 0;
u32 adj_size = 0;
+ int err;
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
if (!mlxsw_sp_nexthop_is_forward(nh) ||
@@ -1192,15 +1194,27 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size,
&adj_hash_index);
- if (enable)
- mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
- else
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ if (enable) {
+ err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
+ if (err)
+ goto err_counter_enable;
+ } else {
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
+ }
mlxsw_sp_nexthop_eth_update(mlxsw_sp,
adj_index + adj_hash_index, nh,
true, ratr_pl);
+ n_done++;
}
return 0;
+
+err_counter_enable:
+ mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
+ if (!n_done--)
+ break;
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
+ }
+ return err;
}
static u64
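The dpipe hunk above makes counter enablement fail-safe: it counts how many nexthops were updated (n_done) and, on error, walks the same list again disabling exactly that many. The idiom in the abstract, with a hypothetical item type and stubbed enable/disable helpers:

struct item { bool enabled; };

static int item_enable(struct item *it) { it->enabled = true; return 0; }
static void item_disable(struct item *it) { it->enabled = false; }

static int enable_all(struct item *items, int n)
{
	int done = 0;
	int err, i;

	for (i = 0; i < n; i++) {
		err = item_enable(&items[i]);
		if (err)
			goto err_enable;
		done++;
	}
	return 0;

err_enable:
	while (done--)
		item_disable(&items[done]);	/* undo only what succeeded */
	return err;
}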
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
index 221aa6a474eb..01d81ae3662a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
@@ -361,7 +361,7 @@ static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_tcam_route *route = route_priv;
return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
- packets, bytes);
+ false, packets, bytes);
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 7164f9e6370f..40ba314fbc72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -19,6 +19,7 @@
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <linux/genalloc.h>
+#include <linux/xarray.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -501,7 +502,7 @@ struct mlxsw_sp_rt6 {
struct mlxsw_sp_lpm_tree {
u8 id; /* tree ID */
- unsigned int ref_count;
+ refcount_t ref_count;
enum mlxsw_sp_l3proto proto;
unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
struct mlxsw_sp_prefix_usage prefix_usage;
@@ -578,7 +579,7 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
lpm_tree = &mlxsw_sp->router->lpm.trees[i];
- if (lpm_tree->ref_count == 0)
+ if (refcount_read(&lpm_tree->ref_count) == 0)
return lpm_tree;
}
return NULL;
@@ -654,7 +655,7 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
sizeof(lpm_tree->prefix_usage));
memset(&lpm_tree->prefix_ref_count, 0,
sizeof(lpm_tree->prefix_ref_count));
- lpm_tree->ref_count = 1;
+ refcount_set(&lpm_tree->ref_count, 1);
return lpm_tree;
err_left_struct_set:
@@ -678,7 +679,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
lpm_tree = &mlxsw_sp->router->lpm.trees[i];
- if (lpm_tree->ref_count != 0 &&
+ if (refcount_read(&lpm_tree->ref_count) &&
lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage)) {
@@ -691,14 +692,15 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
- lpm_tree->ref_count++;
+ refcount_inc(&lpm_tree->ref_count);
}
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ if (!refcount_dec_and_test(&lpm_tree->ref_count))
+ return;
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -2250,7 +2252,7 @@ int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
- p_counter, NULL);
+ false, p_counter, NULL);
}
static struct mlxsw_sp_neigh_entry *
@@ -3048,6 +3050,8 @@ struct mlxsw_sp_nexthop_key {
struct fib_nh *fib_nh;
};
+struct mlxsw_sp_nexthop_counter;
+
struct mlxsw_sp_nexthop {
struct list_head neigh_list_node; /* member of neigh entry list */
struct list_head crif_list_node;
@@ -3079,8 +3083,8 @@ struct mlxsw_sp_nexthop {
struct mlxsw_sp_neigh_entry *neigh_entry;
struct mlxsw_sp_ipip_entry *ipip_entry;
};
- unsigned int counter_index;
- bool counter_valid;
+ struct mlxsw_sp_nexthop_counter *counter;
+ u32 id; /* NH ID for members of a NH object group. */
};
static struct net_device *
@@ -3105,8 +3109,10 @@ struct mlxsw_sp_nexthop_group_info {
int sum_norm_weight;
u8 adj_index_valid:1,
gateway:1, /* routes using the group use a gateway */
- is_resilient:1;
+ is_resilient:1,
+ hw_stats:1;
struct list_head list; /* member in nh_res_grp_list */
+ struct xarray nexthop_counters;
struct mlxsw_sp_nexthop nexthops[] __counted_by(count);
};
@@ -3150,39 +3156,148 @@ struct mlxsw_sp_nexthop_group {
bool can_destroy;
};
-void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+struct mlxsw_sp_nexthop_counter {
+ unsigned int counter_index;
+ refcount_t ref_count;
+};
+
+static struct mlxsw_sp_nexthop_counter *
+mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_nexthop_counter *nhct;
+ int err;
+
+ nhct = kzalloc(sizeof(*nhct), GFP_KERNEL);
+ if (!nhct)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nhct->counter_index);
+ if (err)
+ goto err_counter_alloc;
+
+ refcount_set(&nhct->ref_count, 1);
+ return nhct;
+
+err_counter_alloc:
+ kfree(nhct);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_counter *nhct)
+{
+ mlxsw_sp_flow_counter_free(mlxsw_sp, nhct->counter_index);
+ kfree(nhct);
+}
+
+static struct mlxsw_sp_nexthop_counter *
+mlxsw_sp_nexthop_sh_counter_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
+ struct mlxsw_sp_nexthop_counter *nhct;
+ void *ptr;
+ int err;
+
+ nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id);
+ if (nhct) {
+ refcount_inc(&nhct->ref_count);
+ return nhct;
+ }
+
+ nhct = mlxsw_sp_nexthop_counter_alloc(mlxsw_sp);
+ if (IS_ERR(nhct))
+ return nhct;
+
+ ptr = xa_store(&nh_grp->nhgi->nexthop_counters, nh->id, nhct,
+ GFP_KERNEL);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ goto err_store;
+ }
+
+ return nhct;
+
+err_store:
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nhct);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_nexthop_sh_counter_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
+ struct mlxsw_sp_nexthop_counter *nhct;
+
+ nhct = xa_load(&nh_grp->nhgi->nexthop_counters, nh->id);
+ if (WARN_ON(!nhct))
+ return;
+
+ if (!refcount_dec_and_test(&nhct->ref_count))
+ return;
+
+ xa_erase(&nh_grp->nhgi->nexthop_counters, nh->id);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nhct);
+}
+
+int mlxsw_sp_nexthop_counter_enable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
+ const char *table_adj = MLXSW_SP_DPIPE_TABLE_NAME_ADJ;
+ struct mlxsw_sp_nexthop_counter *nhct;
struct devlink *devlink;
+ bool dpipe_stats;
+
+ if (nh->counter)
+ return 0;
devlink = priv_to_devlink(mlxsw_sp->core);
- if (!devlink_dpipe_table_counter_enabled(devlink,
- MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
- return;
+ dpipe_stats = devlink_dpipe_table_counter_enabled(devlink, table_adj);
+ if (!(nh->nhgi->hw_stats || dpipe_stats))
+ return 0;
- if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
- return;
+ if (nh->id)
+ nhct = mlxsw_sp_nexthop_sh_counter_get(mlxsw_sp, nh);
+ else
+ nhct = mlxsw_sp_nexthop_counter_alloc(mlxsw_sp);
+ if (IS_ERR(nhct))
+ return PTR_ERR(nhct);
- nh->counter_valid = true;
+ nh->counter = nhct;
+ return 0;
}
-void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
+void mlxsw_sp_nexthop_counter_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
{
- if (!nh->counter_valid)
+ if (!nh->counter)
return;
- mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
- nh->counter_valid = false;
+
+ if (nh->id)
+ mlxsw_sp_nexthop_sh_counter_put(mlxsw_sp, nh);
+ else
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh->counter);
+ nh->counter = NULL;
+}
+
+static int mlxsw_sp_nexthop_counter_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ if (nh->nhgi->hw_stats)
+ return mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
+ return 0;
}
int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh, u64 *p_counter)
{
- if (!nh->counter_valid)
+ if (!nh->counter)
return -EINVAL;
- return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
- p_counter, NULL);
+ return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter->counter_index,
+ true, p_counter, NULL);
}
struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
@@ -3655,8 +3770,9 @@ static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
WARN_ON_ONCE(1);
return -EINVAL;
}
- if (nh->counter_valid)
- mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
+ if (nh->counter)
+ mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter->counter_index,
+ true);
else
mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
@@ -3743,6 +3859,7 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
nh = &nhgi->nexthops[i];
if (!nh->should_offload) {
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
nh->offloaded = 0;
continue;
}
@@ -3750,6 +3867,10 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
if (nh->update || reallocate) {
int err = 0;
+ err = mlxsw_sp_nexthop_counter_update(mlxsw_sp, nh);
+ if (err)
+ return err;
+
err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
true, ratr_pl);
if (err)
@@ -4506,7 +4627,10 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+ err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
+ if (err)
+ goto err_counter_enable;
+
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
if (!dev)
@@ -4530,7 +4654,8 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
err_nexthop_neigh_init:
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
+err_counter_enable:
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
return err;
}
@@ -4540,7 +4665,7 @@ static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}
@@ -5005,9 +5130,9 @@ mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
break;
}
- mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
nh->ifindex = dev->ifindex;
+ nh->id = nh_obj->id;
err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
if (err)
@@ -5029,7 +5154,6 @@ mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
err_type_init:
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
return err;
}
@@ -5040,7 +5164,7 @@ static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
nh->should_offload = 0;
}
@@ -5052,6 +5176,7 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_info *nhgi;
struct mlxsw_sp_nexthop *nh;
bool is_resilient = false;
+ bool hw_stats = false;
unsigned int nhs;
int err, i;
@@ -5061,9 +5186,11 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
break;
case NH_NOTIFIER_INFO_TYPE_GRP:
nhs = info->nh_grp->num_nh;
+ hw_stats = info->nh_grp->hw_stats;
break;
case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
nhs = info->nh_res_table->num_nh_buckets;
+ hw_stats = info->nh_res_table->hw_stats;
is_resilient = true;
break;
default:
@@ -5078,6 +5205,10 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
nhgi->is_resilient = is_resilient;
nhgi->count = nhs;
+ nhgi->hw_stats = hw_stats;
+
+ xa_init_flags(&nhgi->nexthop_counters, XA_FLAGS_ALLOC1);
+
for (i = 0; i < nhgi->count; i++) {
struct nh_notifier_single_info *nh_obj;
int weight;
@@ -5160,6 +5291,8 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
WARN_ON_ONCE(nhgi->adj_index_valid);
+ WARN_ON(!xa_empty(&nhgi->nexthop_counters));
+ xa_destroy(&nhgi->nexthop_counters);
kfree(nhgi);
}
@@ -5299,6 +5432,43 @@ err_out:
return err;
}
+static int mlxsw_sp_nexthop_obj_res_group_pre(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct nh_notifier_grp_info *grp_info = info->nh_grp;
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ int err;
+ int i;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!nh_grp)
+ return 0;
+ nhgi = nh_grp->nhgi;
+
+ if (nhgi->hw_stats == grp_info->hw_stats)
+ return 0;
+
+ nhgi->hw_stats = grp_info->hw_stats;
+
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ if (nh->offloaded)
+ nh->update = 1;
+ }
+
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_group_refresh;
+
+ return 0;
+
+err_group_refresh:
+ nhgi->hw_stats = !grp_info->hw_stats;
+ return err;
+}
+
static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
struct nh_notifier_info *info)
{
@@ -5475,6 +5645,79 @@ err_nexthop_obj_init:
return err;
}
+static void
+mlxsw_sp_nexthop_obj_mp_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group_info *nhgi,
+ struct nh_notifier_grp_hw_stats_info *info)
+{
+ int nhi;
+
+ for (nhi = 0; nhi < info->num_nh; nhi++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[nhi];
+ u64 packets;
+ int err;
+
+ err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &packets);
+ if (err)
+ continue;
+
+ nh_grp_hw_stats_report_delta(info, nhi, packets);
+ }
+}
+
+static void
+mlxsw_sp_nexthop_obj_res_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group_info *nhgi,
+ struct nh_notifier_grp_hw_stats_info *info)
+{
+ int nhi = -1;
+ int bucket;
+
+ for (bucket = 0; bucket < nhgi->count; bucket++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[bucket];
+ u64 packets;
+ int err;
+
+ if (nhi == -1 || info->stats[nhi].id != nh->id) {
+ for (nhi = 0; nhi < info->num_nh; nhi++)
+ if (info->stats[nhi].id == nh->id)
+ break;
+ if (WARN_ON_ONCE(nhi == info->num_nh)) {
+ nhi = -1;
+ continue;
+ }
+ }
+
+ err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &packets);
+ if (err)
+ continue;
+
+ nh_grp_hw_stats_report_delta(info, nhi, packets);
+ }
+}
+
+static void mlxsw_sp_nexthop_obj_hw_stats_get(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ if (info->type != NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS)
+ return;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!nh_grp)
+ return;
+ nhgi = nh_grp->nhgi;
+
+ if (nhgi->is_resilient)
+ mlxsw_sp_nexthop_obj_res_hw_stats_get(mlxsw_sp, nhgi,
+ info->nh_grp_hw_stats);
+ else
+ mlxsw_sp_nexthop_obj_mp_hw_stats_get(mlxsw_sp, nhgi,
+ info->nh_grp_hw_stats);
+}
+
static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -5490,6 +5733,10 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
mutex_lock(&router->lock);
switch (event) {
+ case NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE:
+ err = mlxsw_sp_nexthop_obj_res_group_pre(router->mlxsw_sp,
+ info);
+ break;
case NEXTHOP_EVENT_REPLACE:
err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
break;
@@ -5500,6 +5747,9 @@ static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
info);
break;
+ case NEXTHOP_EVENT_HW_STATS_REPORT_DELTA:
+ mlxsw_sp_nexthop_obj_hw_stats_get(router->mlxsw_sp, info);
+ break;
default:
break;
}
@@ -6733,7 +6983,10 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
#if IS_ENABLED(CONFIG_IPV6)
nh->neigh_tbl = &nd_tbl;
#endif
- mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+
+ err = mlxsw_sp_nexthop_counter_enable(mlxsw_sp, nh);
+ if (err)
+ return err;
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -6749,7 +7002,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
err_nexthop_type_init:
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
return err;
}
@@ -6758,7 +7011,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
- mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_counter_disable(mlxsw_sp, nh);
}
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
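A central piece of the router changes: nexthops that belong to a nexthop object group (nh->id != 0) now share one flow counter per NH ID, stored in the group's nexthop_counters xarray and reference-counted, so resilient-group buckets pointing at the same nexthop report a single consistent statistic. A sketch of the xarray-backed share-by-id lookup, with an illustrative my_counter type standing in for the driver's structure:

#include <linux/err.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct my_counter {
	refcount_t ref_count;
	unsigned int hw_index;		/* stand-in for the flow counter */
};

static struct my_counter *my_counter_get(struct xarray *xa, u32 id)
{
	struct my_counter *c = xa_load(xa, id);
	void *old;

	if (c) {
		refcount_inc(&c->ref_count);	/* share the existing one */
		return c;
	}

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);
	refcount_set(&c->ref_count, 1);

	old = xa_store(xa, id, c, GFP_KERNEL);
	if (xa_is_err(old)) {
		kfree(c);
		return ERR_PTR(xa_err(old));
	}
	return c;
}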
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index ed3b628caafe..0432c7cc6b07 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -156,10 +156,10 @@ int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_nexthop *nh, bool force,
char *ratr_pl);
-void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
+int mlxsw_sp_nexthop_counter_enable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh);
-void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh);
+void mlxsw_sp_nexthop_counter_disable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh);
static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
const union mlxsw_sp_l3addr *addr2)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6c749c148148..6397ff0dc951 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -61,7 +61,7 @@ struct mlxsw_sp_bridge_port {
struct mlxsw_sp_bridge_device *bridge_device;
struct list_head list;
struct list_head vlans_list;
- unsigned int ref_count;
+ refcount_t ref_count;
u8 stp_state;
unsigned long flags;
bool mrouter;
@@ -495,7 +495,7 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
BR_MCAST_FLOOD;
INIT_LIST_HEAD(&bridge_port->vlans_list);
list_add(&bridge_port->list, &bridge_device->ports_list);
- bridge_port->ref_count = 1;
+ refcount_set(&bridge_port->ref_count, 1);
err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
NULL, NULL, NULL, false, extack);
@@ -531,7 +531,7 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
if (bridge_port) {
- bridge_port->ref_count++;
+ refcount_inc(&bridge_port->ref_count);
return bridge_port;
}
@@ -558,7 +558,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
{
struct mlxsw_sp_bridge_device *bridge_device;
- if (--bridge_port->ref_count != 0)
+ if (!refcount_dec_and_test(&bridge_port->ref_count))
return;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_bridge_port_destroy(bridge_port);
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 5693784eec5b..443128adbcb6 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -464,7 +464,7 @@ static struct regmap_config regcfg = {
.val_bits = 16,
.max_register = 0xee,
.reg_stride = 2,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.readable_reg = encx24j600_regmap_readable,
.writeable_reg = encx24j600_regmap_writeable,
@@ -485,7 +485,7 @@ static struct regmap_config phycfg = {
.reg_bits = 8,
.val_bits = 16,
.max_register = 0x1f,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.readable_reg = encx24j600_phymap_readable,
.writeable_reg = encx24j600_phymap_writeable,
@@ -513,4 +513,5 @@ int devm_regmap_init_encx24j600(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
+MODULE_DESCRIPTION("Microchip ENCX24J600 helpers");
MODULE_LICENSE("GPL");
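The encx24j600 hunks only flip the regmap cache backend from the deprecated rbtree to the maple tree; everything else in the config is untouched. A minimal sketch of such a config, with placeholder register geometry:

#include <linux/regmap.h>

static const struct regmap_config my_regcfg = {
	.reg_bits = 8,
	.val_bits = 16,
	.max_register = 0x1f,
	.cache_type = REGCACHE_MAPLE,	/* was REGCACHE_RBTREE */
};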
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index a2b3f4433ca8..8a6ae171e375 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1055,7 +1055,7 @@ static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
}
static int lan743x_ethtool_get_eee(struct net_device *netdev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
@@ -1092,7 +1092,7 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
}
static int lan743x_ethtool_set_eee(struct net_device *netdev,
- struct ethtool_eee *eee)
+ struct ethtool_keee *eee)
{
struct lan743x_adapter *adapter;
struct phy_device *phydev;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 45e209a7d083..bd8aa83b47e5 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1196,7 +1196,7 @@ static int lan743x_sgmii_config(struct lan743x_adapter *adapter)
ret = lan743x_is_sgmii_2_5G_mode(adapter, &status);
if (ret < 0) {
netif_err(adapter, drv, adapter->netdev,
- "erro %d SGMII get mode failed\n", ret);
+ "error %d SGMII get mode failed\n", ret);
return ret;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 2f04bc77a118..2801f08bf1c9 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1712,13 +1712,13 @@ bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter)
struct lan743x_ptp *ptp = &adapter->ptp;
bool result = false;
- spin_lock_bh(&ptp->tx_ts_lock);
+ spin_lock(&ptp->tx_ts_lock);
if (ptp->pending_tx_timestamps < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) {
/* request granted */
ptp->pending_tx_timestamps++;
result = true;
}
- spin_unlock_bh(&ptp->tx_ts_lock);
+ spin_unlock(&ptp->tx_ts_lock);
return result;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
index 41fa2523d91d..5f2cd9a8cf8f 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c
@@ -37,19 +37,24 @@ static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
/* Now, set PGIDs for each active LAG */
for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
- struct net_device *bond = lan966x->ports[lag]->bond;
+ struct lan966x_port *port = lan966x->ports[lag];
int num_active_ports = 0;
+ struct net_device *bond;
unsigned long bond_mask;
u8 aggr_idx[16];
- if (!bond || (visited & BIT(lag)))
+ if (!port || !port->bond || (visited & BIT(lag)))
continue;
+ bond = port->bond;
bond_mask = lan966x_lag_get_mask(lan966x, bond);
for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
struct lan966x_port *port = lan966x->ports[p];
+ if (!port)
+ continue;
+
lan_wr(ANA_PGID_PGID_SET(bond_mask),
lan966x, ANA_PGID(p));
if (port->lag_tx_active)
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
index ac525ff1503e..3a01e13bd10b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
@@ -25,6 +25,8 @@ static void lan966x_vcap_is1_port_keys(struct lan966x_port *port,
for (int l = 0; l < admin->lookups; ++l) {
out->prf(out->dst, "\n Lookup %d: ", l);
+ val = lan_rd(lan966x, ANA_VCAP_S1_CFG(port->chip_port, l));
+
out->prf(out->dst, "\n other: ");
switch (ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(val)) {
case VCAP_IS1_PS_OTHER_NORMAL:
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index 4af285918ea2..75868b3f548e 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -347,10 +347,10 @@ int sparx5_del_mact_entry(struct sparx5 *sparx5,
list) {
if ((vid == 0 || mact_entry->vid == vid) &&
ether_addr_equal(addr, mact_entry->mac)) {
+ sparx5_mact_forget(sparx5, addr, mact_entry->vid);
+
list_del(&mact_entry->list);
devm_kfree(sparx5->dev, mact_entry);
-
- sparx5_mact_forget(sparx5, addr, mact_entry->vid);
}
}
mutex_unlock(&sparx5->mact_lock);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index d1f7fc8b1b71..3c066b62e689 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -757,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sparx5);
sparx5->pdev = pdev;
sparx5->dev = &pdev->dev;
+ spin_lock_init(&sparx5->tx_lock);
/* Do switch core reset if available */
reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch");
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 6f565c0c0c3d..316fed5f2735 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -280,6 +280,7 @@ struct sparx5 {
int xtr_irq;
/* Frame DMA */
int fdma_irq;
+ spinlock_t tx_lock; /* lock for frame transmission */
struct sparx5_rx rx;
struct sparx5_tx tx;
/* PTP */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 6db6ac6a3bbc..ac7e1cffbcec 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -244,10 +244,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
+ spin_lock(&sparx5->tx_lock);
if (sparx5->fdma_irq > 0)
ret = sparx5_fdma_xmit(sparx5, ifh, skb);
else
ret = sparx5_inject(sparx5, ifh, skb, dev);
+ spin_unlock(&sparx5->tx_lock);
if (ret == -EBUSY)
goto busy;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index d33b27214539..1332db9a08eb 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1249,15 +1249,47 @@ void mana_gd_free_res_map(struct gdma_resource *r)
r->size = 0;
}
+static int irq_setup(unsigned int *irqs, unsigned int len, int node)
+{
+ const struct cpumask *next, *prev = cpu_none_mask;
+ cpumask_var_t cpus __free(free_cpumask_var);
+ int cpu, weight;
+
+ if (!alloc_cpumask_var(&cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ rcu_read_lock();
+ for_each_numa_hop_mask(next, node) {
+ weight = cpumask_weight_andnot(next, prev);
+ while (weight > 0) {
+ cpumask_andnot(cpus, next, prev);
+ for_each_cpu(cpu, cpus) {
+ if (len-- == 0)
+ goto done;
+ irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
+ cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
+ --weight;
+ }
+ }
+ prev = next;
+ }
+done:
+ rcu_read_unlock();
+ return 0;
+}
+
static int mana_gd_setup_irqs(struct pci_dev *pdev)
{
- unsigned int max_queues_per_port = num_online_cpus();
struct gdma_context *gc = pci_get_drvdata(pdev);
+ unsigned int max_queues_per_port;
struct gdma_irq_context *gic;
unsigned int max_irqs, cpu;
- int nvec, irq;
+ int start_irq_index = 1;
+ int nvec, *irqs, irq;
int err, i = 0, j;
+ cpus_read_lock();
+ max_queues_per_port = num_online_cpus();
if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
max_queues_per_port = MANA_MAX_NUM_QUEUES;
@@ -1265,8 +1297,18 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
max_irqs = max_queues_per_port + 1;
nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
- if (nvec < 0)
+ if (nvec < 0) {
+ cpus_read_unlock();
return nvec;
+ }
+ if (nvec <= num_online_cpus())
+ start_irq_index = 0;
+
+ irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
+ if (!irqs) {
+ err = -ENOMEM;
+ goto free_irq_vector;
+ }
gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
GFP_KERNEL);
@@ -1294,17 +1336,41 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
goto free_irq;
}
- err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
- if (err)
- goto free_irq;
-
- cpu = cpumask_local_spread(i, gc->numa_node);
- irq_set_affinity_and_hint(irq, cpumask_of(cpu));
+ if (!i) {
+ err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_irq;
+
+ /* If the number of IRQs is one more than the number of online
+ * CPUs, assign IRQ0 (the HWC IRQ) and IRQ1 to the same CPU;
+ * otherwise use different CPUs for IRQ0 and IRQ1.
+ * Use cpumask_local_spread instead of cpumask_first for the
+ * node, because the node can be memory-only.
+ */
+ if (start_irq_index) {
+ cpu = cpumask_local_spread(i, gc->numa_node);
+ irq_set_affinity_and_hint(irq, cpumask_of(cpu));
+ } else {
+ irqs[start_irq_index] = irq;
+ }
+ } else {
+ irqs[i - start_irq_index] = irq;
+ err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
+ gic->name, gic);
+ if (err)
+ goto free_irq;
+ }
}
+ err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
+ if (err)
+ goto free_irq;
+
gc->max_num_msix = nvec;
gc->num_msix_usable = nvec;
-
+ cpus_read_unlock();
return 0;
free_irq:
@@ -1317,8 +1383,10 @@ free_irq:
}
kfree(gc->irq_contexts);
+ kfree(irqs);
gc->irq_contexts = NULL;
free_irq_vector:
+ cpus_read_unlock();
pci_free_irq_vectors(pdev);
return err;
}
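Two idioms in the mana hunk are worth noting. irq_setup() spreads IRQs over CPUs in increasing NUMA distance from the device (for_each_numa_hop_mask()), pinning one vector per SMT sibling group. It also declares its temporary cpumask with __free(free_cpumask_var), the scope-based cleanup helper from <linux/cleanup.h>, so the mask is released on every return path without an explicit free. A minimal sketch of that cleanup pattern, assuming only what the hunk itself uses:

#include <linux/cleanup.h>
#include <linux/cpumask.h>

static int my_weight(void)
{
	cpumask_var_t cpus __free(free_cpumask_var);

	if (!alloc_cpumask_var(&cpus, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus, cpu_online_mask);
	return cpumask_weight(cpus);	/* freed automatically on return */
}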
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 56ccbd4c37fe..ed2fb44500b0 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -3078,4 +3078,5 @@ void ocelot_deinit_port(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_deinit_port);
+MODULE_DESCRIPTION("Microsemi Ocelot switch family library");
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 2b383d92d7f5..2c3f62907958 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -460,7 +460,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
set_tun->ttl = ip6_dst_hoplimit(dst);
dst_release(dst);
} else {
- set_tun->ttl = net->ipv6.devconf_all->hop_limit;
+ set_tun->ttl = READ_ONCE(net->ipv6.devconf_all->hop_limit);
}
#endif
} else {
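
The switch to READ_ONCE() above marks a lockless read: devconf_all->hop_limit can be rewritten through sysctl at any moment, and the annotation guarantees one untorn load that the compiler cannot split, merge, or re-fetch. A userspace sketch using the conventional volatile-cast definition (a simplification of the kernel macro):

#include <stdio.h>

/* Simplified userspace stand-in for the kernel macro: a single
 * volatile load the compiler can't repeat or elide. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static int hop_limit = 64;	/* imagine a concurrent writer updates this */

int main(void)
{
	/* Snapshot once; later uses of 'ttl' are guaranteed to agree
	 * even if hop_limit changes mid-function. */
	int ttl = READ_ONCE(hop_limit);

	printf("using ttl %d\n", ttl);
	return 0;
}
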
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 361d7c495e2d..2c7bd6e80d99 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -337,6 +337,11 @@ static void nfp_fl_lag_do_work(struct work_struct *work)
acti_netdevs = kmalloc_array(entry->slave_cnt,
sizeof(*acti_netdevs), GFP_KERNEL);
+ if (!acti_netdevs) {
+ schedule_delayed_work(&lag->work,
+ NFP_FL_LAG_DELAY);
+ continue;
+ }
/* Include sanity check in the loop. It may be that a bond has
* changed between processing the last notification and the
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index e522845c7c21..0d7d138d6e0d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -1084,7 +1084,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
u16 nfp_mac_idx = 0;
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
- if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
+ if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) {
if (entry->bridge_count ||
!nfp_flower_is_supported_bridge(netdev)) {
nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 3b3210d823e8..f28e769e6fda 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2776,6 +2776,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
case NFP_NFD_VER_NFD3:
netdev->netdev_ops = &nfp_nfd3_netdev_ops;
netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
break;
case NFP_NFD_VER_NFDK:
netdev->netdev_ops = &nfp_nfdk_netdev_ops;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
index 33b4c2856316..3f10c5365c80 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
@@ -537,11 +537,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
const u32 barcfg_msix_general =
NFP_PCIE_BAR_PCIE2CPP_MapType(
NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
const u32 barcfg_msix_xpb =
NFP_PCIE_BAR_PCIE2CPP_MapType(
NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) |
NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
NFP_CPP_TARGET_ISLAND_XPB);
const u32 barcfg_explicit[4] = {
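
The bug fixed here is a classic register-layout mistake: LengthSelect_32BIT is a field *value*, not a pre-shifted mask, so OR-ing it in raw lands the bits at the wrong offset until it is passed through the LengthSelect() position macro. A toy illustration with invented offsets (not the real NFP register layout):

#include <stdio.h>
#include <stdint.h>

/* Invented layout: a 2-bit length-select field at bits 28:27. */
#define LENGTH_SELECT(v)	(((uint32_t)(v) & 0x3) << 27)
#define LENGTH_SELECT_32BIT	2	/* field value, NOT shifted bits */

int main(void)
{
	uint32_t wrong = LENGTH_SELECT_32BIT;			/* sets bit 1 */
	uint32_t right = LENGTH_SELECT(LENGTH_SELECT_32BIT);	/* bits 28:27 */

	printf("wrong barcfg: 0x%08x\n", wrong);	/* 0x00000002 */
	printf("right barcfg: 0x%08x\n", right);	/* 0x10000000 */
	return 0;
}
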
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 7a549b834e97..31f896c4aa26 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1761,7 +1761,7 @@ static void nv_get_stats(int cpu, struct fe_priv *np,
/*
* nv_get_stats64: dev->ndo_get_stats64 function
* Get latest stats value from the nic.
- * Called with read_lock(&dev_base_lock) held for read -
+ * Called with rcu_read_lock() held -
* only synchronized against unregister_netdevice.
*/
static void
@@ -3090,7 +3090,7 @@ static void set_bufsize(struct net_device *dev)
/*
* nv_change_mtu: dev->change_mtu function
- * Called with dev_base_lock held for read.
+ * Called with RTNL held for read.
*/
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 9ffef2e06885..2ccc2c2a06e3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -76,6 +76,8 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err);
+bool ionic_notifyq_service(struct ionic_cq *cq);
+bool ionic_adminq_service(struct ionic_cq *cq);
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_wait);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index c49aa358e424..6ba8d4aca0a0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -93,6 +93,7 @@ static void ionic_unmap_bars(struct ionic *ionic)
bars[i].len = 0;
}
}
+ ionic->num_bars = 0;
}
void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num)
@@ -215,15 +216,17 @@ out:
static void ionic_clear_pci(struct ionic *ionic)
{
- ionic->idev.dev_info_regs = NULL;
- ionic->idev.dev_cmd_regs = NULL;
- ionic->idev.intr_status = NULL;
- ionic->idev.intr_ctrl = NULL;
-
- ionic_unmap_bars(ionic);
- pci_release_regions(ionic->pdev);
+ if (ionic->num_bars) {
+ ionic->idev.dev_info_regs = NULL;
+ ionic->idev.dev_cmd_regs = NULL;
+ ionic->idev.intr_status = NULL;
+ ionic->idev.intr_ctrl = NULL;
+
+ ionic_unmap_bars(ionic);
+ pci_release_regions(ionic->pdev);
+ }
- if (atomic_read(&ionic->pdev->enable_cnt) > 0)
+ if (pci_is_enabled(ionic->pdev))
pci_disable_device(ionic->pdev);
}
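
ionic_clear_pci() can now be reached from error paths where the BARs were never mapped, so the unmap/release half is keyed off num_bars (zeroed by ionic_unmap_bars() above) and the disable keyed off pci_is_enabled() rather than peeking at the enable_cnt internals. The shape of that idempotent-teardown pattern, in a hypothetical driver:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical device state standing in for struct ionic. */
struct dev_state {
	unsigned int num_bars;	/* 0 until BARs are mapped */
	bool enabled;		/* stands in for pci_is_enabled() */
	void *regs;
};

/* Safe to call from any error path, however far setup got. */
static void dev_teardown(struct dev_state *d)
{
	if (d->num_bars) {	/* only undo what was actually done */
		d->regs = NULL;
		d->num_bars = 0;	/* mirrors ionic_unmap_bars() */
	}
	if (d->enabled)
		d->enabled = false;	/* mirrors pci_disable_device() */
}

int main(void)
{
	struct dev_state d = { .num_bars = 0, .enabled = true };

	dev_teardown(&d);	/* BARs never mapped: skips the unmap */
	dev_teardown(&d);	/* second call is a harmless no-op */
	return 0;
}
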
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 91327ef670c7..c3ae11a48024 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -113,8 +113,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
- struct dentry *intr_dentry, *stats_dentry;
struct ionic_dev *idev = &lif->ionic->idev;
+ struct dentry *intr_dentry, *stats_dentry;
struct debugfs_regset32 *intr_ctrl_regset;
struct ionic_intr_info *intr = &qcq->intr;
struct debugfs_blob_wrapper *desc_blob;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 1e7c71f7f081..874499337132 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -319,22 +319,32 @@ do_check_time:
u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
+ if (!idev->dev_cmd_regs)
+ return (u8)PCI_ERROR_RESPONSE;
return ioread8(&idev->dev_cmd_regs->comp.comp.status);
}
bool ionic_dev_cmd_done(struct ionic_dev *idev)
{
+ if (!idev->dev_cmd_regs)
+ return false;
return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
}
void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
{
+ if (!idev->dev_cmd_regs)
+ return;
memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
}
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
{
idev->opcode = cmd->cmd.opcode;
+
+ if (!idev->dev_cmd_regs)
+ return;
+
memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
iowrite32(0, &idev->dev_cmd_regs->done);
iowrite32(1, &idev->dev_cmd_regs->doorbell);
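
Each dev_cmd accessor above now bails out when dev_cmd_regs has been unmapped (for example around a reset), with the status read reporting the low byte of PCI_ERROR_RESPONSE, the all-ones pattern a dead PCI device would return anyway. A sketch of the guard pattern, with plain pointers standing in for the __iomem mappings:

#include <stdio.h>
#include <stdint.h>

#define PCI_ERROR_RESPONSE 0xFFFFFFFFUL	/* same value the kernel uses */

struct cmd_regs { uint8_t status; uint32_t done; };

struct idev { struct cmd_regs *dev_cmd_regs; };

/* Reads against an unmapped window report "device gone" rather
 * than dereferencing NULL. */
static uint8_t dev_cmd_status(struct idev *idev)
{
	if (!idev->dev_cmd_regs)
		return (uint8_t)PCI_ERROR_RESPONSE;	/* 0xFF */
	return idev->dev_cmd_regs->status;
}

int main(void)
{
	struct idev idev = { .dev_cmd_regs = NULL };

	printf("status with regs unmapped: 0x%02x\n", dev_cmd_status(&idev));
	return 0;
}
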
@@ -619,43 +629,25 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
cq->desc_size = desc_size;
cq->tail_idx = 0;
cq->done_color = 1;
+ cq->idev = &lif->ionic->idev;
return 0;
}
-void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa)
-{
- struct ionic_cq_info *cur;
- unsigned int i;
-
- cq->base = base;
- cq->base_pa = base_pa;
-
- for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
- cur->cq_desc = base + (i * cq->desc_size);
-}
-
-void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
-{
- cq->bound_q = q;
-}
-
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg)
{
- struct ionic_cq_info *cq_info;
unsigned int work_done = 0;
if (work_to_do == 0)
return 0;
- cq_info = &cq->info[cq->tail_idx];
- while (cb(cq, cq_info)) {
+ while (cb(cq)) {
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
+
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
- cq_info = &cq->info[cq->tail_idx];
if (++work_done >= work_to_do)
break;
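
The service loop above tracks ring ownership with a color bit: every completion the hardware posts carries the current color, and the consumer flips its expected color each time the tail wraps, so stale entries from the previous lap never match. A compact userspace model of that convention, on an arbitrary 8-entry ring:

#include <stdio.h>
#include <stdbool.h>

#define NDESC 8	/* power of two, as the driver requires */

struct comp { bool color; int data; };

static struct comp ring[NDESC];
static unsigned int tail;
static bool done_color = true;

/* Consumer: an entry is valid only while its color matches ours;
 * flip the expected color whenever the tail wraps, as the loop
 * in ionic_cq_service() does. */
static int service(void)
{
	int n = 0;

	while (ring[tail].color == done_color) {
		n++;
		if (tail == NDESC - 1)
			done_color = !done_color;
		tail = (tail + 1) & (NDESC - 1);
	}
	return n;
}

/* "Hardware": posts completions, flipping its color on each wrap. */
static void post(int count)
{
	static unsigned int head;
	static bool hw_color = true;

	while (count--) {
		ring[head].color = hw_color;
		head = (head + 1) & (NDESC - 1);
		if (head == 0)
			hw_color = !hw_color;
	}
}

int main(void)
{
	post(6);
	printf("first pass serviced %d\n", service());	/* 6 */
	post(4);					/* wraps the ring */
	printf("second pass serviced %d\n", service());	/* 4, not 8 */
	return 0;
}
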
@@ -682,7 +674,6 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
return -EINVAL;
q->lif = lif;
- q->idev = idev;
q->index = index;
q->num_descs = num_descs;
q->desc_size = desc_size;
@@ -696,53 +687,11 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
return 0;
}
-void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-{
- struct ionic_desc_info *cur;
- unsigned int i;
-
- q->base = base;
- q->base_pa = base_pa;
-
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
- cur->desc = base + (i * q->desc_size);
-}
-
-void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa)
-{
- struct ionic_desc_info *cur;
- unsigned int i;
-
- q->cmb_base = base;
- q->cmb_base_pa = base_pa;
-
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
- cur->cmb_desc = base + (i * q->desc_size);
-}
-
-void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
-{
- struct ionic_desc_info *cur;
- unsigned int i;
-
- q->sg_base = base;
- q->sg_base_pa = base_pa;
-
- for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
- cur->sg_desc = base + (i * q->sg_desc_size);
-}
-
-void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
- void *cb_arg)
+void ionic_q_post(struct ionic_queue *q, bool ring_doorbell)
{
- struct ionic_desc_info *desc_info;
struct ionic_lif *lif = q->lif;
struct device *dev = q->dev;
- desc_info = &q->info[q->head_idx];
- desc_info->cb = cb;
- desc_info->cb_arg = cb_arg;
-
q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
@@ -761,7 +710,7 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
}
}
-static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
+bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
{
unsigned int mask, tail, head;
@@ -771,37 +720,3 @@ static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
return ((pos - tail) & mask) < ((head - tail) & mask);
}
-
-void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
- unsigned int stop_index)
-{
- struct ionic_desc_info *desc_info;
- ionic_desc_cb cb;
- void *cb_arg;
- u16 index;
-
- /* check for empty queue */
- if (q->tail_idx == q->head_idx)
- return;
-
- /* stop index must be for a descriptor that is not yet completed */
- if (unlikely(!ionic_q_is_posted(q, stop_index)))
- dev_err(q->dev,
- "ionic stop is not posted %s stop %u tail %u head %u\n",
- q->name, stop_index, q->tail_idx, q->head_idx);
-
- do {
- desc_info = &q->info[q->tail_idx];
- index = q->tail_idx;
- q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
-
- cb = desc_info->cb;
- cb_arg = desc_info->cb_arg;
-
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
-
- if (cb)
- cb(q, desc_info, cq_info, cb_arg);
- } while (index != stop_index);
-}
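
ionic_q_is_posted() is now shared with the Tx completion path, so it loses its static. Its test, ((pos - tail) & mask) < ((head - tail) & mask), is wrap-safe because the subtraction and mask normalize both sides to an offset from tail: pos lies in the posted window [tail, head) exactly when its offset is strictly below the fill level. A few worked cases on an assumed 16-entry ring:

#include <stdio.h>
#include <stdbool.h>

#define NUM_DESCS 16u	/* power of two */

static bool q_is_posted(unsigned int tail, unsigned int head,
			unsigned int pos)
{
	unsigned int mask = NUM_DESCS - 1;

	/* Offsets from tail; strictly less than the fill level means
	 * pos sits inside [tail, head). */
	return ((pos - tail) & mask) < ((head - tail) & mask);
}

int main(void)
{
	/* No wrap: tail=2, head=7 covers positions 2..6. */
	printf("%d\n", q_is_posted(2, 7, 4));	/* 1 */
	printf("%d\n", q_is_posted(2, 7, 7));	/* 0: head not yet posted */

	/* Wrapped: tail=14, head=3 covers 14, 15, 0, 1, 2. */
	printf("%d\n", q_is_posted(14, 3, 0));	/* 1 */
	printf("%d\n", q_is_posted(14, 3, 5));	/* 0 */
	return 0;
}
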
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 2667e1cde16b..f30eee4a5a80 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
+#include <linux/bpf_trace.h>
#include "ionic_if.h"
#include "ionic_regs.h"
@@ -15,9 +16,10 @@
#define IONIC_MAX_TX_DESC 8192
#define IONIC_MAX_RX_DESC 16384
#define IONIC_MIN_TXRX_DESC 64
-#define IONIC_DEF_TXRX_DESC 4096
+#define IONIC_DEF_TXRX_DESC 1024
#define IONIC_RX_FILL_THRESHOLD 16
#define IONIC_RX_FILL_DIV 8
+#define IONIC_TSO_DESCS_NEEDED 44 /* 64K TSO @1500B */
#define IONIC_LIFS_MAX 1024
#define IONIC_WATCHDOG_SECS 5
#define IONIC_ITR_COAL_USEC_DEFAULT 64
@@ -120,11 +122,13 @@ static_assert(sizeof(struct ionic_log_event) == 64);
/* I/O */
static_assert(sizeof(struct ionic_txq_desc) == 16);
static_assert(sizeof(struct ionic_txq_sg_desc) == 128);
+static_assert(sizeof(struct ionic_txq_sg_desc_v1) == 256);
static_assert(sizeof(struct ionic_txq_comp) == 16);
static_assert(sizeof(struct ionic_rxq_desc) == 16);
static_assert(sizeof(struct ionic_rxq_sg_desc) == 128);
static_assert(sizeof(struct ionic_rxq_comp) == 16);
+static_assert(sizeof(struct ionic_rxq_comp) == sizeof(struct ionic_txq_comp));
/* SR/IOV */
static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
@@ -173,21 +177,8 @@ struct ionic_dev {
struct ionic_devinfo dev_info;
};
-struct ionic_cq_info {
- union {
- void *cq_desc;
- struct ionic_admin_comp *admincq;
- struct ionic_notifyq_event *notifyq;
- };
-};
-
struct ionic_queue;
struct ionic_qcq;
-struct ionic_desc_info;
-
-typedef void (*ionic_desc_cb)(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg);
#define IONIC_MAX_BUF_LEN ((u16)-1)
#define IONIC_PAGE_SIZE PAGE_SIZE
@@ -195,6 +186,11 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
__GFP_COMP | __GFP_MEMALLOC)
+#define IONIC_XDP_MAX_LINEAR_MTU (IONIC_PAGE_SIZE - \
+ (VLAN_ETH_HLEN + \
+ XDP_PACKET_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
+
struct ionic_buf_info {
struct page *page;
dma_addr_t dma_addr;
@@ -202,26 +198,25 @@ struct ionic_buf_info {
u32 len;
};
-#define IONIC_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1)
+#define IONIC_TX_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1)
+#define IONIC_RX_MAX_FRAGS (1 + IONIC_RX_MAX_SG_ELEMS)
-struct ionic_desc_info {
- union {
- void *desc;
- struct ionic_txq_desc *txq_desc;
- struct ionic_rxq_desc *rxq_desc;
- struct ionic_admin_cmd *adminq_desc;
- };
- void __iomem *cmb_desc;
- union {
- void *sg_desc;
- struct ionic_txq_sg_desc *txq_sg_desc;
- struct ionic_rxq_sg_desc *rxq_sgl_desc;
- };
+struct ionic_tx_desc_info {
unsigned int bytes;
unsigned int nbufs;
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ enum xdp_action act;
struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1];
- ionic_desc_cb cb;
- void *cb_arg;
+};
+
+struct ionic_rx_desc_info {
+ unsigned int nbufs;
+ struct ionic_buf_info bufs[IONIC_RX_MAX_FRAGS];
+};
+
+struct ionic_admin_desc_info {
+ void *ctx;
};
#define IONIC_QUEUE_NAME_MAX_SZ 16
@@ -229,7 +224,12 @@ struct ionic_desc_info {
struct ionic_queue {
struct device *dev;
struct ionic_lif *lif;
- struct ionic_desc_info *info;
+ union {
+ void *info;
+ struct ionic_tx_desc_info *tx_info;
+ struct ionic_rx_desc_info *rx_info;
+ struct ionic_admin_desc_info *admin_info;
+ };
u64 dbval;
unsigned long dbell_deadline;
unsigned long dbell_jiffies;
@@ -239,26 +239,33 @@ struct ionic_queue {
unsigned int num_descs;
unsigned int max_sg_elems;
u64 features;
- u64 drop;
- struct ionic_dev *idev;
unsigned int type;
unsigned int hw_index;
unsigned int hw_type;
+ bool xdp_flush;
union {
void *base;
struct ionic_txq_desc *txq;
struct ionic_rxq_desc *rxq;
struct ionic_admin_cmd *adminq;
};
- void __iomem *cmb_base;
+ union {
+ void __iomem *cmb_base;
+ struct ionic_txq_desc __iomem *cmb_txq;
+ struct ionic_rxq_desc __iomem *cmb_rxq;
+ };
union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
+ struct ionic_txq_sg_desc_v1 *txq_sgl_v1;
struct ionic_rxq_sg_desc *rxq_sgl;
};
+ struct xdp_rxq_info *xdp_rxq_info;
+ struct ionic_queue *partner;
dma_addr_t base_pa;
dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa;
+ u64 drop;
unsigned int desc_size;
unsigned int sg_desc_size;
unsigned int pid;
@@ -280,7 +287,6 @@ struct ionic_intr_info {
struct ionic_cq {
struct ionic_lif *lif;
- struct ionic_cq_info *info;
struct ionic_queue *bound_q;
struct ionic_intr_info *bound_intr;
u16 tail_idx;
@@ -289,6 +295,7 @@ struct ionic_cq {
unsigned int desc_size;
void *base;
dma_addr_t base_pa;
+ struct ionic_dev *idev;
} ____cacheline_aligned_in_smp;
struct ionic;
@@ -363,23 +370,20 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
unsigned int num_descs, size_t desc_size);
void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa);
void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q);
-typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+typedef bool (*ionic_cq_cb)(struct ionic_cq *cq);
typedef void (*ionic_cq_done_cb)(void *done_arg);
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg);
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do);
int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
struct ionic_queue *q, unsigned int index, const char *name,
unsigned int num_descs, size_t desc_size,
size_t sg_desc_size, unsigned int pid);
-void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
-void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa);
-void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
-void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
- void *cb_arg);
-void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
- unsigned int stop_index);
+void ionic_q_post(struct ionic_queue *q, bool ring_doorbell);
+bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos);
+
int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev);
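
IONIC_XDP_MAX_LINEAR_MTU, added above, is the largest MTU whose frame still fits in one page once the VLAN Ethernet header, the XDP headroom, and the tailroom for skb_shared_info are carved out; anything larger needs an XDP program that declares frags support. The arithmetic, with assumed x86-64 values (the real numbers are config-dependent):

#include <stdio.h>

int main(void)
{
	/* Assumed values; the real ones come from the kernel config. */
	unsigned int page_size = 4096;		/* IONIC_PAGE_SIZE */
	unsigned int vlan_eth_hlen = 18;	/* 14-byte header + 4-byte tag */
	unsigned int xdp_headroom = 256;	/* XDP_PACKET_HEADROOM */
	unsigned int shinfo = 320;		/* SKB_DATA_ALIGN(shared_info) */

	printf("max linear XDP MTU ~= %u\n",
	       page_size - (vlan_eth_hlen + xdp_headroom + shinfo));
	/* ~3502 bytes: anything larger needs xdp_has_frags */
	return 0;
}
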
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index cd3c0b01402e..91183965a6b7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -90,18 +90,23 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *p)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_dev *idev;
unsigned int offset;
unsigned int size;
regs->version = IONIC_DEV_CMD_REG_VERSION;
+ idev = &lif->ionic->idev;
+ if (!idev->dev_info_regs)
+ return;
+
offset = 0;
size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
offset += size;
size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
- memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
+ memcpy_fromio(p + offset, idev->dev_cmd_regs->words, size);
}
static void ionic_get_link_ext_stats(struct net_device *netdev,
@@ -721,6 +726,11 @@ static int ionic_set_channels(struct net_device *netdev,
ionic_init_queue_params(lif, &qparam);
+ if ((ch->rx_count || ch->tx_count) && lif->xdp_prog) {
+ netdev_info(lif->netdev, "Split Tx/Rx interrupts not available when using XDP\n");
+ return -EOPNOTSUPP;
+ }
+
if (ch->rx_count != ch->tx_count) {
netdev_info(netdev, "The rx and tx count must be equal\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
index 5f40324cd243..3c209c1a2337 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
@@ -109,6 +109,11 @@ int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw,
dl = priv_to_devlink(ionic);
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+ if (!idev->dev_cmd_regs) {
+ err = -ENXIO;
+ goto err_out;
+ }
+
buf_sz = sizeof(idev->dev_cmd_regs->data);
netdev_dbg(netdev,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index cf2d5ad7b68c..7f0c6cdc375e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -46,18 +46,26 @@ static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
+static int ionic_xdp_queues_config(struct ionic_lif *lif);
+static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);
+
static void ionic_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
- struct ionic_intr_info *intr;
struct dim_cq_moder cur_moder;
+ struct ionic_intr_info *intr;
struct ionic_qcq *qcq;
struct ionic_lif *lif;
+ struct ionic_queue *q;
u32 new_coal;
- cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
qcq = container_of(dim, struct ionic_qcq, dim);
- lif = qcq->q.lif;
+ q = &qcq->q;
+ if (q->type == IONIC_QTYPE_RXQ)
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ else
+ cur_moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+ lif = q->lif;
new_coal = ionic_coal_usec_to_hw(lif->ionic, cur_moder.usec);
new_coal = new_coal ? new_coal : 1;
@@ -422,10 +430,9 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->sg_base_pa = 0;
}
+ ionic_xdp_unregister_rxq_info(&qcq->q);
ionic_qcq_intr_free(lif, qcq);
- vfree(qcq->cq.info);
- qcq->cq.info = NULL;
vfree(qcq->q.info);
qcq->q.info = NULL;
}
@@ -529,14 +536,11 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int num_descs, unsigned int desc_size,
unsigned int cq_desc_size,
unsigned int sg_desc_size,
+ unsigned int desc_info_size,
unsigned int pid, struct ionic_qcq **qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
struct device *dev = lif->ionic->dev;
- void *q_base, *cq_base, *sg_base;
- dma_addr_t cq_base_pa = 0;
- dma_addr_t sg_base_pa = 0;
- dma_addr_t q_base_pa = 0;
struct ionic_qcq *new;
int err;
@@ -552,7 +556,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->q.dev = dev;
new->flags = flags;
- new->q.info = vcalloc(num_descs, sizeof(*new->q.info));
+ new->q.info = vcalloc(num_descs, desc_info_size);
if (!new->q.info) {
netdev_err(lif->netdev, "Cannot allocate queue info\n");
err = -ENOMEM;
@@ -571,19 +575,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = ionic_alloc_qcq_interrupt(lif, new);
if (err)
- goto err_out;
-
- new->cq.info = vcalloc(num_descs, sizeof(*new->cq.info));
- if (!new->cq.info) {
- netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
- err = -ENOMEM;
- goto err_out_free_irq;
- }
+ goto err_out_free_q_info;
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
netdev_err(lif->netdev, "Cannot initialize completion queue\n");
- goto err_out_free_cq_info;
+ goto err_out_free_irq;
}
if (flags & IONIC_QCQ_F_NOTIFYQ) {
@@ -601,16 +598,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->q_base) {
netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
err = -ENOMEM;
- goto err_out_free_cq_info;
+ goto err_out_free_irq;
}
- q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
- q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
- ionic_q_map(&new->q, q_base, q_base_pa);
-
- cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
- cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
- ionic_cq_map(&new->cq, cq_base, cq_base_pa);
- ionic_cq_bind(&new->cq, &new->q);
+ new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
+ new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
+
+ /* Base the NotifyQ cq.base off the ALIGN()ed q.base */
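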
+ new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE);
+ new->cq.base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
+ new->cq.bound_q = &new->q;
} else {
/* regular DMA q descriptors */
new->q_size = PAGE_SIZE + (num_descs * desc_size);
@@ -619,11 +615,10 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->q_base) {
netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
err = -ENOMEM;
- goto err_out_free_cq_info;
+ goto err_out_free_irq;
}
- q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
- q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
- ionic_q_map(&new->q, q_base, q_base_pa);
+ new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
+ new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
if (flags & IONIC_QCQ_F_CMB_RINGS) {
/* on-chip CMB q descriptors */
@@ -648,7 +643,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
}
new->cmb_q_base_pa -= idev->phy_cmb_pages;
- ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa);
+ new->q.cmb_base = new->cmb_q_base;
+ new->q.cmb_base_pa = new->cmb_q_base_pa;
}
/* cq DMA descriptors */
@@ -660,10 +656,9 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = -ENOMEM;
goto err_out_free_q;
}
- cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
- cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
- ionic_cq_map(&new->cq, cq_base, cq_base_pa);
- ionic_cq_bind(&new->cq, &new->q);
+ new->cq.base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
+ new->cq.base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
+ new->cq.bound_q = &new->q;
}
if (flags & IONIC_QCQ_F_SG) {
@@ -675,13 +670,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = -ENOMEM;
goto err_out_free_cq;
}
- sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
- sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
- ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
+ new->q.sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
+ new->q.sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
}
INIT_WORK(&new->dim.work, ionic_dim_work);
- new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
*qcq = new;
@@ -695,8 +689,6 @@ err_out_free_q:
ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
}
dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
-err_out_free_cq_info:
- vfree(new->cq.info);
err_out_free_irq:
if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
@@ -722,7 +714,9 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
IONIC_ADMINQ_LENGTH,
sizeof(struct ionic_admin_cmd),
sizeof(struct ionic_admin_comp),
- 0, lif->kern_pid, &lif->adminqcq);
+ 0,
+ sizeof(struct ionic_admin_desc_info),
+ lif->kern_pid, &lif->adminqcq);
if (err)
return err;
ionic_debugfs_add_qcq(lif, lif->adminqcq);
@@ -733,7 +727,9 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
flags, IONIC_NOTIFYQ_LENGTH,
sizeof(struct ionic_notifyq_cmd),
sizeof(union ionic_notifyq_comp),
- 0, lif->kern_pid, &lif->notifyqcq);
+ 0,
+ sizeof(struct ionic_admin_desc_info),
+ lif->kern_pid, &lif->notifyqcq);
if (err)
goto err_out;
ionic_debugfs_add_qcq(lif, lif->notifyqcq);
@@ -862,8 +858,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.type = q->type,
.ver = lif->qtype_info[q->type].version,
.index = cpu_to_le32(q->index),
- .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
- IONIC_QINIT_F_SG),
+ .flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
.intr_index = cpu_to_le16(cq->bound_intr->index),
.pid = cpu_to_le16(q->pid),
.ring_size = ilog2(q->num_descs),
@@ -875,6 +870,13 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
};
int err;
+ q->partner = &lif->txqcqs[q->index]->q;
+ q->partner->partner = q;
+
+ if (!lif->xdp_prog ||
+ (lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags))
+ ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG);
+
if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
@@ -945,6 +947,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &txq);
if (err)
goto err_qcq_alloc;
@@ -1004,6 +1007,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &rxq);
if (err)
goto err_qcq_alloc;
@@ -1157,71 +1161,6 @@ int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
}
-static bool ionic_notifyq_service(struct ionic_cq *cq,
- struct ionic_cq_info *cq_info)
-{
- union ionic_notifyq_comp *comp = cq_info->cq_desc;
- struct ionic_deferred_work *work;
- struct net_device *netdev;
- struct ionic_queue *q;
- struct ionic_lif *lif;
- u64 eid;
-
- q = cq->bound_q;
- lif = q->info[0].cb_arg;
- netdev = lif->netdev;
- eid = le64_to_cpu(comp->event.eid);
-
- /* Have we run out of new completions to process? */
- if ((s64)(eid - lif->last_eid) <= 0)
- return false;
-
- lif->last_eid = eid;
-
- dev_dbg(lif->ionic->dev, "notifyq event:\n");
- dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
- comp, sizeof(*comp), true);
-
- switch (le16_to_cpu(comp->event.ecode)) {
- case IONIC_EVENT_LINK_CHANGE:
- ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
- break;
- case IONIC_EVENT_RESET:
- if (lif->ionic->idev.fw_status_ready &&
- !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
- !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- netdev_err(lif->netdev, "Reset event dropped\n");
- clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
- } else {
- work->type = IONIC_DW_TYPE_LIF_RESET;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
- }
- }
- break;
- default:
- netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
- comp->event.ecode, eid);
- break;
- }
-
- return true;
-}
-
-static bool ionic_adminq_service(struct ionic_cq *cq,
- struct ionic_cq_info *cq_info)
-{
- struct ionic_admin_comp *comp = cq_info->cq_desc;
-
- if (!color_match(comp->color, cq->done_color))
- return false;
-
- ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
-
- return true;
-}
-
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
@@ -1252,8 +1191,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_rx_service, NULL, NULL);
if (lif->hwstamp_txq)
- tx_work = ionic_cq_service(&lif->hwstamp_txq->cq, budget,
- ionic_tx_service, NULL, NULL);
+ tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget);
work_done = max(max(n_work, a_work), max(rx_work, tx_work));
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -1640,6 +1578,12 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
netdev->priv_flags |= IFF_UNICAST_FLT |
IFF_LIVE_ADDR_CHANGE;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
return 0;
}
@@ -1777,6 +1721,21 @@ static int ionic_start_queues_reconfig(struct ionic_lif *lif)
return err;
}
+static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu,
+ struct bpf_prog *xdp_prog)
+{
+ if (!xdp_prog)
+ return true;
+
+ if (mtu <= IONIC_XDP_MAX_LINEAR_MTU)
+ return true;
+
+ if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags)
+ return true;
+
+ return false;
+}
+
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -1789,8 +1748,13 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
.mtu = cpu_to_le32(new_mtu),
},
};
+ struct bpf_prog *xdp_prog;
int err;
+ xdp_prog = READ_ONCE(lif->xdp_prog);
+ if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog))
+ return -EINVAL;
+
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
@@ -2070,6 +2034,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &lif->txqcqs[i]);
if (err)
goto err_out;
@@ -2101,6 +2066,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2166,6 +2132,10 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
int derr = 0;
int i, err;
+ err = ionic_xdp_queues_config(lif);
+ if (err)
+ return err;
+
for (i = 0; i < lif->nxqs; i++) {
if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
@@ -2211,6 +2181,8 @@ err_out:
derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
}
+ ionic_xdp_queues_config(lif);
+
return err;
}
@@ -2668,11 +2640,151 @@ static void ionic_vf_attr_replay(struct ionic_lif *lif)
ionic_vf_start(ionic);
}
+static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
+{
+ struct xdp_rxq_info *xi;
+
+ if (!q->xdp_rxq_info)
+ return;
+
+ xi = q->xdp_rxq_info;
+ q->xdp_rxq_info = NULL;
+
+ xdp_rxq_info_unreg(xi);
+ kfree(xi);
+}
+
+static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
+{
+ struct xdp_rxq_info *rxq_info;
+ int err;
+
+ rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
+ if (!rxq_info)
+ return -ENOMEM;
+
+ err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
+ if (err) {
+ dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
+ q->index, err);
+ goto err_out;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err) {
+ dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
+ q->index, err);
+ xdp_rxq_info_unreg(rxq_info);
+ goto err_out;
+ }
+
+ q->xdp_rxq_info = rxq_info;
+
+ return 0;
+
+err_out:
+ kfree(rxq_info);
+ return err;
+}
+
+static int ionic_xdp_queues_config(struct ionic_lif *lif)
+{
+ unsigned int i;
+ int err;
+
+ if (!lif->rxqcqs)
+ return 0;
+
+ /* There's no need to rework memory if we're not going to or from
+ * a NULL program. If there is no lif->xdp_prog, there should also
+ * be no q.xdp_rxq_info; this way we don't need to keep an
+ * *xdp_prog in every queue struct.
+ */
+ if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info)
+ return 0;
+
+ for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
+ struct ionic_queue *q = &lif->rxqcqs[i]->q;
+
+ if (q->xdp_rxq_info) {
+ ionic_xdp_unregister_rxq_info(q);
+ continue;
+ }
+
+ err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
+ if (err) {
+ dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
+ i, err);
+ goto err_out;
+ }
+ }
+
+ return 0;
+
+err_out:
+ for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++)
+ ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q);
+
+ return err;
+}
+
+static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct bpf_prog *old_prog;
+ u32 maxfs;
+
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+#define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts"
+ NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT);
+ netdev_info(lif->netdev, XDP_ERR_SPLIT);
+ return -EOPNOTSUPP;
+ }
+
+ if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) {
+#define XDP_ERR_MTU "MTU is too large for XDP without frags support"
+ NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU);
+ netdev_info(lif->netdev, XDP_ERR_MTU);
+ return -EINVAL;
+ }
+
+ maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
+ if (bpf->prog && !(bpf->prog->aux && bpf->prog->aux->xdp_has_frags))
+ maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU);
+ netdev->max_mtu = maxfs;
+
+ if (!netif_running(netdev)) {
+ old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ } else {
+ mutex_lock(&lif->queue_lock);
+ ionic_stop_queues_reconfig(lif);
+ old_prog = xchg(&lif->xdp_prog, bpf->prog);
+ ionic_start_queues_reconfig(lif);
+ mutex_unlock(&lif->queue_lock);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return ionic_xdp_config(netdev, bpf);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops ionic_netdev_ops = {
.ndo_open = ionic_open,
.ndo_stop = ionic_stop,
.ndo_eth_ioctl = ionic_eth_ioctl,
.ndo_start_xmit = ionic_start_xmit,
+ .ndo_bpf = ionic_xdp,
+ .ndo_xdp_xmit = ionic_xdp_xmit,
.ndo_get_stats64 = ionic_get_stats64,
.ndo_set_rx_mode = ionic_ndo_set_rx_mode,
.ndo_set_features = ionic_set_features,
@@ -2755,6 +2867,8 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
swap(a->q.base, b->q.base);
swap(a->q.base_pa, b->q.base_pa);
swap(a->q.info, b->q.info);
+ swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
+ swap(a->q.partner, b->q.partner);
swap(a->q_base, b->q_base);
swap(a->q_base_pa, b->q_base_pa);
swap(a->q_size, b->q_size);
@@ -2770,7 +2884,6 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
swap(a->cq.desc_size, b->cq.desc_size);
swap(a->cq.base, b->cq.base);
swap(a->cq.base_pa, b->cq.base_pa);
- swap(a->cq.info, b->cq.info);
swap(a->cq_base, b->cq_base);
swap(a->cq_base_pa, b->cq_base_pa);
swap(a->cq_size, b->cq_size);
@@ -2834,6 +2947,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &lif->txqcqs[i]);
if (err)
goto err_out;
@@ -2842,6 +2956,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &tx_qcqs[i]);
if (err)
goto err_out;
@@ -2863,6 +2978,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
4, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &lif->rxqcqs[i]);
if (err)
goto err_out;
@@ -2871,6 +2987,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
+ sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &rx_qcqs[i]);
if (err)
goto err_out;
@@ -3391,9 +3508,12 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
napi_enable(&qcq->napi);
- if (qcq->flags & IONIC_QCQ_F_INTR)
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ irq_set_affinity_hint(qcq->intr.vector,
+ &qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -3442,7 +3562,7 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif)
dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
/* preset the callback info */
- q->info[0].cb_arg = lif;
+ q->admin_info[0].ctx = lif;
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -3559,7 +3679,10 @@ int ionic_lif_init(struct ionic_lif *lif)
goto err_out_notifyq_deinit;
}
- err = ionic_init_nic_features(lif);
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
+ err = ionic_set_nic_features(lif, lif->netdev->features);
+ else
+ err = ionic_init_nic_features(lif);
if (err)
goto err_out_notifyq_deinit;
@@ -3691,6 +3814,7 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
union ionic_q_identity __iomem *q_ident;
struct ionic *ionic = lif->ionic;
struct ionic_dev *idev;
+ u16 max_frags;
int qtype;
int err;
@@ -3758,17 +3882,16 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
qtype, qti->sg_desc_stride);
- if (qti->max_sg_elems >= IONIC_MAX_FRAGS) {
- qti->max_sg_elems = IONIC_MAX_FRAGS - 1;
- dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n",
- qtype, qti->max_sg_elems);
- }
+ if (qtype == IONIC_QTYPE_TXQ)
+ max_frags = IONIC_TX_MAX_FRAGS;
+ else if (qtype == IONIC_QTYPE_RXQ)
+ max_frags = IONIC_RX_MAX_FRAGS;
+ else
+ max_frags = 1;
- if (qti->max_sg_elems > MAX_SKB_FRAGS) {
- qti->max_sg_elems = MAX_SKB_FRAGS;
- dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n",
- qtype, qti->max_sg_elems);
- }
+ qti->max_sg_elems = min_t(u16, max_frags - 1, MAX_SKB_FRAGS);
+ dev_dbg(ionic->dev, "qtype %d max_sg_elems %d\n",
+ qtype, qti->max_sg_elems);
}
}
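
ionic_xdp_is_valid_mtu() and the max_mtu clamp in ionic_xdp_config() above enforce the same rule from both directions: an MTU change is refused when a loaded frag-less program could not take it, and loading such a program shrinks max_mtu to the linear limit up front. A sketch of that decision logic, with an assumed linear limit (see the IONIC_XDP_MAX_LINEAR_MTU arithmetic earlier):

#include <stdio.h>
#include <stdbool.h>

#define MAX_LINEAR_MTU 3502	/* assumed; config-dependent in reality */

struct prog { bool has_frags; };

/* Mirrors ionic_xdp_is_valid_mtu(): no program, a small enough MTU,
 * or a frags-capable program are all acceptable. */
static bool xdp_mtu_ok(const struct prog *p, unsigned int mtu)
{
	if (!p)
		return true;
	if (mtu <= MAX_LINEAR_MTU)
		return true;
	return p->has_frags;
}

int main(void)
{
	struct prog linear = { .has_frags = false };
	struct prog frags  = { .has_frags = true };

	printf("%d\n", xdp_mtu_ok(NULL, 9000));	/* 1: no program loaded */
	printf("%d\n", xdp_mtu_ok(&linear, 1500));	/* 1: fits one page */
	printf("%d\n", xdp_mtu_ok(&linear, 9000));	/* 0: -EINVAL */
	printf("%d\n", xdp_mtu_ok(&frags, 9000));	/* 1: frags allowed */
	return 0;
}
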
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 61548b3eea93..08f4266fe2aa 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -37,6 +37,7 @@ struct ionic_tx_stats {
u64 dma_map_err;
u64 hwstamp_valid;
u64 hwstamp_invalid;
+ u64 xdp_frames;
};
struct ionic_rx_stats {
@@ -51,6 +52,11 @@ struct ionic_rx_stats {
u64 alloc_err;
u64 hwstamp_valid;
u64 hwstamp_invalid;
+ u64 xdp_drop;
+ u64 xdp_aborted;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_redirect;
};
#define IONIC_QCQ_F_INITED BIT(0)
@@ -65,25 +71,25 @@ struct ionic_qcq {
void *q_base;
dma_addr_t q_base_pa;
u32 q_size;
+ u32 cq_size;
void *cq_base;
dma_addr_t cq_base_pa;
- u32 cq_size;
void *sg_base;
dma_addr_t sg_base_pa;
u32 sg_size;
+ unsigned int flags;
void __iomem *cmb_q_base;
phys_addr_t cmb_q_base_pa;
u32 cmb_q_size;
u32 cmb_pgid;
u32 cmb_order;
struct dim dim;
+ struct timer_list napi_deadline;
struct ionic_queue q;
struct ionic_cq cq;
- struct ionic_intr_info intr;
- struct timer_list napi_deadline;
struct napi_struct napi;
- unsigned int flags;
struct ionic_qcq *napi_qcq;
+ struct ionic_intr_info intr;
struct dentry *dentry;
};
@@ -135,6 +141,12 @@ struct ionic_lif_sw_stats {
u64 hw_rx_over_errors;
u64 hw_rx_missed_errors;
u64 hw_tx_aborted_errors;
+ u64 xdp_drop;
+ u64 xdp_aborted;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_redirect;
+ u64 xdp_frames;
};
enum ionic_lif_state_flags {
@@ -230,6 +242,7 @@ struct ionic_lif {
struct ionic_phc *phc;
struct dentry *dentry;
+ struct bpf_prog *xdp_prog;
};
struct ionic_phc {
@@ -314,7 +327,7 @@ static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
static inline bool ionic_txq_hwstamp_enabled(struct ionic_queue *q)
{
- return unlikely(q->features & IONIC_TXQ_F_HWSTAMP);
+ return q->features & IONIC_TXQ_F_HWSTAMP;
}
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 165ab08ad2dd..c1259324b0be 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -15,7 +15,7 @@
#include "ionic_debugfs.h"
MODULE_DESCRIPTION(IONIC_DRV_DESCRIPTION);
-MODULE_AUTHOR("Pensando Systems, Inc");
+MODULE_AUTHOR("Shannon Nelson <shannon.nelson@amd.com>");
MODULE_LICENSE("GPL");
static const char *ionic_error_to_str(enum ionic_status_code code)
@@ -190,7 +190,8 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
static void ionic_adminq_flush(struct ionic_lif *lif)
{
- struct ionic_desc_info *desc_info;
+ struct ionic_admin_desc_info *desc_info;
+ struct ionic_admin_cmd *desc;
unsigned long irqflags;
struct ionic_queue *q;
@@ -203,10 +204,10 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
q = &lif->adminqcq->q;
while (q->tail_idx != q->head_idx) {
- desc_info = &q->info[q->tail_idx];
- memset(desc_info->desc, 0, sizeof(union ionic_adminq_cmd));
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
+ desc = &q->adminq[q->tail_idx];
+ desc_info = &q->admin_info[q->tail_idx];
+ memset(desc, 0, sizeof(union ionic_adminq_cmd));
+ desc_info->ctx = NULL;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
}
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
@@ -246,25 +247,93 @@ static int ionic_adminq_check_err(struct ionic_lif *lif,
return err;
}
-static void ionic_adminq_cb(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info, void *cb_arg)
+bool ionic_notifyq_service(struct ionic_cq *cq)
{
- struct ionic_admin_ctx *ctx = cb_arg;
+ struct ionic_deferred_work *work;
+ union ionic_notifyq_comp *comp;
+ struct net_device *netdev;
+ struct ionic_queue *q;
+ struct ionic_lif *lif;
+ u64 eid;
+
+ comp = &((union ionic_notifyq_comp *)cq->base)[cq->tail_idx];
+
+ q = cq->bound_q;
+ lif = q->admin_info[0].ctx;
+ netdev = lif->netdev;
+ eid = le64_to_cpu(comp->event.eid);
+
+ /* Have we run out of new completions to process? */
+ if ((s64)(eid - lif->last_eid) <= 0)
+ return false;
+
+ lif->last_eid = eid;
+
+ dev_dbg(lif->ionic->dev, "notifyq event:\n");
+ dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
+ comp, sizeof(*comp), true);
+
+ switch (le16_to_cpu(comp->event.ecode)) {
+ case IONIC_EVENT_LINK_CHANGE:
+ ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
+ break;
+ case IONIC_EVENT_RESET:
+ if (lif->ionic->idev.fw_status_ready &&
+ !test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
+ !test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "Reset event dropped\n");
+ clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
+ } else {
+ work->type = IONIC_DW_TYPE_LIF_RESET;
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ }
+ }
+ break;
+ default:
+ netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
+ comp->event.ecode, eid);
+ break;
+ }
+
+ return true;
+}
+
+bool ionic_adminq_service(struct ionic_cq *cq)
+{
+ struct ionic_admin_desc_info *desc_info;
+ struct ionic_queue *q = cq->bound_q;
struct ionic_admin_comp *comp;
+ u16 index;
- if (!ctx)
- return;
+ comp = &((struct ionic_admin_comp *)cq->base)[cq->tail_idx];
- comp = cq_info->cq_desc;
+ if (!color_match(comp->color, cq->done_color))
+ return false;
- memcpy(&ctx->comp, comp, sizeof(*comp));
+ /* check for empty queue */
+ if (q->tail_idx == q->head_idx)
+ return false;
+
+ do {
+ desc_info = &q->admin_info[q->tail_idx];
+ index = q->tail_idx;
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+ if (likely(desc_info->ctx)) {
+ struct ionic_admin_ctx *ctx = desc_info->ctx;
- dev_dbg(q->dev, "comp admin queue command:\n");
- dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
- &ctx->comp, sizeof(ctx->comp), true);
+ memcpy(&ctx->comp, comp, sizeof(*comp));
- complete_all(&ctx->work);
+ dev_dbg(q->dev, "comp admin queue command:\n");
+ dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
+ &ctx->comp, sizeof(ctx->comp), true);
+ complete_all(&ctx->work);
+ desc_info->ctx = NULL;
+ }
+ } while (index != le16_to_cpu(comp->comp_index));
+
+ return true;
}
bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
@@ -298,7 +367,8 @@ bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
- struct ionic_desc_info *desc_info;
+ struct ionic_admin_desc_info *desc_info;
+ struct ionic_admin_cmd *desc;
unsigned long irqflags;
struct ionic_queue *q;
int err = 0;
@@ -320,14 +390,17 @@ int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
if (err)
goto err_out;
- desc_info = &q->info[q->head_idx];
- memcpy(desc_info->desc, &ctx->cmd, sizeof(ctx->cmd));
+ desc_info = &q->admin_info[q->head_idx];
+ desc_info->ctx = ctx;
+
+ desc = &q->adminq[q->head_idx];
+ memcpy(desc, &ctx->cmd, sizeof(ctx->cmd));
dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
&ctx->cmd, sizeof(ctx->cmd), true);
- ionic_q_post(q, true, ionic_adminq_cb, ctx);
+ ionic_q_post(q, true);
err_out:
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
@@ -416,6 +489,9 @@ static void ionic_dev_cmd_clean(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
+ if (!idev->dev_cmd_regs)
+ return;
+
iowrite32(0, &idev->dev_cmd_regs->doorbell);
memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd));
}
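
ionic_adminq_service() now does inline what the old ionic_q_service()/ionic_adminq_cb() pair did: a single completion's comp_index covers every descriptor posted up to that index, so the loop retires descriptors from the tail forward, waking only the ones whose ctx is still set. A userspace model of the batched retire:

#include <stdio.h>
#include <stddef.h>

#define NDESC 8u

struct desc_info { void *ctx; };

static struct desc_info info[NDESC];
static unsigned int tail, head;

/* Retire every descriptor up to and including comp_index, since one
 * admin completion may cover several posted commands. */
static void adminq_service(unsigned int comp_index)
{
	unsigned int index;

	if (tail == head)	/* empty queue: nothing to retire */
		return;
	do {
		index = tail;
		tail = (tail + 1) & (NDESC - 1);
		if (info[index].ctx) {
			printf("completing cmd at %u\n", index);
			info[index].ctx = NULL;	/* complete_all() in the driver */
		}
	} while (index != comp_index);
}

int main(void)
{
	static int ctx_a, ctx_b, ctx_c;

	info[0].ctx = &ctx_a;	/* three posted commands */
	info[1].ctx = &ctx_b;
	info[2].ctx = &ctx_c;
	head = 3;

	adminq_service(2);	/* one completion retires all three */
	return 0;
}
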
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 1f6022fb7679..0107599a9dd4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -27,6 +27,12 @@ static const struct ionic_stat_desc ionic_lif_stats_desc[] = {
IONIC_LIF_STAT_DESC(hw_rx_over_errors),
IONIC_LIF_STAT_DESC(hw_rx_missed_errors),
IONIC_LIF_STAT_DESC(hw_tx_aborted_errors),
+ IONIC_LIF_STAT_DESC(xdp_drop),
+ IONIC_LIF_STAT_DESC(xdp_aborted),
+ IONIC_LIF_STAT_DESC(xdp_pass),
+ IONIC_LIF_STAT_DESC(xdp_tx),
+ IONIC_LIF_STAT_DESC(xdp_redirect),
+ IONIC_LIF_STAT_DESC(xdp_frames),
};
static const struct ionic_stat_desc ionic_port_stats_desc[] = {
@@ -135,6 +141,7 @@ static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
IONIC_TX_STAT_DESC(csum_none),
IONIC_TX_STAT_DESC(csum),
IONIC_TX_STAT_DESC(vlan_inserted),
+ IONIC_TX_STAT_DESC(xdp_frames),
};
static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
@@ -149,6 +156,11 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(hwstamp_invalid),
IONIC_RX_STAT_DESC(dropped),
IONIC_RX_STAT_DESC(vlan_stripped),
+ IONIC_RX_STAT_DESC(xdp_drop),
+ IONIC_RX_STAT_DESC(xdp_aborted),
+ IONIC_RX_STAT_DESC(xdp_pass),
+ IONIC_RX_STAT_DESC(xdp_tx),
+ IONIC_RX_STAT_DESC(xdp_redirect),
};
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
@@ -171,6 +183,7 @@ static void ionic_add_lif_txq_stats(struct ionic_lif *lif, int q_num,
stats->tx_csum += txstats->csum;
stats->tx_hwstamp_valid += txstats->hwstamp_valid;
stats->tx_hwstamp_invalid += txstats->hwstamp_invalid;
+ stats->xdp_frames += txstats->xdp_frames;
}
static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
@@ -185,6 +198,11 @@ static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
stats->rx_csum_error += rxstats->csum_error;
stats->rx_hwstamp_valid += rxstats->hwstamp_valid;
stats->rx_hwstamp_invalid += rxstats->hwstamp_invalid;
+ stats->xdp_drop += rxstats->xdp_drop;
+ stats->xdp_aborted += rxstats->xdp_aborted;
+ stats->xdp_pass += rxstats->xdp_pass;
+ stats->xdp_tx += rxstats->xdp_tx;
+ stats->xdp_redirect += rxstats->xdp_redirect;
}
static void ionic_get_lif_stats(struct ionic_lif *lif,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 54cd96b035d6..5dba6d2d633c 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -5,27 +5,40 @@
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
+#include <net/netdev_queues.h>
#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
-static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
- ionic_desc_cb cb_func, void *cb_arg)
+static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
+ void *data, size_t len);
+
+static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
+ const skb_frag_t *frag,
+ size_t offset, size_t len);
+
+static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
+ struct ionic_tx_desc_info *desc_info);
+
+static void ionic_tx_clean(struct ionic_queue *q,
+ struct ionic_tx_desc_info *desc_info,
+ struct ionic_txq_comp *comp);
+
+static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
- ionic_q_post(q, ring_dbell, cb_func, cb_arg);
+ ionic_q_post(q, ring_dbell);
}
-static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
- ionic_desc_cb cb_func, void *cb_arg)
+static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell)
{
- ionic_q_post(q, ring_dbell, cb_func, cb_arg);
+ ionic_q_post(q, ring_dbell);
}
bool ionic_txq_poke_doorbell(struct ionic_queue *q)
{
- unsigned long now, then, dif;
struct netdev_queue *netdev_txq;
+ unsigned long now, then, dif;
struct net_device *netdev;
netdev = q->lif->netdev;
@@ -83,46 +96,61 @@ bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
return true;
}
-static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
+static inline struct ionic_txq_sg_elem *ionic_tx_sg_elems(struct ionic_queue *q)
+{
+ if (likely(q->sg_desc_size == sizeof(struct ionic_txq_sg_desc_v1)))
+ return q->txq_sgl_v1[q->head_idx].elems;
+ else
+ return q->txq_sgl[q->head_idx].elems;
+}
+
+static inline struct netdev_queue *q_to_ndq(struct net_device *netdev,
+ struct ionic_queue *q)
{
- return netdev_get_tx_queue(q->lif->netdev, q->index);
+ return netdev_get_tx_queue(netdev, q->index);
+}
+
+static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info)
+{
+ return page_address(buf_info->page) + buf_info->page_offset;
+}
+
+static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info)
+{
+ return buf_info->dma_addr + buf_info->page_offset;
+}
+
+static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
+{
+ return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset);
}
static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
- struct net_device *netdev = q->lif->netdev;
- struct ionic_rx_stats *stats;
- struct device *dev;
+ struct device *dev = q->dev;
+ dma_addr_t dma_addr;
struct page *page;
- dev = q->dev;
- stats = q_to_rx_stats(q);
-
- if (unlikely(!buf_info)) {
- net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
- netdev->name, q->name);
- return -EINVAL;
- }
-
page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
if (unlikely(!page)) {
net_err_ratelimited("%s: %s page alloc failed\n",
- netdev->name, q->name);
- stats->alloc_err++;
+ dev_name(dev), q->name);
+ q_to_rx_stats(q)->alloc_err++;
return -ENOMEM;
}
- buf_info->dma_addr = dma_map_page(dev, page, 0,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
+ dma_addr = dma_map_page(dev, page, 0,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
__free_pages(page, 0);
net_err_ratelimited("%s: %s dma map failed\n",
- netdev->name, q->name);
- stats->dma_map_err++;
+ dev_name(dev), q->name);
+ q_to_rx_stats(q)->dma_map_err++;
return -EIO;
}
+ buf_info->dma_addr = dma_addr;
buf_info->page = page;
buf_info->page_offset = 0;
@@ -132,12 +160,11 @@ static int ionic_rx_page_alloc(struct ionic_queue *q,
static void ionic_rx_page_free(struct ionic_queue *q,
struct ionic_buf_info *buf_info)
{
- struct net_device *netdev = q->lif->netdev;
struct device *dev = q->dev;
if (unlikely(!buf_info)) {
net_err_ratelimited("%s: %s invalid buf_info in free\n",
- netdev->name, q->name);
+ dev_name(dev), q->name);
return;
}
@@ -150,7 +177,7 @@ static void ionic_rx_page_free(struct ionic_queue *q,
}
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
- struct ionic_buf_info *buf_info, u32 used)
+ struct ionic_buf_info *buf_info, u32 len)
{
u32 size;
@@ -162,7 +189,7 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
if (page_to_nid(buf_info->page) != numa_mem_id())
return false;
- size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
+ size = ALIGN(len, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
buf_info->page_offset += size;
if (buf_info->page_offset >= IONIC_PAGE_SIZE)
return false;
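
ionic_rx_buf_recycle() splits one page across several receives by bumping page_offset in IONIC_PAGE_SPLIT_SZ-aligned steps, but once an XDP program is attached the whole page must stay dedicated to a single packet (the program may write anywhere in it), so the alignment becomes the full page size and recycling always fails. The offset math, with an assumed 2 KiB split on a 4 KiB page:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SZ  4096u
#define SPLIT_SZ 2048u	/* assumed IONIC_PAGE_SPLIT_SZ */

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Returns true if the page still has room for another buffer. */
static bool buf_recycle(unsigned int *page_offset, unsigned int len,
			bool xdp_attached)
{
	unsigned int step = ALIGN_UP(len, xdp_attached ? PAGE_SZ : SPLIT_SZ);

	*page_offset += step;
	return *page_offset < PAGE_SZ;
}

int main(void)
{
	unsigned int off = 0;

	/* 1500-byte frames: two fit per page via 2K-aligned halves. */
	printf("%d\n", buf_recycle(&off, 1500, false));	/* 1, off 2048 */
	printf("%d\n", buf_recycle(&off, 1500, false));	/* 0, off 4096 */

	/* With XDP attached, one frame consumes the page outright. */
	off = 0;
	printf("%d\n", buf_recycle(&off, 1500, true));	/* 0 */
	return 0;
}
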
@@ -172,88 +199,96 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
return true;
}
-static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+static void ionic_rx_add_skb_frag(struct ionic_queue *q,
+ struct sk_buff *skb,
+ struct ionic_buf_info *buf_info,
+ u32 off, u32 len,
+ bool synced)
+{
+ if (!synced)
+ dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info),
+ off, len, DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf_info->page, buf_info->page_offset + off,
+ len,
+ IONIC_PAGE_SIZE);
+
+ if (!ionic_rx_buf_recycle(q, buf_info, len)) {
+ dma_unmap_page(q->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ buf_info->page = NULL;
+ }
+}
+
+static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
+ struct ionic_rx_desc_info *desc_info,
+ unsigned int headroom,
+ unsigned int len,
+ unsigned int num_sg_elems,
+ bool synced)
{
- struct net_device *netdev = q->lif->netdev;
struct ionic_buf_info *buf_info;
- struct ionic_rx_stats *stats;
- struct device *dev = q->dev;
struct sk_buff *skb;
unsigned int i;
u16 frag_len;
- u16 len;
-
- stats = q_to_rx_stats(q);
buf_info = &desc_info->bufs[0];
- len = le16_to_cpu(comp->len);
-
prefetchw(buf_info->page);
skb = napi_get_frags(&q_to_qcq(q)->napi);
if (unlikely(!skb)) {
net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
- netdev->name, q->name);
- stats->alloc_err++;
+ dev_name(q->dev), q->name);
+ q_to_rx_stats(q)->alloc_err++;
return NULL;
}
- i = comp->num_sg_elems + 1;
- do {
- if (unlikely(!buf_info->page)) {
- dev_kfree_skb(skb);
- return NULL;
- }
-
- frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE - buf_info->page_offset));
- len -= frag_len;
-
- dma_sync_single_for_cpu(dev,
- buf_info->dma_addr + buf_info->page_offset,
- frag_len, DMA_FROM_DEVICE);
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- buf_info->page, buf_info->page_offset, frag_len,
- IONIC_PAGE_SIZE);
-
- if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
- dma_unmap_page(dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
- buf_info->page = NULL;
- }
+ if (headroom)
+ frag_len = min_t(u16, len,
+ IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
+ else
+ frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
- buf_info++;
+ if (unlikely(!buf_info->page))
+ goto err_bad_buf_page;
+ ionic_rx_add_skb_frag(q, skb, buf_info, headroom, frag_len, synced);
+ len -= frag_len;
+ buf_info++;
- i--;
- } while (i > 0);
+ for (i = 0; i < num_sg_elems; i++, buf_info++) {
+ if (unlikely(!buf_info->page))
+ goto err_bad_buf_page;
+ frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
+ ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced);
+ len -= frag_len;
+ }
return skb;
+
+err_bad_buf_page:
+ dev_kfree_skb(skb);
+ return NULL;
}
-static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_rxq_comp *comp)
+static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
+ struct ionic_queue *q,
+ struct ionic_rx_desc_info *desc_info,
+ unsigned int headroom,
+ unsigned int len,
+ bool synced)
{
- struct net_device *netdev = q->lif->netdev;
struct ionic_buf_info *buf_info;
- struct ionic_rx_stats *stats;
struct device *dev = q->dev;
struct sk_buff *skb;
- u16 len;
-
- stats = q_to_rx_stats(q);
buf_info = &desc_info->bufs[0];
- len = le16_to_cpu(comp->len);
skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
if (unlikely(!skb)) {
net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
- netdev->name, q->name);
- stats->alloc_err++;
+ dev_name(dev), q->name);
+ q_to_rx_stats(q)->alloc_err++;
return NULL;
}
@@ -262,30 +297,343 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
return NULL;
}
- dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
- len, DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
- dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
- len, DMA_FROM_DEVICE);
+ if (!synced)
+ dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
+ headroom, len, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len);
+ dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info),
+ headroom, len, DMA_FROM_DEVICE);
skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, q->lif->netdev);
+ skb->protocol = eth_type_trans(skb, netdev);
return skb;
}
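The copybreak path trades one memcpy for keeping the DMA page mapped and immediately reusable: packets at or below the lif's rx_copybreak threshold are copied into a small linear skb, while everything larger takes the page-frag path above. A tiny illustration of the decision, with an example threshold (the real value is a per-lif tunable):

	#include <stdbool.h>
	#include <stdio.h>

	/* copy small packets into a linear skb, attach pages for big ones */
	static bool use_copybreak(unsigned int len, unsigned int rx_copybreak)
	{
		return len <= rx_copybreak;
	}

	int main(void)
	{
		printf("64B at threshold 256:   copy=%d\n", use_copybreak(64, 256));
		printf("1500B at threshold 256: copy=%d\n", use_copybreak(1500, 256));
		return 0;
	}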
+static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
+ struct ionic_tx_desc_info *desc_info)
+{
+ unsigned int nbufs = desc_info->nbufs;
+ struct ionic_buf_info *buf_info;
+ struct device *dev = q->dev;
+ int i;
+
+ if (!nbufs)
+ return;
+
+ buf_info = desc_info->bufs;
+ dma_unmap_single(dev, buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ if (desc_info->act == XDP_TX)
+ __free_pages(buf_info->page, 0);
+ buf_info->page = NULL;
+
+ buf_info++;
+ for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
+ dma_unmap_page(dev, buf_info->dma_addr,
+ buf_info->len, DMA_TO_DEVICE);
+ if (desc_info->act == XDP_TX)
+ __free_pages(buf_info->page, 0);
+ buf_info->page = NULL;
+ }
+
+ if (desc_info->act == XDP_REDIRECT)
+ xdp_return_frame(desc_info->xdpf);
+
+ desc_info->nbufs = 0;
+ desc_info->xdpf = NULL;
+ desc_info->act = 0;
+}
+
+static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
+ enum xdp_action act, struct page *page, int off,
+ bool ring_doorbell)
+{
+ struct ionic_tx_desc_info *desc_info;
+ struct ionic_buf_info *buf_info;
+ struct ionic_tx_stats *stats;
+ struct ionic_txq_desc *desc;
+ size_t len = frame->len;
+ dma_addr_t dma_addr;
+ u64 cmd;
+
+ desc_info = &q->tx_info[q->head_idx];
+ desc = &q->txq[q->head_idx];
+ buf_info = desc_info->bufs;
+ stats = q_to_tx_stats(q);
+
+ dma_addr = ionic_tx_map_single(q, frame->data, len);
+ if (!dma_addr)
+ return -EIO;
+ buf_info->dma_addr = dma_addr;
+ buf_info->len = len;
+ buf_info->page = page;
+ buf_info->page_offset = off;
+
+ desc_info->nbufs = 1;
+ desc_info->xdpf = frame;
+ desc_info->act = act;
+
+ if (xdp_frame_has_frags(frame)) {
+ struct ionic_txq_sg_elem *elem;
+ struct skb_shared_info *sinfo;
+ struct ionic_buf_info *bi;
+ skb_frag_t *frag;
+ int i;
+
+ bi = &buf_info[1];
+ sinfo = xdp_get_shared_info_from_frame(frame);
+ frag = sinfo->frags;
+ elem = ionic_tx_sg_elems(q);
+ for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
+ dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
+ if (!dma_addr) {
+ ionic_tx_desc_unmap_bufs(q, desc_info);
+ return -EIO;
+ }
+ bi->dma_addr = dma_addr;
+ bi->len = skb_frag_size(frag);
+ bi->page = skb_frag_page(frag);
+
+ elem->addr = cpu_to_le64(bi->dma_addr);
+ elem->len = cpu_to_le16(bi->len);
+ elem++;
+
+ desc_info->nbufs++;
+ }
+ }
+
+ cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
+ 0, (desc_info->nbufs - 1), buf_info->dma_addr);
+ desc->cmd = cpu_to_le64(cmd);
+ desc->len = cpu_to_le16(len);
+ desc->csum_start = 0;
+ desc->csum_offset = 0;
+
+ stats->xdp_frames++;
+ stats->pkts++;
+ stats->bytes += len;
+
+ ionic_txq_post(q, ring_doorbell);
+
+ return 0;
+}
+
+int ionic_xdp_xmit(struct net_device *netdev, int n,
+ struct xdp_frame **xdp_frames, u32 flags)
+{
+ struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_queue *txq;
+ struct netdev_queue *nq;
+ int nxmit;
+ int space;
+ int cpu;
+ int qi;
+
+ if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ /* The AdminQ is assumed to be on cpu 0, while we attempt to affinitize
+ * the TxRx queue pairs 0..n-1 on cpus 1..n. We try to stick with that
+ * affinitization here, but of course irqbalance and friends might
+ * have juggled things anyway, so we have to check for the 0 case.
+ */
+ cpu = smp_processor_id();
+ qi = cpu ? (cpu - 1) % lif->nxqs : cpu;
+
+ txq = &lif->txqcqs[qi]->q;
+ nq = netdev_get_tx_queue(netdev, txq->index);
+ __netif_tx_lock(nq, cpu);
+ txq_trans_cond_update(nq);
+
+ if (netif_tx_queue_stopped(nq) ||
+ !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
+ ionic_q_space_avail(txq),
+ 1, 1)) {
+ __netif_tx_unlock(nq);
+ return -EIO;
+ }
+
+ space = min_t(int, n, ionic_q_space_avail(txq));
+ for (nxmit = 0; nxmit < space ; nxmit++) {
+ if (ionic_xdp_post_frame(txq, xdp_frames[nxmit],
+ XDP_REDIRECT,
+ virt_to_page(xdp_frames[nxmit]->data),
+ 0, false)) {
+ nxmit--;
+ break;
+ }
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ ionic_dbell_ring(lif->kern_dbpage, txq->hw_type,
+ txq->dbval | txq->head_idx);
+
+ netif_txq_maybe_stop(q_to_ndq(netdev, txq),
+ ionic_q_space_avail(txq),
+ 4, 4);
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
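The queue choice in ionic_xdp_xmit() follows the affinity comment above: cpu 0 keeps queue 0 and cpus 1..n wrap across the nxqs TxRx pairs. A small standalone sketch of that mapping (the nxqs value is only an example):

	#include <stdio.h>

	static int pick_txq(int cpu, int nxqs)
	{
		/* cpu 0 (AdminQ) -> queue 0, cpus 1..n wrap across nxqs pairs */
		return cpu ? (cpu - 1) % nxqs : 0;
	}

	int main(void)
	{
		for (int cpu = 0; cpu < 6; cpu++)
			printf("cpu %d -> txq %d\n", cpu, pick_txq(cpu, 4));
		return 0;
	}

With nxqs = 4 this prints queue 0 for cpus 0 and 1, then 1, 2, 3, and wraps back to 0 for cpu 5 — the sharing the comment warns irqbalance may introduce.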
+
+static bool ionic_run_xdp(struct ionic_rx_stats *stats,
+ struct net_device *netdev,
+ struct bpf_prog *xdp_prog,
+ struct ionic_queue *rxq,
+ struct ionic_buf_info *buf_info,
+ int len)
+{
+ u32 xdp_action = XDP_ABORTED;
+ struct xdp_buff xdp_buf;
+ struct ionic_queue *txq;
+ struct netdev_queue *nq;
+ struct xdp_frame *xdpf;
+ int remain_len;
+ int frag_len;
+ int err = 0;
+
+ xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
+ frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
+ xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
+ XDP_PACKET_HEADROOM, frag_len, false);
+
+ dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
+ XDP_PACKET_HEADROOM, len,
+ DMA_FROM_DEVICE);
+
+ prefetchw(&xdp_buf.data_hard_start);
+
+ /* The MTU is limited to one buffer when !xdp_has_frags, so
+ * if the received len is bigger than one buffer
+ * then we know there is frag info to gather
+ */
+ remain_len = len - frag_len;
+ if (remain_len) {
+ struct skb_shared_info *sinfo;
+ struct ionic_buf_info *bi;
+ skb_frag_t *frag;
+
+ bi = buf_info;
+ sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(&xdp_buf);
+
+ do {
+ if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
+ err = -ENOSPC;
+ goto out_xdp_abort;
+ }
+
+ frag = &sinfo->frags[sinfo->nr_frags];
+ sinfo->nr_frags++;
+ bi++;
+ frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
+ dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
+ 0, frag_len, DMA_FROM_DEVICE);
+ skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
+ sinfo->xdp_frags_size += frag_len;
+ remain_len -= frag_len;
+
+ if (page_is_pfmemalloc(bi->page))
+ xdp_buff_set_frag_pfmemalloc(&xdp_buf);
+ } while (remain_len > 0);
+ }
+
+ xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
+
+ switch (xdp_action) {
+ case XDP_PASS:
+ stats->xdp_pass++;
+ return false; /* false = we didn't consume the packet */
+
+ case XDP_DROP:
+ ionic_rx_page_free(rxq, buf_info);
+ stats->xdp_drop++;
+ break;
+
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(&xdp_buf);
+ if (!xdpf)
+ goto out_xdp_abort;
+
+ txq = rxq->partner;
+ nq = netdev_get_tx_queue(netdev, txq->index);
+ __netif_tx_lock(nq, smp_processor_id());
+ txq_trans_cond_update(nq);
+
+ if (netif_tx_queue_stopped(nq) ||
+ !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
+ ionic_q_space_avail(txq),
+ 1, 1)) {
+ __netif_tx_unlock(nq);
+ goto out_xdp_abort;
+ }
+
+ dma_unmap_page(rxq->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
+ buf_info->page,
+ buf_info->page_offset,
+ true);
+ __netif_tx_unlock(nq);
+ if (err) {
+ netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
+ goto out_xdp_abort;
+ }
+ stats->xdp_tx++;
+
+ /* the Tx completion will free the buffers */
+ break;
+
+ case XDP_REDIRECT:
+ /* unmap the pages before handing them to a different device */
+ dma_unmap_page(rxq->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
+ if (err) {
+ netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
+ goto out_xdp_abort;
+ }
+ buf_info->page = NULL;
+ rxq->xdp_flush = true;
+ stats->xdp_redirect++;
+ break;
+
+ case XDP_ABORTED:
+ default:
+ goto out_xdp_abort;
+ }
+
+ return true;
+
+out_xdp_abort:
+ trace_xdp_exception(netdev, xdp_prog, xdp_action);
+ ionic_rx_page_free(rxq, buf_info);
+ stats->xdp_aborted++;
+
+ return true;
+}
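The frag-gathering loop mirrors the comment above: the linear part of the xdp_buff is capped at the XDP linear limit, so any remaining bytes must be sitting in the sg buffers posted behind buf[0]. A userspace sketch of the split, where the limit and per-buffer capacity are example stand-ins for IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN and ionic_rx_buf_size():

	#include <stdio.h>

	#define LINEAR_MAX 3054u /* stand-in for the XDP linear limit */
	#define BUF_MAX    4096u /* stand-in for per-buffer capacity */
	#define MAX_FRAGS  17    /* stand-in for MAX_SKB_FRAGS */

	static int split(unsigned int len)
	{
		unsigned int linear = len < LINEAR_MAX ? len : LINEAR_MAX;
		unsigned int remain = len - linear;
		int nfrags = 0;

		printf("linear=%u\n", linear);
		while (remain && nfrags < MAX_FRAGS) {
			unsigned int chunk = remain < BUF_MAX ? remain : BUF_MAX;

			printf("frag[%d]=%u\n", nfrags++, chunk);
			remain -= chunk;
		}
		return remain ? -1 : nfrags; /* -1 mirrors the -ENOSPC abort */
	}

	int main(void)
	{
		split(9000); /* jumbo frame: one linear chunk plus two frags */
		return 0;
	}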
+
static void ionic_rx_clean(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info,
- void *cb_arg)
+ struct ionic_rx_desc_info *desc_info,
+ struct ionic_rxq_comp *comp)
{
struct net_device *netdev = q->lif->netdev;
struct ionic_qcq *qcq = q_to_qcq(q);
struct ionic_rx_stats *stats;
- struct ionic_rxq_comp *comp;
+ struct bpf_prog *xdp_prog;
+ unsigned int headroom;
struct sk_buff *skb;
-
- comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);
+ bool synced = false;
+ bool use_copybreak;
+ u16 len;
stats = q_to_rx_stats(q);
@@ -294,13 +642,25 @@ static void ionic_rx_clean(struct ionic_queue *q,
return;
}
+ len = le16_to_cpu(comp->len);
stats->pkts++;
- stats->bytes += le16_to_cpu(comp->len);
+ stats->bytes += len;
+
+ xdp_prog = READ_ONCE(q->lif->xdp_prog);
+ if (xdp_prog) {
+ if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
+ return;
+ synced = true;
+ }
- if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
- skb = ionic_rx_copybreak(q, desc_info, comp);
+ headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
+ use_copybreak = len <= q->lif->rx_copybreak;
+ if (use_copybreak)
+ skb = ionic_rx_copybreak(netdev, q, desc_info,
+ headroom, len, synced);
else
- skb = ionic_rx_frags(q, desc_info, comp);
+ skb = ionic_rx_build_skb(q, desc_info, headroom, len,
+ comp->num_sg_elems, synced);
if (unlikely(!skb)) {
stats->dropped++;
@@ -352,7 +712,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
u64 hwstamp;
cq_desc_hwstamp =
- cq_info->cq_desc +
+ (void *)comp +
qcq->cq.desc_size -
sizeof(struct ionic_rxq_comp) -
IONIC_HWSTAMP_CQ_NEGOFFSET;
@@ -367,19 +727,19 @@ static void ionic_rx_clean(struct ionic_queue *q,
}
}
- if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
+ if (use_copybreak)
napi_gro_receive(&qcq->napi, skb);
else
napi_gro_frags(&qcq->napi);
}
-bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+bool ionic_rx_service(struct ionic_cq *cq)
{
+ struct ionic_rx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
- struct ionic_desc_info *desc_info;
struct ionic_rxq_comp *comp;
- comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
+ comp = &((struct ionic_rxq_comp *)cq->base)[cq->tail_idx];
if (!color_match(comp->pkt_type_color, cq->done_color))
return false;
@@ -391,31 +751,29 @@ bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
if (q->tail_idx != le16_to_cpu(comp->comp_index))
return false;
- desc_info = &q->info[q->tail_idx];
+ desc_info = &q->rx_info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per cq completion */
- ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
-
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
+ ionic_rx_clean(q, desc_info, comp);
return true;
}
static inline void ionic_write_cmb_desc(struct ionic_queue *q,
- void __iomem *cmb_desc,
void *desc)
{
- if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
- memcpy_toio(cmb_desc, desc, q->desc_size);
+ /* Since Rx and Tx descriptors are the same size, we can
+ * save an instruction or two and skip the qtype check.
+ */
+ if (unlikely(q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS))
+ memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0]));
}
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
- struct ionic_desc_info *desc_info;
- struct ionic_rxq_sg_desc *sg_desc;
+ struct ionic_rx_desc_info *desc_info;
struct ionic_rxq_sg_elem *sg_elem;
struct ionic_buf_info *buf_info;
unsigned int fill_threshold;
@@ -424,8 +782,9 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int frag_len;
unsigned int nfrags;
unsigned int n_fill;
- unsigned int i, j;
unsigned int len;
+ unsigned int i;
+ unsigned int j;
n_fill = ionic_q_space_avail(q);
@@ -434,13 +793,16 @@ void ionic_rx_fill(struct ionic_queue *q)
if (n_fill < fill_threshold)
return;
- len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
+ len = netdev->mtu + VLAN_ETH_HLEN;
for (i = n_fill; i; i--) {
+ unsigned int headroom;
+ unsigned int buf_len;
+
nfrags = 0;
remain_len = len;
- desc_info = &q->info[q->head_idx];
- desc = desc_info->desc;
+ desc = &q->rxq[q->head_idx];
+ desc_info = &q->rx_info[q->head_idx];
buf_info = &desc_info->bufs[0];
if (!buf_info->page) { /* alloc a new buffer? */
@@ -451,19 +813,26 @@ void ionic_rx_fill(struct ionic_queue *q)
}
}
- /* fill main descriptor - buf[0] */
- desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE - buf_info->page_offset));
+ /* fill main descriptor - buf[0]
+ * XDP uses space in the first buffer, so account for
+ * headroom, tailroom, and the IP header in the first frag size.
+ */
+ headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
+ if (q->xdp_rxq_info)
+ buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN;
+ else
+ buf_len = ionic_rx_buf_size(buf_info);
+ frag_len = min_t(u16, len, buf_len);
+
+ desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom);
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
nfrags++;
/* fill sg descriptors - buf[1..n] */
- sg_desc = desc_info->sg_desc;
- for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
- sg_elem = &sg_desc->elems[j];
+ sg_elem = q->rxq_sgl[q->head_idx].elems;
+ for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
if (!buf_info->page) { /* alloc a new sg buffer? */
if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
sg_elem->addr = 0;
@@ -472,10 +841,8 @@ void ionic_rx_fill(struct ionic_queue *q)
}
}
- sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
- IONIC_PAGE_SIZE -
- buf_info->page_offset));
+ sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info));
+ frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info));
sg_elem->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
@@ -483,18 +850,16 @@ void ionic_rx_fill(struct ionic_queue *q)
}
/* clear end sg element as a sentinel */
- if (j < q->max_sg_elems) {
- sg_elem = &sg_desc->elems[j];
+ if (j < q->max_sg_elems)
memset(sg_elem, 0, sizeof(*sg_elem));
- }
desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
IONIC_RXQ_DESC_OPCODE_SIMPLE;
desc_info->nbufs = nfrags;
- ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+ ionic_write_cmb_desc(q, desc);
- ionic_rxq_post(q, false, ionic_rx_clean, NULL);
+ ionic_rxq_post(q, false);
}
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
@@ -509,21 +874,19 @@ void ionic_rx_fill(struct ionic_queue *q)
void ionic_rx_empty(struct ionic_queue *q)
{
- struct ionic_desc_info *desc_info;
+ struct ionic_rx_desc_info *desc_info;
struct ionic_buf_info *buf_info;
unsigned int i, j;
for (i = 0; i < q->num_descs; i++) {
- desc_info = &q->info[i];
- for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
+ desc_info = &q->rx_info[i];
+ for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) {
buf_info = &desc_info->bufs[j];
if (buf_info->page)
ionic_rx_page_free(q, buf_info);
}
desc_info->nbufs = 0;
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
}
q->head_idx = 0;
@@ -568,16 +931,13 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
struct ionic_cq *cq = napi_to_cq(napi);
- struct ionic_dev *idev;
- struct ionic_lif *lif;
u32 work_done = 0;
u32 flags = 0;
- lif = cq->bound_q->lif;
- idev = &lif->ionic->idev;
+ work_done = ionic_tx_cq_service(cq, budget);
- work_done = ionic_cq_service(cq, budget,
- ionic_tx_service, NULL, NULL);
+ if (unlikely(!budget))
+ return budget;
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
@@ -587,7 +947,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
if (work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(idev->intr_ctrl,
+ ionic_intr_credits(cq->idev->intr_ctrl,
cq->bound_intr->index,
work_done, flags);
}
@@ -598,23 +958,30 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
return work_done;
}
+static void ionic_xdp_do_flush(struct ionic_cq *cq)
+{
+ if (cq->bound_q->xdp_flush) {
+ xdp_do_flush();
+ cq->bound_q->xdp_flush = false;
+ }
+}
+
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
struct ionic_cq *cq = napi_to_cq(napi);
- struct ionic_dev *idev;
- struct ionic_lif *lif;
u32 work_done = 0;
u32 flags = 0;
- lif = cq->bound_q->lif;
- idev = &lif->ionic->idev;
+ if (unlikely(!budget))
+ return budget;
work_done = ionic_cq_service(cq, budget,
ionic_rx_service, NULL, NULL);
ionic_rx_fill(cq->bound_q);
+ ionic_xdp_do_flush(cq);
if (work_done < budget && napi_complete_done(napi, work_done)) {
ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -623,7 +990,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
if (work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(idev->intr_ctrl,
+ ionic_intr_credits(cq->idev->intr_ctrl,
cq->bound_intr->index,
work_done, flags);
}
@@ -640,7 +1007,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
struct ionic_cq *rxcq = napi_to_cq(napi);
unsigned int qi = rxcq->bound_q->index;
struct ionic_qcq *txqcq;
- struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
bool resched = false;
@@ -649,18 +1015,20 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
u32 flags = 0;
lif = rxcq->bound_q->lif;
- idev = &lif->ionic->idev;
txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
- tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
- ionic_tx_service, NULL, NULL);
+ tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT);
+
+ if (unlikely(!budget))
+ return budget;
rx_work_done = ionic_cq_service(rxcq, budget,
ionic_rx_service, NULL, NULL);
ionic_rx_fill(rxcq->bound_q);
+ ionic_xdp_do_flush(rxcq);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
@@ -669,7 +1037,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
if (rx_work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
+ ionic_intr_credits(rxcq->idev->intr_ctrl, rxcq->bound_intr->index,
tx_work_done + rx_work_done, flags);
}
@@ -686,15 +1054,14 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
void *data, size_t len)
{
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->dev;
dma_addr_t dma_addr;
dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
net_warn_ratelimited("%s: DMA single map failed on %s!\n",
- q->lif->netdev->name, q->name);
- stats->dma_map_err++;
+ dev_name(dev), q->name);
+ q_to_tx_stats(q)->dma_map_err++;
return 0;
}
return dma_addr;
@@ -704,24 +1071,23 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
const skb_frag_t *frag,
size_t offset, size_t len)
{
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->dev;
dma_addr_t dma_addr;
dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma_addr)) {
net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
- q->lif->netdev->name, q->name);
- stats->dma_map_err++;
+ dev_name(dev), q->name);
+ q_to_tx_stats(q)->dma_map_err++;
+ return 0;
}
return dma_addr;
}
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
struct ionic_buf_info *buf_info = desc_info->bufs;
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->dev;
dma_addr_t dma_addr;
unsigned int nfrags;
@@ -729,10 +1095,8 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
int frag_idx;
dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
- if (dma_mapping_error(dev, dma_addr)) {
- stats->dma_map_err++;
+ if (!dma_addr)
return -EIO;
- }
buf_info->dma_addr = dma_addr;
buf_info->len = skb_headlen(skb);
buf_info++;
@@ -741,10 +1105,8 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags;
for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
- if (dma_mapping_error(dev, dma_addr)) {
- stats->dma_map_err++;
+ if (!dma_addr)
goto dma_fail;
- }
buf_info->dma_addr = dma_addr;
buf_info->len = skb_frag_size(frag);
buf_info++;
@@ -762,12 +1124,13 @@ dma_fail:
dma_unmap_page(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE);
}
- dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE);
+ dma_unmap_single(dev, desc_info->bufs[0].dma_addr,
+ desc_info->bufs[0].len, DMA_TO_DEVICE);
return -EIO;
}
static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
struct ionic_buf_info *buf_info = desc_info->bufs;
struct device *dev = q->dev;
@@ -776,41 +1139,48 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
if (!desc_info->nbufs)
return;
- dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
+ dma_unmap_single(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE);
buf_info++;
for (i = 1; i < desc_info->nbufs; i++, buf_info++)
- dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
+ dma_unmap_page(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE);
desc_info->nbufs = 0;
}
static void ionic_tx_clean(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
- struct ionic_cq_info *cq_info,
- void *cb_arg)
+ struct ionic_tx_desc_info *desc_info,
+ struct ionic_txq_comp *comp)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q);
- struct sk_buff *skb = cb_arg;
- u16 qi;
+ struct sk_buff *skb;
+
+ if (desc_info->xdpf) {
+ ionic_xdp_tx_desc_clean(q->partner, desc_info);
+ stats->clean++;
+
+ if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
+ netif_wake_subqueue(q->lif->netdev, q->index);
+
+ return;
+ }
ionic_tx_desc_unmap_bufs(q, desc_info);
+ skb = desc_info->skb;
if (!skb)
return;
- qi = skb_get_queue_mapping(skb);
-
- if (ionic_txq_hwstamp_enabled(q)) {
- if (cq_info) {
+ if (unlikely(ionic_txq_hwstamp_enabled(q))) {
+ if (comp) {
struct skb_shared_hwtstamps hwts = {};
__le64 *cq_desc_hwstamp;
u64 hwstamp;
cq_desc_hwstamp =
- cq_info->cq_desc +
+ (void *)comp +
qcq->cq.desc_size -
sizeof(struct ionic_txq_comp) -
IONIC_HWSTAMP_CQ_NEGOFFSET;
@@ -828,27 +1198,25 @@ static void ionic_tx_clean(struct ionic_queue *q,
stats->hwstamp_invalid++;
}
}
-
- } else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
- netif_wake_subqueue(q->lif->netdev, qi);
}
desc_info->bytes = skb->len;
stats->clean++;
- dev_consume_skb_any(skb);
+ napi_consume_skb(skb, 1);
}
-bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
+static bool ionic_tx_service(struct ionic_cq *cq,
+ unsigned int *total_pkts, unsigned int *total_bytes)
{
+ struct ionic_tx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
- struct ionic_desc_info *desc_info;
struct ionic_txq_comp *comp;
- int bytes = 0;
- int pkts = 0;
+ unsigned int bytes = 0;
+ unsigned int pkts = 0;
u16 index;
- comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
+ comp = &((struct ionic_txq_comp *)cq->base)[cq->tail_idx];
if (!color_match(comp->color, cq->done_color))
return false;
@@ -857,59 +1225,90 @@ bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
* several q entries completed for each cq completion
*/
do {
- desc_info = &q->info[q->tail_idx];
+ desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0;
index = q->tail_idx;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
- if (desc_info->cb_arg) {
+ ionic_tx_clean(q, desc_info, comp);
+ if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
+ desc_info->skb = NULL;
}
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
} while (index != le16_to_cpu(comp->comp_index));
- if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
- netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
+ (*total_pkts) += pkts;
+ (*total_bytes) += bytes;
return true;
}
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
+{
+ unsigned int work_done = 0;
+ unsigned int bytes = 0;
+ unsigned int pkts = 0;
+
+ if (work_to_do == 0)
+ return 0;
+
+ while (ionic_tx_service(cq, &pkts, &bytes)) {
+ if (cq->tail_idx == cq->num_descs - 1)
+ cq->done_color = !cq->done_color;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+
+ if (++work_done >= work_to_do)
+ break;
+ }
+
+ if (work_done) {
+ struct ionic_queue *q = cq->bound_q;
+
+ if (likely(!ionic_txq_hwstamp_enabled(q)))
+ netif_txq_completed_wake(q_to_ndq(q->lif->netdev, q),
+ pkts, bytes,
+ ionic_q_space_avail(q),
+ IONIC_TSO_DESCS_NEEDED);
+ }
+
+ return work_done;
+}
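ionic_tx_cq_service() walks the completion ring the same way the Rx side does: the tail index wraps under a power-of-two mask and the expected "done" color flips on every wrap, so completions left over from the previous lap can never match. A minimal model of that wrap-and-flip (the ring size is an example):

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_DESCS 8u /* power of two, as the driver rings are */

	int main(void)
	{
		unsigned int tail = 0;
		bool done_color = true;

		for (int i = 0; i < 10; i++) {
			if (tail == NUM_DESCS - 1)
				done_color = !done_color; /* flip at the wrap */
			tail = (tail + 1) & (NUM_DESCS - 1);
			printf("tail=%u expect color=%d\n", tail, done_color);
		}
		return 0;
	}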
+
void ionic_tx_flush(struct ionic_cq *cq)
{
- struct ionic_dev *idev = &cq->lif->ionic->idev;
u32 work_done;
- work_done = ionic_cq_service(cq, cq->num_descs,
- ionic_tx_service, NULL, NULL);
+ work_done = ionic_tx_cq_service(cq, cq->num_descs);
if (work_done)
- ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
+ ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
void ionic_tx_empty(struct ionic_queue *q)
{
- struct ionic_desc_info *desc_info;
+ struct ionic_tx_desc_info *desc_info;
int bytes = 0;
int pkts = 0;
/* walk the not completed tx entries, if any */
while (q->head_idx != q->tail_idx) {
- desc_info = &q->info[q->tail_idx];
+ desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
- if (desc_info->cb_arg) {
+ ionic_tx_clean(q, desc_info, NULL);
+ if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
+ desc_info->skb = NULL;
}
- desc_info->cb = NULL;
- desc_info->cb_arg = NULL;
}
- if (pkts && bytes && !ionic_txq_hwstamp_enabled(q))
- netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
+ if (likely(!ionic_txq_hwstamp_enabled(q))) {
+ struct netdev_queue *ndq = q_to_ndq(q->lif->netdev, q);
+
+ netdev_tx_completed_queue(ndq, pkts, bytes);
+ netdev_tx_reset_queue(ndq);
+ }
}
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
@@ -957,8 +1356,8 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
return 0;
}
-static void ionic_tx_tso_post(struct ionic_queue *q,
- struct ionic_desc_info *desc_info,
+static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
+ struct ionic_tx_desc_info *desc_info,
struct sk_buff *skb,
dma_addr_t addr, u8 nsge, u16 len,
unsigned int hdrlen, unsigned int mss,
@@ -966,7 +1365,7 @@ static void ionic_tx_tso_post(struct ionic_queue *q,
u16 vlan_tci, bool has_vlan,
bool start, bool done)
{
- struct ionic_txq_desc *desc = desc_info->desc;
+ struct ionic_txq_desc *desc = &q->txq[q->head_idx];
u8 flags = 0;
u64 cmd;
@@ -982,22 +1381,23 @@ static void ionic_tx_tso_post(struct ionic_queue *q,
desc->hdr_len = cpu_to_le16(hdrlen);
desc->mss = cpu_to_le16(mss);
- ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+ ionic_write_cmb_desc(q, desc);
if (start) {
skb_tx_timestamp(skb);
- if (!ionic_txq_hwstamp_enabled(q))
- netdev_tx_sent_queue(q_to_ndq(q), skb->len);
- ionic_txq_post(q, false, ionic_tx_clean, skb);
+ if (likely(!ionic_txq_hwstamp_enabled(q)))
+ netdev_tx_sent_queue(q_to_ndq(netdev, q), skb->len);
+ ionic_txq_post(q, false);
} else {
- ionic_txq_post(q, done, NULL, NULL);
+ ionic_txq_post(q, done);
}
}
-static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
+static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
+ struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_desc_info *desc_info;
+ struct ionic_tx_desc_info *desc_info;
struct ionic_buf_info *buf_info;
struct ionic_txq_sg_elem *elem;
struct ionic_txq_desc *desc;
@@ -1019,8 +1419,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
bool encap;
int err;
- desc_info = &q->info[q->head_idx];
- buf_info = desc_info->bufs;
+ desc_info = &q->tx_info[q->head_idx];
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO;
@@ -1057,6 +1456,8 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
else
hdrlen = skb_tcp_all_headers(skb);
+ desc_info->skb = skb;
+ buf_info = desc_info->bufs;
tso_rem = len;
seg_rem = min(tso_rem, hdrlen + mss);
@@ -1083,8 +1484,8 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
chunk_len = min(frag_rem, seg_rem);
if (!desc) {
/* fill main descriptor */
- desc = desc_info->txq_desc;
- elem = desc_info->txq_sg_desc->elems;
+ desc = &q->txq[q->head_idx];
+ elem = ionic_tx_sg_elems(q);
desc_addr = frag_addr;
desc_len = chunk_len;
} else {
@@ -1102,13 +1503,13 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
seg_rem = min(tso_rem, mss);
done = (tso_rem == 0);
/* post descriptor */
- ionic_tx_tso_post(q, desc_info, skb,
+ ionic_tx_tso_post(netdev, q, desc_info, skb,
desc_addr, desc_nsge, desc_len,
hdrlen, mss, outer_csum, vlan_tci, has_vlan,
start, done);
start = false;
/* Buffer information is stored with the first tso descriptor */
- desc_info = &q->info[q->head_idx];
+ desc_info = &q->tx_info[q->head_idx];
desc_info->nbufs = 0;
}
@@ -1121,9 +1522,9 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
}
static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
- struct ionic_txq_desc *desc = desc_info->txq_desc;
+ struct ionic_txq_desc *desc = &q->txq[q->head_idx];
struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
bool has_vlan;
@@ -1151,7 +1552,7 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
desc->csum_offset = cpu_to_le16(skb->csum_offset);
- ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+ ionic_write_cmb_desc(q, desc);
if (skb_csum_is_sctp(skb))
stats->crc32_csum++;
@@ -1160,9 +1561,9 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
}
static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
- struct ionic_txq_desc *desc = desc_info->txq_desc;
+ struct ionic_txq_desc *desc = &q->txq[q->head_idx];
struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
bool has_vlan;
@@ -1190,20 +1591,20 @@ static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = 0;
desc->csum_offset = 0;
- ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+ ionic_write_cmb_desc(q, desc);
stats->csum_none++;
}
static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
- struct ionic_desc_info *desc_info)
+ struct ionic_tx_desc_info *desc_info)
{
- struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
struct ionic_buf_info *buf_info = &desc_info->bufs[1];
- struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ struct ionic_txq_sg_elem *elem;
unsigned int i;
+ elem = ionic_tx_sg_elems(q);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
elem->addr = cpu_to_le64(buf_info->dma_addr);
elem->len = cpu_to_le16(buf_info->len);
@@ -1212,14 +1613,18 @@ static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
stats->frags += skb_shinfo(skb)->nr_frags;
}
-static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
+static int ionic_tx(struct net_device *netdev, struct ionic_queue *q,
+ struct sk_buff *skb)
{
- struct ionic_desc_info *desc_info = &q->info[q->head_idx];
+ struct ionic_tx_desc_info *desc_info = &q->tx_info[q->head_idx];
struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ bool ring_dbell = true;
if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO;
+ desc_info->skb = skb;
+
/* set up the initial descriptor */
if (skb->ip_summed == CHECKSUM_PARTIAL)
ionic_tx_calc_csum(q, skb, desc_info);
@@ -1233,16 +1638,22 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
stats->pkts++;
stats->bytes += skb->len;
- if (!ionic_txq_hwstamp_enabled(q))
- netdev_tx_sent_queue(q_to_ndq(q), skb->len);
- ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
+ if (likely(!ionic_txq_hwstamp_enabled(q))) {
+ struct netdev_queue *ndq = q_to_ndq(netdev, q);
+
+ if (unlikely(!ionic_q_has_space(q, MAX_SKB_FRAGS + 1)))
+ netif_tx_stop_queue(ndq);
+ ring_dbell = __netdev_tx_sent_queue(ndq, skb->len,
+ netdev_xmit_more());
+ }
+ ionic_txq_post(q, ring_dbell);
return 0;
}
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
- struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
bool too_many_frags = false;
skb_frag_t *frag;
int desc_bufs;
@@ -1258,17 +1669,20 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
/* Each desc is mss long max, so a descriptor for each gso_seg */
if (skb_is_gso(skb)) {
ndescs = skb_shinfo(skb)->gso_segs;
+ if (!nr_frags)
+ return ndescs;
} else {
ndescs = 1;
- if (skb_shinfo(skb)->nr_frags > q->max_sg_elems) {
+ if (!nr_frags)
+ return ndescs;
+
+ if (unlikely(nr_frags > q->max_sg_elems)) {
too_many_frags = true;
goto linearize;
}
- }
- /* If non-TSO, or no frags to check, we're done */
- if (!skb_is_gso(skb) || !skb_shinfo(skb)->nr_frags)
return ndescs;
+ }
/* We need to scan the skb to be sure that none of the MTU sized
* packets in the TSO will require more sgs per descriptor than we
@@ -1319,36 +1733,17 @@ linearize:
err = skb_linearize(skb);
if (err)
return err;
- stats->linearize++;
+ q_to_tx_stats(q)->linearize++;
}
return ndescs;
}
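The estimate above boils down to: a TSO skb needs one descriptor per gso segment (plus a frag scan, elided here, that can itself force a linearize), any other skb needs one, and a non-TSO skb with more frags than sg elements is linearized. A back-of-envelope sketch of the same decision (values are illustrative):

	#include <stdio.h>

	static int descs_needed(int gso_segs, int nr_frags, int max_sg_elems)
	{
		if (gso_segs)                   /* TSO: one desc per mss segment */
			return gso_segs;
		if (nr_frags > max_sg_elems)    /* caller linearizes the skb */
			return -1;
		return 1;
	}

	int main(void)
	{
		printf("tso, 44 segs   -> %d\n", descs_needed(44, 3, 15));
		printf("plain skb      -> %d\n", descs_needed(0, 3, 15));
		printf("too many frags -> %d\n", descs_needed(0, 20, 15));
		return 0;
	}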
-static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
-{
- int stopped = 0;
-
- if (unlikely(!ionic_q_has_space(q, ndescs))) {
- netif_stop_subqueue(q->lif->netdev, q->index);
- stopped = 1;
-
- /* Might race with ionic_tx_clean, check again */
- smp_rmb();
- if (ionic_q_has_space(q, ndescs)) {
- netif_wake_subqueue(q->lif->netdev, q->index);
- stopped = 0;
- }
- }
-
- return stopped;
-}
-
static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_queue *q = &lif->hwstamp_txq->q;
+ struct ionic_queue *q;
int err, ndescs;
/* Does not stop/start txq, because we post to a separate tx queue
@@ -1356,6 +1751,7 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
* the timestamping queue, it is dropped.
*/
+ q = &lif->hwstamp_txq->q;
ndescs = ionic_tx_descs_needed(q, skb);
if (unlikely(ndescs < 0))
goto err_out_drop;
@@ -1365,9 +1761,9 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
if (skb_is_gso(skb))
- err = ionic_tx_tso(q, skb);
+ err = ionic_tx_tso(netdev, q, skb);
else
- err = ionic_tx(q, skb);
+ err = ionic_tx(netdev, q, skb);
if (err)
goto err_out_drop;
@@ -1405,23 +1801,19 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (ndescs < 0)
goto err_out_drop;
- if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
+ if (!netif_txq_maybe_stop(q_to_ndq(netdev, q),
+ ionic_q_space_avail(q),
+ ndescs, ndescs))
return NETDEV_TX_BUSY;
if (skb_is_gso(skb))
- err = ionic_tx_tso(q, skb);
+ err = ionic_tx_tso(netdev, q, skb);
else
- err = ionic_tx(q, skb);
+ err = ionic_tx(netdev, q, skb);
if (err)
goto err_out_drop;
- /* Stop the queue if there aren't descriptors for the next packet.
- * Since our SG lists per descriptor take care of most of the possible
- * fragmentation, we don't need to have many descriptors available.
- */
- ionic_maybe_stop_tx(q, 4);
-
return NETDEV_TX_OK;
err_out_drop:
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
index d7cbaad8a6fb..9e73e324e7a1 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h
@@ -14,7 +14,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget);
int ionic_txrx_napi(struct napi_struct *napi, int budget);
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
-bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+bool ionic_rx_service(struct ionic_cq *cq);
+int ionic_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **xdp, u32 flags);
#endif /* _IONIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 35ec9aab3dc7..51fa880eaf6c 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1186,7 +1186,6 @@ static int
netxen_p3_has_mn(struct netxen_adapter *adapter)
{
u32 capability, flashed_ver;
- capability = 0;
/* NX2031 always had MN */
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
@@ -1197,7 +1196,6 @@ netxen_p3_has_mn(struct netxen_adapter *adapter)
flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
-
capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
if (capability & NX_PEG_TUNE_MN_PRESENT)
return 1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 5a5dbbb8d8aa..9a1660a12c57 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1794,8 +1794,6 @@ qed_rdma_create_srq(void *rdma_cxt,
goto err;
opaque_fid = p_hwfn->hw_info.opaque_fid;
-
- opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.opaque_fid = opaque_fid;
init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 0e240b5ab8d4..ae3ebf0cf999 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1776,7 +1776,7 @@ static int qede_get_tunable(struct net_device *dev,
return 0;
}
-static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int qede_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
@@ -1789,18 +1789,26 @@ static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- if (current_link.eee.adv_caps & QED_EEE_1G_ADV)
- edata->advertised = ADVERTISED_1000baseT_Full;
- if (current_link.eee.adv_caps & QED_EEE_10G_ADV)
- edata->advertised |= ADVERTISED_10000baseT_Full;
- if (current_link.sup_caps & QED_EEE_1G_ADV)
- edata->supported = ADVERTISED_1000baseT_Full;
- if (current_link.sup_caps & QED_EEE_10G_ADV)
- edata->supported |= ADVERTISED_10000baseT_Full;
- if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV)
- edata->lp_advertised = ADVERTISED_1000baseT_Full;
- if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV)
- edata->lp_advertised |= ADVERTISED_10000baseT_Full;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised,
+ current_link.eee.adv_caps & QED_EEE_1G_ADV);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->advertised,
+ current_link.eee.adv_caps & QED_EEE_10G_ADV);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported,
+ current_link.sup_caps & QED_EEE_1G_ADV);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->supported,
+ current_link.sup_caps & QED_EEE_10G_ADV);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->lp_advertised,
+ current_link.eee.lp_adv_caps & QED_EEE_1G_ADV);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->lp_advertised,
+ current_link.eee.lp_adv_caps & QED_EEE_10G_ADV);
edata->tx_lpi_timer = current_link.eee.tx_lpi_timer;
edata->eee_enabled = current_link.eee.enable;
@@ -1810,11 +1818,14 @@ static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int qede_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
struct qede_dev *edev = netdev_priv(dev);
struct qed_link_output current_link;
struct qed_link_params params;
+ bool unsupp;
if (!edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev, "Link settings are not allowed to be changed\n");
@@ -1832,21 +1843,26 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
memset(&params, 0, sizeof(params));
params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG;
- if (!(edata->advertised & (ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full)) ||
- ((edata->advertised & (ADVERTISED_1000baseT_Full |
- ADVERTISED_10000baseT_Full)) !=
- edata->advertised)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+
+ unsupp = linkmode_andnot(tmp, edata->advertised, supported);
+ if (unsupp) {
DP_VERBOSE(edev, QED_MSG_DEBUG,
- "Invalid advertised capabilities %d\n",
- edata->advertised);
+ "Invalid advertised capabilities %*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, edata->advertised);
return -EINVAL;
}
- if (edata->advertised & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->advertised))
params.eee.adv_caps = QED_EEE_1G_ADV;
- if (edata->advertised & ADVERTISED_10000baseT_Full)
- params.eee.adv_caps |= QED_EEE_10G_ADV;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ edata->advertised))
+ params.eee.adv_caps |= QED_EEE_10G_ADV;
+
params.eee.enable = edata->eee_enabled;
params.eee.tx_lpi_enable = edata->tx_lpi_enabled;
params.eee.tx_lpi_timer = edata->tx_lpi_timer;
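The keee conversion swaps the old ADVERTISED_* bit arithmetic for linkmode bitmaps: the supported modes are collected into a mask and linkmode_andnot() rejects any advertised bit outside it. The same check modelled with plain unsigned masks (the bit positions here are illustrative, not the real ethtool ones):

	#include <stdbool.h>
	#include <stdio.h>

	#define ADV_1G  (1u << 0)
	#define ADV_10G (1u << 1)

	static bool eee_adv_valid(unsigned int advertised)
	{
		unsigned int supported = ADV_1G | ADV_10G;

		/* the "andnot" result must be empty for a valid request */
		return (advertised & ~supported) == 0;
	}

	int main(void)
	{
		printf("1G+10G: valid=%d\n", eee_adv_valid(ADV_1G | ADV_10G));
		printf("bit 5:  valid=%d\n", eee_adv_valid(1u << 5));
		return 0;
	}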
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index cb1746bc0e0c..847fa62c80df 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -215,7 +215,7 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
- bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ bd2_bits2 |= ((skb_transport_offset(skb) >> 1) &
ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
<< ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 41894d154013..b9dc0071c5de 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -446,8 +446,7 @@ static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
encap_descr |= skb_network_offset(skb) << 10;
first_desc->encap_descr = cpu_to_le16(encap_descr);
- first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
- skb->data;
+ first_desc->tcp_hdr_offset = skb_inner_transport_offset(skb);
first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 3270df72541b..4c06f55878de 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -771,5 +771,6 @@ static struct platform_driver emac_platform_driver = {
module_platform_driver(emac_platform_driver);
+MODULE_DESCRIPTION("Qualcomm EMAC Gigabit Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-emac");
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c
index 4292c89bd35c..6263e4cf47fa 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k.c
@@ -1,22 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- *
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
*/
/* This module implements the Qualcomm Atheros SPI protocol for
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.h b/drivers/net/ethernet/qualcomm/qca_7k.h
index 356de8ec5d48..828ee9c27578 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k.h
@@ -1,21 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
*/
/* Qualcomm Atheros SPI register definition.
diff --git a/drivers/net/ethernet/qualcomm/qca_7k_common.c b/drivers/net/ethernet/qualcomm/qca_7k_common.c
index 6b511f05df61..5302da587620 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k_common.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k_common.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Atheros ethernet framing. Every Ethernet frame is surrounded
@@ -162,5 +149,5 @@ EXPORT_SYMBOL_GPL(qcafrm_fsm_decode);
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 common");
MODULE_AUTHOR("Qualcomm Atheros Communications");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/qualcomm/qca_7k_common.h b/drivers/net/ethernet/qualcomm/qca_7k_common.h
index 928554f11e35..44ed66fdb407 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k_common.h
+++ b/drivers/net/ethernet/qualcomm/qca_7k_common.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Atheros Ethernet framing. Every Ethernet frame is surrounded by an atheros
@@ -107,9 +94,6 @@ struct qcafrm_handle {
/* Offset in buffer (borrowed for length too) */
u16 offset;
-
- /* Frame length as kept by this module */
- u16 len;
};
u16 qcafrm_create_header(u8 *buf, u16 len);
@@ -128,17 +112,6 @@ static inline void qcafrm_fsm_init_uart(struct qcafrm_handle *handle)
handle->state = handle->init;
}
-/* Gather received bytes and try to extract a full Ethernet frame
- * by following a simple state machine.
- *
- * Return: QCAFRM_GATHER No Ethernet frame fully received yet.
- * QCAFRM_NOHEAD Header expected but not found.
- * QCAFRM_INVLEN QCA7K frame length is invalid
- * QCAFRM_NOTAIL Footer expected but not found.
- * > 0 Number of byte in the fully received
- * Ethernet frame
- */
-
s32 qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte);
#endif /* _QCA_FRAMING_H */
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index 1822f2ad8f0d..ff3b89e9028e 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file contains debugging routines for use in the QCA7K driver.
@@ -255,7 +242,7 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
struct qcaspi *qca = netdev_priv(dev);
ring->rx_max_pending = QCASPI_RX_MAX_FRAMES;
- ring->tx_max_pending = TX_RING_MAX_LEN;
+ ring->tx_max_pending = QCASPI_TX_RING_MAX_LEN;
ring->rx_pending = QCASPI_RX_MAX_FRAMES;
ring->tx_pending = qca->txr.count;
}
@@ -275,8 +262,8 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring,
if (qca->spi_thread)
kthread_park(qca->spi_thread);
- qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN);
- qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN);
+ qca->txr.count = max_t(u32, ring->tx_pending, QCASPI_TX_RING_MIN_LEN);
+ qca->txr.count = min_t(u16, qca->txr.count, QCASPI_TX_RING_MAX_LEN);
if (qca->spi_thread)
kthread_unpark(qca->spi_thread);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.h b/drivers/net/ethernet/qualcomm/qca_debug.h
index 46a785844421..0d98cef3abc4 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.h
+++ b/drivers/net/ethernet/qualcomm/qca_debug.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This file contains debugging routines for use in the QCA7K driver.
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5f3c11fb3fa2..5799ecc88a87 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This module implements the Qualcomm Atheros SPI protocol for
@@ -359,7 +346,7 @@ qcaspi_receive(struct qcaspi *qca)
/* Read the packet size. */
qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available);
- netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
+ netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %04x\n",
available);
if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
@@ -476,7 +463,7 @@ qcaspi_flush_tx_ring(struct qcaspi *qca)
* has been replaced by netif_tx_lock_bh() and so on.
*/
netif_tx_lock_bh(qca->net_dev);
- for (i = 0; i < TX_RING_MAX_LEN; i++) {
+ for (i = 0; i < QCASPI_TX_RING_MAX_LEN; i++) {
if (qca->txr.skb[i]) {
dev_kfree_skb(qca->txr.skb[i]);
qca->txr.skb[i] = NULL;
@@ -687,7 +674,7 @@ static int
qcaspi_netdev_open(struct net_device *dev)
{
struct qcaspi *qca = netdev_priv(dev);
- int ret = 0;
+ struct task_struct *thread;
if (!qca)
return -EINVAL;
@@ -697,23 +684,18 @@ qcaspi_netdev_open(struct net_device *dev)
qca->sync = QCASPI_SYNC_UNKNOWN;
qcafrm_fsm_init_spi(&qca->frm_handle);
- qca->spi_thread = kthread_run((void *)qcaspi_spi_thread,
- qca, "%s", dev->name);
+ thread = kthread_run((void *)qcaspi_spi_thread,
+ qca, "%s", dev->name);
- if (IS_ERR(qca->spi_thread)) {
+ if (IS_ERR(thread)) {
netdev_err(dev, "%s: unable to start kernel thread.\n",
QCASPI_DRV_NAME);
- return PTR_ERR(qca->spi_thread);
+ return PTR_ERR(thread);
}
- ret = request_irq(qca->spi_dev->irq, qcaspi_intr_handler, 0,
- dev->name, qca);
- if (ret) {
- netdev_err(dev, "%s: unable to get IRQ %d (irqval=%d).\n",
- QCASPI_DRV_NAME, qca->spi_dev->irq, ret);
- kthread_stop(qca->spi_thread);
- return ret;
- }
+ qca->spi_thread = thread;
+
+ enable_irq(qca->spi_dev->irq);
/* SPI thread takes care of TX queue */
@@ -728,10 +710,12 @@ qcaspi_netdev_close(struct net_device *dev)
netif_stop_queue(dev);
qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify);
- free_irq(qca->spi_dev->irq, qca);
+ disable_irq(qca->spi_dev->irq);
- kthread_stop(qca->spi_thread);
- qca->spi_thread = NULL;
+ if (qca->spi_thread) {
+ kthread_stop(qca->spi_thread);
+ qca->spi_thread = NULL;
+ }
qcaspi_flush_tx_ring(qca);
return 0;
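
qcaspi_netdev_close() now only masks the device and disables the still-registered interrupt line; disable_irq() also waits for any handler in flight. The NULL check around kthread_stop() makes close safe even when a previous open failed before qca->spi_thread was published.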
@@ -831,8 +815,8 @@ qcaspi_netdev_init(struct net_device *dev)
qca->clkspeed = qcaspi_clkspeed;
qca->burst_len = qcaspi_burst_len;
qca->spi_thread = NULL;
- qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
- QCAFRM_FOOTER_LEN + 4) * 4;
+ qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
+ QCAFRM_FOOTER_LEN + QCASPI_HW_PKT_LEN) * QCASPI_RX_MAX_FRAMES;
memset(&qca->stats, 0, sizeof(struct qcaspi_stats));
@@ -881,6 +865,8 @@ qcaspi_netdev_setup(struct net_device *dev)
qcaspi_set_ethtool_ops(dev);
dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->needed_tailroom = ALIGN(QCAFRM_FOOTER_LEN + QCAFRM_MIN_LEN, 4);
+ dev->needed_headroom = ALIGN(QCAFRM_HEADER_LEN, 4);
dev->tx_queue_len = 100;
/* MTU range: 46 - 1500 */
@@ -891,7 +877,7 @@ qcaspi_netdev_setup(struct net_device *dev)
memset(qca, 0, sizeof(struct qcaspi));
memset(&qca->txr, 0, sizeof(qca->txr));
- qca->txr.count = TX_RING_MAX_LEN;
+ qca->txr.count = QCASPI_TX_RING_MAX_LEN;
}
static const struct of_device_id qca_spi_of_match[] = {
@@ -984,6 +970,15 @@ qca_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, qcaspi_devs);
+ ret = devm_request_irq(&spi->dev, spi->irq, qcaspi_intr_handler,
+ IRQF_NO_AUTOEN, qca->net_dev->name, qca);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to get IRQ %d (irqval=%d).\n",
+ spi->irq, ret);
+ free_netdev(qcaspi_devs);
+ return ret;
+ }
+
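
Requesting the interrupt once at probe time with IRQF_NO_AUTOEN is what lets the open/close paths above shrink to enable_irq()/disable_irq(): the line starts disabled, stays requested for the device's lifetime via devm, and open can no longer fail halfway with a running thread but no IRQ. A condensed sketch of the pattern, with hypothetical names:

    /* probe: request disabled; devm releases it on driver detach */
    ret = devm_request_irq(dev, irq, handler, IRQF_NO_AUTOEN, name, priv);
    if (ret)
            return ret;

    /* ndo_open */
    enable_irq(irq);

    /* ndo_close: also waits for a running handler to finish */
    disable_irq(irq);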
ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev);
if (ret) {
eth_hw_addr_random(qca->net_dev);
@@ -998,8 +993,8 @@ qca_spi_probe(struct spi_device *spi)
qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
if (signature != QCASPI_GOOD_SIGNATURE) {
- dev_err(&spi->dev, "Invalid signature (0x%04X)\n",
- signature);
+ dev_err(&spi->dev, "Invalid signature (expected 0x%04x, read 0x%04x)\n",
+ QCASPI_GOOD_SIGNATURE, signature);
free_netdev(qcaspi_devs);
return -EFAULT;
}
@@ -1048,6 +1043,6 @@ module_spi_driver(qca_spi_driver);
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 SPI Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCASPI_DRV_VERSION);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 3067356106f0..d59cb2352cee 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2014, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Qualcomm Atheros SPI register definition.
@@ -39,8 +26,9 @@
#define QCASPI_GOOD_SIGNATURE 0xAA55
-#define TX_RING_MAX_LEN 10
-#define TX_RING_MIN_LEN 2
+#define QCASPI_TX_RING_MAX_LEN 10
+#define QCASPI_TX_RING_MIN_LEN 2
+#define QCASPI_RX_MAX_FRAMES 4
/* sync related constants */
#define QCASPI_SYNC_UNKNOWN 0
@@ -54,7 +42,7 @@
#define QCASPI_EVENT_CPUON 1
struct tx_ring {
- struct sk_buff *skb[TX_RING_MAX_LEN];
+ struct sk_buff *skb[QCASPI_TX_RING_MAX_LEN];
u16 head;
u16 tail;
u16 size;
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 223321897b96..321fd8d00730 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
* Copyright (c) 2017, I2SE GmbH
- *
- * Permission to use, copy, modify, and/or distribute this software
- * for any purpose with or without fee is hereby granted, provided
- * that the above copyright notice and this permission notice appear
- * in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* This module implements the Qualcomm Atheros UART protocol for
@@ -410,6 +397,6 @@ module_serdev_device_driver(qca_uart_driver);
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
-MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCAUART_DRV_VERSION);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 5b69b9268c75..f3bea196a8f9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -520,4 +520,5 @@ static void __exit rmnet_exit(void)
module_init(rmnet_init)
module_exit(rmnet_exit)
MODULE_ALIAS_RTNL_LINK("rmnet");
+MODULE_DESCRIPTION("Qualcomm RmNet MAP driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 046b5f7d8e7c..9d2a9562c96f 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -98,7 +98,7 @@ static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
- return priv->real_dev->ifindex;
+ return READ_ONCE(priv->real_dev->ifindex);
}
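
READ_ONCE() annotates the lockless read of the lower device's ifindex: ndo_get_iflink() can run without RTNL, so the load must not be torn or re-read by the compiler. Assuming the writer side is WRITE_ONCE()-annotated, as in the core annotation series this change belongs to, the pairing looks like:

    /* writer (core), under RTNL */
    WRITE_ONCE(dev->ifindex, new_index);

    /* reader (here), possibly lockless */
    return READ_ONCE(priv->real_dev->ifindex);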
static int rmnet_vnd_init(struct net_device *dev)
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 81567fcf3957..4c043052198d 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -68,6 +68,7 @@ enum mac_version {
/* support for RTL_GIGA_MAC_VER_60 has been removed */
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
+ RTL_GIGA_MAC_VER_65,
RTL_GIGA_MAC_NONE
};
@@ -84,3 +85,6 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx,
int rtl8168_get_led_mode(struct rtl8169_private *tp);
int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val);
void rtl8168_init_leds(struct net_device *ndev);
+int rtl8125_get_led_mode(struct rtl8169_private *tp, int index);
+int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode);
+void rtl8125_init_leds(struct net_device *ndev);
diff --git a/drivers/net/ethernet/realtek/r8169_leds.c b/drivers/net/ethernet/realtek/r8169_leds.c
index 007d077edcad..7c5dc9d0df85 100644
--- a/drivers/net/ethernet/realtek/r8169_leds.c
+++ b/drivers/net/ethernet/realtek/r8169_leds.c
@@ -18,12 +18,14 @@
#define RTL8168_LED_CTRL_LINK_100 BIT(1)
#define RTL8168_LED_CTRL_LINK_10 BIT(0)
-#define RTL8168_NUM_LEDS 3
+#define RTL8125_LED_CTRL_ACT BIT(9)
+#define RTL8125_LED_CTRL_LINK_2500 BIT(5)
+#define RTL8125_LED_CTRL_LINK_1000 BIT(3)
+#define RTL8125_LED_CTRL_LINK_100 BIT(1)
+#define RTL8125_LED_CTRL_LINK_10 BIT(0)
-#define RTL8168_SUPPORTED_MODES \
- (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK_100) | \
- BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_RX) | \
- BIT(TRIGGER_NETDEV_TX))
+#define RTL8168_NUM_LEDS 3
+#define RTL8125_NUM_LEDS 4
struct r8169_led_classdev {
struct led_classdev led;
@@ -33,28 +35,35 @@ struct r8169_led_classdev {
#define lcdev_to_r8169_ldev(lcdev) container_of(lcdev, struct r8169_led_classdev, led)
+static bool r8169_trigger_mode_is_valid(unsigned long flags)
+{
+ bool rx, tx;
+
+ if (flags & BIT(TRIGGER_NETDEV_HALF_DUPLEX))
+ return false;
+ if (flags & BIT(TRIGGER_NETDEV_FULL_DUPLEX))
+ return false;
+
+ rx = flags & BIT(TRIGGER_NETDEV_RX);
+ tx = flags & BIT(TRIGGER_NETDEV_TX);
+
+ return rx == tx;
+}
+
static int rtl8168_led_hw_control_is_supported(struct led_classdev *led_cdev,
unsigned long flags)
{
struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
struct rtl8169_private *tp = netdev_priv(ldev->ndev);
int shift = ldev->index * 4;
- bool rx, tx;
-
- if (flags & ~RTL8168_SUPPORTED_MODES)
- goto nosupp;
- rx = flags & BIT(TRIGGER_NETDEV_RX);
- tx = flags & BIT(TRIGGER_NETDEV_TX);
- if (rx != tx)
- goto nosupp;
+ if (!r8169_trigger_mode_is_valid(flags)) {
+ /* Switch LED off to indicate that mode isn't supported */
+ rtl8168_led_mod_ctrl(tp, 0x000f << shift, 0);
+ return -EOPNOTSUPP;
+ }
return 0;
-
-nosupp:
- /* Switch LED off to indicate that mode isn't supported */
- rtl8168_led_mod_ctrl(tp, 0x000f << shift, 0);
- return -EOPNOTSUPP;
}
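
r8169_trigger_mode_is_valid() factors out the checks both LED families share: the duplex triggers are never supported, and since the hardware has a single activity control per LED, TRIGGER_NETDEV_RX and TRIGGER_NETDEV_TX must be both set or both clear (rx == tx). Illustrative combinations:

    BIT(TRIGGER_NETDEV_LINK_1000)                    /* valid             */
    BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX)  /* valid             */
    BIT(TRIGGER_NETDEV_RX)                           /* invalid, rx != tx */
    BIT(TRIGGER_NETDEV_HALF_DUPLEX)                  /* invalid           */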
static int rtl8168_led_hw_control_set(struct led_classdev *led_cdev,
@@ -129,7 +138,6 @@ static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev,
r8169_get_led_name(tp, index, led_name, LED_MAX_NAME_SIZE);
led_cdev->name = led_name;
- led_cdev->default_trigger = "netdev";
led_cdev->hw_control_trigger = "netdev";
led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
led_cdev->hw_control_is_supported = rtl8168_led_hw_control_is_supported;
@@ -155,3 +163,102 @@ void rtl8168_init_leds(struct net_device *ndev)
for (i = 0; i < RTL8168_NUM_LEDS; i++)
rtl8168_setup_ldev(leds + i, ndev, i);
}
+
+static int rtl8125_led_hw_control_is_supported(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+
+ if (!r8169_trigger_mode_is_valid(flags)) {
+ /* Switch LED off to indicate that mode isn't supported */
+ rtl8125_set_led_mode(tp, ldev->index, 0);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int rtl8125_led_hw_control_set(struct led_classdev *led_cdev,
+ unsigned long flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ u16 mode = 0;
+
+ if (flags & BIT(TRIGGER_NETDEV_LINK_10))
+ mode |= RTL8125_LED_CTRL_LINK_10;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_100))
+ mode |= RTL8125_LED_CTRL_LINK_100;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_1000))
+ mode |= RTL8125_LED_CTRL_LINK_1000;
+ if (flags & BIT(TRIGGER_NETDEV_LINK_2500))
+ mode |= RTL8125_LED_CTRL_LINK_2500;
+ if (flags & (BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX)))
+ mode |= RTL8125_LED_CTRL_ACT;
+
+ return rtl8125_set_led_mode(tp, ldev->index, mode);
+}
+
+static int rtl8125_led_hw_control_get(struct led_classdev *led_cdev,
+ unsigned long *flags)
+{
+ struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev);
+ struct rtl8169_private *tp = netdev_priv(ldev->ndev);
+ int mode;
+
+ mode = rtl8125_get_led_mode(tp, ldev->index);
+ if (mode < 0)
+ return mode;
+
+ if (mode & RTL8125_LED_CTRL_LINK_10)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_10);
+ if (mode & RTL8125_LED_CTRL_LINK_100)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_100);
+ if (mode & RTL8125_LED_CTRL_LINK_1000)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_1000);
+ if (mode & RTL8125_LED_CTRL_LINK_2500)
+ *flags |= BIT(TRIGGER_NETDEV_LINK_2500);
+ if (mode & RTL8125_LED_CTRL_ACT)
+ *flags |= BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX);
+
+ return 0;
+}
+
+static void rtl8125_setup_led_ldev(struct r8169_led_classdev *ldev,
+ struct net_device *ndev, int index)
+{
+ struct rtl8169_private *tp = netdev_priv(ndev);
+ struct led_classdev *led_cdev = &ldev->led;
+ char led_name[LED_MAX_NAME_SIZE];
+
+ ldev->ndev = ndev;
+ ldev->index = index;
+
+ r8169_get_led_name(tp, index, led_name, LED_MAX_NAME_SIZE);
+ led_cdev->name = led_name;
+ led_cdev->hw_control_trigger = "netdev";
+ led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+ led_cdev->hw_control_is_supported = rtl8125_led_hw_control_is_supported;
+ led_cdev->hw_control_set = rtl8125_led_hw_control_set;
+ led_cdev->hw_control_get = rtl8125_led_hw_control_get;
+ led_cdev->hw_control_get_device = r8169_led_hw_control_get_device;
+
+ /* ignore errors */
+ devm_led_classdev_register(&ndev->dev, led_cdev);
+}
+
+void rtl8125_init_leds(struct net_device *ndev)
+{
+ /* bind resource mgmt to netdev */
+ struct device *dev = &ndev->dev;
+ struct r8169_led_classdev *leds;
+ int i;
+
+ leds = devm_kcalloc(dev, RTL8125_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+ if (!leds)
+ return;
+
+ for (i = 0; i < RTL8125_NUM_LEDS; i++)
+ rtl8125_setup_led_ldev(leds + i, ndev, i);
+}
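
The RTL8125 callbacks map the generic netdev trigger flags onto the per-LED LEDSEL bits defined above, with one activity bit covering both directions. Worked example:

    flags = BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK_2500) |
            BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX);
    /* mode = LINK_1000 (0x008) | LINK_2500 (0x020) | ACT (0x200) = 0x228 */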
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index dd73df6b17b0..5c879a5c86d7 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -55,6 +55,7 @@
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw"
+#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw"
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -136,6 +137,7 @@ static const struct {
[RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
[RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2},
+ [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2},
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -158,6 +160,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
{ PCI_VDEVICE(REALTEK, 0x8125) },
+ { PCI_VDEVICE(REALTEK, 0x8126) },
{ PCI_VDEVICE(REALTEK, 0x3000) },
{}
};
@@ -327,13 +330,23 @@ enum rtl8168_registers {
};
enum rtl8125_registers {
+ LEDSEL0 = 0x18,
+ INT_CFG0_8125 = 0x34,
+#define INT_CFG0_ENABLE_8125 BIT(0)
+#define INT_CFG0_CLKREQEN BIT(3)
IntrMask_8125 = 0x38,
IntrStatus_8125 = 0x3c,
+ INT_CFG1_8125 = 0x7a,
+ LEDSEL2 = 0x84,
+ LEDSEL1 = 0x86,
TxPoll_8125 = 0x90,
+ LEDSEL3 = 0x96,
MAC0_BKP = 0x19e0,
EEE_TXIDLE_TIMER_8125 = 0x6048,
};
+#define LEDSEL_MASK_8125 0x23f
+
#define RX_VLAN_INNER_8125 BIT(22)
#define RX_VLAN_OUTER_8125 BIT(23)
#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125)
@@ -606,6 +619,7 @@ struct rtl8169_private {
struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
+ u16 tx_lpi_timer;
u32 irq_mask;
int irq;
struct clk *clk;
@@ -629,7 +643,6 @@ struct rtl8169_private {
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
u32 saved_wolopts;
- int eee_adv;
const char *fw_name;
struct rtl_fw *rtl_fw;
@@ -663,6 +676,7 @@ MODULE_FIRMWARE(FIRMWARE_8168FP_3);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
MODULE_FIRMWARE(FIRMWARE_8125A_3);
MODULE_FIRMWARE(FIRMWARE_8125B_2);
+MODULE_FIRMWARE(FIRMWARE_8126A_2);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
@@ -824,6 +838,51 @@ int rtl8168_get_led_mode(struct rtl8169_private *tp)
return ret;
}
+static int rtl8125_get_led_reg(int index)
+{
+ static const int led_regs[] = { LEDSEL0, LEDSEL1, LEDSEL2, LEDSEL3 };
+
+ return led_regs[index];
+}
+
+int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode)
+{
+ int reg = rtl8125_get_led_reg(index);
+ struct device *dev = tp_to_dev(tp);
+ int ret;
+ u16 val;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tp->led_lock);
+ val = RTL_R16(tp, reg) & ~LEDSEL_MASK_8125;
+ RTL_W16(tp, reg, val | mode);
+ mutex_unlock(&tp->led_lock);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+int rtl8125_get_led_mode(struct rtl8169_private *tp, int index)
+{
+ int reg = rtl8125_get_led_reg(index);
+ struct device *dev = tp_to_dev(tp);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = RTL_R16(tp, reg);
+
+ pm_runtime_put_sync(dev);
+
+ return ret;
+}
+
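
Both accessors bracket the LEDSEL MMIO access with pm_runtime_resume_and_get()/pm_runtime_put_sync(), so a LED update arriving while the NIC is runtime-suspended resumes it first instead of touching a powered-down register file; the bracket may sleep, which is fine since the LED trigger callbacks run in process context. Generic shape:

    ret = pm_runtime_resume_and_get(dev);
    if (ret < 0)
            return ret;     /* device could not be resumed */
    /* ... register access ... */
    pm_runtime_put_sync(dev);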
void r8169_get_led_name(struct rtl8169_private *tp, int idx,
char *buf, int buf_len)
{
@@ -1140,7 +1199,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1155,7 +1214,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1341,7 +1400,7 @@ static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
if (enable)
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
else
@@ -1508,7 +1567,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
break;
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65:
if (wolopts)
rtl_mod_config2(tp, 0, PME_SIGNAL);
else
@@ -1974,30 +2033,64 @@ static int rtl_set_coalesce(struct net_device *dev,
return 0;
}
-static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
+static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
+{
+ unsigned int timer_val = READ_ONCE(tp->dev->mtu) + ETH_HLEN + 0x20;
+
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_46:
+ case RTL_GIGA_MAC_VER_48:
+ tp->tx_lpi_timer = timer_val;
+ r8168_mac_ocp_write(tp, 0xe048, timer_val);
+ break;
+ case RTL_GIGA_MAC_VER_61:
+ case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_65:
+ tp->tx_lpi_timer = timer_val;
+ RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
+ break;
+ default:
+ break;
+ }
+}
+
+static unsigned int r8169_get_tx_lpi_timer_us(struct rtl8169_private *tp)
+{
+ unsigned int speed = tp->phydev->speed;
+ unsigned int timer = tp->tx_lpi_timer;
+
+ if (!timer || speed == SPEED_UNKNOWN)
+ return 0;
+
+ /* tx_lpi_timer value is in bytes */
+ return DIV_ROUND_CLOSEST(timer * BITS_PER_BYTE, speed);
+}
+
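
tx_lpi_timer is programmed in bytes, so the conversion to the microseconds reported via ethtool divides the bit count by the link speed in Mbit/s (bits / (Mbit/s) = µs). Worked example with the default MTU: the timer above is 1500 + 14 (ETH_HLEN) + 0x20 = 1546 bytes, so at 1000 Mbit/s

    DIV_ROUND_CLOSEST(1546 * 8, 1000) = DIV_ROUND_CLOSEST(12368, 1000) = 12 µs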
+static int rtl8169_get_eee(struct net_device *dev, struct ethtool_keee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ int ret;
if (!rtl_supports_eee(tp))
return -EOPNOTSUPP;
- return phy_ethtool_get_eee(tp->phydev, data);
+ ret = phy_ethtool_get_eee(tp->phydev, data);
+ if (ret)
+ return ret;
+
+ data->tx_lpi_timer = r8169_get_tx_lpi_timer_us(tp);
+
+ return 0;
}
-static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
+static int rtl8169_set_eee(struct net_device *dev, struct ethtool_keee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
if (!rtl_supports_eee(tp))
return -EOPNOTSUPP;
- ret = phy_ethtool_set_eee(tp->phydev, data);
-
- if (!ret)
- tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN,
- MDIO_AN_EEE_ADV);
- return ret;
+ return phy_ethtool_set_eee(tp->phydev, data);
}
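
With the switch to struct ethtool_keee, the linkmode-based successor of the legacy struct ethtool_eee, the driver stops caching the EEE advertisement itself: eee_adv and rtl_enable_eee() go away, phylib owns the EEE state via phy_support_eee(), and the get path only augments phylib's answer with the tx_lpi_timer value computed above.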
static void rtl8169_get_ringparam(struct net_device *dev,
@@ -2062,21 +2155,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.set_pauseparam = rtl8169_set_pauseparam,
};
-static void rtl_enable_eee(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int adv;
-
- /* respect EEE advertisement the user may have set */
- if (tp->eee_adv >= 0)
- adv = tp->eee_adv;
- else
- adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
-
- if (adv >= 0)
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
-}
-
static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{
/*
@@ -2095,6 +2173,9 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
u16 val;
enum mac_version ver;
} mac_info[] = {
+ /* 8126A family. */
+ { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 },
+
/* 8125B family. */
{ 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
@@ -2250,14 +2331,8 @@ static void rtl8125a_config_eee_mac(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
}
-static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp)
-{
- RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20);
-}
-
static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
{
- rtl8125_set_eee_txidle_timer(tp);
r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
}
@@ -2313,9 +2388,6 @@ static void rtl8169_init_phy(struct rtl8169_private *tp)
/* We may have called phy_speed_down before */
phy_speed_up(tp->phydev);
- if (rtl_supports_eee(tp))
- rtl_enable_eee(tp);
-
genphy_soft_reset(tp->phydev);
}
@@ -2368,6 +2440,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_65:
RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
RX_PAUSE_SLOT_ON);
break;
@@ -2554,7 +2627,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
- case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_65:
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
@@ -2797,7 +2870,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
rtl_eri_set_bits(tp, 0xd4, 0x0c00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
break;
default:
@@ -2811,7 +2884,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
break;
default:
@@ -2821,6 +2894,8 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp)
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
+ u8 val8;
+
if (tp->mac_version < RTL_GIGA_MAC_VER_32)
return;
@@ -2834,11 +2909,19 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
return;
rtl_mod_config5(tp, 0, ASPM_en);
- rtl_mod_config2(tp, 0, ClkReqEn);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_65:
+ val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
+ RTL_W8(tp, INT_CFG0_8125, val8);
+ break;
+ default:
+ rtl_mod_config2(tp, 0, ClkReqEn);
+ break;
+ }
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
/* reset ephy tx/rx disable timer */
r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
/* chip can trigger L1.2 */
@@ -2850,14 +2933,22 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
} else {
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
break;
default:
break;
}
- rtl_mod_config2(tp, ClkReqEn, 0);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_65:
+ val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
+ RTL_W8(tp, INT_CFG0_8125, val8);
+ break;
+ default:
+ rtl_mod_config2(tp, ClkReqEn, 0);
+ break;
+ }
rtl_mod_config5(tp, ASPM_en, 0);
}
}
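
On RTL8126A (RTL_GIGA_MAC_VER_65) the CLKREQ enable moved from the Config2 ClkReqEn bit into INT_CFG0, hence the chip switch in both the enable and the disable leg; the ASPM_en handling in Config5 is unchanged across chips.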
@@ -3570,10 +3661,15 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
/* disable new tx descriptor format */
r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
- if (tp->mac_version == RTL_GIGA_MAC_VER_63)
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
else
- r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0300);
if (tp->mac_version == RTL_GIGA_MAC_VER_63)
r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000);
@@ -3586,6 +3682,10 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
+ if (tp->mac_version == RTL_GIGA_MAC_VER_65)
+ r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
+ else
+ r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403);
r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068);
r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f);
@@ -3600,10 +3700,10 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
- if (tp->mac_version == RTL_GIGA_MAC_VER_63)
- rtl8125b_config_eee_mac(tp);
- else
+ if (tp->mac_version == RTL_GIGA_MAC_VER_61)
rtl8125a_config_eee_mac(tp);
+ else
+ rtl8125b_config_eee_mac(tp);
rtl_disable_rxdvgate(tp);
}
@@ -3647,6 +3747,12 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp)
rtl_hw_start_8125_common(tp);
}
+static void rtl_hw_start_8126a(struct rtl8169_private *tp)
+{
+ rtl_set_def_aspm_entry_latency(tp);
+ rtl_hw_start_8125_common(tp);
+}
+
static void rtl_hw_config(struct rtl8169_private *tp)
{
static const rtl_generic_fct hw_configs[] = {
@@ -3689,6 +3795,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
+ [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
};
if (hw_configs[tp->mac_version])
@@ -3699,9 +3806,23 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp)
{
int i;
+ RTL_W8(tp, INT_CFG0_8125, 0x00);
+
/* disable interrupt coalescing */
- for (i = 0xa00; i < 0xb00; i += 4)
- RTL_W32(tp, i, 0);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_61:
+ for (i = 0xa00; i < 0xb00; i += 4)
+ RTL_W32(tp, i, 0);
+ break;
+ case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_65:
+ for (i = 0xa00; i < 0xa80; i += 4)
+ RTL_W32(tp, i, 0);
+ RTL_W16(tp, INT_CFG1_8125, 0x0000);
+ break;
+ default:
+ break;
+ }
rtl_hw_config(tp);
}
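
The interrupt-coalescing register window differs per generation: RTL8125A clears the full 0xa00..0xaff range, while RTL8125B and RTL8126A only implement 0xa00..0xa7f plus the 16-bit INT_CFG1 register. INT_CFG0 is zeroed up front for all of them so the interrupt configuration starts from a known state.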
@@ -3744,6 +3865,8 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, false);
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
+ rtl_set_eee_txidle_timer(tp);
+
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
rtl_hw_start_8169(tp);
else if (rtl_is_8125(tp))
@@ -3777,15 +3900,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
netdev_update_features(dev);
rtl_jumbo_config(tp);
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_61:
- case RTL_GIGA_MAC_VER_63:
- rtl8125_set_eee_txidle_timer(tp);
- break;
- default:
- break;
- }
+ rtl_set_eee_txidle_timer(tp);
return 0;
}
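
Changing the MTU and (re)starting the chip now share rtl_set_eee_txidle_timer(), which also covers the RTL8168-class chips (VER_46/48) through the 0xe048 mac-ocp register instead of being limited to the 8125-only helper it replaces.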
@@ -3929,7 +4044,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65:
rtl_enable_rxdvgate(tp);
fsleep(2000);
break;
@@ -4080,8 +4195,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_61:
- case RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
padto = max_t(unsigned int, padto, ETH_ZLEN);
break;
default:
@@ -5058,7 +5172,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
}
tp->phydev->mac_managed_pm = true;
-
+ if (rtl_supports_eee(tp))
+ phy_support_eee(tp->phydev);
phy_support_asym_pause(tp->phydev);
/* PHY will be woken up in rtl_open() */
@@ -5108,7 +5223,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
- case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
+ case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65:
rtl_hw_init_8125(tp);
break;
default:
@@ -5193,7 +5308,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->dev = dev;
tp->pci_dev = pdev;
tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
- tp->eee_adv = -1;
tp->ocp_base = OCP_STD_PHY_BASE;
raw_spin_lock_init(&tp->cfg9346_usage_lock);
@@ -5201,11 +5315,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
raw_spin_lock_init(&tp->mac_ocp_lock);
mutex_init(&tp->led_lock);
- dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
- struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
/* Get the *optional* external "ether_clk" used on some boards */
tp->clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk");
if (IS_ERR(tp->clk))
@@ -5320,6 +5429,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
+
netdev_sw_irq_coalesce_default_on(dev);
/* configure chip for default features */
@@ -5356,10 +5467,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- if (IS_ENABLED(CONFIG_R8169_LEDS) &&
- tp->mac_version > RTL_GIGA_MAC_VER_06 &&
- tp->mac_version < RTL_GIGA_MAC_VER_61)
- rtl8168_init_leds(dev);
+ if (IS_ENABLED(CONFIG_R8169_LEDS)) {
+ if (rtl_is_8125(tp))
+ rtl8125_init_leds(dev);
+ else if (tp->mac_version > RTL_GIGA_MAC_VER_06)
+ rtl8168_init_leds(dev);
+ }
netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index b50f16786c24..1f74317beb88 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -1102,6 +1102,12 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
rtl8125b_config_eee_phy(phydev);
}
+static void rtl8126a_hw_phy_config(struct rtl8169_private *tp,
+ struct phy_device *phydev)
+{
+ r8169_apply_firmware(tp);
+}
+
void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
enum mac_version ver)
{
@@ -1152,6 +1158,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
+ [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config,
};
if (phy_configs[ver])
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index d6136fe5c206..b03fae7a0f72 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -34,6 +34,7 @@ config RAVB
select MII
select MDIO_BITBANG
select PHYLIB
+ select RESET_CONTROLLER
help
Renesas Ethernet AVB device driver.
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index e0f8276cffed..b48935ec7e28 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -205,7 +205,11 @@ enum ravb_reg {
TLFRCR = 0x0758,
RFCR = 0x0760,
MAFCR = 0x0778,
- CSR0 = 0x0800, /* RZ/G2L only */
+
+ /* TOE registers (RZ/G2L only) */
+ CSR0 = 0x0800,
+ CSR1 = 0x0804,
+ CSR2 = 0x0808,
};
@@ -978,16 +982,39 @@ enum CSR0_BIT {
CSR0_RPE = 0x00000020,
};
+enum CSR1_BIT {
+ CSR1_TIP4 = 0x00000001,
+ CSR1_TTCP4 = 0x00000010,
+ CSR1_TUDP4 = 0x00000020,
+ CSR1_TICMP4 = 0x00000040,
+ CSR1_TTCP6 = 0x00100000,
+ CSR1_TUDP6 = 0x00200000,
+ CSR1_TICMP6 = 0x00400000,
+ CSR1_THOP = 0x01000000,
+ CSR1_TROUT = 0x02000000,
+ CSR1_TAHD = 0x04000000,
+ CSR1_TDHD = 0x08000000,
+};
+
+enum CSR2_BIT {
+ CSR2_RIP4 = 0x00000001,
+ CSR2_RTCP4 = 0x00000010,
+ CSR2_RUDP4 = 0x00000020,
+ CSR2_RICMP4 = 0x00000040,
+ CSR2_RTCP6 = 0x00100000,
+ CSR2_RUDP6 = 0x00200000,
+ CSR2_RICMP6 = 0x00400000,
+ CSR2_RHOP = 0x01000000,
+ CSR2_RROUT = 0x02000000,
+ CSR2_RAHD = 0x04000000,
+ CSR2_RDHD = 0x08000000,
+};
+
#define DBAT_ENTRY_NUM 22
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
#define NUM_TX_QUEUE 2
-#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
-
-#define GBETH_RX_BUFF_MAX 8192
-#define GBETH_RX_DESC_DATA_SIZE 4080
-
struct ravb_tstamp_skb {
struct list_head list;
struct sk_buff *skb;
@@ -1012,9 +1039,6 @@ struct ravb_ptp {
};
struct ravb_hw_info {
- void (*rx_ring_free)(struct net_device *ndev, int q);
- void (*rx_ring_format)(struct net_device *ndev, int q);
- void *(*alloc_rx_desc)(struct net_device *ndev, int q);
bool (*receive)(struct net_device *ndev, int *quota, int q);
void (*set_rate)(struct net_device *ndev);
int (*set_feature)(struct net_device *ndev, netdev_features_t features);
@@ -1025,9 +1049,10 @@ struct ravb_hw_info {
netdev_features_t net_hw_features;
netdev_features_t net_features;
int stats_len;
- size_t max_rx_len;
u32 tccr_mask;
- u32 rx_max_buf_size;
+ u32 rx_max_frame_size;
+ u32 rx_max_desc_use;
+ u32 rx_desc_size;
unsigned aligned_tx: 1;
/* hardware features */
@@ -1060,8 +1085,11 @@ struct ravb_private {
struct ravb_desc *desc_bat;
dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
- struct ravb_rx_desc *gbeth_rx_ring;
- struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
+ union {
+ struct ravb_rx_desc *desc;
+ struct ravb_ex_rx_desc *ex_desc;
+ void *raw;
+ } rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
struct sk_buff *rx_1st_skb;
@@ -1089,10 +1117,6 @@ struct ravb_private {
int msg_enable;
int speed;
int emac_irq;
- int erra_irq;
- int mgmta_irq;
- int rx_irqs[NUM_RX_QUEUE];
- int tx_irqs[NUM_TX_QUEUE];
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
@@ -1106,6 +1130,8 @@ struct ravb_private {
const struct ravb_hw_info *info;
struct reset_control *rstc;
+
+ u32 gti_tiv;
};
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 0e3731f50fc2..d1be030c8848 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/reset.h>
#include <linux/math64.h>
+#include <net/ip.h>
#include "ravb.h"
@@ -38,16 +39,6 @@
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
-static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
- "ch0", /* RAVB_BE */
- "ch1", /* RAVB_NC */
-};
-
-static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
- "ch18", /* RAVB_BE */
- "ch19", /* RAVB_NC */
-};
-
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
u32 set)
{
@@ -96,13 +87,13 @@ static void ravb_set_rate_gbeth(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
switch (priv->speed) {
- case 10: /* 10BASE */
+ case 10: /* 10BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
break;
- case 100: /* 100BASE */
+ case 100: /* 100BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
break;
- case 1000: /* 1000BASE */
+ case 1000: /* 1000BASE */
ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
break;
}
@@ -122,12 +113,23 @@ static void ravb_set_rate_rcar(struct net_device *ndev)
}
}
-static void ravb_set_buffer_align(struct sk_buff *skb)
+static struct sk_buff *
+ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info,
+ gfp_t gfp_mask)
{
- u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
+ struct sk_buff *skb;
+ u32 reserve;
+
+ skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1,
+ gfp_mask);
+ if (!skb)
+ return NULL;
+ reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
if (reserve)
skb_reserve(skb, RAVB_ALIGN - reserve);
+
+ return skb;
}
/* Get MAC address from the MAC address registers
@@ -200,6 +202,13 @@ static const struct mdiobb_ops bb_ops = {
.get_mdio_data = ravb_get_mdio_data,
};
+static struct ravb_rx_desc *
+ravb_rx_get_desc(struct ravb_private *priv, unsigned int q,
+ unsigned int i)
+{
+ return priv->rx_ring[q].raw + priv->info->rx_desc_size * i;
+}
+
/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
{
@@ -244,67 +253,40 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
return free_num;
}
-static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
+static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
unsigned int i;
- if (!priv->gbeth_rx_ring)
+ if (!priv->rx_ring[q].raw)
return;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+ struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i);
if (!dma_mapping_error(ndev->dev.parent,
le32_to_cpu(desc->dptr)))
dma_unmap_single(ndev->dev.parent,
le32_to_cpu(desc->dptr),
- GBETH_RX_BUFF_MAX,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
}
- ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+ ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
priv->rx_desc_dma[q]);
- priv->gbeth_rx_ring = NULL;
-}
-
-static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned int ring_size;
- unsigned int i;
-
- if (!priv->rx_ring[q])
- return;
-
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
-
- if (!dma_mapping_error(ndev->dev.parent,
- le32_to_cpu(desc->dptr)))
- dma_unmap_single(ndev->dev.parent,
- le32_to_cpu(desc->dptr),
- RX_BUF_SZ,
- DMA_FROM_DEVICE);
- }
- ring_size = sizeof(struct ravb_ex_rx_desc) *
- (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
- priv->rx_desc_dma[q]);
- priv->rx_ring[q] = NULL;
+ priv->rx_ring[q].raw = NULL;
}
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
unsigned int ring_size;
unsigned int i;
- info->rx_ring_free(ndev, q);
+ ravb_rx_ring_free(ndev, q);
if (priv->tx_ring[q]) {
ravb_tx_free(ndev, q, false);
@@ -335,7 +317,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
+static void ravb_rx_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
struct ravb_rx_desc *rx_desc;
@@ -343,45 +325,15 @@ static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
dma_addr_t dma_addr;
unsigned int i;
- rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- memset(priv->gbeth_rx_ring, 0, rx_ring_size);
- /* Build RX ring buffer */
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- /* RX descriptor */
- rx_desc = &priv->gbeth_rx_ring[i];
- rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
- dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- GBETH_RX_BUFF_MAX,
- DMA_FROM_DEVICE);
- /* We just set the data size to 0 for a failed mapping which
- * should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- rx_desc->ds_cc = cpu_to_le16(0);
- rx_desc->dptr = cpu_to_le32(dma_addr);
- rx_desc->die_dt = DT_FEMPTY;
- }
- rx_desc = &priv->gbeth_rx_ring[i];
- rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
- rx_desc->die_dt = DT_LINKFIX; /* type */
-}
-
-static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_ex_rx_desc *rx_desc;
- unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
- dma_addr_t dma_addr;
- unsigned int i;
-
- memset(priv->rx_ring[q], 0, rx_ring_size);
+ rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
+ memset(priv->rx_ring[q].raw, 0, rx_ring_size);
/* Build RX ring buffer */
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
- rx_desc = &priv->rx_ring[q][i];
- rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
+ rx_desc = ravb_rx_get_desc(priv, q, i);
+ rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- RX_BUF_SZ,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
@@ -391,7 +343,7 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
rx_desc->dptr = cpu_to_le32(dma_addr);
rx_desc->die_dt = DT_FEMPTY;
}
- rx_desc = &priv->rx_ring[q][i];
+ rx_desc = ravb_rx_get_desc(priv, q, i);
rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
rx_desc->die_dt = DT_LINKFIX; /* type */
}
@@ -400,7 +352,6 @@ static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
@@ -413,7 +364,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
priv->dirty_rx[q] = 0;
priv->dirty_tx[q] = 0;
- info->rx_ring_format(ndev, q);
+ ravb_rx_ring_format(ndev, q);
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
@@ -439,30 +390,18 @@ static void ravb_ring_format(struct net_device *ndev, int q)
desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
-static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
+static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
- ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+ ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
- priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- return priv->gbeth_rx_ring;
-}
+ priv->rx_ring[q].raw = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
-static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned int ring_size;
-
- ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
-
- priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
- &priv->rx_desc_dma[q],
- GFP_KERNEL);
- return priv->rx_ring[q];
+ return priv->rx_ring[q].raw;
}
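
Moving the descriptor geometry (rx_desc_size, rx_max_frame_size, rx_max_desc_use) into ravb_hw_info lets one alloc/free/format path serve both ring layouts; the rx_ring union together with byte-stride indexing is what makes the shared code layout-agnostic. The stride math from ravb_rx_get_desc(), spelled out:

    /* entry i starts i * rx_desc_size bytes into the raw ring, whether the
     * entries are struct ravb_rx_desc (GbEth) or the larger
     * struct ravb_ex_rx_desc (R-Car) */
    desc = priv->rx_ring[q].raw + priv->info->rx_desc_size * i;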
/* Init skb and descriptor buffer for Ethernet AVB */
@@ -484,10 +423,9 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL);
+ skb = ravb_alloc_skb(ndev, info, GFP_KERNEL);
if (!skb)
goto error;
- ravb_set_buffer_align(skb);
priv->rx_skb[q][i] = skb;
}
@@ -500,7 +438,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
}
/* Allocate all RX descriptors. */
- if (!info->alloc_rx_desc(ndev, q))
+ if (!ravb_alloc_rx_desc(ndev, q))
goto error;
priv->dirty_rx[q] = 0;
@@ -522,6 +460,36 @@ error:
return -ENOMEM;
}
+static void ravb_csum_init_gbeth(struct net_device *ndev)
+{
+ bool tx_enable = ndev->features & NETIF_F_HW_CSUM;
+ bool rx_enable = ndev->features & NETIF_F_RXCSUM;
+
+ if (!(tx_enable || rx_enable))
+ goto done;
+
+ ravb_write(ndev, 0, CSR0);
+ if (ravb_wait(ndev, CSR0, CSR0_TPE | CSR0_RPE, 0)) {
+ netdev_err(ndev, "Timeout enabling hardware checksum\n");
+
+ if (tx_enable)
+ ndev->features &= ~NETIF_F_HW_CSUM;
+
+ if (rx_enable)
+ ndev->features &= ~NETIF_F_RXCSUM;
+ } else {
+ if (tx_enable)
+ ravb_write(ndev, CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4, CSR1);
+
+ if (rx_enable)
+ ravb_write(ndev, CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4,
+ CSR2);
+ }
+
+done:
+ ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+}
+
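
ravb_csum_init_gbeth() first parks the checksum engine (CSR0 = 0) and waits for the TX and RX pipelines to report idle before programming the per-protocol enables in CSR1/CSR2; on timeout it strips NETIF_F_HW_CSUM/NETIF_F_RXCSUM from the features so the stack never relies on an offload that was not switched on. CSR0_TPE | CSR0_RPE is written last on every path to restart the engine.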
static void ravb_emac_init_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -536,7 +504,7 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
}
/* Receive frame limit set register */
- ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+ ravb_write(ndev, priv->info->rx_max_frame_size + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
@@ -553,7 +521,8 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
/* E-MAC status register clear */
ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
- ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+
+ ravb_csum_init_gbeth(ndev);
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
@@ -596,6 +565,7 @@ static void ravb_emac_init(struct net_device *ndev)
static int ravb_dmac_init_gbeth(struct net_device *ndev)
{
+ struct ravb_private *priv = netdev_priv(ndev);
int error;
error = ravb_ring_init(ndev, RAVB_BE);
@@ -609,7 +579,7 @@ static int ravb_dmac_init_gbeth(struct net_device *ndev)
ravb_write(ndev, 0x60000000, RCR);
/* Set Max Frame Length (RTC) */
- ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
+ ravb_write(ndev, 0x7ffc0000 | priv->info->rx_max_frame_size, RTC);
/* Set FIFO size */
ravb_write(ndev, 0x00222200, TGC);
@@ -734,6 +704,30 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
}
}
+static void ravb_rx_csum_gbeth(struct sk_buff *skb)
+{
+ __wsum csum_ip_hdr, csum_proto;
+ u8 *hw_csum;
+
+ /* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4
+ * bytes appended to the packet data. The first 2 bytes are the IP header
+ * checksum and the last 2 bytes are the protocol checksum.
+ */
+ if (unlikely(skb->len < sizeof(__sum16) * 2))
+ return;
+
+ hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
+ csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+
+ hw_csum -= sizeof(__sum16);
+ csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
+ skb_trim(skb, skb->len - 2 * sizeof(__sum16));
+
+ /* TODO: IPV6 Rx checksum */
+ if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
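
The GbEth hardware appends four bytes of checksum status to each frame, two little-endian __sum16 values that the function reads back from the tail and then trims off:

    ... payload | IP header csum (2 bytes) | protocol csum (2 bytes) <- skb_tail_pointer()

A result of 0 means the hardware validated that checksum, so CHECKSUM_UNNECESSARY is only set when both results are zero, and for now only for IPv4 (see the TODO).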
static void ravb_rx_csum(struct sk_buff *skb)
{
u8 *hw_csum;
@@ -758,7 +752,8 @@ static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
skb = priv->rx_skb[RAVB_BE][entry];
priv->rx_skb[RAVB_BE][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
+ ALIGN(priv->info->rx_max_frame_size, 16),
+ DMA_FROM_DEVICE);
return skb;
}
@@ -772,29 +767,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
struct ravb_rx_desc *desc;
struct sk_buff *skb;
dma_addr_t dma_addr;
+ int rx_packets = 0;
u8 desc_status;
- int boguscnt;
u16 pkt_len;
u8 die_dt;
int entry;
int limit;
+ int i;
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
stats = &priv->stats[q];
- boguscnt = min(boguscnt, *quota);
- limit = boguscnt;
- desc = &priv->gbeth_rx_ring[entry];
- while (desc->die_dt != DT_FEMPTY) {
+ desc = &priv->rx_ring[q].desc[entry];
+ for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
desc_status = desc->msc;
pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
- if (--boguscnt < 0)
- break;
-
/* We use 0-byte descriptors to mark the DMA mapping errors */
if (!pkt_len)
continue;
@@ -819,8 +810,10 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
skb = ravb_get_skb_gbeth(ndev, entry, desc);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
+ if (ndev->features & NETIF_F_RXCSUM)
+ ravb_rx_csum_gbeth(skb);
napi_gro_receive(&priv->napi[q], skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
break;
case DT_FSTART:
@@ -846,32 +839,33 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
dev_kfree_skb(skb);
priv->rx_1st_skb->protocol =
eth_type_trans(priv->rx_1st_skb, ndev);
+ if (ndev->features & NETIF_F_RXCSUM)
+ ravb_rx_csum_gbeth(priv->rx_1st_skb);
napi_gro_receive(&priv->napi[q],
priv->rx_1st_skb);
- stats->rx_packets++;
+ rx_packets++;
stats->rx_bytes += pkt_len;
break;
}
}
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
+ desc = &priv->rx_ring[q].desc[entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
- desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+ desc = &priv->rx_ring[q].desc[entry];
+ desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
if (!priv->rx_skb[q][entry]) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
if (!skb)
break;
- ravb_set_buffer_align(skb);
dma_addr = dma_map_single(ndev->dev.parent,
skb->data,
- GBETH_RX_BUFF_MAX,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
/* We just set the data size to 0 for a failed mapping
@@ -887,9 +881,9 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
desc->die_dt = DT_FEMPTY;
}
- *quota -= limit - (++boguscnt);
-
- return boguscnt <= 0;
+ stats->rx_packets += rx_packets;
+ *quota -= rx_packets;
+ return *quota == 0;
}
/* Packet receive function for Ethernet AVB */
@@ -911,7 +905,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
- desc = &priv->rx_ring[q][entry];
+ desc = &priv->rx_ring[q].ex_desc[entry];
while (desc->die_dt != DT_FEMPTY) {
/* Descriptor type must be checked before all other reads */
dma_rmb();
@@ -945,7 +939,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- RX_BUF_SZ,
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -971,22 +965,21 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
}
entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q][entry];
+ desc = &priv->rx_ring[q].ex_desc[entry];
}
/* Refill the RX ring buffers. */
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q][entry];
- desc->ds_cc = cpu_to_le16(RX_BUF_SZ);
+ desc = &priv->rx_ring[q].ex_desc[entry];
+ desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
if (!priv->rx_skb[q][entry]) {
- skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
if (!skb)
break; /* Better luck next round. */
- ravb_set_buffer_align(skb);
dma_addr = dma_map_single(ndev->dev.parent, skb->data,
- le16_to_cpu(desc->ds_cc),
+ priv->info->rx_max_frame_size,
DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
/* We just set the data size to 0 for a failed mapping
@@ -1092,11 +1085,23 @@ static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
+ irqreturn_t result = IRQ_HANDLED;
+
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev))) {
+ result = IRQ_NONE;
+ goto out_rpm_put;
+ }
spin_lock(&priv->lock);
ravb_emac_interrupt_unlocked(ndev);
spin_unlock(&priv->lock);
- return IRQ_HANDLED;
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
+ return result;
}
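
Each hard-IRQ handler now takes a no-resume runtime-PM reference and returns IRQ_NONE while the controller is runtime-suspended: on a shared line the handler can be invoked while the register file is powered down, and pm_runtime_get_noresume()/pm_runtime_put_noidle() only pin the current state without sleeping, so they are safe in hard-IRQ context. The same guard is repeated in ravb_interrupt(), ravb_multi_interrupt() and ravb_dma_interrupt() below:

    pm_runtime_get_noresume(dev);
    if (unlikely(!pm_runtime_active(dev))) {
            result = IRQ_NONE;      /* device is off; not our interrupt */
            goto out_rpm_put;
    }
    /* ... normal handling under priv->lock ... */
out_rpm_put:
    pm_runtime_put_noidle(dev);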
/* Error interrupt handler */
@@ -1176,9 +1181,15 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
u32 iss;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Get interrupt status */
iss = ravb_read(ndev, ISS);
@@ -1222,6 +1233,9 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
}
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1230,9 +1244,15 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
u32 iss;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Get interrupt status */
iss = ravb_read(ndev, ISS);
@@ -1254,6 +1274,9 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
}
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1261,8 +1284,14 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ struct device *dev = &priv->pdev->dev;
irqreturn_t result = IRQ_NONE;
+ pm_runtime_get_noresume(dev);
+
+ if (unlikely(!pm_runtime_active(dev)))
+ goto out_rpm_put;
+
spin_lock(&priv->lock);
/* Network control/Best effort queue RX/TX */
@@ -1270,6 +1299,9 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
result = IRQ_HANDLED;
spin_unlock(&priv->lock);
+
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return result;
}
@@ -1288,25 +1320,16 @@ static int ravb_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- bool gptp = info->gptp || info->ccc_gac;
- struct ravb_rx_desc *desc;
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
- unsigned int entry;
- if (!gptp) {
- entry = priv->cur_rx[q] % priv->num_rx_ring[q];
- desc = &priv->gbeth_rx_ring[entry];
- }
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (gptp || desc->die_dt != DT_FEMPTY) {
- if (ravb_rx(ndev, &quota, q))
- goto out;
- }
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1736,89 +1759,159 @@ static const struct ethtool_ops ravb_ethtool_ops = {
.set_wol = ravb_set_wol,
};
-static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
- struct net_device *ndev, struct device *dev,
- const char *ch)
+static int ravb_set_config_mode(struct net_device *ndev)
{
- char *name;
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int error;
- name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
- if (!name)
- return -ENOMEM;
- error = request_irq(irq, handler, 0, name, ndev);
- if (error)
- netdev_err(ndev, "cannot request IRQ %s\n", name);
+ if (info->gptp) {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ return error;
+ /* Set CSEL value */
+ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
+ } else if (info->ccc_gac) {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
+ } else {
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ }
return error;
}
+static void ravb_set_gti(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+
+ if (!(info->gptp || info->ccc_gac))
+ return;
+
+ ravb_write(ndev, priv->gti_tiv, GTI);
+
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+}
+
+static int ravb_compute_gti(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ struct device *dev = ndev->dev.parent;
+ unsigned long rate;
+ u64 inc;
+
+ if (!(info->gptp || info->ccc_gac))
+ return 0;
+
+ if (info->gptp_ref_clk)
+ rate = clk_get_rate(priv->gptp_clk);
+ else
+ rate = clk_get_rate(priv->clk);
+ if (!rate)
+ return -EINVAL;
+
+ inc = div64_ul(1000000000ULL << 20, rate);
+
+ if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
+ dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
+ inc, GTI_TIV_MIN, GTI_TIV_MAX);
+ return -EINVAL;
+ }
+ priv->gti_tiv = inc;
+
+ return 0;
+}
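/*
 * Standalone illustration of the increment math in ravb_compute_gti():
 * GTI.TIV holds nanoseconds per clock cycle in 32.20 fixed point. The
 * 125 MHz rate below is an assumed example, not a datasheet value; a
 * plain userspace program is enough to show the arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate = 125000000;			/* Hz, example only */
	uint64_t inc = (1000000000ULL << 20) / rate;	/* ns/cycle, Q32.20 */

	/* 125 MHz -> 8 ns/cycle -> 8 << 20 == 0x800000 */
	printf("GTI.TIV = 0x%llx (%.6f ns/cycle)\n",
	       (unsigned long long)inc, inc / 1048576.0);
	return 0;
}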
+
+/* Set tx and rx clock internal delay modes */
+static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ bool explicit_delay = false;
+ u32 delay;
+
+ if (!priv->info->internal_delay)
+ return;
+
+ if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 1800, according to DT bindings */
+ priv->rxcidm = !!delay;
+ explicit_delay = true;
+ }
+ if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 2000, according to DT bindings */
+ priv->txcidm = !!delay;
+ explicit_delay = true;
+ }
+
+ if (explicit_delay)
+ return;
+
+ /* Fall back to legacy rgmii-*id behavior */
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ priv->rxcidm = 1;
+ priv->rgmii_override = 1;
+ }
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ priv->txcidm = 1;
+ priv->rgmii_override = 1;
+ }
+}
+
+static void ravb_set_delay_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 set = 0;
+
+ if (!priv->info->internal_delay)
+ return;
+
+ if (priv->rxcidm)
+ set |= APSR_RDM;
+ if (priv->txcidm)
+ set |= APSR_TDM;
+ ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
+}
+
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- struct platform_device *pdev = priv->pdev;
- struct device *dev = &pdev->dev;
+ struct device *dev = &priv->pdev->dev;
int error;
napi_enable(&priv->napi[RAVB_BE]);
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
- if (!info->multi_irqs) {
- error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
- ndev->name, ndev);
- if (error) {
- netdev_err(ndev, "cannot request IRQ\n");
- goto out_napi_off;
- }
- } else {
- error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
- dev, "ch22:multi");
- if (error)
- goto out_napi_off;
- error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
- dev, "ch24:emac");
- if (error)
- goto out_free_irq;
- error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
- ndev, dev, "ch0:rx_be");
- if (error)
- goto out_free_irq_emac;
- error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
- ndev, dev, "ch18:tx_be");
- if (error)
- goto out_free_irq_be_rx;
- error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
- ndev, dev, "ch1:rx_nc");
- if (error)
- goto out_free_irq_be_tx;
- error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
- ndev, dev, "ch19:tx_nc");
- if (error)
- goto out_free_irq_nc_rx;
-
- if (info->err_mgmt_irqs) {
- error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt,
- ndev, dev, "err_a");
- if (error)
- goto out_free_irq_nc_tx;
- error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt,
- ndev, dev, "mgmt_a");
- if (error)
- goto out_free_irq_erra;
- }
- }
+ error = pm_runtime_resume_and_get(dev);
+ if (error < 0)
+ goto out_napi_off;
+
+ /* Set AVB config mode */
+ error = ravb_set_config_mode(ndev);
+ if (error)
+ goto out_rpm_put;
+
+ ravb_set_delay_mode(ndev);
+ ravb_write(ndev, priv->desc_bat_dma, DBAT);
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
- goto out_free_irq_mgmta;
+ goto out_set_reset;
+
ravb_emac_init(ndev);
+ ravb_set_gti(ndev);
+
/* Initialise PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_init(ndev, priv->pdev);
/* PHY control start */
@@ -1832,29 +1925,14 @@ static int ravb_open(struct net_device *ndev)
out_ptp_stop:
/* Stop PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_stop(ndev);
ravb_stop_dma(ndev);
-out_free_irq_mgmta:
- if (!info->multi_irqs)
- goto out_free_irq;
- if (info->err_mgmt_irqs)
- free_irq(priv->mgmta_irq, ndev);
-out_free_irq_erra:
- if (info->err_mgmt_irqs)
- free_irq(priv->erra_irq, ndev);
-out_free_irq_nc_tx:
- free_irq(priv->tx_irqs[RAVB_NC], ndev);
-out_free_irq_nc_rx:
- free_irq(priv->rx_irqs[RAVB_NC], ndev);
-out_free_irq_be_tx:
- free_irq(priv->tx_irqs[RAVB_BE], ndev);
-out_free_irq_be_rx:
- free_irq(priv->rx_irqs[RAVB_BE], ndev);
-out_free_irq_emac:
- free_irq(priv->emac_irq, ndev);
-out_free_irq:
- free_irq(ndev->irq, ndev);
+out_set_reset:
+ ravb_set_opmode(ndev, CCC_OPC_RESET);
+out_rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
out_napi_off:
if (info->nc_queues)
napi_disable(&priv->napi[RAVB_NC]);
@@ -1939,6 +2017,36 @@ out_unlock:
rtnl_unlock();
}
+static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb)
+{
+ struct iphdr *ip = ip_hdr(skb);
+
+ /* TODO: Need to add support for VLAN tag 802.1Q */
+ if (skb_vlan_tag_present(skb))
+ return false;
+
+ /* TODO: Need to add hardware checksum for IPv6 */
+ if (skb->protocol != htons(ETH_P_IP))
+ return false;
+
+ switch (ip->protocol) {
+ case IPPROTO_TCP:
+ break;
+ case IPPROTO_UDP:
+ /* If the checksum value in the UDP header field is 0, the TOE
+ * does not calculate a checksum for the UDP part of this frame,
+ * as that is optional per the standard.
+ */
+ if (udp_hdr(skb)->check == 0)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
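/*
 * How such a capability check is typically paired with a software
 * fallback in the xmit path (the hunk below does exactly this):
 * hw_can_csum() stands in for ravb_can_tx_csum_gbeth(), and frames the
 * TOE cannot handle are checksummed by the stack via the real helper
 * skb_checksum_help().
 */
static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && !hw_can_csum(skb))
		skb_checksum_help(skb);	/* compute the checksum in software */

	/* ... build and queue the TX descriptor as usual ... */
	return NETDEV_TX_OK;
}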
+
/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
@@ -1954,6 +2062,9 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 entry;
u32 len;
+ if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb))
+ skb_checksum_help(skb);
+
spin_lock_irqsave(&priv->lock, flags);
if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
num_tx_desc) {
@@ -2088,8 +2199,15 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *nstats, *stats0, *stats1;
+ struct device *dev = &priv->pdev->dev;
nstats = &ndev->stats;
+
+ pm_runtime_get_noresume(dev);
+
+ if (!pm_runtime_active(dev))
+ goto out_rpm_put;
+
stats0 = &priv->stats[RAVB_BE];
if (info->tx_counters) {
@@ -2131,6 +2249,8 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
nstats->rx_over_errors += stats1->rx_over_errors;
}
+out_rpm_put:
+ pm_runtime_put_noidle(dev);
return nstats;
}
@@ -2153,6 +2273,8 @@ static int ravb_close(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct ravb_tstamp_skb *ts_skb, *ts_skb2;
+ struct device *dev = &priv->pdev->dev;
+ int error;
netif_tx_stop_all_queues(ndev);
@@ -2161,8 +2283,16 @@ static int ravb_close(struct net_device *ndev)
ravb_write(ndev, 0, RIC2);
ravb_write(ndev, 0, TIC);
+ /* PHY disconnect */
+ if (ndev->phydev) {
+ phy_stop(ndev->phydev);
+ phy_disconnect(ndev->phydev);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ }
+
/* Stop PTP Clock driver */
- if (info->gptp)
+ if (info->gptp || info->ccc_gac)
ravb_ptp_stop(ndev);
/* Set the config mode to stop the AVB-DMAC's processes */
@@ -2179,29 +2309,8 @@ static int ravb_close(struct net_device *ndev)
}
}
- /* PHY disconnect */
- if (ndev->phydev) {
- phy_stop(ndev->phydev);
- phy_disconnect(ndev->phydev);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
- }
-
cancel_work_sync(&priv->work);
- if (info->multi_irqs) {
- free_irq(priv->tx_irqs[RAVB_NC], ndev);
- free_irq(priv->rx_irqs[RAVB_NC], ndev);
- free_irq(priv->tx_irqs[RAVB_BE], ndev);
- free_irq(priv->rx_irqs[RAVB_BE], ndev);
- free_irq(priv->emac_irq, ndev);
- if (info->err_mgmt_irqs) {
- free_irq(priv->erra_irq, ndev);
- free_irq(priv->mgmta_irq, ndev);
- }
- }
- free_irq(ndev->irq, ndev);
-
if (info->nc_queues)
napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
@@ -2211,6 +2320,17 @@ static int ravb_close(struct net_device *ndev)
if (info->nc_queues)
ravb_ring_free(ndev, RAVB_NC);
+ /* Update statistics. */
+ ravb_get_stats(ndev);
+
+ /* Set reset mode. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ return error;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
}
@@ -2334,11 +2454,58 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
spin_unlock_irqrestore(&priv->lock, flags);
}
+static int ravb_endisable_csum_gbeth(struct net_device *ndev, enum ravb_reg reg,
+ u32 val, u32 mask)
+{
+ u32 csr0 = CSR0_TPE | CSR0_RPE;
+ int ret;
+
+ ravb_write(ndev, csr0 & ~mask, CSR0);
+ ret = ravb_wait(ndev, CSR0, mask, 0);
+ if (!ret)
+ ravb_write(ndev, val, reg);
+
+ ravb_write(ndev, csr0, CSR0);
+
+ return ret;
+}
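/*
 * Shape of the quiesce-write-restore sequence above, with hypothetical
 * helpers reg_write()/wait_for_clear() standing in for ravb_write()/
 * ravb_wait(): stop the affected engine, wait until it reports idle,
 * write the new checksum configuration, then re-enable both engines
 * whether or not the wait succeeded.
 */
static int update_csum_cfg(struct net_device *ndev, u32 reg, u32 val,
			   u32 engine_bit)
{
	int ret;

	reg_write(ndev, (CSR0_TPE | CSR0_RPE) & ~engine_bit, CSR0);
	ret = wait_for_clear(ndev, CSR0, engine_bit);	/* engine idle? */
	if (!ret)
		reg_write(ndev, val, reg);	/* safe to reconfigure now */
	reg_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);	/* restore engines */

	return ret;
}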
+
static int ravb_set_features_gbeth(struct net_device *ndev,
netdev_features_t features)
{
- /* Place holder */
- return 0;
+ netdev_features_t changed = ndev->features ^ features;
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (changed & NETIF_F_RXCSUM) {
+ if (features & NETIF_F_RXCSUM)
+ val = CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4;
+ else
+ val = 0;
+
+ ret = ravb_endisable_csum_gbeth(ndev, CSR2, val, CSR0_RPE);
+ if (ret)
+ goto done;
+ }
+
+ if (changed & NETIF_F_HW_CSUM) {
+ if (features & NETIF_F_HW_CSUM)
+ val = CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4;
+ else
+ val = 0;
+
+ ret = ravb_endisable_csum_gbeth(ndev, CSR1, val, CSR0_TPE);
+ if (ret)
+ goto done;
+ }
+
+done:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
}
static int ravb_set_features_rcar(struct net_device *ndev,
@@ -2349,8 +2516,6 @@ static int ravb_set_features_rcar(struct net_device *ndev,
if (changed & NETIF_F_RXCSUM)
ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
- ndev->features = features;
-
return 0;
}
@@ -2359,8 +2524,24 @@ static int ravb_set_features(struct net_device *ndev,
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
+ int ret;
+
+ pm_runtime_get_noresume(dev);
+
+ if (pm_runtime_active(dev))
+ ret = info->set_feature(ndev, features);
+ else
+ ret = 0;
+
+ pm_runtime_put_noidle(dev);
+
+ if (ret)
+ return ret;
- return info->set_feature(ndev, features);
+ ndev->features = features;
+
+ return 0;
}
static const struct net_device_ops ravb_netdev_ops = {
@@ -2434,9 +2615,6 @@ static int ravb_mdio_release(struct ravb_private *priv)
}
static const struct ravb_hw_info ravb_gen3_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2447,9 +2625,10 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
@@ -2460,9 +2639,6 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
};
static const struct ravb_hw_info ravb_gen2_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2473,9 +2649,10 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.aligned_tx = 1,
.gptp = 1,
.nc_queues = 1,
@@ -2483,9 +2660,6 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
};
static const struct ravb_hw_info ravb_rzv2m_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_rcar,
- .rx_ring_format = ravb_rx_ring_format_rcar,
- .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
@@ -2496,9 +2670,10 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
- .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
- .rx_max_buf_size = SZ_2K,
+ .rx_max_frame_size = SZ_2K,
+ .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.multi_irqs = 1,
.err_mgmt_irqs = 1,
.gptp = 1,
@@ -2508,9 +2683,6 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
};
static const struct ravb_hw_info gbeth_hw_info = {
- .rx_ring_free = ravb_rx_ring_free_gbeth,
- .rx_ring_format = ravb_rx_ring_format_gbeth,
- .alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
.receive = ravb_rx_gbeth,
.set_rate = ravb_set_rate_gbeth,
.set_feature = ravb_set_features_gbeth,
@@ -2518,10 +2690,13 @@ static const struct ravb_hw_info gbeth_hw_info = {
.emac_init = ravb_emac_init_gbeth,
.gstrings_stats = ravb_gstrings_stats_gbeth,
.gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
+ .net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
+ .net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
- .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
.tccr_mask = TCCR_TSRQ0,
- .rx_max_buf_size = SZ_8K,
+ .rx_max_frame_size = SZ_8K,
+ .rx_max_desc_use = 4080,
+ .rx_desc_size = sizeof(struct ravb_rx_desc),
.aligned_tx = 1,
.tx_counters = 1,
.carrier_counters = 1,
@@ -2541,100 +2716,91 @@ static const struct of_device_id ravb_match_table[] = {
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
-static int ravb_set_gti(struct net_device *ndev)
+static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
+ const char *ch, int *irq, irq_handler_t handler)
{
- struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
- struct device *dev = ndev->dev.parent;
- unsigned long rate;
- uint64_t inc;
-
- if (info->gptp_ref_clk)
- rate = clk_get_rate(priv->gptp_clk);
- else
- rate = clk_get_rate(priv->clk);
- if (!rate)
- return -EINVAL;
+ struct platform_device *pdev = priv->pdev;
+ struct net_device *ndev = priv->ndev;
+ struct device *dev = &pdev->dev;
+ const char *dev_name;
+ unsigned long flags;
+ int error, irq_num;
- inc = div64_ul(1000000000ULL << 20, rate);
+ if (irq_name) {
+ dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
+ if (!dev_name)
+ return -ENOMEM;
- if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
- dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
- inc, GTI_TIV_MIN, GTI_TIV_MAX);
- return -EINVAL;
+ irq_num = platform_get_irq_byname(pdev, irq_name);
+ flags = 0;
+ } else {
+ dev_name = ndev->name;
+ irq_num = platform_get_irq(pdev, 0);
+ flags = IRQF_SHARED;
}
+ if (irq_num < 0)
+ return irq_num;
- ravb_write(ndev, inc, GTI);
+ if (irq)
+ *irq = irq_num;
- return 0;
+ error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev);
+ if (error)
+ netdev_err(ndev, "cannot request IRQ %s\n", dev_name);
+
+ return error;
}
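/*
 * A condensed sketch of the devm-managed IRQ pattern adopted above,
 * assuming a platform device and an illustrative handler my_isr(): the
 * IRQ is looked up by name and released automatically on driver detach,
 * which is what lets ravb_open()/ravb_close() drop their long
 * free_irq() unwind chains.
 */
static int setup_one_irq(struct platform_device *pdev, void *dev_id)
{
	int irq = platform_get_irq_byname(pdev, "ch22");

	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER and friends */

	return devm_request_irq(&pdev->dev, irq, my_isr, 0,
				dev_name(&pdev->dev), dev_id);
}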
-static int ravb_set_config_mode(struct net_device *ndev)
+static int ravb_setup_irqs(struct ravb_private *priv)
{
- struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct net_device *ndev = priv->ndev;
+ const char *irq_name, *emac_irq_name;
int error;
- if (info->gptp) {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
- if (error)
- return error;
- /* Set CSEL value */
- ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
- } else if (info->ccc_gac) {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB);
+ if (!info->multi_irqs)
+ return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt);
+
+ if (info->err_mgmt_irqs) {
+ irq_name = "dia";
+ emac_irq_name = "line3";
} else {
- error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ irq_name = "ch22";
+ emac_irq_name = "ch24";
}
- return error;
-}
-
-/* Set tx and rx clock internal delay modes */
-static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- bool explicit_delay = false;
- u32 delay;
+ error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt);
+ if (error)
+ return error;
- if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
- /* Valid values are 0 and 1800, according to DT bindings */
- priv->rxcidm = !!delay;
- explicit_delay = true;
- }
- if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
- /* Valid values are 0 and 2000, according to DT bindings */
- priv->txcidm = !!delay;
- explicit_delay = true;
- }
+ error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq,
+ ravb_emac_interrupt);
+ if (error)
+ return error;
- if (explicit_delay)
- return;
+ if (info->err_mgmt_irqs) {
+ error = ravb_setup_irq(priv, "err_a", "err_a", NULL, ravb_multi_interrupt);
+ if (error)
+ return error;
- /* Fall back to legacy rgmii-*id behavior */
- if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
- priv->rxcidm = 1;
- priv->rgmii_override = 1;
+ error = ravb_setup_irq(priv, "mgmt_a", "mgmt_a", NULL, ravb_multi_interrupt);
+ if (error)
+ return error;
}
- if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
- priv->txcidm = 1;
- priv->rgmii_override = 1;
- }
-}
+ error = ravb_setup_irq(priv, "ch0", "ch0:rx_be", NULL, ravb_be_interrupt);
+ if (error)
+ return error;
-static void ravb_set_delay_mode(struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- u32 set = 0;
+ error = ravb_setup_irq(priv, "ch1", "ch1:rx_nc", NULL, ravb_nc_interrupt);
+ if (error)
+ return error;
- if (priv->rxcidm)
- set |= APSR_RDM;
- if (priv->txcidm)
- set |= APSR_TDM;
- ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
+ error = ravb_setup_irq(priv, "ch18", "ch18:tx_be", NULL, ravb_be_interrupt);
+ if (error)
+ return error;
+
+ return ravb_setup_irq(priv, "ch19", "ch19:tx_nc", NULL, ravb_nc_interrupt);
}
static int ravb_probe(struct platform_device *pdev)
@@ -2644,9 +2810,8 @@ static int ravb_probe(struct platform_device *pdev)
struct reset_control *rstc;
struct ravb_private *priv;
struct net_device *ndev;
- int error, irq, q;
struct resource *res;
- int i;
+ int error, q;
if (!np) {
dev_err(&pdev->dev,
@@ -2654,7 +2819,7 @@ static int ravb_probe(struct platform_device *pdev)
return -EINVAL;
}
- rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(rstc))
return dev_err_probe(&pdev->dev, PTR_ERR(rstc),
"failed to get cpg reset\n");
@@ -2673,25 +2838,6 @@ static int ravb_probe(struct platform_device *pdev)
if (error)
goto out_free_netdev;
- pm_runtime_enable(&pdev->dev);
- error = pm_runtime_resume_and_get(&pdev->dev);
- if (error < 0)
- goto out_rpm_disable;
-
- if (info->multi_irqs) {
- if (info->err_mgmt_irqs)
- irq = platform_get_irq_byname(pdev, "dia");
- else
- irq = platform_get_irq_byname(pdev, "ch22");
- } else {
- irq = platform_get_irq(pdev, 0);
- }
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- ndev->irq = irq;
-
SET_NETDEV_DEV(ndev, &pdev->dev);
priv = netdev_priv(ndev);
@@ -2706,10 +2852,43 @@ static int ravb_probe(struct platform_device *pdev)
priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
}
+ error = ravb_setup_irqs(priv);
+ if (error)
+ goto out_reset_assert;
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ error = PTR_ERR(priv->clk);
+ goto out_reset_assert;
+ }
+
+ if (info->gptp_ref_clk) {
+ priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
+ if (IS_ERR(priv->gptp_clk)) {
+ error = PTR_ERR(priv->gptp_clk);
+ goto out_reset_assert;
+ }
+ }
+
+ priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
+ if (IS_ERR(priv->refclk)) {
+ error = PTR_ERR(priv->refclk);
+ goto out_reset_assert;
+ }
+ clk_prepare(priv->refclk);
+
+ platform_set_drvdata(pdev, ndev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ error = pm_runtime_resume_and_get(&pdev->dev);
+ if (error < 0)
+ goto out_rpm_disable;
+
priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr)) {
error = PTR_ERR(priv->addr);
- goto out_release;
+ goto out_rpm_put;
}
/* The Ether-specific entries in the device structure. */
@@ -2720,79 +2899,14 @@ static int ravb_probe(struct platform_device *pdev)
error = of_get_phy_mode(np, &priv->phy_interface);
if (error && error != -ENODEV)
- goto out_release;
+ goto out_rpm_put;
priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
priv->avb_link_active_low =
of_property_read_bool(np, "renesas,ether-link-active-low");
- if (info->multi_irqs) {
- if (info->err_mgmt_irqs)
- irq = platform_get_irq_byname(pdev, "line3");
- else
- irq = platform_get_irq_byname(pdev, "ch24");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->emac_irq = irq;
- for (i = 0; i < NUM_RX_QUEUE; i++) {
- irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->rx_irqs[i] = irq;
- }
- for (i = 0; i < NUM_TX_QUEUE; i++) {
- irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->tx_irqs[i] = irq;
- }
-
- if (info->err_mgmt_irqs) {
- irq = platform_get_irq_byname(pdev, "err_a");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->erra_irq = irq;
-
- irq = platform_get_irq_byname(pdev, "mgmt_a");
- if (irq < 0) {
- error = irq;
- goto out_release;
- }
- priv->mgmta_irq = irq;
- }
- }
-
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- error = PTR_ERR(priv->clk);
- goto out_release;
- }
-
- priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk");
- if (IS_ERR(priv->refclk)) {
- error = PTR_ERR(priv->refclk);
- goto out_release;
- }
- clk_prepare_enable(priv->refclk);
-
- if (info->gptp_ref_clk) {
- priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp");
- if (IS_ERR(priv->gptp_clk)) {
- error = PTR_ERR(priv->gptp_clk);
- goto out_disable_refclk;
- }
- clk_prepare_enable(priv->gptp_clk);
- }
-
- ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ ndev->max_mtu = info->rx_max_frame_size -
+ (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
/* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer
@@ -2806,25 +2920,11 @@ static int ravb_probe(struct platform_device *pdev)
ndev->netdev_ops = &ravb_netdev_ops;
ndev->ethtool_ops = &ravb_ethtool_ops;
- /* Set AVB config mode */
- error = ravb_set_config_mode(ndev);
+ error = ravb_compute_gti(ndev);
if (error)
- goto out_disable_gptp_clk;
-
- if (info->gptp || info->ccc_gac) {
- /* Set GTI value */
- error = ravb_set_gti(ndev);
- if (error)
- goto out_disable_gptp_clk;
+ goto out_rpm_put;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
- }
-
- if (info->internal_delay) {
- ravb_parse_delay_mode(np, ndev);
- ravb_set_delay_mode(ndev);
- }
+ ravb_parse_delay_mode(np, ndev);
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
@@ -2835,22 +2935,22 @@ static int ravb_probe(struct platform_device *pdev)
"Cannot allocate desc base address table (size %d bytes)\n",
priv->desc_bat_size);
error = -ENOMEM;
- goto out_disable_gptp_clk;
+ goto out_rpm_put;
}
for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
priv->desc_bat[q].die_dt = DT_EOS;
- ravb_write(ndev, priv->desc_bat_dma, DBAT);
/* Initialise HW timestamp list */
INIT_LIST_HEAD(&priv->ts_skb_list);
- /* Initialise PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_init(ndev, pdev);
-
/* Debug message level */
priv->msg_enable = RAVB_DEF_MSG_ENABLE;
+ /* Set config mode as this is needed for PHY initialization. */
+ error = ravb_set_opmode(ndev, CCC_OPC_CONFIG);
+ if (error)
+ goto out_rpm_put;
+
/* Read and set MAC address */
ravb_read_mac_address(np, ndev);
if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -2863,9 +2963,14 @@ static int ravb_probe(struct platform_device *pdev)
error = ravb_mdio_init(priv);
if (error) {
dev_err(&pdev->dev, "failed to initialize MDIO\n");
- goto out_dma_free;
+ goto out_reset_mode;
}
+ /* Undo previous switch to config opmode. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ goto out_mdio_release;
+
netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll);
if (info->nc_queues)
netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
@@ -2881,7 +2986,8 @@ static int ravb_probe(struct platform_device *pdev)
netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
- platform_set_drvdata(pdev, ndev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
return 0;
@@ -2890,22 +2996,19 @@ out_napi_del:
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
+out_mdio_release:
ravb_mdio_release(priv);
-out_dma_free:
+out_reset_mode:
+ ravb_set_opmode(ndev, CCC_OPC_RESET);
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
-
- /* Stop PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_stop(ndev);
-out_disable_gptp_clk:
- clk_disable_unprepare(priv->gptp_clk);
-out_disable_refclk:
- clk_disable_unprepare(priv->refclk);
-out_release:
+out_rpm_put:
pm_runtime_put(&pdev->dev);
out_rpm_disable:
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ clk_unprepare(priv->refclk);
+out_reset_assert:
reset_control_assert(rstc);
out_free_netdev:
free_netdev(ndev);
@@ -2917,6 +3020,12 @@ static void ravb_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ struct device *dev = &priv->pdev->dev;
+ int error;
+
+ error = pm_runtime_resume_and_get(dev);
+ if (error < 0)
+ return;
unregister_netdev(ndev);
if (info->nc_queues)
@@ -2925,20 +3034,13 @@ static void ravb_remove(struct platform_device *pdev)
ravb_mdio_release(priv);
- /* Stop PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_stop(ndev);
-
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
- ravb_set_opmode(ndev, CCC_OPC_RESET);
-
- clk_disable_unprepare(priv->gptp_clk);
- clk_disable_unprepare(priv->refclk);
-
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_sync_suspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ clk_unprepare(priv->refclk);
reset_control_assert(priv->rstc);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
@@ -2964,6 +3066,9 @@ static int ravb_wol_setup(struct net_device *ndev)
/* Enable MagicPacket */
ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
+ if (priv->info->ccc_gac)
+ ravb_ptp_stop(ndev);
+
return enable_irq_wake(priv->emac_irq);
}
@@ -2971,6 +3076,20 @@ static int ravb_wol_restore(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ int error;
+
+ /* Set reset mode to rearm the WoL logic. */
+ error = ravb_set_opmode(ndev, CCC_OPC_RESET);
+ if (error)
+ return error;
+
+ /* Set AVB config mode. */
+ error = ravb_set_config_mode(ndev);
+ if (error)
+ return error;
+
+ if (priv->info->ccc_gac)
+ ravb_ptp_init(ndev, priv->pdev);
if (info->nc_queues)
napi_enable(&priv->napi[RAVB_NC]);
@@ -2984,102 +3103,96 @@ static int ravb_wol_restore(struct net_device *ndev)
return disable_irq_wake(priv->emac_irq);
}
-static int __maybe_unused ravb_suspend(struct device *dev)
+static int ravb_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct ravb_private *priv = netdev_priv(ndev);
int ret;
if (!netif_running(ndev))
- return 0;
+ goto reset_assert;
netif_device_detach(ndev);
if (priv->wol_enabled)
- ret = ravb_wol_setup(ndev);
- else
- ret = ravb_close(ndev);
+ return ravb_wol_setup(ndev);
- if (priv->info->ccc_gac)
- ravb_ptp_stop(ndev);
+ ret = ravb_close(ndev);
+ if (ret)
+ return ret;
- return ret;
+ ret = pm_runtime_force_suspend(&priv->pdev->dev);
+ if (ret)
+ return ret;
+
+reset_assert:
+ return reset_control_assert(priv->rstc);
}
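/*
 * Ordering sketch of the system-suspend flow above (WoL branch
 * omitted), assuming a netdev driver with runtime PM and a reset line;
 * struct my_priv and my_close() follow the driver's shape but are
 * illustrative here.
 */
static int my_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct my_priv *priv = netdev_priv(ndev);
	int ret;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		ret = my_close(ndev);		/* quiesce DMA, free rings */
		if (ret)
			return ret;
		ret = pm_runtime_force_suspend(dev); /* run RPM suspend now */
		if (ret)
			return ret;
	}

	return reset_control_assert(priv->rstc);
}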
-static int __maybe_unused ravb_resume(struct device *dev)
+static int ravb_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
- int ret = 0;
-
- /* If WoL is enabled set reset mode to rearm the WoL logic */
- if (priv->wol_enabled) {
- ret = ravb_set_opmode(ndev, CCC_OPC_RESET);
- if (ret)
- return ret;
- }
-
- /* All register have been reset to default values.
- * Restore all registers which where setup at probe time and
- * reopen device if it was running before system suspended.
- */
+ int ret;
- /* Set AVB config mode */
- ret = ravb_set_config_mode(ndev);
+ ret = reset_control_deassert(priv->rstc);
if (ret)
return ret;
- if (info->gptp || info->ccc_gac) {
- /* Set GTI value */
- ret = ravb_set_gti(ndev);
+ if (!netif_running(ndev))
+ return 0;
+
+ /* If WoL is enabled, restore the interface. */
+ if (priv->wol_enabled) {
+ ret = ravb_wol_restore(ndev);
+ if (ret)
+ return ret;
+ } else {
+ ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
-
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
}
- if (info->internal_delay)
- ravb_set_delay_mode(ndev);
+ /* Reopening the interface will restore the device to the working state. */
+ ret = ravb_open(ndev);
+ if (ret < 0)
+ goto out_rpm_put;
- /* Restore descriptor base address table */
- ravb_write(ndev, priv->desc_bat_dma, DBAT);
+ ravb_set_rx_mode(ndev);
+ netif_device_attach(ndev);
- if (priv->info->ccc_gac)
- ravb_ptp_init(ndev, priv->pdev);
+ return 0;
- if (netif_running(ndev)) {
- if (priv->wol_enabled) {
- ret = ravb_wol_restore(ndev);
- if (ret)
- return ret;
- }
- ret = ravb_open(ndev);
- if (ret < 0)
- return ret;
- ravb_set_rx_mode(ndev);
- netif_device_attach(ndev);
+out_rpm_put:
+ if (!priv->wol_enabled) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
return ret;
}
-static int __maybe_unused ravb_runtime_nop(struct device *dev)
+static int ravb_runtime_suspend(struct device *dev)
{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ clk_disable(priv->refclk);
+
return 0;
}
+static int ravb_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ return clk_enable(priv->refclk);
+}
+
static const struct dev_pm_ops ravb_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
- SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
+ SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume)
+ RUNTIME_PM_OPS(ravb_runtime_suspend, ravb_runtime_resume, NULL)
};
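/*
 * Why the __maybe_unused annotations could be dropped above: unlike the
 * older SET_SYSTEM_SLEEP_PM_OPS()/SET_RUNTIME_PM_OPS(), the newer
 * SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() macros always reference their
 * arguments, and pm_ptr() evaluates to NULL when CONFIG_PM is disabled.
 * Minimal usage sketch with illustrative names:
 */
static const struct dev_pm_ops my_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(my_suspend, my_resume)
	RUNTIME_PM_OPS(my_rt_suspend, my_rt_resume, NULL)
};

static struct platform_driver my_driver = {
	.driver = {
		.name	= "mydev",
		.pm	= pm_ptr(&my_pm_ops),	/* NULL when !CONFIG_PM */
	},
};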
static struct platform_driver ravb_driver = {
@@ -3087,7 +3200,7 @@ static struct platform_driver ravb_driver = {
.remove_new = ravb_remove,
.driver = {
.name = "ravb",
- .pm = &ravb_dev_pm_ops,
+ .pm = pm_ptr(&ravb_dev_pm_ops),
.of_match_table = ravb_match_table,
},
};
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 9e59669a93dd..755db89db909 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -32,7 +32,6 @@
#include <net/fib_rules.h>
#include <net/fib_notifier.h>
#include <linux/io-64-nonatomic-lo-hi.h>
-#include <generated/utsrelease.h>
#include "rocker_hw.h"
#include "rocker.h"
@@ -2227,7 +2226,6 @@ static void rocker_port_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
- strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static struct rocker_port_stats {
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index d14e0cfc3a6b..1458939c3bf5 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -503,7 +503,6 @@ struct sxgbe_priv_data {
bool tx_path_in_lpi_mode;
int lpi_irq;
int eee_enabled;
- int eee_active;
int tx_lpi_timer;
};
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 8ba017ec9849..4a439b34114d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -133,22 +133,20 @@ static const struct sxgbe_stats sxgbe_gstrings_stats[] = {
#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats)
static int sxgbe_get_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
if (!priv->hw_cap.eee)
return -EOPNOTSUPP;
- edata->eee_enabled = priv->eee_enabled;
- edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
return phy_ethtool_get_eee(dev->phydev, edata);
}
static int sxgbe_set_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 71439825ea4e..ecbe3994f2b1 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -130,7 +130,6 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
if (phy_init_eee(ndev->phydev, true))
return false;
- priv->eee_active = 1;
timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0);
priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
add_timer(&priv->eee_ctrl_timer);
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 175bd9cdfdac..551f890db90a 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -595,7 +595,7 @@ void efx_stop_all(struct efx_nic *efx)
efx_stop_datapath(efx);
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index e001f27085c6..1cb32aedd89c 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2085,7 +2085,7 @@ int ef4_net_stop(struct net_device *net_dev)
return 0;
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
static void ef4_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index fac227d372db..dcd901eccfc8 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
+#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index e4b294b8e9ac..88e5bc347a44 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -605,7 +605,7 @@ static size_t efx_siena_update_stats_atomic(struct efx_nic *efx, u64 *full_stats
return efx->type->update_stats(efx, full_stats, core_stats);
}
-/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
void efx_siena_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
index 4579f43484c3..219fb358a646 100644
--- a/drivers/net/ethernet/sfc/siena/rx_common.c
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
+#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"
diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c
index a7a9ab304e13..71f9b5ec5ae4 100644
--- a/drivers/net/ethernet/sfc/siena/tx_common.c
+++ b/drivers/net/ethernet/sfc/siena/tx_common.c
@@ -317,11 +317,10 @@ static int efx_tx_tso_header_length(struct sk_buff *skb)
size_t header_len;
if (skb->encapsulation)
- header_len = skb_inner_transport_header(skb) -
- skb->data +
+ header_len = skb_inner_transport_offset(skb) +
(inner_tcp_hdr(skb)->doff << 2u);
else
- header_len = skb_transport_header(skb) - skb->data +
+ header_len = skb_transport_offset(skb) +
(tcp_hdr(skb)->doff << 2u);
return header_len;
}
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 9f2393d34371..2adb132b2f7e 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -336,11 +336,10 @@ int efx_tx_tso_header_length(struct sk_buff *skb)
size_t header_len;
if (skb->encapsulation)
- header_len = skb_inner_transport_header(skb) -
- skb->data +
+ header_len = skb_inner_transport_offset(skb) +
(inner_tcp_hdr(skb)->doff << 2u);
else
- header_len = skb_transport_header(skb) - skb->data +
+ header_len = skb_transport_offset(skb) +
(tcp_hdr(skb)->doff << 2u);
return header_len;
}
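/*
 * The two expressions are equivalent: skb_transport_offset() is defined
 * as skb_transport_header(skb) - skb->data, so the conversions above
 * are pure readability cleanups. For a plain (non-encapsulated) TCP
 * frame the TSO header length reduces to the sketch below;
 * tso_hdr_len() is an illustrative name.
 */
static unsigned int tso_hdr_len(const struct sk_buff *skb)
{
	/* L2..L4 headers plus TCP options; doff counts 32-bit words */
	return skb_transport_offset(skb) + (tcp_hdr(skb)->doff << 2);
}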
diff --git a/drivers/net/ethernet/sfc/tx_tso.c b/drivers/net/ethernet/sfc/tx_tso.c
index 64a6768f75ea..ddf149db8180 100644
--- a/drivers/net/ethernet/sfc/tx_tso.c
+++ b/drivers/net/ethernet/sfc/tx_tso.c
@@ -174,8 +174,8 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx,
unsigned int header_len, in_len;
dma_addr_t dma_addr;
- st->ip_off = skb_network_header(skb) - skb->data;
- st->tcp_off = skb_transport_header(skb) - skb->data;
+ st->ip_off = skb_network_offset(skb);
+ st->tcp_off = skb_transport_offset(skb);
header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
in_len = skb_headlen(skb) - header_len;
st->header_len = header_len;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 758347616535..78ff3af7911a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -98,6 +98,7 @@ static int watchdog = 1000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+MODULE_DESCRIPTION("SMC 91C9x/91C1xxx Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:smc91x");
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 31cb7d0166f0..74f1ccc96459 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -56,6 +56,7 @@
#define SMSC_MDIONAME "smsc911x-mdio"
#define SMSC_DRV_VERSION "2008-10-21"
+MODULE_DESCRIPTION("SMSC LAN911x/LAN921x Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SMSC_DRV_VERSION);
MODULE_ALIAS("platform:smsc911x");
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index e1c4a11c1f18..15cb96c2506d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -26,6 +26,7 @@
#define DRV_DESCRIPTION "SMSC LAN9420 driver"
#define DRV_VERSION "1.01"
+MODULE_DESCRIPTION("SMSC LAN9420 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 85dcda51df05..4ec61f1ee71a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -165,9 +165,9 @@ config DWMAC_STARFIVE
help
Support for ethernet controllers on StarFive RISC-V SoCs
- This selects the StarFive platform specific glue layer support for
- the stmmac device driver. This driver is used for StarFive JH7110
- ethernet controller.
+ This selects the StarFive platform specific glue layer support
+ for the stmmac device driver. This driver is used for the
+ StarFive JH7100 and JH7110 ethernet controllers.
config DWMAC_STI
tristate "STi GMAC support"
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 721c1f8e892f..a6fefe675ef1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -59,28 +59,51 @@
#undef FRAME_FILTER_DEBUG
/* #define FRAME_FILTER_DEBUG */
+struct stmmac_q_tx_stats {
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_set_ic_bit;
+ u64_stats_t tx_tso_frames;
+ u64_stats_t tx_tso_nfrags;
+};
+
+struct stmmac_napi_tx_stats {
+ u64_stats_t tx_packets;
+ u64_stats_t tx_pkt_n;
+ u64_stats_t poll;
+ u64_stats_t tx_clean;
+ u64_stats_t tx_set_ic_bit;
+};
+
struct stmmac_txq_stats {
- u64 tx_bytes;
- u64 tx_packets;
- u64 tx_pkt_n;
- u64 tx_normal_irq_n;
- u64 napi_poll;
- u64 tx_clean;
- u64 tx_set_ic_bit;
- u64 tx_tso_frames;
- u64 tx_tso_nfrags;
- struct u64_stats_sync syncp;
+ /* Updates protected by tx queue lock. */
+ struct u64_stats_sync q_syncp;
+ struct stmmac_q_tx_stats q;
+
+ /* Updates protected by NAPI poll logic. */
+ struct u64_stats_sync napi_syncp;
+ struct stmmac_napi_tx_stats napi;
} ____cacheline_aligned_in_smp;
+struct stmmac_napi_rx_stats {
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_pkt_n;
+ u64_stats_t poll;
+};
+
struct stmmac_rxq_stats {
- u64 rx_bytes;
- u64 rx_packets;
- u64 rx_pkt_n;
- u64 rx_normal_irq_n;
- u64 napi_poll;
- struct u64_stats_sync syncp;
+ /* Updates protected by NAPI poll logic. */
+ struct u64_stats_sync napi_syncp;
+ struct stmmac_napi_rx_stats napi;
} ____cacheline_aligned_in_smp;
+/* Updates on each CPU protected by not allowing nested irqs. */
+struct stmmac_pcpu_stats {
+ struct u64_stats_sync syncp;
+ u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES];
+ u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES];
+};
+
/* Extra statistic and debug information exposed by ethtool */
struct stmmac_extra_stats {
/* Transmit errors */
@@ -202,9 +225,12 @@ struct stmmac_extra_stats {
unsigned long mtl_est_hlbf;
unsigned long mtl_est_btre;
unsigned long mtl_est_btrlm;
+ unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES];
+ unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES];
/* per queue statistics */
struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
+ struct stmmac_pcpu_stats __percpu *pcpu_stats;
unsigned long rx_dropped;
unsigned long rx_errors;
unsigned long tx_dropped;
@@ -216,6 +242,7 @@ struct stmmac_safety_stats {
unsigned long mac_errors[32];
unsigned long mtl_errors[32];
unsigned long dma_errors[32];
+ unsigned long dma_dpp_errors[32];
};
/* Number of fields in Safety Stats */
@@ -344,6 +371,7 @@ enum request_irq_err {
REQ_IRQ_ERR_ALL,
REQ_IRQ_ERR_TX,
REQ_IRQ_ERR_RX,
+ REQ_IRQ_ERR_SFTY,
REQ_IRQ_ERR_SFTY_UE,
REQ_IRQ_ERR_SFTY_CE,
REQ_IRQ_ERR_LPI,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 31631e3f89d0..e254b21fdb59 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -106,6 +106,7 @@ struct qcom_ethqos {
struct clk *link_clk;
struct phy *serdes_phy;
unsigned int speed;
+ int serdes_speed;
phy_interface_t phy_mode;
const struct ethqos_emac_por *por;
@@ -169,6 +170,9 @@ static void rgmii_dump(void *priv)
static void
ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
{
+ if (!phy_interface_mode_is_rgmii(ethqos->phy_mode))
+ return;
+
switch (speed) {
case SPEED_1000:
ethqos->link_clk_rate = RGMII_1000_NOM_CLK_FREQ;
@@ -606,19 +610,39 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
*/
static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
{
+ struct net_device *dev = platform_get_drvdata(ethqos->pdev);
+ struct stmmac_priv *priv = netdev_priv(dev);
int val;
val = readl(ethqos->mac_base + MAC_CTRL_REG);
switch (ethqos->speed) {
+ case SPEED_2500:
+ val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
+ RGMII_IO_MACRO_CONFIG2);
+ if (ethqos->serdes_speed != SPEED_2500)
+ phy_set_speed(ethqos->serdes_phy, SPEED_2500);
+ ethqos->serdes_speed = SPEED_2500;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0);
+ break;
case SPEED_1000:
val &= ~ETHQOS_MAC_CTRL_PORT_SEL;
rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
+ if (ethqos->serdes_speed != SPEED_1000)
+ phy_set_speed(ethqos->serdes_phy, SPEED_1000);
+ ethqos->serdes_speed = SPEED_1000;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
case SPEED_100:
val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE;
+ if (ethqos->serdes_speed != SPEED_1000)
+ phy_set_speed(ethqos->serdes_phy, SPEED_1000);
+ ethqos->serdes_speed = SPEED_1000;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
case SPEED_10:
val |= ETHQOS_MAC_CTRL_PORT_SEL;
@@ -627,6 +651,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
SGMII_10M_RX_CLK_DVDR),
RGMII_IO_MACRO_CONFIG);
+ if (ethqos->serdes_speed != SPEED_1000)
+ phy_set_speed(ethqos->serdes_phy, ethqos->speed);
+ ethqos->serdes_speed = SPEED_1000;
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
}
@@ -728,7 +756,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
struct device *dev = &pdev->dev;
struct qcom_ethqos *ethqos;
- int ret;
+ int ret, i;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
@@ -799,6 +827,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
"Failed to get serdes phy\n");
ethqos->speed = SPEED_1000;
+ ethqos->serdes_speed = SPEED_1000;
ethqos_update_link_clk(ethqos, SPEED_1000);
ethqos_set_func_clk_en(ethqos);
@@ -822,6 +851,10 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->serdes_powerdown = qcom_ethqos_serdes_powerdown;
}
+ /* Enable TSO on queue 0 and TBS on the rest of the queues */
+ for (i = 1; i < plat_dat->tx_queues_to_use; i++)
+ plat_dat->tx_queues_cfg[i].tbs_en = 1;
+
return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index ba2ce776bd4d..68f85e4605cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -585,4 +585,5 @@ static struct platform_driver socfpga_dwmac_driver = {
};
module_platform_driver(socfpga_dwmac_driver);
+MODULE_DESCRIPTION("Altera SOC DWMAC Specific Glue layer");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 5d630affb4d1..4e1076faee0c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -15,13 +15,20 @@
#include "stmmac_platform.h"
-#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
-#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
-#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
+#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1
+#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4
+#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U
+
+#define JH7100_SYSMAIN_REGISTER49_DLYCHAIN 0xc8
+
+struct starfive_dwmac_data {
+ unsigned int gtxclk_dlychain;
+};
struct starfive_dwmac {
struct device *dev;
struct clk *clk_tx;
+ const struct starfive_dwmac_data *data;
};
static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode)
@@ -67,6 +74,8 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
mode = STARFIVE_DWMAC_PHY_INFT_RGMII;
break;
@@ -89,6 +98,14 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat)
if (err)
return dev_err_probe(dwmac->dev, err, "error setting phy mode\n");
+ if (dwmac->data) {
+ err = regmap_write(regmap, JH7100_SYSMAIN_REGISTER49_DLYCHAIN,
+ dwmac->data->gtxclk_dlychain);
+ if (err)
+ return dev_err_probe(dwmac->dev, err,
+ "error selecting gtxclk delay chain\n");
+ }
+
return 0;
}
@@ -114,6 +131,8 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
+ dwmac->data = device_get_match_data(&pdev->dev);
+
dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx");
if (IS_ERR(dwmac->clk_tx))
return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx),
@@ -144,8 +163,13 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
+static const struct starfive_dwmac_data jh7100_data = {
+ .gtxclk_dlychain = 4,
+};
+
static const struct of_device_id starfive_dwmac_match[] = {
- { .compatible = "starfive,jh7110-dwmac" },
+ { .compatible = "starfive,jh7100-dwmac", .data = &jh7100_data },
+ { .compatible = "starfive,jh7110-dwmac" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, starfive_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 137741b94122..b21d99faa2d0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -441,8 +441,7 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
u32 v;
@@ -455,9 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_TX_INT) {
ret |= handle_tx;
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
}
if (v & EMAC_TX_DMA_STOP_INT)
@@ -479,9 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_RX_INT) {
ret |= handle_rx;
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
}
if (v & EMAC_RX_BUF_UA_INT)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index 358e7dcb6a9a..17d9120db5fe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -92,7 +92,7 @@
#define DMA_TBS_FTOV BIT(0)
#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV)
-/* Following DMA defines are chanels oriented */
+/* Following DMA defines are channel-oriented */
#define DMA_CHAN_BASE_ADDR 0x00001100
#define DMA_CHAN_BASE_OFFSET 0x80
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 9470d3fd2ded..0d185e54eb7e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -171,8 +171,7 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
if (dir == DMA_DIR_RX)
@@ -201,15 +200,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
}
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 7907d62d3437..85e18f9a22f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -162,8 +162,7 @@ static void show_rx_process_state(unsigned int status)
int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
int ret = 0;
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
@@ -215,16 +214,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 value = readl(ioaddr + DMA_INTR_ENA);
/* to schedule NAPI on real RIE event. */
if (likely(value & DMA_INTR_ENA_RIE)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
}
if (likely(intr_status & DMA_STATUS_TI)) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
if (unlikely(intr_status & DMA_STATUS_ERI))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 207ff1799f2c..6a2c7d22df1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -303,6 +303,8 @@
#define XGMAC_RXCEIE BIT(4)
#define XGMAC_TXCEIE BIT(0)
#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
+#define XGMAC_MTL_DPP_CONTROL 0x000010e0
+#define XGMAC_DPP_DISABLE BIT(0)
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
#define XGMAC_TQS_SHIFT 16
@@ -385,6 +387,7 @@
#define XGMAC_DCEIE BIT(1)
#define XGMAC_TCEIE BIT(0)
#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
+#define XGMAC_DMA_DPP_INT_STATUS 0x00003074
#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index eb48211d9b0e..1af2f89a0504 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -830,6 +830,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
+#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
+#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
+
+static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
+ { true, "TDPES0", DPP_TX_ERR },
+ { true, "TDPES1", DPP_TX_ERR },
+ { true, "TDPES2", DPP_TX_ERR },
+ { true, "TDPES3", DPP_TX_ERR },
+ { true, "TDPES4", DPP_TX_ERR },
+ { true, "TDPES5", DPP_TX_ERR },
+ { true, "TDPES6", DPP_TX_ERR },
+ { true, "TDPES7", DPP_TX_ERR },
+ { true, "TDPES8", DPP_TX_ERR },
+ { true, "TDPES9", DPP_TX_ERR },
+ { true, "TDPES10", DPP_TX_ERR },
+ { true, "TDPES11", DPP_TX_ERR },
+ { true, "TDPES12", DPP_TX_ERR },
+ { true, "TDPES13", DPP_TX_ERR },
+ { true, "TDPES14", DPP_TX_ERR },
+ { true, "TDPES15", DPP_TX_ERR },
+ { true, "RDPES0", DPP_RX_ERR },
+ { true, "RDPES1", DPP_RX_ERR },
+ { true, "RDPES2", DPP_RX_ERR },
+ { true, "RDPES3", DPP_RX_ERR },
+ { true, "RDPES4", DPP_RX_ERR },
+ { true, "RDPES5", DPP_RX_ERR },
+ { true, "RDPES6", DPP_RX_ERR },
+ { true, "RDPES7", DPP_RX_ERR },
+ { true, "RDPES8", DPP_RX_ERR },
+ { true, "RDPES9", DPP_RX_ERR },
+ { true, "RDPES10", DPP_RX_ERR },
+ { true, "RDPES11", DPP_RX_ERR },
+ { true, "RDPES12", DPP_RX_ERR },
+ { true, "RDPES13", DPP_RX_ERR },
+ { true, "RDPES14", DPP_RX_ERR },
+ { true, "RDPES15", DPP_RX_ERR },
+};
+
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
void __iomem *ioaddr, bool correctable,
struct stmmac_safety_stats *stats)
@@ -841,6 +879,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
dwxgmac3_log_error(ndev, value, correctable, "DMA",
dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
+
+ value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+ writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
+ dwxgmac3_dma_dpp_errors,
+ STAT_OFF(dma_dpp_errors), stats);
}
static int
@@ -881,6 +926,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+ /* 5. Enable Data Path Parity Protection */
+ value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
+ /* already enabled by default, so explicitly enable it again */
+ value &= ~XGMAC_DPP_DISABLE;
+ writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
+
return 0;
}
@@ -914,7 +965,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
ret |= !corr;
}
- err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
+ /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
+ * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
+ * Parity Errors here
+ */
+ err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
corr = dma & XGMAC_DECIS;
if (err) {
dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
@@ -930,6 +985,7 @@ static const struct dwxgmac3_error {
{ dwxgmac3_mac_errors },
{ dwxgmac3_mtl_errors },
{ dwxgmac3_dma_errors },
+ { dwxgmac3_dma_dpp_errors },
};
static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
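The new dwxgmac3_handle_dma_err() lines read XGMAC_DMA_DPP_INT_STATUS and immediately write the value back, the usual write-1-to-clear acknowledge. A hedged sketch of that idiom in isolation (the register offset is illustrative):

	static u32 demo_ack_w1c_status(void __iomem *ioaddr, u32 offset)
	{
		u32 value = readl(ioaddr + offset);

		/* the status bits are write-1-to-clear: writing back exactly
		 * what we read acknowledges the events we are about to log,
		 * without clearing anything that arrives afterwards
		 */
		writel(value, ioaddr + offset);

		return value;
	}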
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 3cde695fec91..dd2ab6185c40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -337,8 +337,7 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
- struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
+ struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats);
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
@@ -367,15 +366,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
/* TX/RX NORMAL interrupts */
if (likely(intr_status & XGMAC_NIS)) {
if (likely(intr_status & XGMAC_RI)) {
- u64_stats_update_begin(&rxq_stats->syncp);
- rxq_stats->rx_normal_irq_n++;
- u64_stats_update_end(&rxq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->rx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- u64_stats_update_begin(&txq_stats->syncp);
- txq_stats->tx_normal_irq_n++;
- u64_stats_update_end(&txq_stats->syncp);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_inc(&stats->tx_normal_irq_n[chan]);
+ u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 1bd34b2a47e8..29367105df54 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -224,7 +224,7 @@ static const struct stmmac_hwif_entry {
.regs = {
.ptp_off = PTP_GMAC4_OFFSET,
.mmc_off = MMC_GMAC4_OFFSET,
- .est_off = EST_XGMAC_OFFSET,
+ .est_off = EST_GMAC4_OFFSET,
},
.desc = &dwmac4_desc_ops,
.dma = &dwmac410_dma_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 14c9d2637dfe..dff02d75d519 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -86,10 +86,6 @@ struct stmmac_counters {
unsigned int mmc_rx_discard_octets_gb;
unsigned int mmc_rx_align_err_frames;
- /* IPC */
- unsigned int mmc_rx_ipc_intr_mask;
- unsigned int mmc_rx_ipc_intr;
-
/* IPv4 */
unsigned int mmc_rx_ipv4_gd;
unsigned int mmc_rx_ipv4_hderr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 8597c6abae8d..7eb477faa75a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -316,9 +316,6 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
- /* IPC */
- mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK);
- mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR);
/* IPv4 */
mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f155e4841c62..dddcaa9220cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -31,6 +31,7 @@ struct stmmac_resources {
int wol_irq;
int lpi_irq;
int irq;
+ int sfty_irq;
int sfty_ce_irq;
int sfty_ue_irq;
int rx_irq[MTL_MAX_RX_QUEUES];
@@ -298,6 +299,7 @@ struct stmmac_priv {
void __iomem *ptpaddr;
void __iomem *estaddr;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ int sfty_irq;
int sfty_ce_irq;
int sfty_ue_irq;
int rx_irq[MTL_MAX_RX_QUEUES];
@@ -306,6 +308,7 @@ struct stmmac_priv {
char int_name_mac[IFNAMSIZ + 9];
char int_name_wol[IFNAMSIZ + 9];
char int_name_lpi[IFNAMSIZ + 9];
+ char int_name_sfty[IFNAMSIZ + 10];
char int_name_sfty_ce[IFNAMSIZ + 10];
char int_name_sfty_ue[IFNAMSIZ + 10];
char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
index 4da6ccc17c20..c9693f77e1f6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c
@@ -81,6 +81,7 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max;
void __iomem *est_addr = priv->estaddr;
u32 txqcnt_mask = BIT(txqcnt) - 1;
+ int i;
status = readl(est_addr + EST_STATUS);
@@ -125,6 +126,11 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev,
x->mtl_est_hlbf++;
+ for (i = 0; i < txqcnt; i++) {
+ if (feqn & BIT(i))
+ x->mtl_est_txq_hlbf[i]++;
+ }
+
/* Clear Interrupt */
writel(feqn, est_addr + EST_FRM_SZ_ERR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 42d27b97dd1d..e1537a57815f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -243,8 +243,6 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_discard_frames_gb),
STMMAC_MMC_STAT(mmc_rx_discard_octets_gb),
STMMAC_MMC_STAT(mmc_rx_align_err_frames),
- STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
- STMMAC_MMC_STAT(mmc_rx_ipc_intr),
STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
@@ -549,44 +547,79 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
}
+static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q)
+{
+ u64 total;
+ int cpu;
+
+ total = 0;
+ for_each_possible_cpu(cpu) {
+ struct stmmac_pcpu_stats *pcpu;
+ unsigned int start;
+ u64 irq_n;
+
+ pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&pcpu->syncp);
+ irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]);
+ } while (u64_stats_fetch_retry(&pcpu->syncp, start));
+ total += irq_n;
+ }
+ return total;
+}
+
+static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q)
+{
+ u64 total;
+ int cpu;
+
+ total = 0;
+ for_each_possible_cpu(cpu) {
+ struct stmmac_pcpu_stats *pcpu;
+ unsigned int start;
+ u64 irq_n;
+
+ pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&pcpu->syncp);
+ irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]);
+ } while (u64_stats_fetch_retry(&pcpu->syncp, start));
+ total += irq_n;
+ }
+ return total;
+}
+
static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int start;
- int q, stat;
- char *p;
+ int q;
for (q = 0; q < tx_cnt; q++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
- struct stmmac_txq_stats snapshot;
+ u64 pkt_n;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- snapshot = *txq_stats;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n);
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
- p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
- for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
- *data++ = (*(u64 *)p);
- p += sizeof(u64);
- }
+ *data++ = pkt_n;
+ *data++ = stmmac_get_tx_normal_irq_n(priv, q);
}
for (q = 0; q < rx_cnt; q++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
- struct stmmac_rxq_stats snapshot;
+ u64 pkt_n;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- snapshot = *rxq_stats;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n);
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
- p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
- for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
- *data++ = (*(u64 *)p);
- p += sizeof(u64);
- }
+ *data++ = pkt_n;
+ *data++ = stmmac_get_rx_normal_irq_n(priv, q);
}
}
@@ -645,39 +678,49 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
pos = j;
for (i = 0; i < rx_queues_count; i++) {
struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
- struct stmmac_rxq_stats snapshot;
+ struct stmmac_napi_rx_stats snapshot;
+ u64 n_irq;
j = pos;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- snapshot = *rxq_stats;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
-
- data[j++] += snapshot.rx_pkt_n;
- data[j++] += snapshot.rx_normal_irq_n;
- normal_irq_n += snapshot.rx_normal_irq_n;
- napi_poll += snapshot.napi_poll;
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ snapshot = rxq_stats->napi;
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
+
+ data[j++] += u64_stats_read(&snapshot.rx_pkt_n);
+ n_irq = stmmac_get_rx_normal_irq_n(priv, i);
+ data[j++] += n_irq;
+ normal_irq_n += n_irq;
+ napi_poll += u64_stats_read(&snapshot.poll);
}
pos = j;
for (i = 0; i < tx_queues_count; i++) {
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
- struct stmmac_txq_stats snapshot;
+ struct stmmac_napi_tx_stats napi_snapshot;
+ struct stmmac_q_tx_stats q_snapshot;
+ u64 n_irq;
j = pos;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- snapshot = *txq_stats;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
-
- data[j++] += snapshot.tx_pkt_n;
- data[j++] += snapshot.tx_normal_irq_n;
- normal_irq_n += snapshot.tx_normal_irq_n;
- data[j++] += snapshot.tx_clean;
- data[j++] += snapshot.tx_set_ic_bit;
- data[j++] += snapshot.tx_tso_frames;
- data[j++] += snapshot.tx_tso_nfrags;
- napi_poll += snapshot.napi_poll;
+ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ q_snapshot = txq_stats->q;
+ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ napi_snapshot = txq_stats->napi;
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
+
+ data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n);
+ n_irq = stmmac_get_tx_normal_irq_n(priv, i);
+ data[j++] += n_irq;
+ normal_irq_n += n_irq;
+ data[j++] += u64_stats_read(&napi_snapshot.tx_clean);
+ data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) +
+ u64_stats_read(&napi_snapshot.tx_set_ic_bit);
+ data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames);
+ data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags);
+ napi_poll += u64_stats_read(&napi_snapshot.poll);
}
normal_irq_n += priv->xstats.rx_early_irq;
data[j++] = normal_irq_n;
@@ -852,15 +895,13 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static int stmmac_ethtool_op_get_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
if (!priv->dma_cap.eee)
return -EOPNOTSUPP;
- edata->eee_enabled = priv->eee_enabled;
- edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
edata->tx_lpi_enabled = priv->tx_lpi_enabled;
@@ -868,7 +909,7 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev,
}
static int stmmac_ethtool_op_set_eee(struct net_device *dev,
- struct ethtool_eee *edata)
+ struct ethtool_keee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
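stmmac_get_rx_normal_irq_n() and stmmac_get_tx_normal_irq_n() above are identical except for the per-CPU array they read. A possible consolidation (a sketch only, not what the patch does) keeps one fetch loop and selects the counter by direction:

	enum demo_dir { DEMO_RX, DEMO_TX };

	static u64 demo_get_normal_irq_n(struct stmmac_priv *priv,
					 enum demo_dir dir, int q)
	{
		u64 total = 0;
		int cpu;

		for_each_possible_cpu(cpu) {
			struct stmmac_pcpu_stats *pcpu =
				per_cpu_ptr(priv->xstats.pcpu_stats, cpu);
			u64_stats_t *ctr = dir == DEMO_RX ?
				&pcpu->rx_normal_irq_n[q] :
				&pcpu->tx_normal_irq_n[q];
			unsigned int start;
			u64 n;

			do {
				start = u64_stats_fetch_begin(&pcpu->syncp);
				n = u64_stats_read(ctr);
			} while (u64_stats_fetch_retry(&pcpu->syncp, start));

			total += n;
		}

		return total;
	}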
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 25519952f754..24cd80490d19 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2482,7 +2482,6 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
struct xdp_desc xdp_desc;
bool work_done = true;
u32 tx_set_ic_bit = 0;
- unsigned long flags;
/* Avoids TX time-out as we are sharing with slow path */
txq_trans_cond_update(nq);
@@ -2507,6 +2506,13 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
if (!xsk_tx_peek_desc(pool, &xdp_desc))
break;
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ xdp_desc.len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ continue;
+ }
+
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -2566,9 +2572,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_set_ic_bit += tx_set_ic_bit;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->napi_syncp);
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
@@ -2616,7 +2622,6 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
u32 tx_packets = 0, tx_errors = 0;
- unsigned long flags;
__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
@@ -2674,7 +2679,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
}
if (skb) {
stmmac_get_tx_hwtstamp(priv, p, skb);
- } else {
+ } else if (tx_q->xsk_pool &&
+ xp_tx_metadata_enabled(tx_q->xsk_pool)) {
struct stmmac_xsk_tx_complete tx_compl = {
.priv = priv,
.desc = p,
@@ -2782,11 +2788,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
if (tx_q->dirty_tx != tx_q->cur_tx)
*pending_packets = true;
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_packets += tx_packets;
- txq_stats->tx_pkt_n += tx_packets;
- txq_stats->tx_clean++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
+ u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
+ u64_stats_inc(&txq_stats->napi.tx_clean);
+ u64_stats_update_end(&txq_stats->napi_syncp);
priv->xstats.tx_errors += tx_errors;
@@ -3592,6 +3598,10 @@ static void stmmac_free_irq(struct net_device *dev,
if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
fallthrough;
+ case REQ_IRQ_ERR_SFTY:
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
+ free_irq(priv->sfty_irq, dev);
+ fallthrough;
case REQ_IRQ_ERR_WOL:
free_irq(dev->irq, dev);
fallthrough;
@@ -3662,6 +3672,23 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
}
}
+ /* Request the common Safety Feature Correctable/Uncorrectable
+ * Error line in case another line is used
+ */
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
+ int_name = priv->int_name_sfty;
+ sprintf(int_name, "%s:%s", dev->name, "safety");
+ ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty MSI %d (error: %d)\n",
+ __func__, priv->sfty_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY;
+ goto irq_error;
+ }
+ }
+
/* Request the Safety Feature Correctible Error line in
* case of another line is used
*/
@@ -3799,6 +3826,21 @@ static int stmmac_request_irq_single(struct net_device *dev)
}
}
+ /* Request the common Safety Feature Correctable/Uncorrectable
+ * Error line in case another line is used
+ */
+ if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
+ ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
+ __func__, priv->sfty_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY;
+ goto irq_error;
+ }
+ }
+
return 0;
irq_error:
@@ -4007,8 +4049,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
- if (priv->fpe_wq)
+ if (priv->fpe_wq) {
destroy_workqueue(priv->fpe_wq);
+ priv->fpe_wq = NULL;
+ }
netdev_info(priv->dev, "FPE workqueue stop");
}
@@ -4213,7 +4257,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
u8 proto_hdr_len, hdr;
- unsigned long flags;
u32 pay_len, mss;
dma_addr_t des;
int i;
@@ -4378,13 +4421,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_bytes += skb->len;
- txq_stats->tx_tso_frames++;
- txq_stats->tx_tso_nfrags += nfrags;
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
+ u64_stats_inc(&txq_stats->q.tx_tso_frames);
+ u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
if (set_ic)
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4483,7 +4526,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
int entry, first_tx;
- unsigned long flags;
dma_addr_t des;
tx_q = &priv->dma_conf.tx_queue[queue];
@@ -4501,6 +4543,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return stmmac_tso_xmit(skb, dev);
}
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ skb->len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
+
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
@@ -4653,11 +4702,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_bytes += skb->len;
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
if (set_ic)
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4718,6 +4767,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_map_err:
netdev_err(priv->dev, "Tx DMA map failed\n");
+max_sdu_err:
dev_kfree_skb(skb);
priv->xstats.tx_dropped++;
return NETDEV_TX_OK;
@@ -4874,6 +4924,13 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
return STMMAC_XDP_CONSUMED;
+ if (priv->plat->est && priv->plat->est->enable &&
+ priv->plat->est->max_sdu[queue] &&
+ xdpf->len > priv->plat->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ return STMMAC_XDP_CONSUMED;
+ }
+
if (likely(priv->extend_desc))
tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
else if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -4921,12 +4978,11 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
set_ic = false;
if (set_ic) {
- unsigned long flags;
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->q_syncp);
+ u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
+ u64_stats_update_end(&txq_stats->q_syncp);
}
stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -5076,7 +5132,6 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
int coe = priv->hw->rx_csum;
- unsigned long flags;
struct sk_buff *skb;
u32 hash;
@@ -5106,10 +5161,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_pkt_n++;
- rxq_stats->rx_bytes += len;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
+ u64_stats_add(&rxq_stats->napi.rx_bytes, len);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -5191,7 +5246,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct bpf_prog *prog;
bool failure = false;
- unsigned long flags;
int xdp_status = 0;
int status = 0;
@@ -5346,9 +5400,9 @@ read_again:
stmmac_finalize_xdp_rx(priv, xdp_status);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5386,7 +5440,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int desc_size;
struct sk_buff *skb = NULL;
struct stmmac_xdp_buff ctx;
- unsigned long flags;
int xdp_status = 0;
int buf_sz;
@@ -5646,11 +5699,11 @@ drain_data:
stmmac_rx_refill(priv, queue);
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->rx_packets += rx_packets;
- rxq_stats->rx_bytes += rx_bytes;
- rxq_stats->rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
+ u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
+ u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5665,13 +5718,12 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
struct stmmac_priv *priv = ch->priv_data;
struct stmmac_rxq_stats *rxq_stats;
u32 chan = ch->index;
- unsigned long flags;
int work_done;
rxq_stats = &priv->xstats.rxq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.poll);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
work_done = stmmac_rx(priv, budget, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5693,13 +5745,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_txq_stats *txq_stats;
bool pending_packets = false;
u32 chan = ch->index;
- unsigned long flags;
int work_done;
txq_stats = &priv->xstats.txq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_inc(&txq_stats->napi.poll);
+ u64_stats_update_end(&txq_stats->napi_syncp);
work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
work_done = min(work_done, budget);
@@ -5729,17 +5780,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
struct stmmac_rxq_stats *rxq_stats;
struct stmmac_txq_stats *txq_stats;
u32 chan = ch->index;
- unsigned long flags;
rxq_stats = &priv->xstats.rxq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
- rxq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
+ u64_stats_update_begin(&rxq_stats->napi_syncp);
+ u64_stats_inc(&rxq_stats->napi.poll);
+ u64_stats_update_end(&rxq_stats->napi_syncp);
txq_stats = &priv->xstats.txq_stats[chan];
- flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
- txq_stats->napi_poll++;
- u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
+ u64_stats_update_begin(&txq_stats->napi_syncp);
+ u64_stats_inc(&txq_stats->napi.poll);
+ u64_stats_update_end(&txq_stats->napi_syncp);
tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
tx_done = min(tx_done, budget);
@@ -6014,10 +6064,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv)
priv->tx_path_in_lpi_mode = false;
}
- for (queue = 0; queue < queues_count; queue++) {
- status = stmmac_host_mtl_irq_status(priv, priv->hw,
- queue);
- }
+ for (queue = 0; queue < queues_count; queue++)
+ stmmac_host_mtl_irq_status(priv, priv->hw, queue);
/* PCS link status */
if (priv->hw->pcs &&
@@ -6052,8 +6100,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
- /* Check if a fatal error happened */
- if (stmmac_safety_feat_interrupt(priv))
+ /* Check ASP error if it isn't delivered via an individual IRQ */
+ if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
return IRQ_HANDLED;
/* To handle Common interrupts */
@@ -6070,11 +6118,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6090,11 +6133,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct stmmac_priv *priv = netdev_priv(dev);
- if (unlikely(!dev)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6116,11 +6154,6 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -6147,11 +6180,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
- if (unlikely(!data)) {
- netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
- return IRQ_NONE;
- }
-
/* Check if adapter is up */
if (test_bit(STMMAC_DOWN, &priv->state))
return IRQ_HANDLED;
@@ -7065,10 +7093,13 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
u64 tx_bytes;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
- tx_packets = txq_stats->tx_packets;
- tx_bytes = txq_stats->tx_bytes;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->q_syncp);
+ tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
+ } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
+ do {
+ start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
+ tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
+ } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
stats->tx_packets += tx_packets;
stats->tx_bytes += tx_bytes;
@@ -7080,10 +7111,10 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
u64 rx_bytes;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
- rx_packets = rxq_stats->rx_packets;
- rx_bytes = rxq_stats->rx_bytes;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
+ rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
+ rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
+ } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -7477,9 +7508,16 @@ int stmmac_dvr_probe(struct device *device,
priv->dev = ndev;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
- u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
- for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
- u64_stats_init(&priv->xstats.txq_stats[i].syncp);
+ u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
+ u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
+ }
+
+ priv->xstats.pcpu_stats =
+ devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
+ if (!priv->xstats.pcpu_stats)
+ return -ENOMEM;
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
@@ -7492,6 +7530,7 @@ int stmmac_dvr_probe(struct device *device,
priv->dev->irq = res->irq;
priv->wol_irq = res->wol_irq;
priv->lpi_irq = res->lpi_irq;
+ priv->sfty_irq = res->sfty_irq;
priv->sfty_ce_irq = res->sfty_ce_irq;
priv->sfty_ue_irq = res->sfty_ue_irq;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
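A recurring pattern in the stmmac_main.c hunks above is the removal of u64_stats_update_begin_irqsave()/..._irqrestore() in favour of plain u64_stats_update_begin()/..._end(). The apparent motivation is the split into two sync points per Tx queue: q_syncp guards fields written only from xmit paths (BH context) and napi_syncp guards fields written only from NAPI poll (softirq), so each syncp now has a single writer context that cannot preempt itself, making the IRQ-saving variants unnecessary. An abbreviated, illustrative layout (the patch nests the fields in sub-structs):

	struct demo_txq_stats {
		/* written only from ndo_start_xmit (BH disabled) */
		struct u64_stats_sync q_syncp;
		u64_stats_t tx_bytes;

		/* written only from the NAPI poll loop (softirq) */
		struct u64_stats_sync napi_syncp;
		u64_stats_t tx_pkt_n;
	};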
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
index aefc121464b5..13a30e6df4c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -110,6 +110,8 @@ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
/* Enable and restart the Auto-Negotiation */
if (ane)
value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN;
+ else
+ value &= ~GMAC_AN_CTRL_ANE;
/* In case of MAC-2-MAC connection, block is configured to operate
* according to MAC conf register.
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 70eadc83ca68..54797edc9b38 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -743,6 +743,14 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
}
+ stmmac_res->sfty_irq =
+ platform_get_irq_byname_optional(pdev, "sfty");
+ if (stmmac_res->sfty_irq < 0) {
+ if (stmmac_res->sfty_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(&pdev->dev, "IRQ sfty not found\n");
+ }
+
stmmac_res->addr = devm_platform_ioremap_resource(pdev, 0);
return PTR_ERR_OR_ZERO(stmmac_res->addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 26fa33e5ec34..cce00719937d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -915,8 +915,30 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
return time;
}
-static int tc_setup_taprio(struct stmmac_priv *priv,
- struct tc_taprio_qopt_offload *qopt)
+static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct plat_stmmacenet_data *plat = priv->plat;
+ u32 num_tc = qopt->mqprio.qopt.num_tc;
+ u32 offset, count, i, j;
+
+ /* QueueMaxSDU received from tc corresponds to the Linux traffic
+ * class. Map queueMaxSDU per Linux traffic class to DWMAC Tx queues.
+ */
+ for (i = 0; i < num_tc; i++) {
+ if (!qopt->max_sdu[i])
+ continue;
+
+ offset = qopt->mqprio.qopt.offset[i];
+ count = qopt->mqprio.qopt.count[i];
+
+ for (j = offset; j < offset + count; j++)
+ plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
+ }
+}
+
+static int tc_taprio_configure(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
@@ -968,8 +990,6 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
if (qopt->cmd == TAPRIO_CMD_DESTROY)
goto disable;
- else if (qopt->cmd != TAPRIO_CMD_REPLACE)
- return -EOPNOTSUPP;
if (qopt->num_entries >= dep)
return -EINVAL;
@@ -1045,6 +1065,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->ter = qopt->cycle_time_extension;
+ tc_taprio_map_maxsdu_txq(priv, qopt);
+
if (fpe && !priv->dma_cap.fpesel) {
mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
@@ -1078,6 +1100,11 @@ disable:
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
+ /* Reset taprio status */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ priv->xstats.max_sdu_txq_drop[i] = 0;
+ priv->xstats.mtl_est_txq_hlbf[i] = 0;
+ }
mutex_unlock(&priv->plat->est->lock);
}
@@ -1095,6 +1122,57 @@ disable:
return ret;
}
+static void tc_taprio_stats(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ u64 window_drops = 0;
+ int i = 0;
+
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ window_drops += priv->xstats.max_sdu_txq_drop[i] +
+ priv->xstats.mtl_est_txq_hlbf[i];
+ qopt->stats.window_drops = window_drops;
+
+ /* Transmission overrun doesn't happen for stmmac, hence always 0 */
+ qopt->stats.tx_overruns = 0;
+}
+
+static void tc_taprio_queue_stats(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct tc_taprio_qopt_queue_stats *q_stats = &qopt->queue_stats;
+ int queue = qopt->queue_stats.queue;
+
+ q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] +
+ priv->xstats.mtl_est_txq_hlbf[queue];
+
+ /* Transmission overrun doesn't happen for stmmac, hence always 0 */
+ q_stats->stats.tx_overruns = 0;
+}
+
+static int tc_setup_taprio(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ int err = 0;
+
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ case TAPRIO_CMD_DESTROY:
+ err = tc_taprio_configure(priv, qopt);
+ break;
+ case TAPRIO_CMD_STATS:
+ tc_taprio_stats(priv, qopt);
+ break;
+ case TAPRIO_CMD_QUEUE_STATS:
+ tc_taprio_queue_stats(priv, qopt);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
static int tc_setup_etf(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt)
{
@@ -1126,6 +1204,7 @@ static int tc_query_caps(struct stmmac_priv *priv,
return -EOPNOTSUPP;
caps->gate_mask_per_txq = true;
+ caps->supports_queue_max_sdu = true;
return 0;
}
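For orientation on the ETH_HLEN - ETH_TLEN adjustment in tc_taprio_map_maxsdu_txq(): the driver later compares the stored limit against skb->len, which covers the full 14-byte Ethernet header, while the 2-byte EtherType is already accounted to the SDU, so the stored limit adds the remaining 12 address bytes. With queue-max-sdu 1500, the per-queue limit becomes 1500 + 14 - 2 = 1512, which is what a maximum-size SDU measures as skb->len.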
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 3525d5c0d694..351609f4f011 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1144,9 +1144,9 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
nskb->protocol = skb->protocol;
offset = skb_mac_header(skb) - skb->data;
skb_set_mac_header(nskb, offset);
- offset = skb_network_header(skb) - skb->data;
+ offset = skb_network_offset(skb);
skb_set_network_header(nskb, offset);
- offset = skb_transport_header(skb) - skb->data;
+ offset = skb_transport_offset(skb);
skb_set_transport_header(nskb, offset);
offset = 0;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index be01450c20dc..1530d13984d4 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -189,6 +189,7 @@ config TI_ICSSG_PRUETH
select TI_K3_CPPI_DESC_POOL
depends on PRU_REMOTEPROC
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
This subsystem is available starting with the AM65 platform.
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 35fceba01ea4..d6ce2c9f0a8d 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -514,14 +514,14 @@ am65_cpsw_set_link_ksettings(struct net_device *ndev,
return phylink_ethtool_ksettings_set(salve->phylink, ecmd);
}
-static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
return phylink_ethtool_get_eee(salve->phylink, edata);
}
-static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 9d2f4ac783e4..2939a21ca74f 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -294,7 +294,7 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
txqueue,
netif_tx_queue_stopped(netif_txq),
jiffies_to_msecs(jiffies - trans_start),
- dql_avail(&netif_txq->dql),
+ netdev_queue_dql_avail(netif_txq),
k3_cppi_desc_pool_avail(tx_chn->desc_pool));
if (netif_tx_queue_stopped(netif_txq)) {
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 26dc906eae90..57fe936bb177 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -90,4 +90,5 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
}
EXPORT_SYMBOL_GPL(ti_cm_get_macid);
+MODULE_DESCRIPTION("TI CPSW Switch common module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ea85c6dd5484..c0a5abd8d9a8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -631,6 +631,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
}
+ phy->mac_managed_pm = true;
+
slave->phy = phy;
phy_attached_info(slave->phy);
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index a557a477d039..f7b283353ba2 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -422,7 +422,7 @@ int cpsw_set_link_ksettings(struct net_device *ndev,
return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
}
-int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
@@ -434,7 +434,7 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 498c50c6d1a7..087dcb67505a 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -773,6 +773,9 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->slave_num);
return;
}
+
+ phy->mac_managed_pm = true;
+
slave->phy = phy;
phy_attached_info(slave->phy);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 0e27c433098d..7efa72502c86 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -496,8 +496,8 @@ int cpsw_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *ecmd);
int cpsw_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *ecmd);
-int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata);
-int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata);
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata);
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata);
int cpsw_nway_reset(struct net_device *ndev);
void cpsw_get_ringparam(struct net_device *ndev,
struct ethtool_ringparam *ering,
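The ethtool_eee to ethtool_keee conversions in this and the neighbouring TI and stmmac files track the kernel-internal EEE API change: ethtool_keee carries link modes as linkmode bitmaps rather than u32 masks. An approximate, abbreviated shape for orientation only (include/linux/ethtool.h is authoritative):

	struct ethtool_keee {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
		__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
		__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertised);
		u32	tx_lpi_timer;
		bool	tx_lpi_enabled;
		bool	eee_active;
		bool	eee_enabled;
	};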
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index bcccf43d368b..dbbea9146040 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -638,6 +638,16 @@ static void cpts_calc_mult_shift(struct cpts *cpts)
freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}
+static void cpts_clk_unregister(void *clk)
+{
+ clk_hw_unregister_mux(clk);
+}
+
+static void cpts_clk_del_provider(void *np)
+{
+ of_clk_del_provider(np);
+}
+
static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
{
struct device_node *refclk_np;
@@ -687,9 +697,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
goto mux_fail;
}
- ret = devm_add_action_or_reset(cpts->dev,
- (void(*)(void *))clk_hw_unregister_mux,
- clk_hw);
+ ret = devm_add_action_or_reset(cpts->dev, cpts_clk_unregister, clk_hw);
if (ret) {
dev_err(cpts->dev, "add clkmux unreg action %d", ret);
goto mux_fail;
@@ -699,8 +707,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
if (ret)
goto mux_fail;
- ret = devm_add_action_or_reset(cpts->dev,
- (void(*)(void *))of_clk_del_provider,
+ ret = devm_add_action_or_reset(cpts->dev, cpts_clk_del_provider,
refclk_np);
if (ret) {
dev_err(cpts->dev, "add clkmux provider unreg action %d", ret);
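The two wrappers above replace casts of clk_hw_unregister_mux() and of_clk_del_provider() to void (*)(void *). Calling a function through a mismatched pointer type is undefined behaviour and trips kernel CFI, so the devm action must be a real function of the exact expected type. The general shape of the fix, with an illustrative destructor name:

	static void demo_release(void *data)
	{
		/* real_destructor() stands in for the typed cleanup
		 * function being wrapped
		 */
		real_destructor(data);
	}

	...
	ret = devm_add_action_or_reset(dev, demo_release, resource);
	if (ret)
		return ret;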
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index a27ec1dcc8d5..9a7dd7efcf69 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -45,7 +45,7 @@ static int emac_set_link_ksettings(struct net_device *ndev,
return phy_ethtool_set_link_ksettings(ndev, ecmd);
}
-static int emac_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int emac_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
if (!ndev->phydev)
return -EOPNOTSUPP;
@@ -53,7 +53,7 @@ static int emac_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
return phy_ethtool_get_eee(ndev->phydev, edata);
}
-static int emac_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+static int emac_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
{
if (!ndev->phydev)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 411898a4f38c..cf7b73f8f450 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1489,9 +1489,6 @@ static int emac_ndo_stop(struct net_device *ndev)
/* Destroying the queued work in ndo_stop() */
cancel_delayed_work_sync(&emac->stats_work);
- /* stop PRUs */
- prueth_emac_stop(emac);
-
if (prueth->emacs_initialized == 1)
icss_iep_exit(emac->iep);
@@ -1502,7 +1499,6 @@ static int emac_ndo_stop(struct net_device *ndev)
free_irq(emac->rx_chns.irq[rx_flow], emac);
prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
- prueth_cleanup_tx_chns(emac);
prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
prueth_cleanup_tx_chns(emac);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d5b75af163d3..5ee8e8980393 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -384,18 +384,18 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
- descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
- if (!descr->skb) {
- descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */
- return -ENOMEM;
- }
descr->hw_regs.dmac_cmd_status = 0;
descr->hw_regs.result_size = 0;
descr->hw_regs.valid_size = 0;
descr->hw_regs.data_error = 0;
descr->hw_regs.payload.dev_addr = 0;
descr->hw_regs.payload.size = 0;
- descr->skb = NULL;
+
+ descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
+ if (!descr->skb) {
+ descr->hw_regs.payload.dev_addr = 0; /* tell the DMAC not to touch memory */
+ return -ENOMEM;
+ }
offset = ((unsigned long)descr->skb->data) &
(GELIC_NET_RXBUF_ALIGN - 1);
@@ -698,7 +698,7 @@ gelic_card_get_next_tx_descr(struct gelic_card *card)
}
/**
- * gelic_net_set_txdescr_cmdstat - sets the tx descriptor command field
+ * gelic_descr_set_tx_cmdstat - sets the tx descriptor command field
* @descr: descriptor structure to fill out
* @skb: packet to consider
*
@@ -1461,7 +1461,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
}
/**
- * gelic_ether_setup_netdev - initialization of net_device
+ * gelic_net_setup_netdev - initialization of net_device
* @netdev: net_device structure
* @card: card structure
*
@@ -1518,14 +1518,16 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
return 0;
}
+#define GELIC_ALIGN (32)
+
/**
* gelic_alloc_card_net - allocates net_device and card structure
+ * @netdev: interface device structure
*
* returns the card structure or NULL in case of errors
*
* the card and net_device structures are linked to each other
*/
-#define GELIC_ALIGN (32)
static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
{
struct gelic_card *card;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 1db754615cca..945c13d1a982 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1958,8 +1958,6 @@ int wx_sw_init(struct wx *wx)
return -ENOMEM;
}
- wx->msix_in_use = false;
-
return 0;
}
EXPORT_SYMBOL(wx_sw_init);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 8706223a6e5a..6dff2c85682d 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1257,7 +1257,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
/* compute header lengths */
l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
- *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) :
+ *hdr_len = enc ? skb_inner_transport_offset(skb) :
skb_transport_offset(skb);
*hdr_len += l4len;
@@ -1614,14 +1614,12 @@ static int wx_acquire_msix_vectors(struct wx *wx)
/* One for non-queue interrupts */
nvecs += 1;
- if (!wx->msix_in_use) {
- wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!wx->msix_entry) {
- kfree(wx->msix_q_entries);
- wx->msix_q_entries = NULL;
- return -ENOMEM;
- }
+ wx->msix_entry = kcalloc(1, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_entry) {
+ kfree(wx->msix_q_entries);
+ wx->msix_q_entries = NULL;
+ return -ENOMEM;
}
nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
@@ -1931,10 +1929,8 @@ void wx_reset_interrupt_capability(struct wx *wx)
if (pdev->msix_enabled) {
kfree(wx->msix_q_entries);
wx->msix_q_entries = NULL;
- if (!wx->msix_in_use) {
- kfree(wx->msix_entry);
- wx->msix_entry = NULL;
- }
+ kfree(wx->msix_entry);
+ wx->msix_entry = NULL;
}
pci_free_irq_vectors(wx->pdev);
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b4dc4f341117..1fdeb464d5f4 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -1047,7 +1047,6 @@ struct wx {
unsigned int queues_per_pool;
struct msix_entry *msix_q_entries;
struct msix_entry *msix_entry;
- bool msix_in_use;
struct wx_ring_feature ring_feature[RING_F_ARRAY_SIZE];
/* misc interrupt status block */
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 7507f762edfe..42718875277c 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_TXGBE) += txgbe.o
txgbe-objs := txgbe_main.o \
txgbe_hw.o \
txgbe_phy.o \
+ txgbe_irq.o \
txgbe_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
new file mode 100644
index 000000000000..b3e3605d1edb
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/irqdomain.h>
+#include <linux/pci.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_hw.h"
+#include "txgbe_type.h"
+#include "txgbe_phy.h"
+#include "txgbe_irq.h"
+
+/**
+ * txgbe_irq_enable - Enable default interrupt generation settings
+ * @wx: pointer to private structure
+ * @queues: enable irqs for queues
+ **/
+void txgbe_irq_enable(struct wx *wx, bool queues)
+{
+ wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
+
+ /* unmask interrupt */
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ if (queues)
+ wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
+}
+
+/**
+ * txgbe_intr - msi/legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ struct pci_dev *pdev;
+ u32 eicr;
+
+ q_vector = wx->q_vector[0];
+ pdev = wx->pdev;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * re-enable the interrupt that we masked before the ICR read
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ wx->isb_mem[WX_ISB_MISC] = 0;
+ /* we would disable interrupts here, but they are auto-disabled */
+ napi_schedule_irqoff(&q_vector->napi);
+
+ /* re-enable link (maybe) and non-queue interrupts, no flush.
+ * txgbe_poll will re-enable the queue interrupts
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * txgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @wx: board private structure
+ *
+ * Allocate MSI-X vectors and request interrupts from the kernel.
+ **/
+static int txgbe_request_msix_irqs(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_q_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
+
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
+ }
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_q_entries[vector].vector,
+ wx->q_vector[vector]);
+ }
+ wx_reset_interrupt_capability(wx);
+ return err;
+}
+
+/**
+ * txgbe_request_irq - initialize interrupts
+ * @wx: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+int txgbe_request_irq(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ if (pdev->msix_enabled)
+ err = txgbe_request_msix_irqs(wx);
+ else if (pdev->msi_enabled)
+ err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
+ netdev->name, wx);
+ else
+ err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
+ netdev->name, wx);
+
+ if (err)
+ wx_err(wx, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static int txgbe_request_gpio_irq(struct txgbe *txgbe)
+{
+ txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ return request_threaded_irq(txgbe->gpio_irq, NULL,
+ txgbe_gpio_irq_handler,
+ IRQF_ONESHOT, "txgbe-gpio-irq", txgbe);
+}
+
+static int txgbe_request_link_irq(struct txgbe *txgbe)
+{
+ txgbe->link_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+ return request_threaded_irq(txgbe->link_irq, NULL,
+ txgbe_link_irq_handler,
+ IRQF_ONESHOT, "txgbe-link-irq", txgbe);
+}
+
+static const struct irq_chip txgbe_irq_chip = {
+ .name = "txgbe-misc-irq",
+};
+
+static int txgbe_misc_irq_domain_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct txgbe *txgbe = d->host_data;
+
+ irq_set_chip_data(irq, txgbe);
+ irq_set_chip(irq, &txgbe->misc.chip);
+ irq_set_nested_thread(irq, true);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
+ .map = txgbe_misc_irq_domain_map,
+};
+
+static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ unsigned int nhandled = 0;
+ unsigned int sub_irq;
+ u32 eicr;
+
+ eicr = wx_misc_isb(wx, WX_ISB_MISC);
+ if (eicr & TXGBE_PX_MISC_GPIO) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+ if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
+ TXGBE_PX_MISC_ETH_AN)) {
+ sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK);
+ handle_nested_irq(sub_irq);
+ nhandled++;
+ }
+
+ wx_intr_enable(wx, TXGBE_INTR_MISC);
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void txgbe_del_irq_domain(struct txgbe *txgbe)
+{
+ int hwirq, virq;
+
+ for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++) {
+ virq = irq_find_mapping(txgbe->misc.domain, hwirq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(txgbe->misc.domain);
+}
+
+void txgbe_free_misc_irq(struct txgbe *txgbe)
+{
+ free_irq(txgbe->gpio_irq, txgbe);
+ free_irq(txgbe->link_irq, txgbe);
+ free_irq(txgbe->misc.irq, txgbe);
+ txgbe_del_irq_domain(txgbe);
+}
+
+int txgbe_setup_misc_irq(struct txgbe *txgbe)
+{
+ struct wx *wx = txgbe->wx;
+ int hwirq, err;
+
+ txgbe->misc.nirqs = 2;
+ txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0,
+ &txgbe_misc_irq_domain_ops, txgbe);
+ if (!txgbe->misc.domain)
+ return -ENOMEM;
+
+ for (hwirq = 0; hwirq < txgbe->misc.nirqs; hwirq++)
+ irq_create_mapping(txgbe->misc.domain, hwirq);
+
+ txgbe->misc.chip = txgbe_irq_chip;
+ if (wx->pdev->msix_enabled)
+ txgbe->misc.irq = wx->msix_entry->vector;
+ else
+ txgbe->misc.irq = wx->pdev->irq;
+
+ err = request_threaded_irq(txgbe->misc.irq, NULL,
+ txgbe_misc_irq_handle,
+ IRQF_ONESHOT,
+ wx->netdev->name, txgbe);
+ if (err)
+ goto del_misc_irq;
+
+ err = txgbe_request_gpio_irq(txgbe);
+ if (err)
+ goto free_misc_irq;
+
+ err = txgbe_request_link_irq(txgbe);
+ if (err)
+ goto free_gpio_irq;
+
+ return 0;
+
+free_gpio_irq:
+ free_irq(txgbe->gpio_irq, txgbe);
+free_misc_irq:
+ free_irq(txgbe->misc.irq, txgbe);
+del_misc_irq:
+ txgbe_del_irq_domain(txgbe);
+
+ return err;
+}
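
The new file above is an instance of the nested-interrupt demultiplexing pattern: one parent threaded IRQ reads a status register and fans out to per-cause virtual IRQs created on a simple irq_domain. A condensed, self-contained sketch of the same pattern follows; all names (demux_*, NCAUSES) are illustrative, and the pending mask is hard-coded where a real driver would read hardware:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#define NCAUSES 2			/* e.g. GPIO and LINK */

static irqreturn_t demux_parent_thread(int irq, void *data)
{
	struct irq_domain *domain = data;
	unsigned long pending = 0x3;	/* a real driver reads hardware here */
	unsigned long bit;

	/* Children run nested in this thread; the line stays masked
	 * because the parent was requested with IRQF_ONESHOT.
	 */
	for_each_set_bit(bit, &pending, NCAUSES)
		handle_nested_irq(irq_find_mapping(domain, bit));

	return IRQ_HANDLED;
}

static int demux_setup(struct irq_domain *domain, int parent_irq)
{
	int hwirq;

	for (hwirq = 0; hwirq < NCAUSES; hwirq++)
		irq_create_mapping(domain, hwirq);

	return request_threaded_irq(parent_irq, NULL, demux_parent_thread,
				    IRQF_ONESHOT, "demux", domain);
}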
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
new file mode 100644
index 000000000000..b77945e7a0f2
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+void txgbe_irq_enable(struct wx *wx, bool queues);
+int txgbe_request_irq(struct wx *wx);
+void txgbe_free_misc_irq(struct txgbe *txgbe);
+int txgbe_setup_misc_irq(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 3b151c410a5c..bd4624d14ca0 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -17,6 +17,7 @@
#include "txgbe_type.h"
#include "txgbe_hw.h"
#include "txgbe_phy.h"
+#include "txgbe_irq.h"
#include "txgbe_ethtool.h"
char txgbe_driver_name[] = "txgbe";
@@ -76,137 +77,11 @@ static int txgbe_enumerate_functions(struct wx *wx)
return physfns;
}
-/**
- * txgbe_irq_enable - Enable default interrupt generation settings
- * @wx: pointer to private structure
- * @queues: enable irqs for queues
- **/
-static void txgbe_irq_enable(struct wx *wx, bool queues)
-{
- wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK);
-
- /* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
- if (queues)
- wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
-}
-
-/**
- * txgbe_intr - msi/legacy mode Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
-{
- struct wx_q_vector *q_vector;
- struct wx *wx = data;
- struct pci_dev *pdev;
- u32 eicr;
-
- q_vector = wx->q_vector[0];
- pdev = wx->pdev;
-
- eicr = wx_misc_isb(wx, WX_ISB_VEC0);
- if (!eicr) {
- /* shared interrupt alert!
- * the interrupt that we masked before the ICR read.
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, true);
- return IRQ_NONE; /* Not our interrupt */
- }
- wx->isb_mem[WX_ISB_VEC0] = 0;
- if (!(pdev->msi_enabled))
- wr32(wx, WX_PX_INTA, 1);
-
- wx->isb_mem[WX_ISB_MISC] = 0;
- /* would disable interrupts here but it is auto disabled */
- napi_schedule_irqoff(&q_vector->napi);
-
- /* re-enable link(maybe) and non-queue interrupts, no flush.
- * txgbe_poll will re-enable the queue interrupts
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, false);
-
- return IRQ_HANDLED;
-}
-
-/**
- * txgbe_request_msix_irqs - Initialize MSI-X interrupts
- * @wx: board private structure
- *
- * Allocate MSI-X vectors and request interrupts from the kernel.
- **/
-static int txgbe_request_msix_irqs(struct wx *wx)
-{
- struct net_device *netdev = wx->netdev;
- int vector, err;
-
- for (vector = 0; vector < wx->num_q_vectors; vector++) {
- struct wx_q_vector *q_vector = wx->q_vector[vector];
- struct msix_entry *entry = &wx->msix_q_entries[vector];
-
- if (q_vector->tx.ring && q_vector->rx.ring)
- snprintf(q_vector->name, sizeof(q_vector->name) - 1,
- "%s-TxRx-%d", netdev->name, entry->entry);
- else
- /* skip this unused q_vector */
- continue;
-
- err = request_irq(entry->vector, wx_msix_clean_rings, 0,
- q_vector->name, q_vector);
- if (err) {
- wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
- q_vector->name, err);
- goto free_queue_irqs;
- }
- }
-
- return 0;
-
-free_queue_irqs:
- while (vector) {
- vector--;
- free_irq(wx->msix_q_entries[vector].vector,
- wx->q_vector[vector]);
- }
- wx_reset_interrupt_capability(wx);
- return err;
-}
-
-/**
- * txgbe_request_irq - initialize interrupts
- * @wx: board private structure
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-static int txgbe_request_irq(struct wx *wx)
-{
- struct net_device *netdev = wx->netdev;
- struct pci_dev *pdev = wx->pdev;
- int err;
-
- if (pdev->msix_enabled)
- err = txgbe_request_msix_irqs(wx);
- else if (pdev->msi_enabled)
- err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
- netdev->name, wx);
- else
- err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
- netdev->name, wx);
-
- if (err)
- wx_err(wx, "request_irq failed, Error %d\n", err);
-
- return err;
-}
-
static void txgbe_up_complete(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
+ txgbe_reinit_gpio_intr(wx);
wx_control_hw(wx, true);
wx_configure_vectors(wx);
@@ -518,6 +393,7 @@ static void txgbe_shutdown(struct pci_dev *pdev)
int txgbe_setup_tc(struct net_device *dev, u8 tc)
{
struct wx *wx = netdev_priv(dev);
+ struct txgbe *txgbe = wx->priv;
/* Hardware has to reinitialize queues and interrupts to
* match packet buffer alignment. Unfortunately, the
@@ -528,6 +404,7 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
else
txgbe_reset(wx);
+ txgbe_free_misc_irq(txgbe);
wx_clear_interrupt_scheme(wx);
if (tc)
@@ -536,6 +413,7 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
netdev_reset_tc(dev);
wx_init_interrupt_scheme(wx);
+ txgbe_setup_misc_irq(txgbe);
if (netif_running(dev))
txgbe_open(dev);
@@ -751,10 +629,14 @@ static int txgbe_probe(struct pci_dev *pdev,
txgbe->wx = wx;
wx->priv = txgbe;
- err = txgbe_init_phy(txgbe);
+ err = txgbe_setup_misc_irq(txgbe);
if (err)
goto err_release_hw;
+ err = txgbe_init_phy(txgbe);
+ if (err)
+ goto err_free_misc_irq;
+
err = register_netdev(netdev);
if (err)
goto err_remove_phy;
@@ -781,6 +663,8 @@ static int txgbe_probe(struct pci_dev *pdev,
err_remove_phy:
txgbe_remove_phy(txgbe);
+err_free_misc_irq:
+ txgbe_free_misc_irq(txgbe);
err_release_hw:
wx_clear_interrupt_scheme(wx);
wx_control_hw(wx, false);
@@ -813,6 +697,7 @@ static void txgbe_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
txgbe_remove_phy(txgbe);
+ txgbe_free_misc_irq(txgbe);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 1b84d495d14e..93295916b1d2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -292,6 +292,21 @@ static int txgbe_phylink_init(struct txgbe *txgbe)
return 0;
}
+irqreturn_t txgbe_link_irq_handler(int irq, void *data)
+{
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ u32 status;
+ bool up;
+
+ status = rd32(wx, TXGBE_CFG_PORT_ST);
+ up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
+
+ phylink_mac_change(wx->phylink, up);
+
+ return IRQ_HANDLED;
+}
+
static int txgbe_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct wx *wx = gpiochip_get_data(chip);
@@ -437,7 +452,7 @@ static int txgbe_gpio_set_type(struct irq_data *d, unsigned int type)
}
static const struct irq_chip txgbe_gpio_irq_chip = {
- .name = "txgbe_gpio_irq",
+ .name = "txgbe-gpio-irq",
.irq_ack = txgbe_gpio_irq_ack,
.irq_mask = txgbe_gpio_irq_mask,
.irq_unmask = txgbe_gpio_irq_unmask,
@@ -446,29 +461,25 @@ static const struct irq_chip txgbe_gpio_irq_chip = {
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
-static void txgbe_irq_handler(struct irq_desc *desc)
+irqreturn_t txgbe_gpio_irq_handler(int irq, void *data)
{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct wx *wx = irq_desc_get_handler_data(desc);
- struct txgbe *txgbe = wx->priv;
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
irq_hw_number_t hwirq;
unsigned long gpioirq;
struct gpio_chip *gc;
unsigned long flags;
- u32 eicr;
-
- eicr = wx_misc_isb(wx, WX_ISB_MISC);
-
- chained_irq_enter(chip, desc);
gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
gc = txgbe->gpio;
for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
int gpio = irq_find_mapping(gc->irq.domain, hwirq);
+ struct irq_data *d = irq_get_irq_data(gpio);
u32 irq_type = irq_get_trigger_type(gpio);
- generic_handle_domain_irq(gc->irq.domain, hwirq);
+ txgbe_gpio_irq_ack(d);
+ handle_nested_irq(gpio);
if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
raw_spin_lock_irqsave(&wx->gpio_lock, flags);
@@ -477,17 +488,34 @@ static void txgbe_irq_handler(struct irq_desc *desc)
}
}
- chained_irq_exit(chip, desc);
+ return IRQ_HANDLED;
+}
+
+void txgbe_reinit_gpio_intr(struct wx *wx)
+{
+ struct txgbe *txgbe = wx->priv;
+ irq_hw_number_t hwirq;
+ unsigned long gpioirq;
+ struct gpio_chip *gc;
+ unsigned long flags;
+
+ /* ack any GPIO interrupt left pending from before the IRQ was enabled */
+ gpioirq = rd32(wx, WX_GPIO_INTSTATUS);
- if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN |
- TXGBE_PX_MISC_ETH_AN)) {
- u32 reg = rd32(wx, TXGBE_CFG_PORT_ST);
+ gc = txgbe->gpio;
+ for_each_set_bit(hwirq, &gpioirq, gc->ngpio) {
+ int gpio = irq_find_mapping(gc->irq.domain, hwirq);
+ struct irq_data *d = irq_get_irq_data(gpio);
+ u32 irq_type = irq_get_trigger_type(gpio);
- phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP));
- }
+ txgbe_gpio_irq_ack(d);
- /* unmask interrupt */
- wx_intr_enable(wx, TXGBE_INTR_MISC);
+ if ((irq_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ raw_spin_lock_irqsave(&wx->gpio_lock, flags);
+ txgbe_toggle_trigger(gc, hwirq);
+ raw_spin_unlock_irqrestore(&wx->gpio_lock, flags);
+ }
+ }
}
static int txgbe_gpio_init(struct txgbe *txgbe)
@@ -524,19 +552,6 @@ static int txgbe_gpio_init(struct txgbe *txgbe)
girq = &gc->irq;
gpio_irq_chip_set_chip(girq, &txgbe_gpio_irq_chip);
- girq->parent_handler = txgbe_irq_handler;
- girq->parent_handler_data = wx;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(dev, girq->num_parents,
- sizeof(*girq->parents), GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
-
- /* now only suuported on MSI-X interrupt */
- if (!wx->msix_entry)
- return -EPERM;
-
- girq->parents[0] = wx->msix_entry->vector;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
@@ -754,8 +769,6 @@ int txgbe_init_phy(struct txgbe *txgbe)
goto err_unregister_i2c;
}
- wx->msix_in_use = true;
-
return 0;
err_unregister_i2c:
@@ -788,5 +801,4 @@ void txgbe_remove_phy(struct txgbe *txgbe)
phylink_destroy(txgbe->wx->phylink);
xpcs_destroy(txgbe->xpcs);
software_node_unregister_node_group(txgbe->nodes.group);
- txgbe->wx->msix_in_use = false;
}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
index 1ab592124986..8a026d804fe2 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.h
@@ -4,6 +4,9 @@
#ifndef _TXGBE_PHY_H_
#define _TXGBE_PHY_H_
+irqreturn_t txgbe_gpio_irq_handler(int irq, void *data);
+void txgbe_reinit_gpio_intr(struct wx *wx);
+irqreturn_t txgbe_link_irq_handler(int irq, void *data);
int txgbe_init_phy(struct txgbe *txgbe);
void txgbe_remove_phy(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 270a6fd9ad0b..1b4ff50d5857 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -5,6 +5,7 @@
#define _TXGBE_TYPE_H_
#include <linux/property.h>
+#include <linux/irq.h>
/* Device IDs */
#define TXGBE_DEV_ID_SP1000 0x1001
@@ -169,15 +170,31 @@ struct txgbe_nodes {
const struct software_node *group[SWNODE_MAX + 1];
};
+enum txgbe_misc_irqs {
+ TXGBE_IRQ_GPIO = 0,
+ TXGBE_IRQ_LINK,
+ TXGBE_IRQ_MAX
+};
+
+struct txgbe_irq {
+ struct irq_chip chip;
+ struct irq_domain *domain;
+ int nirqs;
+ int irq;
+};
+
struct txgbe {
struct wx *wx;
struct txgbe_nodes nodes;
+ struct txgbe_irq misc;
struct dw_xpcs *xpcs;
struct platform_device *sfp_dev;
struct platform_device *i2c_dev;
struct clk_lookup *clock;
struct clk *clk;
struct gpio_chip *gpio;
+ unsigned int gpio_irq;
+ unsigned int link_irq;
};
#endif /* _TXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 3318b50a5911..f165616f36fe 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -539,8 +539,7 @@ static int w5300_hw_probe(struct platform_device *pdev)
eth_hw_addr_random(ndev);
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
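
devm_platform_get_and_ioremap_resource() folds the platform_get_resource()/devm_ioremap_resource() pair into one call with identical error semantics. A sketch of the converted shape (example_probe is hypothetical):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* mem still describes the region if start/size are needed */
	return 0;
}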
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 765aa516aada..940452d0a4d2 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1114,8 +1114,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
ndev->irq = rc;
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
+ lp->base_addr = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
if (IS_ERR(lp->base_addr)) {
rc = PTR_ERR(lp->base_addr);
goto error;
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 9f505cf02d96..e9bc38fd2025 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1240,9 +1240,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
SelectPage(0);
PutWord(XIRCREG0_TRS, (u_short)pktlen+2);
- freespace = GetWord(XIRCREG0_TSO);
- okay = freespace & 0x8000;
- freespace &= 0x7fff;
+ freespace = GetWord(XIRCREG0_TSO) & 0x7fff;
/* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */
okay = pktlen +2 < freespace;
pr_debug("%s: avail. tx space=%u%s\n",
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 2b6a607ac0b7..a273362c9e70 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -153,6 +153,7 @@ static const struct pci_device_id skfddi_pci_tbl[] = {
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, skfddi_pci_tbl);
+MODULE_DESCRIPTION("SysKonnect FDDI PCI driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 32c51c244153..2f6739fe78af 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -221,7 +221,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
struct genevehdr *gnvh = geneve_hdr(skb);
struct metadata_dst *tun_dst = NULL;
unsigned int len;
- int err = 0;
+ int nh, err = 0;
void *oiph;
if (ip_tunnel_collect_metadata() || gs->collect_md) {
@@ -272,9 +272,23 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
skb->pkt_type = PACKET_HOST;
}
- oiph = skb_network_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_reset_network_header(skb);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(geneve->dev, rx_length_errors);
+ DEV_STATS_INC(geneve->dev, rx_errors);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (geneve_get_sk_family(gs) == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
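
The hunk above hinges on one rule: pskb_inet_may_pull(), like any pull helper, may reallocate skb->head, so a pointer computed before the pull can dangle; only an offset survives. A minimal sketch of the save-offset/re-derive idiom, using the generic pskb_may_pull() and a hypothetical helper name:

#include <linux/skbuff.h>

/* Returns a pointer to the outer network header after pulling len
 * bytes, or NULL if the pull fails (caller drops the packet).
 */
static void *outer_header_after_pull(struct sk_buff *skb, unsigned int len)
{
	int nh = skb_network_header(skb) - skb->head;	/* offset survives */

	if (!pskb_may_pull(skb, len))	/* may reallocate skb->head */
		return NULL;

	return skb->head + nh;		/* re-derive, never reuse the old ptr */
}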
@@ -319,22 +333,16 @@ static int geneve_init(struct net_device *dev)
struct geneve_dev *geneve = netdev_priv(dev);
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&geneve->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
err = dst_cache_init(&geneve->cfg.info.dst_cache, GFP_KERNEL);
if (err) {
- free_percpu(dev->tstats);
gro_cells_destroy(&geneve->gro_cells);
return err;
}
+ netdev_lockdep_set_classes(dev);
return 0;
}
@@ -344,7 +352,6 @@ static void geneve_uninit(struct net_device *dev)
dst_cache_destroy(&geneve->cfg.info.dst_cache);
gro_cells_destroy(&geneve->gro_cells);
- free_percpu(dev->tstats);
}
/* Callback from net/ipv4/udp.c to receive packets */
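
The geneve_init()/geneve_uninit() hunks above, together with the geneve_setup() change further down, move the driver to core-managed per-CPU tstats: the driver declares the stats type and the core allocates, aggregates (no .ndo_get_stats64 needed) and frees the counters. A sketch of the whole conversion in one place (example_setup is hypothetical):

#include <linux/netdevice.h>

static void example_setup(struct net_device *dev)
{
	/* Core allocates per-CPU tstats at register time, provides a
	 * default stats64 implementation, and frees the counters on
	 * unregister -- no ndo_init/ndo_uninit bookkeeping needed.
	 */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}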
@@ -507,7 +514,7 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
gh_len = geneve_hlen(gh);
hlen = off_gnv + gh_len;
- if (skb_gro_header_hard(skb, hlen)) {
+ if (!skb_gro_may_pull(skb, hlen)) {
gh = skb_gro_header_slow(skb, hlen, off_gnv);
if (unlikely(!gh))
goto out;
@@ -1121,7 +1128,6 @@ static const struct net_device_ops geneve_netdev_ops = {
.ndo_open = geneve_open,
.ndo_stop = geneve_stop,
.ndo_start_xmit = geneve_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = geneve_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
@@ -1141,7 +1147,7 @@ static const struct ethtool_ops geneve_ethtool_ops = {
};
/* Info for udev, that this is a virtual tunnel endpoint */
-static struct device_type geneve_type = {
+static const struct device_type geneve_type = {
.name = "geneve",
};
@@ -1188,6 +1194,7 @@ static void geneve_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
/* MTU range: 68 - (something less than 65535) */
dev->min_mtu = ETH_MIN_MTU;
/* The max_mtu calculation does not take account of GENEVE
@@ -1900,29 +1907,26 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
}
}
-static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
+static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
- geneve_destroy_tunnels(net, &list);
-
- /* unregister the devices gathered above */
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ geneve_destroy_tunnels(net, dev_to_kill);
+}
- list_for_each_entry(net, net_list, exit_list) {
- const struct geneve_net *gn = net_generic(net, geneve_net_id);
+static void __net_exit geneve_exit_net(struct net *net)
+{
+ const struct geneve_net *gn = net_generic(net, geneve_net_id);
- WARN_ON_ONCE(!list_empty(&gn->sock_list));
- }
+ WARN_ON_ONCE(!list_empty(&gn->sock_list));
}
static struct pernet_operations geneve_net_ops = {
.init = geneve_init_net,
- .exit_batch = geneve_exit_batch_net,
+ .exit_batch_rtnl = geneve_exit_batch_rtnl,
+ .exit = geneve_exit_net,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),
};
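
exit_batch_rtnl is the pernet hook this conversion targets: the core acquires rtnl_lock() once per exiting batch, calls the hook for each netns so it can queue its devices on dev_to_kill, then unregisters the whole list in one go, removing the per-driver lock/unregister boilerplate seen in the deleted lines. A hedged sketch of a hook implementation, with example_link_ops as a placeholder:

#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>

static struct rtnl_link_ops example_link_ops;	/* placeholder */

static void __net_exit example_exit_batch_rtnl(struct list_head *net_list,
					       struct list_head *dev_to_kill)
{
	struct net *net;
	struct net_device *dev, *next;

	/* Called with RTNL already held; just queue devices. */
	list_for_each_entry(net, net_list, exit_list)
		for_each_netdev_safe(net, dev, next)
			if (dev->rtnl_link_ops == &example_link_ops)
				unregister_netdevice_queue(dev, dev_to_kill);
}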
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index b1919278e931..ba4704c2c640 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -711,25 +711,11 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
return ret;
}
-static int gtp_dev_init(struct net_device *dev)
-{
- struct gtp_dev *gtp = netdev_priv(dev);
-
- gtp->dev = dev;
-
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
- return 0;
-}
-
static void gtp_dev_uninit(struct net_device *dev)
{
struct gtp_dev *gtp = netdev_priv(dev);
gtp_encap_disable(gtp);
- free_percpu(dev->tstats);
}
static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
@@ -942,10 +928,8 @@ tx_err:
}
static const struct net_device_ops gtp_netdev_ops = {
- .ndo_init = gtp_dev_init,
.ndo_uninit = gtp_dev_uninit,
.ndo_start_xmit = gtp_dev_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
};
static const struct device_type gtp_type = {
@@ -957,6 +941,7 @@ static void gtp_link_setup(struct net_device *dev)
unsigned int max_gtp_header_len = sizeof(struct iphdr) +
sizeof(struct udphdr) +
sizeof(struct gtp0_header);
+ struct gtp_dev *gtp = netdev_priv(dev);
dev->netdev_ops = &gtp_netdev_ops;
dev->needs_free_netdev = true;
@@ -970,11 +955,13 @@ static void gtp_link_setup(struct net_device *dev)
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->priv_flags |= IFF_NO_QUEUE;
dev->features |= NETIF_F_LLTX;
netif_keep_dst(dev);
dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len;
+ gtp->dev = dev;
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
@@ -1876,23 +1863,23 @@ static int __net_init gtp_net_init(struct net *net)
return 0;
}
-static void __net_exit gtp_net_exit(struct net *net)
+static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
- struct gtp_net *gn = net_generic(net, gtp_net_id);
- struct gtp_dev *gtp;
- LIST_HEAD(list);
+ struct net *net;
- rtnl_lock();
- list_for_each_entry(gtp, &gn->gtp_dev_list, list)
- gtp_dellink(gtp->dev, &list);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp;
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+ gtp_dellink(gtp->dev, dev_to_kill);
+ }
}
static struct pernet_operations gtp_net_ops = {
.init = gtp_net_init,
- .exit = gtp_net_exit,
+ .exit_batch_rtnl = gtp_net_exit_batch_rtnl,
.id = &gtp_net_id,
.size = sizeof(struct gtp_net),
};
@@ -1903,26 +1890,26 @@ static int __init gtp_init(void)
get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
- err = rtnl_link_register(&gtp_link_ops);
+ err = register_pernet_subsys(&gtp_net_ops);
if (err < 0)
goto error_out;
- err = genl_register_family(&gtp_genl_family);
+ err = rtnl_link_register(&gtp_link_ops);
if (err < 0)
- goto unreg_rtnl_link;
+ goto unreg_pernet_subsys;
- err = register_pernet_subsys(&gtp_net_ops);
+ err = genl_register_family(&gtp_genl_family);
if (err < 0)
- goto unreg_genl_family;
+ goto unreg_rtnl_link;
pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
sizeof(struct pdp_ctx));
return 0;
-unreg_genl_family:
- genl_unregister_family(&gtp_genl_family);
unreg_rtnl_link:
rtnl_link_unregister(&gtp_link_ops);
+unreg_pernet_subsys:
+ unregister_pernet_subsys(&gtp_net_ops);
error_out:
pr_err("error loading GTP module loaded\n");
return err;
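
The reordering in gtp_init() follows the usual rule for module init: register the facility that later callbacks depend on first (the pernet subsys, which must exist once link ops can create devices), and make the error unwind a strict mirror of the registration order. A generic sketch under that assumption, with placeholder ops structures:

#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>

static struct pernet_operations example_net_ops;	/* placeholders */
static struct rtnl_link_ops example_link_ops;
static struct genl_family example_genl_family;

static int __init example_init(void)
{
	int err;

	err = register_pernet_subsys(&example_net_ops);
	if (err)
		return err;

	err = rtnl_link_register(&example_link_ops);	/* may create devs */
	if (err)
		goto unreg_pernet;

	err = genl_register_family(&example_genl_family);
	if (err)
		goto unreg_rtnl_link;

	return 0;

unreg_rtnl_link:
	rtnl_link_unregister(&example_link_ops);
unreg_pernet:
	unregister_pernet_subsys(&example_net_ops);
	return err;
}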
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 273bd8a20122..11831a1c9762 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -42,6 +42,10 @@
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
+/* Macros to define the context of vf registration */
+#define VF_REG_IN_PROBE 1
+#define VF_REG_IN_NOTIFIER 2
+
static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
@@ -2185,7 +2189,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
}
static int netvsc_vf_join(struct net_device *vf_netdev,
- struct net_device *ndev)
+ struct net_device *ndev, int context)
{
struct net_device_context *ndev_ctx = netdev_priv(ndev);
int ret;
@@ -2208,7 +2212,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
goto upper_link_failed;
}
- schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
+ /* If this registration is called from probe context, vf_takeover
+ * is taken care of later in probe itself.
+ */
+ if (context == VF_REG_IN_NOTIFIER)
+ schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
@@ -2346,7 +2354,7 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev)
return NOTIFY_DONE;
}
-static int netvsc_register_vf(struct net_device *vf_netdev)
+static int netvsc_register_vf(struct net_device *vf_netdev, int context)
{
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
@@ -2386,7 +2394,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
- if (netvsc_vf_join(vf_netdev, ndev) != 0)
+ if (netvsc_vf_join(vf_netdev, ndev, context) != 0)
return NOTIFY_DONE;
dev_hold(vf_netdev);
@@ -2484,10 +2492,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
return NOTIFY_OK;
}
+static int check_dev_is_matching_vf(struct net_device *event_ndev)
+{
+ /* Skip NetVSC interfaces */
+ if (event_ndev->netdev_ops == &device_ops)
+ return -ENODEV;
+
+ /* Avoid non-Ethernet type devices */
+ if (event_ndev->type != ARPHRD_ETHER)
+ return -ENODEV;
+
+ /* Avoid Vlan dev with same MAC registering as VF */
+ if (is_vlan_dev(event_ndev))
+ return -ENODEV;
+
+ /* Avoid Bonding master dev with same MAC registering as VF */
+ if (netif_is_bond_master(event_ndev))
+ return -ENODEV;
+
+ return 0;
+}
+
static int netvsc_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
- struct net_device *net = NULL;
+ struct net_device *net = NULL, *vf_netdev;
struct net_device_context *net_device_ctx;
struct netvsc_device_info *device_info = NULL;
struct netvsc_device *nvdev;
@@ -2599,6 +2628,30 @@ static int netvsc_probe(struct hv_device *dev,
}
list_add(&net_device_ctx->list, &netvsc_dev_list);
+
+ /* When the hv_netvsc driver is unloaded and reloaded, the
+ * NETDEV_REGISTER for the VF device is replayed before probe
+ * is complete. This is because register_netdevice_notifier() gets
+ * registered before vmbus_driver_register(), so that the callback
+ * is set before probe and we don't miss events like NETDEV_POST_INIT.
+ * So, in this section we try to register the matching VF device that
+ * is present as a netdevice, knowing that its register call is not
+ * processed in netvsc_netdev_notifier (as probing is in progress and
+ * get_netvsc_byslot() fails).
+ */
+ for_each_netdev(dev_net(net), vf_netdev) {
+ ret = check_dev_is_matching_vf(vf_netdev);
+ if (ret != 0)
+ continue;
+
+ if (net != get_netvsc_byslot(vf_netdev))
+ continue;
+
+ netvsc_prepare_bonding(vf_netdev);
+ netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE);
+ __netvsc_vf_setup(net, vf_netdev);
+ break;
+ }
rtnl_unlock();
netvsc_devinfo_put(device_info);
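
The probe-side loop above compensates for notifier replay: because the netdevice notifier is registered before vmbus probing completes, a VF's NETDEV_REGISTER can arrive too early to be acted on, so probe re-scans the existing netdevs under RTNL and replays the registration by hand. A stripped-down sketch of that scan; replay_missed_registers and check_matching are hypothetical:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void replay_missed_registers(struct net *net,
				    int (*check_matching)(struct net_device *))
{
	struct net_device *dev;

	ASSERT_RTNL();		/* for_each_netdev() needs RTNL held */

	for_each_netdev(net, dev) {
		if (check_matching(dev))
			continue;
		/* act here as if NETDEV_REGISTER had just fired for dev */
	}
}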
@@ -2754,28 +2807,17 @@ static int netvsc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ int ret = 0;
- /* Skip our own events */
- if (event_dev->netdev_ops == &device_ops)
- return NOTIFY_DONE;
-
- /* Avoid non-Ethernet type devices */
- if (event_dev->type != ARPHRD_ETHER)
- return NOTIFY_DONE;
-
- /* Avoid Vlan dev with same MAC registering as VF */
- if (is_vlan_dev(event_dev))
- return NOTIFY_DONE;
-
- /* Avoid Bonding master dev with same MAC registering as VF */
- if (netif_is_bond_master(event_dev))
+ ret = check_dev_is_matching_vf(event_dev);
+ if (ret != 0)
return NOTIFY_DONE;
switch (event) {
case NETDEV_POST_INIT:
return netvsc_prepare_bonding(event_dev);
case NETDEV_REGISTER:
- return netvsc_register_vf(event_dev);
+ return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER);
case NETDEV_UNREGISTER:
return netvsc_unregister_vf(event_dev);
case NETDEV_UP:
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 164c7f605af5..f632b0cfd5ae 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -11,17 +11,16 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/gpio/consumer.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
#include <linux/skbuff.h>
-#include <linux/of_gpio.h>
#include <linux/ieee802154.h>
#include <net/mac802154.h>
@@ -316,7 +315,7 @@ static const struct regmap_config at86rf230_regmap_spi_config = {
.val_bits = 8,
.write_flag_mask = CMD_REG | CMD_WRITE,
.read_flag_mask = CMD_REG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = AT86RF2XX_NUMREGS,
.writeable_reg = at86rf230_reg_writeable,
.readable_reg = at86rf230_reg_readable,
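
Several 802.15.4 drivers in this series make the same one-line regmap change: REGCACHE_RBTREE becomes REGCACHE_MAPLE, the maple-tree cache now being the preferred choice, and nothing else in the config moves. A sketch with placeholder register parameters:

#include <linux/regmap.h>

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x3f,		/* placeholder */
	.cache_type = REGCACHE_MAPLE,	/* was REGCACHE_RBTREE */
};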
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index f732c150462b..e685a7f946f0 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2857,19 +2857,13 @@ static int ca8210_interrupt_init(struct spi_device *spi)
*/
static int ca8210_dev_com_init(struct ca8210_priv *priv)
{
- priv->mlme_workqueue = alloc_ordered_workqueue(
- "MLME work queue",
- WQ_UNBOUND
- );
+ priv->mlme_workqueue = alloc_ordered_workqueue("MLME work queue", 0);
if (!priv->mlme_workqueue) {
dev_crit(&priv->spi->dev, "alloc of mlme_workqueue failed!\n");
return -ENOMEM;
}
- priv->irq_workqueue = alloc_ordered_workqueue(
- "ca8210 irq worker",
- WQ_UNBOUND
- );
+ priv->irq_workqueue = alloc_ordered_workqueue("ca8210 irq worker", 0);
if (!priv->irq_workqueue) {
dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n");
destroy_workqueue(priv->mlme_workqueue);
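
alloc_ordered_workqueue() already ORs WQ_UNBOUND into the flags it passes down to alloc_workqueue(), so the explicit WQ_UNBOUND in the deleted calls was redundant and 0 is sufficient. For example:

#include <linux/workqueue.h>

static struct workqueue_struct *example_make_wq(void)
{
	/* WQ_UNBOUND is implied by the ordered variant */
	return alloc_ordered_workqueue("example wq", 0);
}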
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 35e55f198e05..2930141d7dd2 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -259,4 +259,5 @@ static __exit void fake_remove_module(void)
module_init(fakelb_init_module);
module_exit(fake_remove_module);
+MODULE_DESCRIPTION("IEEE 802.15.4 loopback driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 87abe3b46316..433fb5839203 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -12,7 +12,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/skbuff.h>
-#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/ieee802154.h>
#include <linux/debugfs.h>
@@ -251,7 +250,7 @@ static const struct regmap_config mcr20a_dar_regmap = {
.val_bits = 8,
.write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE,
.read_flag_mask = REGISTER_ACCESS | REGISTER_READ,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.writeable_reg = mcr20a_dar_writeable,
.readable_reg = mcr20a_dar_readable,
.volatile_reg = mcr20a_dar_volatile,
@@ -387,7 +386,7 @@ static const struct regmap_config mcr20a_iar_regmap = {
.val_bits = 8,
.write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE | IAR_INDEX,
.read_flag_mask = REGISTER_ACCESS | REGISTER_READ | IAR_INDEX,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.writeable_reg = mcr20a_iar_writeable,
.readable_reg = mcr20a_iar_readable,
.volatile_reg = mcr20a_iar_volatile,
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index ee4cfbf2c5cc..d3f42efc5d1a 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -388,7 +388,7 @@ static const struct regmap_config mrf24j40_short_regmap = {
.pad_bits = 1,
.write_flag_mask = MRF24J40_SHORT_WRITE,
.read_flag_mask = MRF24J40_SHORT_READ,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = MRF24J40_SHORT_NUMREGS,
.writeable_reg = mrf24j40_short_reg_writeable,
.readable_reg = mrf24j40_short_reg_readable,
@@ -495,7 +495,7 @@ static const struct regmap_config mrf24j40_long_regmap = {
.pad_bits = 5,
.write_flag_mask = MRF24J40_LONG_ACCESS,
.read_flag_mask = MRF24J40_LONG_ACCESS,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.max_register = MRF24J40_LONG_NUMREGS,
.writeable_reg = mrf24j40_long_reg_writeable,
.readable_reg = mrf24j40_long_reg_readable,
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index f3355e040a9e..334cd62cf286 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -21,7 +21,6 @@
struct clk;
struct icc_path;
struct net_device;
-struct platform_device;
struct ipa_power;
struct ipa_smp2p;
@@ -31,7 +30,7 @@ struct ipa_interrupt;
* struct ipa - IPA information
* @gsi: Embedded GSI structure
* @version: IPA hardware version
- * @pdev: Platform device
+ * @dev: IPA device pointer
* @completion: Used to signal pipeline clear transfer complete
* @nb: Notifier block used for remoteproc SSR
* @notifier: Remoteproc SSR notifier
@@ -79,7 +78,7 @@ struct ipa_interrupt;
struct ipa {
struct gsi gsi;
enum ipa_version version;
- struct platform_device *pdev;
+ struct device *dev;
struct completion completion;
struct notifier_block nb;
void *notifier;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index f1419fbd776c..39219963dbb3 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -174,7 +174,7 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
const char *table = route ? "route" : "filter";
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 size;
size = route ? ipa->route_count : ipa->filter_count + 1;
@@ -204,7 +204,7 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
/* Validate the memory region that holds headers */
static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const struct ipa_mem *mem;
u32 offset_max;
u32 size_max;
@@ -256,7 +256,7 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
const char *name, u32 offset)
{
struct ipa_cmd_register_write *payload;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 offset_max;
u32 bit_count;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index afa1d56d9095..dd490941615e 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -233,8 +233,8 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *other_data;
- struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name other_name;
+ struct device *dev = ipa->dev;
if (ipa_gsi_endpoint_data_empty(data))
return true;
@@ -388,7 +388,7 @@ static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
enum ipa_endpoint_name name;
u32 max;
@@ -606,7 +606,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"no transaction to reset modem exception endpoints\n");
return -EBUSY;
}
@@ -1498,8 +1498,7 @@ ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
if (endpoint_id == command_endpoint->endpoint_id) {
complete(&ipa->completion);
} else {
- dev_err(&ipa->pdev->dev,
- "unexpected tagged packet from endpoint %u\n",
+ dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n",
endpoint_id);
}
@@ -1536,6 +1535,7 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
void *data = page_address(page) + NET_SKB_PAD;
u32 unused = buffer_size - total_len;
struct ipa *ipa = endpoint->ipa;
+ struct device *dev = ipa->dev;
u32 resid = total_len;
while (resid) {
@@ -1544,7 +1544,7 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
u32 len;
if (resid < IPA_STATUS_SIZE) {
- dev_err(&endpoint->ipa->pdev->dev,
+ dev_err(dev,
"short message (%u bytes < %zu byte status)\n",
resid, IPA_STATUS_SIZE);
break;
@@ -1666,8 +1666,8 @@ void ipa_endpoint_default_route_clear(struct ipa *ipa)
*/
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
struct ipa *ipa = endpoint->ipa;
+ struct device *dev = ipa->dev;
struct gsi *gsi = &ipa->gsi;
bool suspended = false;
dma_addr_t addr;
@@ -1769,7 +1769,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
gsi_channel_reset(&ipa->gsi, channel_id, true);
if (ret)
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d resetting channel %u for endpoint %u\n",
ret, endpoint->channel_id, endpoint->endpoint_id);
}
@@ -1817,7 +1817,7 @@ int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
ret = gsi_channel_start(gsi, endpoint->channel_id);
if (ret) {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d starting %cX channel %u for endpoint %u\n",
ret, endpoint->toward_ipa ? 'T' : 'R',
endpoint->channel_id, endpoint_id);
@@ -1854,14 +1854,13 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
/* Note that if stop fails, the channel's state is not well-defined */
ret = gsi_channel_stop(gsi, endpoint->channel_id);
if (ret)
- dev_err(&ipa->pdev->dev,
- "error %d attempting to stop endpoint %u\n", ret,
- endpoint_id);
+ dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n",
+ ret, endpoint_id);
}
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
+ struct device *dev = endpoint->ipa->dev;
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
@@ -1881,7 +1880,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
- struct device *dev = &endpoint->ipa->pdev->dev;
+ struct device *dev = endpoint->ipa->dev;
struct gsi *gsi = &endpoint->ipa->gsi;
int ret;
@@ -1983,7 +1982,7 @@ void ipa_endpoint_deconfig(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const struct reg *reg;
u32 endpoint_id;
u32 hw_limit;
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index 4bc05948f772..c3e8784d51d9 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -19,6 +19,7 @@
* time only these three are supported.
*/
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
@@ -43,6 +44,30 @@ struct ipa_interrupt {
u32 enabled;
};
+/* Clear the suspend interrupt for all endpoints that signaled it */
+static void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
+{
+ struct ipa *ipa = interrupt->ipa;
+ u32 unit_count;
+ u32 unit;
+
+ unit_count = DIV_ROUND_UP(ipa->endpoint_count, 32);
+ for (unit = 0; unit < unit_count; unit++) {
+ const struct reg *reg;
+ u32 val;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
+
+ /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
+ if (!val || ipa->version == IPA_VERSION_3_0)
+ continue;
+
+ reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
+ }
+}
+
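
Besides moving the helper, the rewrite fixes the unit count: roundup(n, 32) rounds n up to a multiple of 32, whereas the loop wants the number of 32-endpoint register units, which is DIV_ROUND_UP(n, 32); the added !val test also skips the clear write when nothing is pending. The difference, worked through:

#include <linux/math.h>
#include <linux/types.h>

/* For endpoint_count = 40:
 *   roundup(40, 32)      == 64  -> the old loop scanned 64 register units
 *   DIV_ROUND_UP(40, 32) ==  2  -> only two 32-endpoint units exist
 */
static inline u32 suspend_unit_count(u32 endpoint_count)
{
	return DIV_ROUND_UP(endpoint_count, 32);
}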
/* Process a particular interrupt type that has been received */
static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
@@ -70,7 +95,7 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
* caused the interrupt, so defer clearing until after
* the handler has been called.
*/
- ipa_power_suspend_handler(ipa, irq_id);
+ ipa_interrupt_suspend_clear_all(interrupt);
fallthrough;
default: /* Silently ignore (and clear) any other condition */
@@ -85,14 +110,13 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
+ struct device *dev = ipa->dev;
const struct reg *reg;
- struct device *dev;
u32 pending;
u32 offset;
u32 mask;
int ret;
- dev = &ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0))
goto out_power_put;
@@ -205,30 +229,6 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
ipa_interrupt_suspend_control(interrupt, endpoint_id, false);
}
-/* Clear the suspend interrupt for all endpoints that signaled it */
-void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
-{
- struct ipa *ipa = interrupt->ipa;
- u32 unit_count;
- u32 unit;
-
- unit_count = roundup(ipa->endpoint_count, 32);
- for (unit = 0; unit < unit_count; unit++) {
- const struct reg *reg;
- u32 val;
-
- reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
- val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
-
- /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
- if (ipa->version == IPA_VERSION_3_0)
- continue;
-
- reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
- iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
- }
-}
-
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
{
@@ -236,29 +236,17 @@ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
}
/* Configure the IPA interrupt framework */
-struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
+int ipa_interrupt_config(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
- struct ipa_interrupt *interrupt;
+ struct ipa_interrupt *interrupt = ipa->interrupt;
+ unsigned int irq = interrupt->irq;
+ struct device *dev = ipa->dev;
const struct reg *reg;
- unsigned int irq;
int ret;
- ret = platform_get_irq_byname(ipa->pdev, "ipa");
- if (ret <= 0) {
- dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n",
- ret);
- return ERR_PTR(ret ? : -EINVAL);
- }
- irq = ret;
-
- interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
- if (!interrupt)
- return ERR_PTR(-ENOMEM);
interrupt->ipa = ipa;
- interrupt->irq = irq;
- /* Start with all IPA interrupts disabled */
+ /* Disable all IPA interrupt types */
reg = ipa_reg(ipa, IPA_IRQ_EN);
iowrite32(0, ipa->reg_virt + reg_offset(reg));
@@ -271,26 +259,59 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
ret = dev_pm_set_wake_irq(dev, irq);
if (ret) {
- dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n", ret);
+ dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n",
+ ret);
goto err_free_irq;
}
- return interrupt;
+ ipa->interrupt = interrupt;
+
+ return 0;
err_free_irq:
free_irq(interrupt->irq, interrupt);
err_kfree:
kfree(interrupt);
- return ERR_PTR(ret);
+ return ret;
}
/* Inverse of ipa_interrupt_config() */
-void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
+void ipa_interrupt_deconfig(struct ipa *ipa)
{
- struct device *dev = &interrupt->ipa->pdev->dev;
+ struct ipa_interrupt *interrupt = ipa->interrupt;
+ struct device *dev = ipa->dev;
+
+ ipa->interrupt = NULL;
dev_pm_clear_wake_irq(dev);
free_irq(interrupt->irq, interrupt);
+}
+
+/* Initialize the IPA interrupt structure */
+struct ipa_interrupt *ipa_interrupt_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipa_interrupt *interrupt;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "ipa");
+ if (irq <= 0) {
+ dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n", irq);
+
+ return ERR_PTR(irq ? : -EINVAL);
+ }
+
+ interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
+ if (!interrupt)
+ return ERR_PTR(-ENOMEM);
+ interrupt->irq = irq;
+
+ return interrupt;
+}
+
+/* Inverse of ipa_interrupt_init() */
+void ipa_interrupt_exit(struct ipa_interrupt *interrupt)
+{
kfree(interrupt);
}
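
The init/config split above is the standard two-phase probe pattern: a small init step performs only the operations that may return -EPROBE_DEFER (the IRQ lookup) plus allocation, before the main device structure exists; requesting the IRQ and touching hardware wait for the later config step. A condensed sketch of the init half, with example_* names purely illustrative:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_irq {
	unsigned int irq;
};

static struct example_irq *example_irq_init(struct platform_device *pdev)
{
	struct example_irq *ei;
	int irq;

	irq = platform_get_irq_byname(pdev, "example");
	if (irq < 0)
		return ERR_PTR(irq);	/* may be -EPROBE_DEFER */

	ei = kzalloc(sizeof(*ei), GFP_KERNEL);
	if (!ei)
		return ERR_PTR(-ENOMEM);

	ei->irq = irq;			/* request_irq() happens in config */
	return ei;
}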
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 12e3e798ccb3..f3f4f4330a59 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -35,14 +35,6 @@ void ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt,
u32 endpoint_id);
/**
- * ipa_interrupt_suspend_clear_all - clear all suspend interrupts
- * @interrupt: IPA interrupt structure
- *
- * Clear the TX_SUSPEND interrupt for all endpoints that signaled it.
- */
-void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
-
-/**
* ipa_interrupt_simulate_suspend() - Simulate TX_SUSPEND IPA interrupt
* @interrupt: IPA interrupt structure
*
@@ -84,17 +76,31 @@ void ipa_interrupt_irq_enable(struct ipa *ipa);
void ipa_interrupt_irq_disable(struct ipa *ipa);
/**
- * ipa_interrupt_config() - Configure the IPA interrupt framework
+ * ipa_interrupt_config() - Configure IPA interrupts
* @ipa: IPA pointer
*
- * Return: Pointer to IPA SMP2P info, or a pointer-coded error
+ * Return: 0 if successful, or a negative error code
*/
-struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa);
+int ipa_interrupt_config(struct ipa *ipa);
/**
* ipa_interrupt_deconfig() - Inverse of ipa_interrupt_config()
+ * @ipa: IPA pointer
+ */
+void ipa_interrupt_deconfig(struct ipa *ipa);
+
+/**
+ * ipa_interrupt_init() - Initialize the IPA interrupt structure
+ * @pdev: IPA platform device pointer
+ *
+ * Return: Pointer to an IPA interrupt structure, or a pointer-coded error
+ */
+struct ipa_interrupt *ipa_interrupt_init(struct platform_device *pdev);
+
+/**
+ * ipa_interrupt_exit() - Inverse of ipa_interrupt_init()
* @interrupt: IPA interrupt structure
*/
-void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt);
+void ipa_interrupt_exit(struct ipa_interrupt *interrupt);
#endif /* _IPA_INTERRUPT_H_ */
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 00475fd7a205..57b241417e8c 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -7,7 +7,6 @@
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitfield.h>
-#include <linux/device.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/firmware.h>
@@ -114,7 +113,7 @@ int ipa_setup(struct ipa *ipa)
{
struct ipa_endpoint *exception_endpoint;
struct ipa_endpoint *command_endpoint;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
int ret;
ret = gsi_setup(&ipa->gsi);
@@ -542,12 +541,9 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
if (ret)
goto err_hardware_deconfig;
- ipa->interrupt = ipa_interrupt_config(ipa);
- if (IS_ERR(ipa->interrupt)) {
- ret = PTR_ERR(ipa->interrupt);
- ipa->interrupt = NULL;
+ ret = ipa_interrupt_config(ipa);
+ if (ret)
goto err_mem_deconfig;
- }
ipa_uc_config(ipa);
@@ -572,8 +568,7 @@ err_endpoint_deconfig:
ipa_endpoint_deconfig(ipa);
err_uc_deconfig:
ipa_uc_deconfig(ipa);
- ipa_interrupt_deconfig(ipa->interrupt);
- ipa->interrupt = NULL;
+ ipa_interrupt_deconfig(ipa);
err_mem_deconfig:
ipa_mem_deconfig(ipa);
err_hardware_deconfig:
@@ -591,8 +586,7 @@ static void ipa_deconfig(struct ipa *ipa)
ipa_modem_deconfig(ipa);
ipa_endpoint_deconfig(ipa);
ipa_uc_deconfig(ipa);
- ipa_interrupt_deconfig(ipa->interrupt);
- ipa->interrupt = NULL;
+ ipa_interrupt_deconfig(ipa);
ipa_mem_deconfig(ipa);
ipa_hardware_deconfig(ipa);
}
@@ -808,6 +802,7 @@ out_self:
static int ipa_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct ipa_interrupt *interrupt;
enum ipa_firmware_loader loader;
const struct ipa_data *data;
struct ipa_power *power;
@@ -839,12 +834,21 @@ static int ipa_probe(struct platform_device *pdev)
if (loader == IPA_LOADER_DEFER)
return -EPROBE_DEFER;
- /* The clock and interconnects might not be ready when we're
- * probed, so might return -EPROBE_DEFER.
+ /* The IPA interrupt might not be ready when we're probed, so this
+ * might return -EPROBE_DEFER.
+ */
+ interrupt = ipa_interrupt_init(pdev);
+ if (IS_ERR(interrupt))
+ return PTR_ERR(interrupt);
+
+ /* The clock and interconnects might not be ready when we're probed,
+ * so this might return -EPROBE_DEFER.
*/
power = ipa_power_init(dev, data->power_data);
- if (IS_ERR(power))
- return PTR_ERR(power);
+ if (IS_ERR(power)) {
+ ret = PTR_ERR(power);
+ goto err_interrupt_exit;
+ }
/* No more EPROBE_DEFER. Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
@@ -853,18 +857,19 @@ static int ipa_probe(struct platform_device *pdev)
goto err_power_exit;
}
- ipa->pdev = pdev;
+ ipa->dev = dev;
dev_set_drvdata(dev, ipa);
+ ipa->interrupt = interrupt;
ipa->power = power;
ipa->version = data->version;
ipa->modem_route_count = data->modem_route_count;
init_completion(&ipa->completion);
- ret = ipa_reg_init(ipa);
+ ret = ipa_reg_init(ipa, pdev);
if (ret)
goto err_kfree_ipa;
- ret = ipa_mem_init(ipa, data->mem_data);
+ ret = ipa_mem_init(ipa, pdev, data->mem_data);
if (ret)
goto err_reg_exit;
@@ -882,7 +887,7 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_endpoint_exit;
- ret = ipa_smp2p_init(ipa, loader == IPA_LOADER_MODEM);
+ ret = ipa_smp2p_init(ipa, pdev, loader == IPA_LOADER_MODEM);
if (ret)
goto err_table_exit;
@@ -939,17 +944,27 @@ err_kfree_ipa:
kfree(ipa);
err_power_exit:
ipa_power_exit(power);
+err_interrupt_exit:
+ ipa_interrupt_exit(interrupt);
return ret;
}
static void ipa_remove(struct platform_device *pdev)
{
- struct ipa *ipa = dev_get_drvdata(&pdev->dev);
- struct ipa_power *power = ipa->power;
- struct device *dev = &pdev->dev;
+ struct ipa_interrupt *interrupt;
+ struct ipa_power *power;
+ struct device *dev;
+ struct ipa *ipa;
int ret;
+ ipa = dev_get_drvdata(&pdev->dev);
+ dev = ipa->dev;
+ WARN_ON(dev != &pdev->dev);
+
+ power = ipa->power;
+ interrupt = ipa->interrupt;
+
/* Prevent the modem from triggering a call to ipa_setup(). This
* also ensures a modem-initiated setup that's underway completes.
*/
@@ -991,6 +1006,7 @@ out_power_put:
ipa_reg_exit(ipa);
kfree(ipa);
ipa_power_exit(power);
+ ipa_interrupt_exit(interrupt);
dev_info(dev, "IPA driver removed");
}
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 694960537ecd..709f061ede61 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -9,6 +9,7 @@
#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/soc/qcom/smem.h>
@@ -75,9 +76,9 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
- const struct reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
+ const struct reg *reg;
u32 offset;
u16 size;
u32 val;
@@ -87,7 +88,7 @@ int ipa_mem_setup(struct ipa *ipa)
*/
trans = ipa_cmd_trans_alloc(ipa, 4);
if (!trans) {
- dev_err(&ipa->pdev->dev, "no transaction for memory setup\n");
+ dev_err(ipa->dev, "no transaction for memory setup\n");
return -EBUSY;
}
@@ -217,8 +218,8 @@ static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
- struct device *dev = &ipa->pdev->dev;
enum ipa_mem_id mem_id = mem->id;
+ struct device *dev = ipa->dev;
u16 size_multiple;
/* Make sure the memory region is valid for this version of IPA */
@@ -254,7 +255,7 @@ static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
enum ipa_mem_id mem_id;
u32 i;
@@ -290,7 +291,7 @@ static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
/* Do all memory regions fit within the IPA local memory? */
static bool ipa_mem_size_valid(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 limit = ipa->mem_size;
u32 i;
@@ -317,7 +318,7 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
*/
int ipa_mem_config(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const struct ipa_mem *mem;
const struct reg *reg;
dma_addr_t addr;
@@ -393,7 +394,7 @@ err_dma_free:
/* Inverse of ipa_mem_config() */
void ipa_mem_deconfig(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
dma_free_coherent(dev, ipa->zero_size, ipa->zero_virt, ipa->zero_addr);
ipa->zero_size = 0;
@@ -420,8 +421,7 @@ int ipa_mem_zero_modem(struct ipa *ipa)
*/
trans = ipa_cmd_trans_alloc(ipa, 3);
if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction to zero modem memory\n");
+ dev_err(ipa->dev, "no transaction to zero modem memory\n");
return -EBUSY;
}
@@ -452,7 +452,7 @@ int ipa_mem_zero_modem(struct ipa *ipa)
*/
static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
unsigned long iova;
phys_addr_t phys;
@@ -485,13 +485,12 @@ static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
static void ipa_imem_exit(struct ipa *ipa)
{
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
- struct device *dev;
if (!ipa->imem_size)
return;
- dev = &ipa->pdev->dev;
domain = iommu_get_domain_for_dev(dev);
if (domain) {
size_t size;
@@ -527,7 +526,7 @@ static void ipa_imem_exit(struct ipa *ipa)
*/
static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
unsigned long iova;
phys_addr_t phys;
@@ -594,7 +593,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
static void ipa_smem_exit(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
struct iommu_domain *domain;
domain = iommu_get_domain_for_dev(dev);
@@ -615,9 +614,10 @@ static void ipa_smem_exit(struct ipa *ipa)
}
/* Perform memory region-related initialization */
-int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
+int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
+ const struct ipa_mem_data *mem_data)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
struct resource *res;
int ret;
@@ -634,14 +634,13 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
if (!ipa_table_mem_valid(ipa, true))
return -EINVAL;
- ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(dev, "error %d setting DMA mask\n", ret);
return ret;
}
- res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
- "ipa-shared");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipa-shared");
if (!res) {
dev_err(dev,
"DT error getting \"ipa-shared\" memory property\n");
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index 868e9c20e8c4..28aad00a151d 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -6,6 +6,8 @@
#ifndef _IPA_MEM_H_
#define _IPA_MEM_H_
+struct platform_device;
+
struct ipa;
struct ipa_mem_data;
@@ -100,7 +102,8 @@ int ipa_mem_setup(struct ipa *ipa); /* No ipa_mem_teardown() needed */
int ipa_mem_zero_modem(struct ipa *ipa);
-int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data);
+int ipa_mem_init(struct ipa *ipa, struct platform_device *pdev,
+ const struct ipa_mem_data *mem_data);
void ipa_mem_exit(struct ipa *ipa);
#endif /* _IPA_MEM_H_ */
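
With pdev threaded through explicitly, the probe path can hand the platform device to each init function instead of stashing it in struct ipa for later dereference. A sketch of how such a call chain might look from probe (hedged: the allocation and error unwinding are simplified here, and the data lookup is only assumed to match the driver's ipa_data layout):

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "ipa.h"		/* driver-internal headers */
#include "ipa_data.h"
#include "ipa_mem.h"
#include "ipa_reg.h"

static int example_probe(struct platform_device *pdev)
{
	const struct ipa_data *data;
	struct ipa *ipa;
	int ret;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);	/* simplified */
	if (!ipa)
		return -ENOMEM;
	ipa->dev = &pdev->dev;	/* the only pdev-derived field kept */

	ret = ipa_reg_init(ipa, pdev);
	if (ret)
		goto err_free;

	ret = ipa_mem_init(ipa, pdev, data->mem_data);
	if (ret)
		goto err_reg_exit;

	return 0;

err_reg_exit:
	ipa_reg_exit(ipa);
err_free:
	kfree(ipa);
	return ret;
}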
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index 423422a2a445..c27ca3f27f7d 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -39,10 +39,14 @@ enum ipa_modem_state {
/**
* struct ipa_priv - IPA network device private data
* @ipa: IPA pointer
+ * @tx: Transmit endpoint pointer
+ * @rx: Receive endpoint pointer
* @work: Work structure used to wake the modem netdev TX queue
*/
struct ipa_priv {
struct ipa *ipa;
+ struct ipa_endpoint *tx;
+ struct ipa_endpoint *rx;
struct work_struct work;
};
@@ -54,16 +58,16 @@ static int ipa_open(struct net_device *netdev)
struct device *dev;
int ret;
- dev = &ipa->pdev->dev;
+ dev = ipa->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_power_put;
- ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ret = ipa_endpoint_enable_one(priv->tx);
if (ret)
goto err_power_put;
- ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ ret = ipa_endpoint_enable_one(priv->rx);
if (ret)
goto err_disable_tx;
@@ -75,7 +79,7 @@ static int ipa_open(struct net_device *netdev)
return 0;
err_disable_tx:
- ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_endpoint_disable_one(priv->tx);
err_power_put:
pm_runtime_put_noidle(dev);
@@ -90,15 +94,15 @@ static int ipa_stop(struct net_device *netdev)
struct device *dev;
int ret;
- dev = &ipa->pdev->dev;
+ dev = ipa->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto out_power_put;
netif_stop_queue(netdev);
- ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
- ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ ipa_endpoint_disable_one(priv->rx);
+ ipa_endpoint_disable_one(priv->tx);
out_power_put:
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
@@ -106,13 +110,16 @@ out_power_put:
return 0;
}
-/** ipa_start_xmit() - Transmits an skb.
- * @skb: skb to be transmitted
- * @dev: network device
+/** ipa_start_xmit() - Transmit an skb
+ * @skb: Socket buffer to be transmitted
+ * @netdev: Network device
*
- * Return codes:
- * NETDEV_TX_OK: Success
- * NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
+ * Return: NETDEV_TX_OK if successful (or dropped), NETDEV_TX_BUSY otherwise
+ *
+ * Normally NETDEV_TX_OK indicates the buffer was successfully transmitted.
+ * If the buffer has an unexpected protocol or its size is out of range it
+ * is quietly dropped, returning NETDEV_TX_OK. NETDEV_TX_BUSY indicates
+ * the buffer cannot be sent at this time and should be retried later.
*/
static netdev_tx_t
ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
@@ -132,29 +139,41 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
goto err_drop_skb;
- /* The hardware must be powered for us to transmit */
- dev = &ipa->pdev->dev;
+ /* The hardware must be powered for us to transmit, so if we're not
+ * ready we want the network stack to stop queueing until power is
+ * ACTIVE. Once runtime resume has completed, we inform the network
+ * stack it's OK to try transmitting again.
+ *
+ * We learn from pm_runtime_get() whether the hardware is powered.
+ * If it was not, powering up is either started or already underway.
+ * And in that case we want to disable queueing, expecting it to be
+ * re-enabled once power is ACTIVE. But runtime PM and network
+ * transmit run concurrently, and if we're not careful the requests
+ * to stop and start queueing could occur in the wrong order.
+ *
+ * For that reason we *always* stop queueing here, *before* the call
+ * to pm_runtime_get(). If we determine here that power is ACTIVE,
+ * we restart queueing before transmitting the SKB. Otherwise
+ * queueing will eventually be enabled after resume completes.
+ */
+ netif_stop_queue(netdev);
+
+ dev = ipa->dev;
ret = pm_runtime_get(dev);
if (ret < 1) {
/* If a resume won't happen, just drop the packet */
if (ret < 0 && ret != -EINPROGRESS) {
- ipa_power_modem_queue_active(ipa);
+ netif_wake_queue(netdev);
pm_runtime_put_noidle(dev);
goto err_drop_skb;
}
- /* No power (yet). Stop the network stack from transmitting
- * until we're resumed; ipa_modem_resume() arranges for the
- * TX queue to be started again.
- */
- ipa_power_modem_queue_stop(ipa);
-
pm_runtime_put_noidle(dev);
return NETDEV_TX_BUSY;
}
- ipa_power_modem_queue_active(ipa);
+ netif_wake_queue(netdev);
ret = ipa_endpoint_skb_tx(endpoint, skb);
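
The comment above is the crux of the change: because the queue is stopped unconditionally before pm_runtime_get(), a resume that completes concurrently can only wake the queue after the stop, never before it. A condensed sketch of the ordering (hypothetical helper names; not the driver's exact code):

#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>

struct device *example_power_dev(struct net_device *netdev);	/* hypothetical */
netdev_tx_t example_hw_tx(struct net_device *netdev, struct sk_buff *skb); /* hypothetical */

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct device *dev = example_power_dev(netdev);
	int ret;

	netif_stop_queue(netdev);	/* always stop before checking power */

	ret = pm_runtime_get(dev);	/* nonblocking resume request */
	if (ret < 1) {
		pm_runtime_put_noidle(dev);
		if (ret < 0 && ret != -EINPROGRESS) {
			netif_wake_queue(netdev);	/* no resume coming */
			dev_kfree_skb_any(skb);		/* drop the packet */
			return NETDEV_TX_OK;
		}
		return NETDEV_TX_BUSY;	/* resume handler wakes the queue */
	}

	netif_wake_queue(netdev);	/* power ACTIVE; resume queueing */
	return example_hw_tx(netdev, skb);
}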
@@ -233,14 +252,14 @@ static void ipa_modem_netdev_setup(struct net_device *netdev)
*/
void ipa_modem_suspend(struct net_device *netdev)
{
- struct ipa_priv *priv = netdev_priv(netdev);
- struct ipa *ipa = priv->ipa;
+ struct ipa_priv *priv;
if (!(netdev->flags & IFF_UP))
return;
- ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
- ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
+ priv = netdev_priv(netdev);
+ ipa_endpoint_suspend_one(priv->rx);
+ ipa_endpoint_suspend_one(priv->tx);
}
/**
@@ -258,7 +277,7 @@ static void ipa_modem_wake_queue_work(struct work_struct *work)
{
struct ipa_priv *priv = container_of(work, struct ipa_priv, work);
- ipa_power_modem_queue_wake(priv->ipa);
+ netif_wake_queue(priv->tx->netdev);
}
/** ipa_modem_resume() - resume callback for runtime_pm
@@ -268,14 +287,14 @@ static void ipa_modem_wake_queue_work(struct work_struct *work)
*/
void ipa_modem_resume(struct net_device *netdev)
{
- struct ipa_priv *priv = netdev_priv(netdev);
- struct ipa *ipa = priv->ipa;
+ struct ipa_priv *priv;
if (!(netdev->flags & IFF_UP))
return;
- ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
- ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
+ priv = netdev_priv(netdev);
+ ipa_endpoint_resume_one(priv->tx);
+ ipa_endpoint_resume_one(priv->rx);
/* Arrange for the TX queue to be restarted */
(void)queue_pm_work(&priv->work);
@@ -303,19 +322,24 @@ int ipa_modem_start(struct ipa *ipa)
goto out_set_state;
}
- SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
+ SET_NETDEV_DEV(netdev, ipa->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
+ priv->tx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
+ priv->rx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX];
INIT_WORK(&priv->work, ipa_modem_wake_queue_work);
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+
+ priv->tx->netdev = netdev;
+ priv->rx->netdev = netdev;
+
ipa->modem_netdev = netdev;
ret = register_netdev(netdev);
if (ret) {
ipa->modem_netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
+ priv->rx->netdev = NULL;
+ priv->tx->netdev = NULL;
+
free_netdev(netdev);
}
@@ -355,9 +379,11 @@ int ipa_modem_stop(struct ipa *ipa)
if (netdev->flags & IFF_UP)
(void)ipa_stop(netdev);
unregister_netdev(netdev);
+
ipa->modem_netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
- ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
+ priv->rx->netdev = NULL;
+ priv->tx->netdev = NULL;
+
free_netdev(netdev);
}
@@ -370,7 +396,7 @@ int ipa_modem_stop(struct ipa *ipa)
/* Treat a "clean" modem stop the same as a crash */
static void ipa_modem_crashed(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
int ret;
/* Prevent the modem from triggering a call to ipa_setup() */
@@ -417,7 +443,7 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
{
struct ipa *ipa = container_of(nb, struct ipa, nb);
struct qcom_ssr_notify_data *notify_data = data;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
switch (action) {
case QCOM_SSR_BEFORE_POWERUP:
@@ -466,7 +492,7 @@ int ipa_modem_config(struct ipa *ipa)
void ipa_modem_deconfig(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
int ret;
ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index e223886123ce..41ca7ef5e20f 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -35,28 +35,10 @@
#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */
/**
- * enum ipa_power_flag - IPA power flags
- * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
- * @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
- * @IPA_POWER_FLAG_STOPPED: Modem TX is disabled by ipa_start_xmit()
- * @IPA_POWER_FLAG_STARTED: Modem TX was enabled by ipa_runtime_resume()
- * @IPA_POWER_FLAG_COUNT: Number of defined power flags
- */
-enum ipa_power_flag {
- IPA_POWER_FLAG_RESUMED,
- IPA_POWER_FLAG_SYSTEM,
- IPA_POWER_FLAG_STOPPED,
- IPA_POWER_FLAG_STARTED,
- IPA_POWER_FLAG_COUNT, /* Last; not a flag */
-};
-
-/**
* struct ipa_power - IPA power management information
* @dev: IPA device pointer
* @core: IPA core clock
* @qmp: QMP handle for AOSS communication
- * @spinlock: Protects modem TX queue enable/disable
- * @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
* @interconnect: Interconnect array
*/
@@ -64,8 +46,6 @@ struct ipa_power {
struct device *dev;
struct clk *core;
struct qmp *qmp;
- spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
- DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
struct icc_bulk_data interconnect[] __counted_by(interconnect_count);
};
@@ -147,7 +127,6 @@ static int ipa_runtime_suspend(struct device *dev)
/* Endpoints aren't usable until setup is complete */
if (ipa->setup_complete) {
- __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags);
ipa_endpoint_suspend(ipa);
gsi_suspend(&ipa->gsi);
}
@@ -179,8 +158,6 @@ static int ipa_suspend(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
- __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
-
/* Increment the disable depth to ensure that the IRQ won't
* be re-enabled until the matching _enable call in
* ipa_resume(). We do this to ensure that the interrupt
@@ -202,8 +179,6 @@ static int ipa_resume(struct device *dev)
ret = pm_runtime_force_resume(dev);
- __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
-
/* Now that PM runtime is enabled again it's safe
* to turn the IRQ back on and process any data
* that was received during suspend.
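
The suspend path above pairs an interrupt disable with the matching enable in resume, so anything that fires while the device is being force-suspended is deferred rather than handled with runtime PM frozen. Schematically (a sketch assuming a single wakeup-capable IRQ line and disable_irq()-style depth counting; not the driver's exact helpers):

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

struct example {
	int irq;
};

static int example_suspend(struct device *dev)
{
	struct example *ex = dev_get_drvdata(dev);

	disable_irq(ex->irq);		/* depth +1; held across suspend */
	return pm_runtime_force_suspend(dev);
}

static int example_resume(struct device *dev)
{
	struct example *ex = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	enable_irq(ex->irq);		/* depth -1; pending IRQs now run */
	return ret;
}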
@@ -219,84 +194,6 @@ u32 ipa_core_clock_rate(struct ipa *ipa)
return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
}
-void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
-{
- /* To handle an IPA interrupt we will have resumed the hardware
- * just to handle the interrupt, so we're done. If we are in a
- * system suspend, trigger a system resume.
- */
- if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags))
- if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags))
- pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
-
- /* Acknowledge/clear the suspend interrupt on all endpoints */
- ipa_interrupt_suspend_clear_all(ipa->interrupt);
-}
-
-/* The next few functions coordinate stopping and starting the modem
- * network device transmit queue.
- *
- * Transmit can be running concurrent with power resume, and there's a
- * chance the resume completes before the transmit path stops the queue,
- * leaving the queue in a stopped state. The next two functions are used
- * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit()
- * to conditionally stop the TX queue; and ipa_power_modem_queue_start()
- * is used by ipa_runtime_resume() to conditionally restart it.
- *
- * Two flags and a spinlock are used. If the queue is stopped, the STOPPED
- * power flag is set. And if the queue is started, the STARTED flag is set.
- * The queue is only started on resume if the STOPPED flag is set. And the
- * queue is only started in ipa_start_xmit() if the STARTED flag is *not*
- * set. As a result, the queue remains operational if the two activities
- * happen concurrently regardless of the order they complete. The spinlock
- * ensures the flag and TX queue operations are done atomically.
- *
- * The first function stops the modem netdev transmit queue, but only if
- * the STARTED flag is *not* set. That flag is cleared if it was set.
- * If the queue is stopped, the STOPPED flag is set. This is called only
- * from the power ->runtime_resume operation.
- */
-void ipa_power_modem_queue_stop(struct ipa *ipa)
-{
- struct ipa_power *power = ipa->power;
- unsigned long flags;
-
- spin_lock_irqsave(&power->spinlock, flags);
-
- if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) {
- netif_stop_queue(ipa->modem_netdev);
- __set_bit(IPA_POWER_FLAG_STOPPED, power->flags);
- }
-
- spin_unlock_irqrestore(&power->spinlock, flags);
-}
-
-/* This function starts the modem netdev transmit queue, but only if the
- * STOPPED flag is set. That flag is cleared if it was set. If the queue
- * was restarted, the STARTED flag is set; this allows ipa_start_xmit()
- * to skip stopping the queue in the event of a race.
- */
-void ipa_power_modem_queue_wake(struct ipa *ipa)
-{
- struct ipa_power *power = ipa->power;
- unsigned long flags;
-
- spin_lock_irqsave(&power->spinlock, flags);
-
- if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) {
- __set_bit(IPA_POWER_FLAG_STARTED, power->flags);
- netif_wake_queue(ipa->modem_netdev);
- }
-
- spin_unlock_irqrestore(&power->spinlock, flags);
-}
-
-/* This function clears the STARTED flag once the TX queue is operating */
-void ipa_power_modem_queue_active(struct ipa *ipa)
-{
- clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
-}
-
static int ipa_power_retention_init(struct ipa_power *power)
{
struct qmp *qmp = qmp_get(power->dev);
@@ -341,7 +238,7 @@ int ipa_power_setup(struct ipa *ipa)
ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
- ret = device_init_wakeup(&ipa->pdev->dev, true);
+ ret = device_init_wakeup(ipa->dev, true);
if (ret)
ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
@@ -350,7 +247,7 @@ int ipa_power_setup(struct ipa *ipa)
void ipa_power_teardown(struct ipa *ipa)
{
- (void)device_init_wakeup(&ipa->pdev->dev, false);
+ (void)device_init_wakeup(ipa->dev, false);
ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
}
@@ -385,7 +282,6 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
}
power->dev = dev;
power->core = clk;
- spin_lock_init(&power->spinlock);
power->interconnect_count = data->interconnect_count;
ret = ipa_interconnect_init(power, data->interconnect_data);
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 3a4c59ea1222..227cc04bea80 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -24,24 +24,6 @@ extern const struct dev_pm_ops ipa_pm_ops;
u32 ipa_core_clock_rate(struct ipa *ipa);
/**
- * ipa_power_modem_queue_stop() - Possibly stop the modem netdev TX queue
- * @ipa: IPA pointer
- */
-void ipa_power_modem_queue_stop(struct ipa *ipa);
-
-/**
- * ipa_power_modem_queue_wake() - Possibly wake the modem netdev TX queue
- * @ipa: IPA pointer
- */
-void ipa_power_modem_queue_wake(struct ipa *ipa);
-
-/**
- * ipa_power_modem_queue_active() - Report modem netdev TX queue active
- * @ipa: IPA pointer
- */
-void ipa_power_modem_queue_active(struct ipa *ipa);
-
-/**
* ipa_power_retention() - Control register retention on power collapse
* @ipa: IPA pointer
* @enable: Whether retention should be enabled or disabled
@@ -49,17 +31,6 @@ void ipa_power_modem_queue_active(struct ipa *ipa);
void ipa_power_retention(struct ipa *ipa, bool enable);
/**
- * ipa_power_suspend_handler() - Handler for SUSPEND IPA interrupts
- * @ipa: IPA pointer
- * @irq_id: IPA interrupt ID (unused)
- *
- * If an RX endpoint is suspended, and the IPA has a packet destined for
- * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
- * that it should resume the endpoint.
- */
-void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id);
-
-/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index f70f0a1d1cda..65c40e207802 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -96,7 +96,7 @@ static void ipa_server_init_complete(struct ipa_qmi *ipa_qmi)
IPA_QMI_INIT_COMPLETE_IND_SZ,
ipa_init_complete_ind_ei, &ind);
if (ret)
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d sending init complete indication\n", ret);
else
ipa_qmi->indication_sent = true;
@@ -148,7 +148,7 @@ static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
ipa = container_of(ipa_qmi, struct ipa, qmi);
ret = ipa_modem_start(ipa);
if (ret)
- dev_err(&ipa->pdev->dev, "error %d starting modem\n", ret);
+ dev_err(ipa->dev, "error %d starting modem\n", ret);
}
/* All QMI clients from the modem node are gone (modem shut down or crashed). */
@@ -199,7 +199,7 @@ static void ipa_server_indication_register(struct qmi_handle *qmi,
ipa_qmi->indication_requested = true;
ipa_qmi_ready(ipa_qmi); /* We might be ready now */
} else {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d sending register indication response\n", ret);
}
}
@@ -228,7 +228,7 @@ static void ipa_server_driver_init_complete(struct qmi_handle *qmi,
ipa_qmi->uc_ready = true;
ipa_qmi_ready(ipa_qmi); /* We might be ready now */
} else {
- dev_err(&ipa->pdev->dev,
+ dev_err(ipa->dev,
"error %d sending init complete response\n", ret);
}
}
@@ -417,7 +417,7 @@ static void ipa_client_init_driver_work(struct work_struct *work)
qmi = &ipa_qmi->client_handle;
ipa = container_of(ipa_qmi, struct ipa, qmi);
- dev = &ipa->pdev->dev;
+ dev = ipa->dev;
ret = qmi_txn_init(qmi, &txn, NULL, NULL);
if (ret < 0) {
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index 6a3203ae6f1e..98625956e0bb 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -4,6 +4,7 @@
* Copyright (C) 2019-2023 Linaro Ltd.
*/
+#include <linux/platform_device.h>
#include <linux/io.h>
#include "ipa.h"
@@ -132,9 +133,9 @@ static const struct regs *ipa_regs(enum ipa_version version)
}
}
-int ipa_reg_init(struct ipa *ipa)
+int ipa_reg_init(struct ipa *ipa, struct platform_device *pdev)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
const struct regs *regs;
struct resource *res;
@@ -146,8 +147,7 @@ int ipa_reg_init(struct ipa *ipa)
return -EINVAL;
/* Setup IPA register memory */
- res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
- "ipa-reg");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipa-reg");
if (!res) {
dev_err(dev, "DT error getting \"ipa-reg\" memory property\n");
return -ENODEV;
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 2998f115f12c..62c62495b796 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -12,6 +12,8 @@
#include "ipa_version.h"
#include "reg.h"
+struct platform_device;
+
struct ipa;
/**
@@ -643,7 +645,7 @@ extern const struct regs ipa_regs_v5_5;
const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
-int ipa_reg_init(struct ipa *ipa);
+int ipa_reg_init(struct ipa *ipa, struct platform_device *pdev);
void ipa_reg_exit(struct ipa *ipa);
#endif /* _IPA_REG_H_ */
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 5620dc271fac..aeccce9fab72 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -5,7 +5,7 @@
*/
#include <linux/types.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
@@ -84,15 +84,13 @@ struct ipa_smp2p {
*/
static void ipa_smp2p_notify(struct ipa_smp2p *smp2p)
{
- struct device *dev;
u32 value;
u32 mask;
if (smp2p->notified)
return;
- dev = &smp2p->ipa->pdev->dev;
- smp2p->power_on = pm_runtime_get_if_active(dev, true) > 0;
+ smp2p->power_on = pm_runtime_get_if_active(smp2p->ipa->dev, true) > 0;
/* Signal whether the IPA power is enabled */
mask = BIT(smp2p->enabled_bit);
@@ -152,15 +150,16 @@ static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
{
struct ipa_smp2p *smp2p = dev_id;
+ struct ipa *ipa = smp2p->ipa;
struct device *dev;
int ret;
/* Ignore any (spurious) interrupts received after the first */
- if (smp2p->ipa->setup_complete)
+ if (ipa->setup_complete)
return IRQ_HANDLED;
/* Power needs to be active for setup */
- dev = &smp2p->ipa->pdev->dev;
+ dev = ipa->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "error %d getting power for setup\n", ret);
@@ -168,7 +167,7 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
}
/* An error here won't cause driver shutdown, so warn if one occurs */
- ret = ipa_setup(smp2p->ipa);
+ ret = ipa_setup(ipa);
WARN(ret != 0, "error %d from ipa_setup()\n", ret);
out_power_put:
@@ -179,14 +178,15 @@ out_power_put:
}
/* Initialize SMP2P interrupts */
-static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p, const char *name,
- irq_handler_t handler)
+static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p,
+ struct platform_device *pdev,
+ const char *name, irq_handler_t handler)
{
- struct device *dev = &smp2p->ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
unsigned int irq;
int ret;
- ret = platform_get_irq_byname(smp2p->ipa->pdev, name);
+ ret = platform_get_irq_byname(pdev, name);
if (ret <= 0)
return ret ? : -EINVAL;
irq = ret;
@@ -208,7 +208,7 @@ static void ipa_smp2p_irq_exit(struct ipa_smp2p *smp2p, u32 irq)
/* Drop the power reference if it was taken in ipa_smp2p_notify() */
static void ipa_smp2p_power_release(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
if (!ipa->smp2p->power_on)
return;
@@ -219,10 +219,11 @@ static void ipa_smp2p_power_release(struct ipa *ipa)
}
/* Initialize the IPA SMP2P subsystem */
-int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
+int
+ipa_smp2p_init(struct ipa *ipa, struct platform_device *pdev, bool modem_init)
{
struct qcom_smem_state *enabled_state;
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = &pdev->dev;
struct qcom_smem_state *valid_state;
struct ipa_smp2p *smp2p;
u32 enabled_bit;
@@ -261,7 +262,7 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
/* We have enough information saved to handle notifications */
ipa->smp2p = smp2p;
- ret = ipa_smp2p_irq_init(smp2p, "ipa-clock-query",
+ ret = ipa_smp2p_irq_init(smp2p, pdev, "ipa-clock-query",
ipa_smp2p_modem_clk_query_isr);
if (ret < 0)
goto err_null_smp2p;
@@ -273,7 +274,7 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init)
if (modem_init) {
/* Result will be non-zero (negative for error) */
- ret = ipa_smp2p_irq_init(smp2p, "ipa-setup-ready",
+ ret = ipa_smp2p_irq_init(smp2p, pdev, "ipa-setup-ready",
ipa_smp2p_modem_setup_ready_isr);
if (ret < 0)
goto err_notifier_unregister;
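
Passing pdev into ipa_smp2p_irq_init() keeps the IRQ lookup tied to the platform device while everything else uses the plain struct device. The lookup-then-request shape is roughly this (a sketch; the IRQF_ONESHOT threaded-handler choice is an assumption about the handler style, and the names are placeholders):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int example_irq_init(struct platform_device *pdev, const char *name,
			    irq_handler_t handler, void *data)
{
	int ret;

	ret = platform_get_irq_byname(pdev, name);
	if (ret <= 0)			/* treat 0 as invalid here */
		return ret ? : -EINVAL;

	/* Thread the handler; it may sleep (e.g. pm_runtime_get_sync()) */
	return request_threaded_irq(ret, NULL, handler, IRQF_ONESHOT,
				    name, data);
}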
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
index 9b969b03d1a4..2a3d8eefb13b 100644
--- a/drivers/net/ipa/ipa_smp2p.h
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -8,17 +8,20 @@
#include <linux/types.h>
+struct platform_device;
+
struct ipa;
/**
* ipa_smp2p_init() - Initialize the IPA SMP2P subsystem
* @ipa: IPA pointer
+ * @pdev: Platform device pointer
* @modem_init: Whether the modem is responsible for GSI initialization
*
* Return: 0 if successful, or a negative error code
- *
*/
-int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
+int ipa_smp2p_init(struct ipa *ipa, struct platform_device *pdev,
+ bool modem_init);
/**
* ipa_smp2p_exit() - Inverse of ipa_smp2p_init()
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 7b637bb8b41c..a24ac11b8893 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -163,7 +163,7 @@ ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
bool ipa_filtered_valid(struct ipa *ipa, u64 filtered)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
u32 count;
if (!filtered) {
@@ -236,8 +236,7 @@ ipa_filter_reset_table(struct ipa *ipa, bool hashed, bool ipv6, bool modem)
trans = ipa_cmd_trans_alloc(ipa, hweight64(ep_mask));
if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction for %s filter reset\n",
+ dev_err(ipa->dev, "no transaction for %s filter reset\n",
modem ? "modem" : "AP");
return -EBUSY;
}
@@ -298,8 +297,7 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2);
if (!trans) {
- dev_err(&ipa->pdev->dev,
- "no transaction for %s route reset\n",
+ dev_err(ipa->dev, "no transaction for %s route reset\n",
modem ? "modem" : "AP");
return -EBUSY;
}
@@ -327,7 +325,7 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
void ipa_table_reset(struct ipa *ipa, bool modem)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
const char *ee_name;
int ret;
@@ -356,7 +354,7 @@ int ipa_table_hash_flush(struct ipa *ipa)
trans = ipa_cmd_trans_alloc(ipa, 1);
if (!trans) {
- dev_err(&ipa->pdev->dev, "no transaction for hash flush\n");
+ dev_err(ipa->dev, "no transaction for hash flush\n");
return -EBUSY;
}
@@ -469,7 +467,7 @@ int ipa_table_setup(struct ipa *ipa)
*/
trans = ipa_cmd_trans_alloc(ipa, 8);
if (!trans) {
- dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
+ dev_err(ipa->dev, "no transaction for table setup\n");
return -EBUSY;
}
@@ -713,7 +711,7 @@ bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
*/
int ipa_table_init(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
dma_addr_t addr;
__le64 le_addr;
__le64 *virt;
@@ -763,7 +761,7 @@ int ipa_table_init(struct ipa *ipa)
void ipa_table_exit(struct ipa *ipa)
{
u32 count = max_t(u32, 1 + ipa->filter_count, ipa->route_count);
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
size_t size;
size = IPA_ZERO_RULE_SIZE + (1 + count) * sizeof(__le64);
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 7eaa0b4ebed9..bfd5dc6dab43 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -127,7 +127,7 @@ static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
static void ipa_uc_event_handler(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
if (shared->event == IPA_UC_EVENT_ERROR)
dev_err(dev, "microcontroller error event\n");
@@ -141,7 +141,7 @@ static void ipa_uc_event_handler(struct ipa *ipa)
static void ipa_uc_response_hdlr(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
/* An INIT_COMPLETED response message is sent to the AP by the
* microcontroller when it is operational. Other than this, the AP
@@ -191,7 +191,7 @@ void ipa_uc_config(struct ipa *ipa)
/* Inverse of ipa_uc_config() */
void ipa_uc_deconfig(struct ipa *ipa)
{
- struct device *dev = &ipa->pdev->dev;
+ struct device *dev = ipa->dev;
ipa_interrupt_disable(ipa, IPA_IRQ_UC_1);
ipa_interrupt_disable(ipa, IPA_IRQ_UC_0);
@@ -208,8 +208,8 @@ void ipa_uc_deconfig(struct ipa *ipa)
/* Take a proxy power reference for the microcontroller */
void ipa_uc_power(struct ipa *ipa)
{
+ struct device *dev = ipa->dev;
static bool already;
- struct device *dev;
int ret;
if (already)
@@ -217,7 +217,6 @@ void ipa_uc_power(struct ipa *ipa)
already = true; /* Only do this on first boot */
/* This power reference dropped in ipa_uc_response_hdlr() above */
- dev = &ipa->pdev->dev;
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
pm_runtime_put_noidle(dev);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index df7c43a109e1..5920f7e63352 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -349,7 +349,7 @@ static int ipvlan_get_iflink(const struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
- return ipvlan->phy_dev->ifindex;
+ return READ_ONCE(ipvlan->phy_dev->ifindex);
}
static const struct net_device_ops ipvlan_netdev_ops = {
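
The READ_ONCE() additions in this and the following two drivers assume ndo_get_iflink() can run without any lock while the lower device's ifindex is being rewritten (the index is reassigned when a device moves to another network namespace), so the single load is annotated to avoid a torn or repeated read. The general shape (illustrative names; the writer side is expected to publish updates with a matching WRITE_ONCE() under its own locking):

#include <linux/netdevice.h>

struct example_priv {
	struct net_device *lower_dev;
};

/* Lockless reader: one annotated load, no lock taken */
static int example_get_iflink(const struct net_device *dev)
{
	const struct example_priv *priv = netdev_priv(dev);

	return READ_ONCE(priv->lower_dev->ifindex);
}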
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 60944a4beada..1afc4c47be73 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -237,4 +237,5 @@ static void __exit ipvtap_exit(void)
module_exit(ipvtap_exit);
MODULE_ALIAS_RTNL_LINK("ipvtap");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
+MODULE_DESCRIPTION("IP-VLAN based tap driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index f6d53e63ef4e..f6eab66c2660 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -144,6 +144,7 @@ static int loopback_dev_init(struct net_device *dev)
dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
if (!dev->lstats)
return -ENOMEM;
+ netdev_lockdep_set_classes(dev);
return 0;
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 7f5426285c61..0206b84284ab 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3519,18 +3519,13 @@ static int macsec_dev_init(struct net_device *dev)
struct net_device *real_dev = macsec->real_dev;
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
err = gro_cells_init(&macsec->gro_cells, dev);
- if (err) {
- free_percpu(dev->tstats);
+ if (err)
return err;
- }
dev->features = real_dev->features & MACSEC_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
macsec_set_head_tail_room(dev);
@@ -3550,7 +3545,6 @@ static void macsec_dev_uninit(struct net_device *dev)
struct macsec_dev *macsec = macsec_priv(dev);
gro_cells_destroy(&macsec->gro_cells);
- free_percpu(dev->tstats);
}
static netdev_features_t macsec_fix_features(struct net_device *dev,
@@ -3753,7 +3747,7 @@ static void macsec_get_stats64(struct net_device *dev,
static int macsec_get_iflink(const struct net_device *dev)
{
- return macsec_priv(dev)->real_dev->ifindex;
+ return READ_ONCE(macsec_priv(dev)->real_dev->ifindex);
}
static const struct net_device_ops macsec_netdev_ops = {
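
The macsec hunk also drops the hand-rolled dev->tstats management: instead of allocating the per-CPU tstats in ndo_init and freeing them in ndo_uninit, the driver declares which per-CPU stats flavor it wants and the core allocates and releases the array around registration. The opt-in is a single assignment (sketch; where exactly register_netdevice() performs the allocation is the core's business, not the driver's):

#include <linux/netdevice.h>

static int example_dev_init(struct net_device *dev)
{
	/* Ask the core for per-CPU tx/rx stats (pcpu_sw_netstats);
	 * no matching free_percpu() is needed in ndo_uninit.
	 */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

	return 0;
}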
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a3cc665757e8..0cec2783a3e7 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1158,7 +1158,7 @@ static int macvlan_dev_get_iflink(const struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
- return vlan->lowerdev->ifindex;
+ return READ_ONCE(vlan->lowerdev->ifindex);
}
static const struct ethtool_ops macvlan_ethtool_ops = {
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index 68f8ee0ec8ba..f40eb50bb978 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -94,6 +94,10 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
int ret;
u32 cmd;
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
/* Prepare the read operation */
cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
unimac_mdio_writel(priv, cmd, MDIO_CMD);
@@ -103,7 +107,7 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
ret = priv->wait_func(priv->wait_func_data);
if (ret)
- return ret;
+ goto out;
cmd = unimac_mdio_readl(priv, MDIO_CMD);
@@ -112,10 +116,15 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* that condition here and ignore the MDIO controller read failure
* indication.
*/
- if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL))
- return -EIO;
+ if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL)) {
+ ret = -EIO;
+ goto out;
+ }
- return cmd & 0xffff;
+ ret = cmd & 0xffff;
+out:
+ clk_disable_unprepare(priv->clk);
+ return ret;
}
static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
@@ -123,6 +132,11 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
{
struct unimac_mdio_priv *priv = bus->priv;
u32 cmd;
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
/* Prepare the write operation */
cmd = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
@@ -131,7 +145,10 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
unimac_mdio_start(priv);
- return priv->wait_func(priv->wait_func_data);
+ ret = priv->wait_func(priv->wait_func_data);
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
}
/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with
@@ -178,14 +195,19 @@ static int unimac_mdio_reset(struct mii_bus *bus)
return 0;
}
-static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
+static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
{
unsigned long rate;
u32 reg, div;
+ int ret;
/* Keep the hardware default values */
if (!priv->clk_freq)
- return;
+ return 0;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
if (!priv->clk)
rate = 250000000;
@@ -195,7 +217,8 @@ static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
div = (rate / (2 * priv->clk_freq)) - 1;
if (div & ~MDIO_CLK_DIV_MASK) {
pr_warn("Incorrect MDIO clock frequency, ignoring\n");
- return;
+ ret = 0;
+ goto out;
}
/* The MDIO clock is the reference clock (typically 250Mhz) divided by
@@ -205,6 +228,9 @@ static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
reg &= ~(MDIO_CLK_DIV_MASK << MDIO_CLK_DIV_SHIFT);
reg |= div << MDIO_CLK_DIV_SHIFT;
unimac_mdio_writel(priv, reg, MDIO_CFG);
+out:
+ clk_disable_unprepare(priv->clk);
+ return ret;
}
static int unimac_mdio_probe(struct platform_device *pdev)
@@ -235,24 +261,12 @@ static int unimac_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
}
- priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq))
priv->clk_freq = 0;
- unimac_mdio_clk_set(priv);
-
priv->mii_bus = mdiobus_alloc();
- if (!priv->mii_bus) {
- ret = -ENOMEM;
- goto out_clk_disable;
- }
+ if (!priv->mii_bus)
+ return -ENOMEM;
bus = priv->mii_bus;
bus->priv = priv;
@@ -261,17 +275,29 @@ static int unimac_mdio_probe(struct platform_device *pdev)
priv->wait_func = pdata->wait_func;
priv->wait_func_data = pdata->wait_func_data;
bus->phy_mask = ~pdata->phy_mask;
+ priv->clk = pdata->clk;
} else {
bus->name = "unimac MII bus";
priv->wait_func_data = priv;
priv->wait_func = unimac_mdio_poll;
+ priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ }
+
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto out_mdio_free;
}
+
bus->parent = &pdev->dev;
bus->read = unimac_mdio_read;
bus->write = unimac_mdio_write;
bus->reset = unimac_mdio_reset;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
+ ret = unimac_mdio_clk_set(priv);
+ if (ret)
+ goto out_mdio_free;
+
ret = of_mdiobus_register(bus, np);
if (ret) {
dev_err(&pdev->dev, "MDIO bus registration failed\n");
@@ -286,8 +312,6 @@ static int unimac_mdio_probe(struct platform_device *pdev)
out_mdio_free:
mdiobus_free(bus);
-out_clk_disable:
- clk_disable_unprepare(priv->clk);
return ret;
}
@@ -297,36 +321,20 @@ static void unimac_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
- clk_disable_unprepare(priv->clk);
-}
-
-static int __maybe_unused unimac_mdio_suspend(struct device *d)
-{
- struct unimac_mdio_priv *priv = dev_get_drvdata(d);
-
- clk_disable_unprepare(priv->clk);
-
- return 0;
}
static int __maybe_unused unimac_mdio_resume(struct device *d)
{
struct unimac_mdio_priv *priv = dev_get_drvdata(d);
- int ret;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
- unimac_mdio_clk_set(priv);
-
- return 0;
+ return unimac_mdio_clk_set(priv);
}
static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops,
- unimac_mdio_suspend, unimac_mdio_resume);
+ NULL, unimac_mdio_resume);
static const struct of_device_id unimac_mdio_ids[] = {
+ { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
{ .compatible = "brcm,genet-mdio-v5", },
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index abd8b508ec16..9d8f43b28aac 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -14,6 +14,20 @@
#include <linux/clk.h>
#define MDIO_MODE_REG 0x40
+#define MDIO_MODE_MDC_MODE BIT(12)
+/* 0 = Clause 22, 1 = Clause 45 */
+#define MDIO_MODE_C45 BIT(8)
+#define MDIO_MODE_DIV_MASK GENMASK(7, 0)
+#define MDIO_MODE_DIV(x) FIELD_PREP(MDIO_MODE_DIV_MASK, (x) - 1)
+#define MDIO_MODE_DIV_1 0x0
+#define MDIO_MODE_DIV_2 0x1
+#define MDIO_MODE_DIV_4 0x3
+#define MDIO_MODE_DIV_8 0x7
+#define MDIO_MODE_DIV_16 0xf
+#define MDIO_MODE_DIV_32 0x1f
+#define MDIO_MODE_DIV_64 0x3f
+#define MDIO_MODE_DIV_128 0x7f
+#define MDIO_MODE_DIV_256 0xff
#define MDIO_ADDR_REG 0x44
#define MDIO_DATA_WRITE_REG 0x48
#define MDIO_DATA_READ_REG 0x4c
@@ -26,9 +40,6 @@
#define MDIO_CMD_ACCESS_CODE_C45_WRITE 1
#define MDIO_CMD_ACCESS_CODE_C45_READ 2
-/* 0 = Clause 22, 1 = Clause 45 */
-#define MDIO_MODE_C45 BIT(8)
-
#define IPQ4019_MDIO_TIMEOUT 10000
#define IPQ4019_MDIO_SLEEP 10
@@ -41,6 +52,7 @@ struct ipq4019_mdio_data {
void __iomem *membase;
void __iomem *eth_ldo_rdy;
struct clk *mdio_clk;
+ unsigned int mdc_rate;
};
static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
@@ -203,6 +215,38 @@ static int ipq4019_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
+static int ipq4019_mdio_set_div(struct ipq4019_mdio_data *priv)
+{
+ unsigned long ahb_rate;
+ int div;
+ u32 val;
+
+	/* If we don't have a clock for AHB, use the fixed value */
+ ahb_rate = IPQ_MDIO_CLK_RATE;
+ if (priv->mdio_clk)
+ ahb_rate = clk_get_rate(priv->mdio_clk);
+
+	/* The MDC rate is ahb_rate / (MDIO_MODE_DIV + 1). Dividers of
+	 * 1, 2 and 4 are nominally supported, but internal documentation
+	 * doesn't guarantee correct operation of the MDIO bus with them,
+	 * so start the search at 8.
+ */
+ for (div = 8; div <= 256; div *= 2) {
+ /* The requested rate is supported by the div */
+ if (priv->mdc_rate == DIV_ROUND_UP(ahb_rate, div)) {
+ val = readl(priv->membase + MDIO_MODE_REG);
+ val &= ~MDIO_MODE_DIV_MASK;
+ val |= MDIO_MODE_DIV(div);
+ writel(val, priv->membase + MDIO_MODE_REG);
+
+ return 0;
+ }
+ }
+
+ /* The requested rate is not supported */
+ return -EINVAL;
+}
+
static int ipq_mdio_reset(struct mii_bus *bus)
{
struct ipq4019_mdio_data *priv = bus->priv;
@@ -225,10 +269,58 @@ static int ipq_mdio_reset(struct mii_bus *bus)
return ret;
ret = clk_prepare_enable(priv->mdio_clk);
- if (ret == 0)
- mdelay(10);
+ if (ret)
+ return ret;
- return ret;
+ mdelay(10);
+
+ /* Restore MDC rate */
+ return ipq4019_mdio_set_div(priv);
+}
+
+static void ipq4019_mdio_select_mdc_rate(struct platform_device *pdev,
+ struct ipq4019_mdio_data *priv)
+{
+ unsigned long ahb_rate;
+ int div;
+ u32 val;
+
+	/* The MDC rate is defined in DT; no need to pick a default value */
+ if (!of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &priv->mdc_rate))
+ return;
+
+	/* If we don't have a clock for AHB, use the fixed value */
+ ahb_rate = IPQ_MDIO_CLK_RATE;
+ if (priv->mdio_clk)
+ ahb_rate = clk_get_rate(priv->mdio_clk);
+
+	/* Check which divider is currently set */
+ val = readl(priv->membase + MDIO_MODE_REG);
+ div = FIELD_GET(MDIO_MODE_DIV_MASK, val);
+
+	/* The divider is not at its default value of /256; someone
+	 * (the bootloader or another driver) probably changed it.
+	 * Keep that setting and don't overwrite it.
+ */
+ if (div != MDIO_MODE_DIV_256) {
+ priv->mdc_rate = DIV_ROUND_UP(ahb_rate, div + 1);
+ return;
+ }
+
+	/* If div is /256, assume nobody has set this value and try
+	 * to find an MDC rate close to the 802.3 spec value of
+	 * 2.5MHz.
+ */
+ for (div = 256; div >= 8; div /= 2) {
+		/* Stop as soon as this divider's rate exceeds 2.5MHz;
+		 * the previously stored rate is the closest not above it.
+ */
+ if (DIV_ROUND_UP(ahb_rate, div) > 2500000)
+ break;
+
+ priv->mdc_rate = DIV_ROUND_UP(ahb_rate, div);
+ }
}
static int ipq4019_mdio_probe(struct platform_device *pdev)
@@ -252,6 +344,11 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
if (IS_ERR(priv->mdio_clk))
return PTR_ERR(priv->mdio_clk);
+ ipq4019_mdio_select_mdc_rate(pdev, priv);
+ ret = ipq4019_mdio_set_div(priv);
+ if (ret)
+ return ret;
+
/* The platform resource is provided on the chipset IPQ5018 */
/* This resource is optional */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
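
The default MDC rate chosen by ipq4019_mdio_select_mdc_rate() follows directly from the divider arithmetic: MDC = AHB rate / divider, scanning power-of-two dividers from 256 down to 8 and keeping the last rate that does not exceed the 802.3 target of 2.5MHz. A worked example of the same loop, assuming a 100MHz AHB clock purely for illustration:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

static unsigned long example_default_mdc_rate(unsigned long ahb_rate)
{
	unsigned long rate = 0;
	int div;

	for (div = 256; div >= 8; div /= 2) {
		if (DIV_ROUND_UP(ahb_rate, div) > 2500000)
			break;	/* e.g. 3125000 at div 32 for 100MHz */
		rate = DIV_ROUND_UP(ahb_rate, div);
	}

	/* For a 100MHz AHB: 390625 -> 781250 -> 1562500, then the loop
	 * breaks at div 32, so 1562500 (div 64) is returned: the closest
	 * supported MDC rate not above 2.5MHz.
	 */
	return rate;
}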
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 64ebcb6d235c..08e607f62e10 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -139,6 +139,53 @@ bool of_mdiobus_child_is_phy(struct device_node *child)
}
EXPORT_SYMBOL(of_mdiobus_child_is_phy);
+static int __of_mdiobus_parse_phys(struct mii_bus *mdio, struct device_node *np,
+ bool *scanphys)
+{
+ struct device_node *child;
+ int addr, rc = 0;
+
+ /* Loop over the child nodes and register a phy_device for each phy */
+ for_each_available_child_of_node(np, child) {
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Ignore invalid ethernet-phy-package node */
+ if (!of_property_present(child, "reg"))
+ continue;
+
+ rc = __of_mdiobus_parse_phys(mdio, child, NULL);
+ if (rc && rc != -ENODEV)
+ goto exit;
+
+ continue;
+ }
+
+ addr = of_mdio_parse_addr(&mdio->dev, child);
+ if (addr < 0) {
+ /* Skip scanning for invalid ethernet-phy-package node */
+ if (scanphys)
+ *scanphys = true;
+ continue;
+ }
+
+ if (of_mdiobus_child_is_phy(child))
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ else
+ rc = of_mdiobus_register_device(mdio, child, addr);
+
+ if (rc == -ENODEV)
+ dev_err(&mdio->dev,
+ "MDIO device at address %d is missing.\n",
+ addr);
+ else if (rc)
+ goto exit;
+ }
+
+ return 0;
+exit:
+ of_node_put(child);
+ return rc;
+}
+
/**
* __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
@@ -180,33 +227,18 @@ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
return rc;
/* Loop over the child nodes and register a phy_device for each phy */
- for_each_available_child_of_node(np, child) {
- addr = of_mdio_parse_addr(&mdio->dev, child);
- if (addr < 0) {
- scanphys = true;
- continue;
- }
-
- if (of_mdiobus_child_is_phy(child))
- rc = of_mdiobus_register_phy(mdio, child, addr);
- else
- rc = of_mdiobus_register_device(mdio, child, addr);
-
- if (rc == -ENODEV)
- dev_err(&mdio->dev,
- "MDIO device at address %d is missing.\n",
- addr);
- else if (rc)
- goto unregister;
- }
+ rc = __of_mdiobus_parse_phys(mdio, np, &scanphys);
+ if (rc)
+ goto unregister;
if (!scanphys)
return 0;
/* auto scan for PHYs with empty reg property */
for_each_available_child_of_node(np, child) {
- /* Skip PHYs with reg property set */
- if (of_property_present(child, "reg"))
+ /* Skip PHYs with reg property set or ethernet-phy-package node */
+ if (of_property_present(child, "reg") ||
+ of_node_name_eq(child, "ethernet-phy-package"))
continue;
for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
@@ -227,15 +259,16 @@ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
if (!rc)
break;
if (rc != -ENODEV)
- goto unregister;
+ goto put_unregister;
}
}
}
return 0;
-unregister:
+put_unregister:
of_node_put(child);
+unregister:
mdiobus_unregister(mdio);
return rc;
}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 6e14ba5e06c8..d7070dd4fe73 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -42,14 +42,20 @@ MODULE_AUTHOR("Maintainer: Matt Mackall <mpm@selenic.com>");
MODULE_DESCRIPTION("Console driver for network interfaces");
MODULE_LICENSE("GPL");
-#define MAX_PARAM_LENGTH 256
-#define MAX_PRINT_CHUNK 1000
+#define MAX_PARAM_LENGTH 256
+#define MAX_USERDATA_ENTRY_LENGTH 256
+#define MAX_USERDATA_VALUE_LENGTH 200
+/* The number 3 comes from userdata entry format characters (' ', '=', '\n') */
+#define MAX_USERDATA_NAME_LENGTH (MAX_USERDATA_ENTRY_LENGTH - \
+ MAX_USERDATA_VALUE_LENGTH - 3)
+#define MAX_USERDATA_ITEMS 16
+#define MAX_PRINT_CHUNK 1000
static char config[MAX_PARAM_LENGTH];
module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]");
-static bool oops_only = false;
+static bool oops_only;
module_param(oops_only, bool, 0600);
MODULE_PARM_DESC(oops_only, "Only log oops messages");
@@ -79,7 +85,10 @@ static struct console netconsole_ext;
/**
* struct netconsole_target - Represents a configured netconsole target.
* @list: Links this target into the target_list.
- * @item: Links us into the configfs subsystem hierarchy.
+ * @group: Links us into the configfs subsystem hierarchy.
+ * @userdata_group: Links to the userdata configfs hierarchy
+ * @userdata_complete: Cached, formatted string of userdata to append to messages
+ * @userdata_length: String length of userdata_complete
* @enabled: On / off knob to enable / disable target.
* Visible from userspace (read-write).
* We maintain a strict 1:1 correspondence between this and
@@ -102,7 +111,10 @@ static struct console netconsole_ext;
struct netconsole_target {
struct list_head list;
#ifdef CONFIG_NETCONSOLE_DYNAMIC
- struct config_item item;
+ struct config_group group;
+ struct config_group userdata_group;
+ char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS];
+ size_t userdata_length;
#endif
bool enabled;
bool extended;
@@ -134,14 +146,14 @@ static void __exit dynamic_netconsole_exit(void)
*/
static void netconsole_target_get(struct netconsole_target *nt)
{
- if (config_item_name(&nt->item))
- config_item_get(&nt->item);
+ if (config_item_name(&nt->group.cg_item))
+ config_group_get(&nt->group);
}
static void netconsole_target_put(struct netconsole_target *nt)
{
- if (config_item_name(&nt->item))
- config_item_put(&nt->item);
+ if (config_item_name(&nt->group.cg_item))
+ config_group_put(&nt->group);
}
#else /* !CONFIG_NETCONSOLE_DYNAMIC */
@@ -215,15 +227,33 @@ static struct netconsole_target *alloc_and_init(void)
* | remote_ip
* | local_mac
* | remote_mac
+ * | userdata/
+ * | <key>/
+ * | value
+ * | ...
* |
* <target>/...
*/
static struct netconsole_target *to_target(struct config_item *item)
{
- return item ?
- container_of(item, struct netconsole_target, item) :
- NULL;
+ struct config_group *cfg_group;
+
+ cfg_group = to_config_group(item);
+ if (!cfg_group)
+ return NULL;
+ return container_of(to_config_group(item),
+ struct netconsole_target, group);
+}
+
+/* Get rid of a possible trailing newline, e.g. one added by echo(1) */
+static void trim_newline(char *s, size_t maxlen)
+{
+ size_t len;
+
+ len = strnlen(s, maxlen);
+ if (s[len - 1] == '\n')
+ s[len - 1] = '\0';
}
/*
@@ -370,7 +400,7 @@ static ssize_t release_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
err = -EINVAL;
goto out_unlock;
}
@@ -398,7 +428,7 @@ static ssize_t extended_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
err = -EINVAL;
goto out_unlock;
}
@@ -420,22 +450,17 @@ static ssize_t dev_name_store(struct config_item *item, const char *buf,
size_t count)
{
struct netconsole_target *nt = to_target(item);
- size_t len;
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
mutex_unlock(&dynamic_netconsole_mutex);
return -EINVAL;
}
strscpy(nt->np.dev_name, buf, IFNAMSIZ);
-
- /* Get rid of possible trailing newline from echo(1) */
- len = strnlen(nt->np.dev_name, IFNAMSIZ);
- if (nt->np.dev_name[len - 1] == '\n')
- nt->np.dev_name[len - 1] = '\0';
+ trim_newline(nt->np.dev_name, IFNAMSIZ);
mutex_unlock(&dynamic_netconsole_mutex);
return strnlen(buf, count);
@@ -450,7 +475,7 @@ static ssize_t local_port_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
@@ -473,7 +498,7 @@ static ssize_t remote_port_store(struct config_item *item,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
@@ -495,12 +520,13 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
if (strnchr(buf, count, ':')) {
const char *end;
+
if (in6_pton(buf, count, nt->np.local_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
pr_err("invalid IPv6 address at: <%c>\n", *end);
@@ -510,9 +536,9 @@ static ssize_t local_ip_store(struct config_item *item, const char *buf,
} else
goto out_unlock;
} else {
- if (!nt->np.ipv6) {
+ if (!nt->np.ipv6)
nt->np.local_ip.ip = in_aton(buf);
- } else
+ else
goto out_unlock;
}
@@ -531,12 +557,13 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
if (strnchr(buf, count, ':')) {
const char *end;
+
if (in6_pton(buf, count, nt->np.remote_ip.in6.s6_addr, -1, &end) > 0) {
if (*end && *end != '\n') {
pr_err("invalid IPv6 address at: <%c>\n", *end);
@@ -546,9 +573,9 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf,
} else
goto out_unlock;
} else {
- if (!nt->np.ipv6) {
+ if (!nt->np.ipv6)
nt->np.remote_ip.ip = in_aton(buf);
- } else
+ else
goto out_unlock;
}
@@ -568,7 +595,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf,
mutex_lock(&dynamic_netconsole_mutex);
if (nt->enabled) {
pr_err("target (%s) is enabled, disable to update parameters\n",
- config_item_name(&nt->item));
+ config_item_name(&nt->group.cg_item));
goto out_unlock;
}
@@ -585,6 +612,180 @@ out_unlock:
return -EINVAL;
}
+struct userdatum {
+ struct config_item item;
+ char value[MAX_USERDATA_VALUE_LENGTH];
+};
+
+static struct userdatum *to_userdatum(struct config_item *item)
+{
+ return container_of(item, struct userdatum, item);
+}
+
+struct userdata {
+ struct config_group group;
+};
+
+static struct userdata *to_userdata(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct userdata, group);
+}
+
+static struct netconsole_target *userdata_to_target(struct userdata *ud)
+{
+ struct config_group *netconsole_group;
+
+ netconsole_group = to_config_group(ud->group.cg_item.ci_parent);
+ return to_target(&netconsole_group->cg_item);
+}
+
+static ssize_t userdatum_value_show(struct config_item *item, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", &(to_userdatum(item)->value[0]));
+}
+
+static void update_userdata(struct netconsole_target *nt)
+{
+ int complete_idx = 0, child_count = 0;
+ struct list_head *entry;
+
+ /* Clear the current string in case the last userdatum was deleted */
+ nt->userdata_length = 0;
+ nt->userdata_complete[0] = 0;
+
+ list_for_each(entry, &nt->userdata_group.cg_children) {
+ struct userdatum *udm_item;
+ struct config_item *item;
+
+ if (child_count >= MAX_USERDATA_ITEMS)
+ break;
+ child_count++;
+
+ item = container_of(entry, struct config_item, ci_entry);
+ udm_item = to_userdatum(item);
+
+ /* Skip userdata with no value set */
+ if (strnlen(udm_item->value, MAX_USERDATA_VALUE_LENGTH) == 0)
+ continue;
+
+		/* This can't overflow userdata_complete: each write is
+		 * bounded to one entry length (1/MAX_USERDATA_ITEMS of the
+		 * buffer), and child_count above caps the entry count at
+		 * MAX_USERDATA_ITEMS.
+ */
+ complete_idx += scnprintf(&nt->userdata_complete[complete_idx],
+ MAX_USERDATA_ENTRY_LENGTH, " %s=%s\n",
+ item->ci_name, udm_item->value);
+ }
+ nt->userdata_length = strnlen(nt->userdata_complete,
+ sizeof(nt->userdata_complete));
+}
+
+static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
+ size_t count)
+{
+ struct userdatum *udm = to_userdatum(item);
+ struct netconsole_target *nt;
+ struct userdata *ud;
+ int ret;
+
+ if (count > MAX_USERDATA_VALUE_LENGTH)
+ return -EMSGSIZE;
+
+ mutex_lock(&dynamic_netconsole_mutex);
+
+ ret = strscpy(udm->value, buf, sizeof(udm->value));
+ if (ret < 0)
+ goto out_unlock;
+ trim_newline(udm->value, sizeof(udm->value));
+
+ ud = to_userdata(item->ci_parent);
+ nt = userdata_to_target(ud);
+ update_userdata(nt);
+
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return count;
+out_unlock:
+ mutex_unlock(&dynamic_netconsole_mutex);
+ return ret;
+}
+
+CONFIGFS_ATTR(userdatum_, value);
+
+static struct configfs_attribute *userdatum_attrs[] = {
+ &userdatum_attr_value,
+ NULL,
+};
+
+static void userdatum_release(struct config_item *item)
+{
+ kfree(to_userdatum(item));
+}
+
+static struct configfs_item_operations userdatum_ops = {
+ .release = userdatum_release,
+};
+
+static const struct config_item_type userdatum_type = {
+ .ct_item_ops = &userdatum_ops,
+ .ct_attrs = userdatum_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *userdatum_make_item(struct config_group *group,
+ const char *name)
+{
+ struct netconsole_target *nt;
+ struct userdatum *udm;
+ struct userdata *ud;
+ size_t child_count;
+
+ if (strlen(name) > MAX_USERDATA_NAME_LENGTH)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ ud = to_userdata(&group->cg_item);
+ nt = userdata_to_target(ud);
+ child_count = list_count_nodes(&nt->userdata_group.cg_children);
+ if (child_count >= MAX_USERDATA_ITEMS)
+ return ERR_PTR(-ENOSPC);
+
+ udm = kzalloc(sizeof(*udm), GFP_KERNEL);
+ if (!udm)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&udm->item, name, &userdatum_type);
+ return &udm->item;
+}
+
+static void userdatum_drop(struct config_group *group, struct config_item *item)
+{
+ struct netconsole_target *nt;
+ struct userdata *ud;
+
+ ud = to_userdata(&group->cg_item);
+ nt = userdata_to_target(ud);
+
+ mutex_lock(&dynamic_netconsole_mutex);
+ update_userdata(nt);
+ config_item_put(item);
+ mutex_unlock(&dynamic_netconsole_mutex);
+}
+
+static struct configfs_attribute *userdata_attrs[] = {
+ NULL,
+};
+
+static struct configfs_group_operations userdata_ops = {
+ .make_item = userdatum_make_item,
+ .drop_item = userdatum_drop,
+};
+
+static struct config_item_type userdata_type = {
+ .ct_item_ops = &userdatum_ops,
+ .ct_group_ops = &userdata_ops,
+ .ct_attrs = userdata_attrs,
+ .ct_owner = THIS_MODULE,
+};
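+
+/* Minimal usage sketch (hypothetical target name and key; requires
+ * CONFIG_NETCONSOLE_DYNAMIC): each target now carries a "userdata" default
+ * group, and writing a key's "value" attribute appends " key=value" to every
+ * extended message sent to that target:
+ *
+ *   mkdir /sys/kernel/config/netconsole/target1/userdata/env
+ *   echo prod > /sys/kernel/config/netconsole/target1/userdata/env/value
+ */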
+
CONFIGFS_ATTR(, enabled);
CONFIGFS_ATTR(, extended);
CONFIGFS_ATTR(, dev_name);
@@ -629,6 +830,15 @@ static const struct config_item_type netconsole_target_type = {
.ct_owner = THIS_MODULE,
};
+static void init_target_config_group(struct netconsole_target *nt,
+ const char *name)
+{
+ config_group_init_type_name(&nt->group, name, &netconsole_target_type);
+ config_group_init_type_name(&nt->userdata_group, "userdata",
+ &userdata_type);
+ configfs_add_default_group(&nt->userdata_group, &nt->group);
+}
+
static struct netconsole_target *find_cmdline_target(const char *name)
{
struct netconsole_target *nt, *ret = NULL;
@@ -636,7 +846,7 @@ static struct netconsole_target *find_cmdline_target(const char *name)
spin_lock_irqsave(&target_list_lock, flags);
list_for_each_entry(nt, &target_list, list) {
- if (!strcmp(nt->item.ci_name, name)) {
+ if (!strcmp(nt->group.cg_item.ci_name, name)) {
ret = nt;
break;
}
@@ -650,8 +860,8 @@ static struct netconsole_target *find_cmdline_target(const char *name)
* Group operations and type for netconsole_subsys.
*/
-static struct config_item *make_netconsole_target(struct config_group *group,
- const char *name)
+static struct config_group *make_netconsole_target(struct config_group *group,
+ const char *name)
{
struct netconsole_target *nt;
unsigned long flags;
@@ -663,23 +873,25 @@ static struct config_item *make_netconsole_target(struct config_group *group,
if (!strncmp(name, NETCONSOLE_PARAM_TARGET_PREFIX,
strlen(NETCONSOLE_PARAM_TARGET_PREFIX))) {
nt = find_cmdline_target(name);
- if (nt)
- return &nt->item;
+ if (nt) {
+ init_target_config_group(nt, name);
+ return &nt->group;
+ }
}
nt = alloc_and_init();
if (!nt)
return ERR_PTR(-ENOMEM);
- /* Initialize the config_item member */
- config_item_init_type_name(&nt->item, name, &netconsole_target_type);
+ /* Initialize the config_group member */
+ init_target_config_group(nt, name);
/* Adding, but it is disabled */
spin_lock_irqsave(&target_list_lock, flags);
list_add(&nt->list, &target_list);
spin_unlock_irqrestore(&target_list_lock, flags);
- return &nt->item;
+ return &nt->group;
}
static void drop_netconsole_target(struct config_group *group,
@@ -699,11 +911,11 @@ static void drop_netconsole_target(struct config_group *group,
if (nt->enabled)
netpoll_cleanup(&nt->np);
- config_item_put(&nt->item);
+ config_item_put(&nt->group.cg_item);
}
static struct configfs_group_operations netconsole_subsys_group_ops = {
- .make_item = make_netconsole_target,
+ .make_group = make_netconsole_target,
.drop_item = drop_netconsole_target,
};
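+
+/* Note: make_group (rather than make_item) is what lets each target be a
+ * full config_group, so the "userdata" default group can hang off it.
+ */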
@@ -729,8 +941,7 @@ static void populate_configfs_item(struct netconsole_target *nt,
snprintf(target_name, sizeof(target_name), "%s%d",
NETCONSOLE_PARAM_TARGET_PREFIX, cmdline_count);
- config_item_init_type_name(&nt->item, target_name,
- &netconsole_target_type);
+ init_target_config_group(nt, target_name);
}
#endif /* CONFIG_NETCONSOLE_DYNAMIC */
@@ -781,6 +992,7 @@ restart:
spin_unlock_irqrestore(&target_list_lock, flags);
if (stopped) {
const char *msg = "had an event";
+
switch (event) {
case NETDEV_UNREGISTER:
msg = "unregistered";
@@ -824,19 +1036,34 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
const char *msg_ready = msg;
const char *release;
int release_len = 0;
+ int userdata_len = 0;
+ char *userdata = NULL;
+
+#ifdef CONFIG_NETCONSOLE_DYNAMIC
+ userdata = nt->userdata_complete;
+ userdata_len = nt->userdata_length;
+#endif
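+ /* Userdata, when present, is appended after the message body and is
+ * included in the fragmentation arithmetic below, so oversized
+ * payloads are split across ncfrag chunks as well.
+ */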
if (nt->release) {
release = init_utsname()->release;
release_len = strlen(release) + 1;
}
- if (msg_len + release_len <= MAX_PRINT_CHUNK) {
+ if (msg_len + release_len + userdata_len <= MAX_PRINT_CHUNK) {
/* No fragmentation needed */
if (nt->release) {
scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg);
msg_len += release_len;
- msg_ready = buf;
+ } else {
+ memcpy(buf, msg, msg_len);
}
+
+ if (userdata)
+ msg_len += scnprintf(&buf[msg_len],
+ MAX_PRINT_CHUNK - msg_len,
+ "%s", userdata);
+
+ msg_ready = buf;
netpoll_send_udp(&nt->np, msg_ready, msg_len);
return;
}
@@ -860,24 +1087,48 @@ static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg,
memcpy(buf + release_len, header, header_len);
header_len += release_len;
- while (offset < body_len) {
+ while (offset < body_len + userdata_len) {
int this_header = header_len;
- int this_chunk;
+ int this_offset = 0;
+ int this_chunk = 0;
this_header += scnprintf(buf + this_header,
sizeof(buf) - this_header,
- ",ncfrag=%d/%d;", offset, body_len);
-
- this_chunk = min(body_len - offset,
- MAX_PRINT_CHUNK - this_header);
- if (WARN_ON_ONCE(this_chunk <= 0))
- return;
-
- memcpy(buf + this_header, body + offset, this_chunk);
-
- netpoll_send_udp(&nt->np, buf, this_header + this_chunk);
+ ",ncfrag=%d/%d;", offset,
+ body_len + userdata_len);
+
+ /* Not all body data has been written yet */
+ if (offset < body_len) {
+ this_chunk = min(body_len - offset,
+ MAX_PRINT_CHUNK - this_header);
+ if (WARN_ON_ONCE(this_chunk <= 0))
+ return;
+ memcpy(buf + this_header, body + offset, this_chunk);
+ this_offset += this_chunk;
+ }
+ /* The body has been fully written and userdata is still pending,
+ * so append userdata to this chunk.
+ */
+ if (offset + this_offset >= body_len &&
+ offset + this_offset < userdata_len + body_len) {
+ int sent_userdata = (offset + this_offset) - body_len;
+ int preceding_bytes = this_chunk + this_header;
+
+ if (WARN_ON_ONCE(sent_userdata < 0))
+ return;
+
+ this_chunk = min(userdata_len - sent_userdata,
+ MAX_PRINT_CHUNK - preceding_bytes);
+ if (WARN_ON_ONCE(this_chunk <= 0))
+ return;
+ memcpy(buf + this_header + this_offset,
+ userdata + sent_userdata,
+ this_chunk);
+ this_offset += this_chunk;
+ }
- offset += this_chunk;
+ netpoll_send_udp(&nt->np, buf, this_header + this_offset);
+ offset += this_offset;
}
}
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index bcbc1e19edde..64c0cdd31bf8 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -129,7 +129,7 @@ static void nsim_bus_dev_release(struct device *dev)
complete(&nsim_bus_devs_released);
}
-static struct device_type nsim_bus_dev_type = {
+static const struct device_type nsim_bus_dev_type = {
.groups = nsim_bus_dev_attr_groups,
.release = nsim_bus_dev_release,
};
@@ -232,9 +232,154 @@ del_device_store(const struct bus_type *bus, const char *buf, size_t count)
}
static BUS_ATTR_WO(del_device);
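+
+/* Usage sketch for the two new attributes (hypothetical shell session and
+ * interface names; the netns fd must stay open in the writing process):
+ *
+ *   exec 3< /proc/self/ns/net
+ *   echo "3:$(cat /sys/class/net/eth0/ifindex) \
+ *         3:$(cat /sys/class/net/eth1/ifindex)" > /sys/bus/netdevsim/link_device
+ *   echo "3:$(cat /sys/class/net/eth0/ifindex)" > /sys/bus/netdevsim/unlink_device
+ */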
+static ssize_t link_device_store(const struct bus_type *bus, const char *buf, size_t count)
+{
+ struct netdevsim *nsim_a, *nsim_b, *peer;
+ struct net_device *dev_a, *dev_b;
+ unsigned int ifidx_a, ifidx_b;
+ int netnsfd_a, netnsfd_b, err;
+ struct net *ns_a, *ns_b;
+
+ err = sscanf(buf, "%d:%u %d:%u", &netnsfd_a, &ifidx_a, &netnsfd_b,
+ &ifidx_b);
+ if (err != 4) {
+ pr_err("Format for linking two devices is \"netnsfd_a:ifidx_a netnsfd_b:ifidx_b\" (int uint int uint).\n");
+ return -EINVAL;
+ }
+
+ ns_a = get_net_ns_by_fd(netnsfd_a);
+ if (IS_ERR(ns_a)) {
+ pr_err("Could not find netns with fd: %d\n", netnsfd_a);
+ return -EINVAL;
+ }
+
+ ns_b = get_net_ns_by_fd(netnsfd_b);
+ if (IS_ERR(ns_b)) {
+ pr_err("Could not find netns with fd: %d\n", netnsfd_b);
+ put_net(ns_a);
+ return -EINVAL;
+ }
+
+ err = -EINVAL;
+ rtnl_lock();
+ dev_a = __dev_get_by_index(ns_a, ifidx_a);
+ if (!dev_a) {
+ pr_err("Could not find device with ifindex %u in netnsfd %d\n",
+ ifidx_a, netnsfd_a);
+ goto out_err;
+ }
+
+ if (!netdev_is_nsim(dev_a)) {
+ pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n",
+ ifidx_a, netnsfd_a);
+ goto out_err;
+ }
+
+ dev_b = __dev_get_by_index(ns_b, ifidx_b);
+ if (!dev_b) {
+ pr_err("Could not find device with ifindex %u in netnsfd %d\n",
+ ifidx_b, netnsfd_b);
+ goto out_err;
+ }
+
+ if (!netdev_is_nsim(dev_b)) {
+ pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n",
+ ifidx_b, netnsfd_b);
+ goto out_err;
+ }
+
+ if (dev_a == dev_b) {
+ pr_err("Cannot link a netdevsim to itself\n");
+ goto out_err;
+ }
+
+ err = -EBUSY;
+ nsim_a = netdev_priv(dev_a);
+ peer = rtnl_dereference(nsim_a->peer);
+ if (peer) {
+ pr_err("Netdevsim %d:%u is already linked\n", netnsfd_a,
+ ifidx_a);
+ goto out_err;
+ }
+
+ nsim_b = netdev_priv(dev_b);
+ peer = rtnl_dereference(nsim_b->peer);
+ if (peer) {
+ pr_err("Netdevsim %d:%u is already linked\n", netnsfd_b,
+ ifidx_b);
+ goto out_err;
+ }
+
+ err = 0;
+ rcu_assign_pointer(nsim_a->peer, nsim_b);
+ rcu_assign_pointer(nsim_b->peer, nsim_a);
+
+out_err:
+ put_net(ns_b);
+ put_net(ns_a);
+ rtnl_unlock();
+
+ return !err ? count : err;
+}
+static BUS_ATTR_WO(link_device);
+
+static ssize_t unlink_device_store(const struct bus_type *bus, const char *buf, size_t count)
+{
+ struct netdevsim *nsim, *peer;
+ struct net_device *dev;
+ unsigned int ifidx;
+ int netnsfd, err;
+ struct net *ns;
+
+ err = sscanf(buf, "%d:%u", &netnsfd, &ifidx);
+ if (err != 2) {
+ pr_err("Format for unlinking a device is \"netnsfd:ifidx\" (int uint).\n");
+ return -EINVAL;
+ }
+
+ ns = get_net_ns_by_fd(netnsfd);
+ if (IS_ERR(ns)) {
+ pr_err("Could not find netns with fd: %d\n", netnsfd);
+ return -EINVAL;
+ }
+
+ err = -EINVAL;
+ rtnl_lock();
+ dev = __dev_get_by_index(ns, ifidx);
+ if (!dev) {
+ pr_err("Could not find device with ifindex %u in netnsfd %d\n",
+ ifidx, netnsfd);
+ goto out_put_netns;
+ }
+
+ if (!netdev_is_nsim(dev)) {
+ pr_err("Device with ifindex %u in netnsfd %d is not a netdevsim\n",
+ ifidx, netnsfd);
+ goto out_put_netns;
+ }
+
+ nsim = netdev_priv(dev);
+ peer = rtnl_dereference(nsim->peer);
+ if (!peer)
+ goto out_put_netns;
+
+ err = 0;
+ RCU_INIT_POINTER(nsim->peer, NULL);
+ RCU_INIT_POINTER(peer->peer, NULL);
+
+out_put_netns:
+ put_net(ns);
+ rtnl_unlock();
+
+ return !err ? count : err;
+}
+static BUS_ATTR_WO(unlink_device);
+
static struct attribute *nsim_bus_attrs[] = {
&bus_attr_new_device.attr,
&bus_attr_del_device.attr,
+ &bus_attr_link_device.attr,
+ &bus_attr_unlink_device.attr,
NULL
};
ATTRIBUTE_GROUPS(nsim_bus);
@@ -260,7 +405,7 @@ static int nsim_num_vf(struct device *dev)
return nsim_bus_dev->num_vfs;
}
-static struct bus_type nsim_bus = {
+static const struct bus_type nsim_bus = {
.name = DRV_NAME,
.dev_name = DRV_NAME,
.bus_groups = nsim_bus_groups,
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index b4d3b9cde8bd..92a7a36b93ac 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -835,14 +835,14 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
trap_report_dw.work);
nsim_dev = nsim_trap_data->nsim_dev;
- /* For each running port and enabled packet trap, generate a UDP
- * packet with a random 5-tuple and report it.
- */
if (!devl_trylock(priv_to_devlink(nsim_dev))) {
- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0);
+ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
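+ /* A 1-jiffy delay (instead of 0) avoids a tight reschedule loop
+ * while the devlink instance remains locked.
+ */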
return;
}
+ /* For each running port and enabled packet trap, generate a UDP
+ * packet with a random 5-tuple and report it.
+ */
list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
if (!netif_running(nsim_dev_port->ns->netdev))
continue;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 77e8250282a5..8330bc0bcb7e 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -29,18 +29,35 @@
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ unsigned int len = skb->len;
+ struct netdevsim *peer_ns;
+ rcu_read_lock();
if (!nsim_ipsec_tx(ns, skb))
- goto out;
+ goto out_drop_free;
+ peer_ns = rcu_dereference(ns->peer);
+ if (!peer_ns)
+ goto out_drop_free;
+
+ skb_tx_timestamp(skb);
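+ /* dev_forward_skb() consumes the skb even when it returns
+ * NET_RX_DROP, so on failure only the drop counter is updated.
+ */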
+ if (unlikely(dev_forward_skb(peer_ns->netdev, skb) == NET_RX_DROP))
+ goto out_drop_cnt;
+
+ rcu_read_unlock();
u64_stats_update_begin(&ns->syncp);
ns->tx_packets++;
- ns->tx_bytes += skb->len;
+ ns->tx_bytes += len;
u64_stats_update_end(&ns->syncp);
+ return NETDEV_TX_OK;
-out:
+out_drop_free:
dev_kfree_skb(skb);
-
+out_drop_cnt:
+ rcu_read_unlock();
+ u64_stats_update_begin(&ns->syncp);
+ ns->tx_dropped++;
+ u64_stats_update_end(&ns->syncp);
return NETDEV_TX_OK;
}
@@ -70,6 +87,7 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
start = u64_stats_fetch_begin(&ns->syncp);
stats->tx_bytes = ns->tx_bytes;
stats->tx_packets = ns->tx_packets;
+ stats->tx_dropped = ns->tx_dropped;
} while (u64_stats_fetch_retry(&ns->syncp, start));
}
@@ -265,6 +283,21 @@ nsim_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
+static int nsim_get_iflink(const struct net_device *dev)
+{
+ struct netdevsim *nsim, *peer;
+ int iflink;
+
+ nsim = netdev_priv(dev);
+
+ rcu_read_lock();
+ peer = rcu_dereference(nsim->peer);
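+ /* Under only the RCU read lock the peer's ifindex may change
+ * concurrently, hence READ_ONCE(); a peer-less device reports 0,
+ * matching veth/netkit behaviour.
+ */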
+ iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0;
+ rcu_read_unlock();
+
+ return iflink;
+}
+
static const struct net_device_ops nsim_netdev_ops = {
.ndo_start_xmit = nsim_start_xmit,
.ndo_set_rx_mode = nsim_set_rx_mode,
@@ -282,6 +315,7 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en,
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
+ .ndo_get_iflink = nsim_get_iflink,
.ndo_bpf = nsim_bpf,
};
@@ -302,7 +336,6 @@ static void nsim_setup(struct net_device *dev)
eth_hw_addr_random(dev);
dev->tx_queue_len = 0;
- dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
IFF_NO_QUEUE;
@@ -413,8 +446,13 @@ err_free_netdev:
void nsim_destroy(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
+ struct netdevsim *peer;
rtnl_lock();
+ peer = rtnl_dereference(ns->peer);
+ if (peer)
+ RCU_INIT_POINTER(peer->peer, NULL);
+ RCU_INIT_POINTER(ns->peer, NULL);
unregister_netdevice(dev);
if (nsim_dev_port_is_pf(ns->nsim_dev_port)) {
nsim_macsec_teardown(ns);
@@ -427,6 +465,11 @@ void nsim_destroy(struct netdevsim *ns)
free_netdev(dev);
}
+bool netdev_is_nsim(struct net_device *dev)
+{
+ return dev->netdev_ops == &nsim_netdev_ops;
+}
+
static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 028c825b86db..553c4b9b4f63 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -98,6 +98,7 @@ struct netdevsim {
u64 tx_packets;
u64 tx_bytes;
+ u64 tx_dropped;
struct u64_stats_sync syncp;
struct nsim_bus_dev *nsim_bus_dev;
@@ -125,11 +126,13 @@ struct netdevsim {
} udp_ports;
struct nsim_ethtool ethtool;
+ struct netdevsim __rcu *peer;
};
struct netdevsim *
nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port);
void nsim_destroy(struct netdevsim *ns);
+bool netdev_is_nsim(struct net_device *dev);
void nsim_ethtool_init(struct netdevsim *ns);
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 39171380ccf2..a4d2e76a8d58 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -145,7 +145,7 @@ static int netkit_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(nk->peer);
if (peer)
- iflink = peer->ifindex;
+ iflink = READ_ONCE(peer->ifindex);
rcu_read_unlock();
return iflink;
}
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 5e19a6839dea..e5a0987a263e 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -17,17 +17,6 @@ static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static int nlmon_dev_init(struct net_device *dev)
-{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- return dev->lstats == NULL ? -ENOMEM : 0;
-}
-
-static void nlmon_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
struct nlmon {
struct netlink_tap nt;
};
@@ -51,15 +40,7 @@ static int nlmon_close(struct net_device *dev)
static void
nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- u64 packets, bytes;
-
- dev_lstats_read(dev, &packets, &bytes);
-
- stats->rx_packets = packets;
- stats->tx_packets = 0;
-
- stats->rx_bytes = bytes;
- stats->tx_bytes = 0;
+ dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
}
static u32 always_on(struct net_device *dev)
@@ -72,8 +53,6 @@ static const struct ethtool_ops nlmon_ethtool_ops = {
};
static const struct net_device_ops nlmon_ops = {
- .ndo_init = nlmon_dev_init,
- .ndo_uninit = nlmon_dev_uninit,
.ndo_open = nlmon_open,
.ndo_stop = nlmon_close,
.ndo_start_xmit = nlmon_xmit,
@@ -92,6 +71,7 @@ static void nlmon_setup(struct net_device *dev)
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
NETIF_F_HIGHDMA | NETIF_F_LLTX;
dev->flags = IFF_NOARP;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
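+ /* With pcpu_stat_type set, the core allocates and frees dev->lstats,
+ * which is why the ndo_init/ndo_uninit handlers above are removed.
+ */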
/* That's rather a softlimit here, which, of course,
* can be altered. Not a real MTU, but what is to be
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index dc3962b2aa6b..853b8c138718 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -398,4 +398,5 @@ void lynx_pcs_destroy(struct phylink_pcs *pcs)
}
EXPORT_SYMBOL(lynx_pcs_destroy);
+MODULE_DESCRIPTION("NXP Lynx PCS phylink library");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 8501dd365279..4f63abe638c4 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -303,4 +303,5 @@ void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs)
}
EXPORT_SYMBOL(mtk_pcs_lynxi_destroy);
+MODULE_DESCRIPTION("MediaTek SGMII library for LynxI");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index d93f84fbb1fd..4bd66fdde367 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -183,7 +183,7 @@ static void miic_converter_enable(struct miic *miic, int port, int enable)
miic_reg_rmw(miic, MIIC_CONVRST, MIIC_CONVRST_PHYIF_RST(port), val);
}
-static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
+static int miic_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising, bool permit)
{
@@ -234,7 +234,7 @@ static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
return 0;
}
-static void miic_link_up(struct phylink_pcs *pcs, unsigned int mode,
+static void miic_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex)
{
struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
@@ -333,6 +333,7 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
miic_port->miic = miic;
miic_port->port = port - 1;
miic_port->pcs.ops = &miic_phylink_ops;
+ miic_port->pcs.neg_mode = true;
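+ /* Setting neg_mode opts this PCS into the neg_mode API: phylink then
+ * passes a PHYLINK_PCS_NEG_* value as the "neg_mode" argument of the
+ * callbacks above instead of the legacy MLO_AN_* mode.
+ */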
return &miic_port->pcs;
}
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 31f0beba638a..31525fe9c32e 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -10,7 +10,7 @@
#include <linux/pcs/pcs-xpcs.h>
#include <linux/mdio.h>
#include <linux/phylink.h>
-#include <linux/workqueue.h>
+
#include "pcs-xpcs.h"
#define phylink_pcs_to_xpcs(pl_pcs) \
@@ -130,7 +130,6 @@ static const phy_interface_t xpcs_1000basex_interfaces[] = {
static const phy_interface_t xpcs_2500basex_interfaces[] = {
PHY_INTERFACE_MODE_2500BASEX,
- PHY_INTERFACE_MODE_MAX,
};
enum {
@@ -293,7 +292,7 @@ static int xpcs_soft_reset(struct dw_xpcs *xpcs,
dev = MDIO_MMD_VEND2;
break;
default:
- return -1;
+ return -EINVAL;
}
ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
@@ -614,14 +613,15 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
xpcs = phylink_pcs_to_xpcs(pcs);
compat = xpcs_find_compat(xpcs->id, state->interface);
+ if (!compat)
+ return -EINVAL;
/* Populate the supported link modes for this PHY interface type.
* FIXME: what about the port modes and autoneg bit? This masks
* all those away.
*/
- if (compat)
- for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
- set_bit(compat->supported[i], xpcs_supported);
+ for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
+ set_bit(compat->supported[i], xpcs_supported);
linkmode_and(supported, supported, xpcs_supported);
@@ -636,8 +636,7 @@ void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
const struct xpcs_compat *compat = &xpcs->id->compat[i];
for (j = 0; j < compat->num_interfaces; j++)
- if (compat->interface[j] < PHY_INTERFACE_MODE_MAX)
- __set_bit(compat->interface[j], interfaces);
+ __set_bit(compat->interface[j], interfaces);
}
}
EXPORT_SYMBOL_GPL(xpcs_get_interfaces);
@@ -891,7 +890,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
return ret;
break;
default:
- return -1;
+ return -EINVAL;
}
if (compat->pma_config) {
@@ -1456,4 +1455,5 @@ struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
}
EXPORT_SYMBOL_GPL(xpcs_create_mdiodev);
+MODULE_DESCRIPTION("Synopsys DesignWare XPCS library");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9e2672800f0b..1df0595c5ba9 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -232,6 +232,7 @@ config MARVELL_10G_PHY
config MARVELL_88Q2XXX_PHY
tristate "Marvell 88Q2XXX PHY"
+ depends on HWMON || HWMON=n
help
Support for the Marvell 88Q2XXX 100/1000BASE-T1 Automotive Ethernet
PHYs.
@@ -335,12 +336,7 @@ config NCN26000_PHY
Currently supports the NCN26000 10BASE-T1S Industrial PHY
with MII interface.
-config AT803X_PHY
- tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
- depends on REGULATOR
- help
- Currently supports the AR8030, AR8031, AR8033, AR8035 and internal
- QCA8337(Internal qca8k PHY) model
+source "drivers/net/phy/qcom/Kconfig"
config QSEMI_PHY
tristate "Quality Semiconductor PHYs"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 6097afd44392..197acfa0b412 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_ADIN_PHY) += adin.o
obj-$(CONFIG_ADIN1100_PHY) += adin1100.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
-obj-$(CONFIG_AT803X_PHY) += at803x.o
ifdef CONFIG_AX88796B_RUST_PHY
obj-$(CONFIG_AX88796B_PHY) += ax88796b_rust.o
else
@@ -91,6 +90,7 @@ endif
obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja.o
obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
+obj-y += qcom/
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c
index 7619d6185801..85f910e2d4fb 100644
--- a/drivers/net/phy/adin1100.c
+++ b/drivers/net/phy/adin1100.c
@@ -18,6 +18,12 @@
#define PHY_ID_ADIN1110 0x0283bc91
#define PHY_ID_ADIN2111 0x0283bca1
+#define ADIN_PHY_SUBSYS_IRQ_MASK 0x0021
+#define ADIN_LINK_STAT_CHNG_IRQ_EN BIT(1)
+
+#define ADIN_PHY_SUBSYS_IRQ_STATUS 0x0011
+#define ADIN_LINK_STAT_CHNG BIT(1)
+
#define ADIN_FORCED_MODE 0x8000
#define ADIN_FORCED_MODE_EN BIT(0)
@@ -136,6 +142,53 @@ static int adin_config_aneg(struct phy_device *phydev)
return genphy_c45_config_aneg(phydev);
}
+static int adin_phy_ack_intr(struct phy_device *phydev)
+{
+ /* Clear pending interrupts */
+ int rc = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ ADIN_PHY_SUBSYS_IRQ_STATUS);
+
+ return rc < 0 ? rc : 0;
+}
+
+static int adin_config_intr(struct phy_device *phydev)
+{
+ u16 irq_mask;
+ int ret;
+
+ ret = adin_phy_ack_intr(phydev);
+ if (ret)
+ return ret;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ irq_mask = ADIN_LINK_STAT_CHNG_IRQ_EN;
+ else
+ irq_mask = 0;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ ADIN_PHY_SUBSYS_IRQ_MASK,
+ ADIN_LINK_STAT_CHNG_IRQ_EN, irq_mask);
+}
+
+static irqreturn_t adin_phy_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ ADIN_PHY_SUBSYS_IRQ_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & ADIN_LINK_STAT_CHNG))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int adin_set_powerdown_mode(struct phy_device *phydev, bool en)
{
int ret;
@@ -275,6 +328,8 @@ static struct phy_driver adin_driver[] = {
.probe = adin_probe,
.config_aneg = adin_config_aneg,
.read_status = adin_read_status,
+ .config_intr = adin_config_intr,
+ .handle_interrupt = adin_phy_handle_interrupt,
.set_loopback = adin_set_loopback,
.suspend = adin_suspend,
.resume = adin_resume,
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index 97a2fafa15ca..71bfddb8f453 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -22,9 +22,13 @@
#define PHY_ID_AQR107 0x03a1b4e0
#define PHY_ID_AQCS109 0x03a1b5c2
#define PHY_ID_AQR405 0x03a1b4b0
+#define PHY_ID_AQR111 0x03a1b610
+#define PHY_ID_AQR111B0 0x03a1b612
#define PHY_ID_AQR112 0x03a1b662
#define PHY_ID_AQR412 0x03a1b712
+#define PHY_ID_AQR113 0x31c31c40
#define PHY_ID_AQR113C 0x31c31c12
+#define PHY_ID_AQR813 0x31c31cb2
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
@@ -727,6 +731,15 @@ static int aqr113c_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_TXDIS,
+ MDIO_PMD_TXDIS_GLOBAL);
+ if (ret)
+ return ret;
+
+ ret = aqr107_wait_processor_intensive_op(phydev);
+ if (ret)
+ return ret;
+
return aqr107_fill_interface_modes(phydev);
}
@@ -746,6 +759,16 @@ static int aqr107_probe(struct phy_device *phydev)
return aqr_hwmon_probe(phydev);
}
+static int aqr111_config_init(struct phy_device *phydev)
+{
+ /* The AQR111 reports support for speeds up to 10G, but only
+ * speeds up to 5G are actually supported.
+ */
+ phy_set_max_speed(phydev, SPEED_5000);
+
+ return aqr107_config_init(phydev);
+}
+
static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQ1202),
@@ -820,6 +843,44 @@ static struct phy_driver aqr_driver[] = {
.link_change_notify = aqr107_link_change_notify,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR111),
+ .name = "Aquantia AQR111",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr111_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0),
+ .name = "Aquantia AQR111B0",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr111_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
.name = "Aquantia AQR405",
.config_aneg = aqr_config_aneg,
@@ -864,6 +925,25 @@ static struct phy_driver aqr_driver[] = {
.link_change_notify = aqr107_link_change_notify,
},
{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR113),
+ .name = "Aquantia AQR113",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr113c_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
+{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
.name = "Aquantia AQR113C",
.probe = aqr107_probe,
@@ -882,6 +962,25 @@ static struct phy_driver aqr_driver[] = {
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR813),
+ .name = "Aquantia AQR813",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr107_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+},
};
module_phy_driver(aqr_driver);
@@ -894,9 +993,13 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR111) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR113) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR813) },
{ }
};
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
deleted file mode 100644
index a62442a55774..000000000000
--- a/drivers/net/phy/at803x.c
+++ /dev/null
@@ -1,2432 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * drivers/net/phy/at803x.c
- *
- * Driver for Qualcomm Atheros AR803x PHY
- *
- * Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
- */
-
-#include <linux/phy.h>
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool_netlink.h>
-#include <linux/bitfield.h>
-#include <linux/regulator/of_regulator.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/consumer.h>
-#include <linux/of.h>
-#include <linux/phylink.h>
-#include <linux/sfp.h>
-#include <dt-bindings/net/qca-ar803x.h>
-
-#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10
-#define AT803X_SFC_ASSERT_CRS BIT(11)
-#define AT803X_SFC_FORCE_LINK BIT(10)
-#define AT803X_SFC_MDI_CROSSOVER_MODE_M GENMASK(6, 5)
-#define AT803X_SFC_AUTOMATIC_CROSSOVER 0x3
-#define AT803X_SFC_MANUAL_MDIX 0x1
-#define AT803X_SFC_MANUAL_MDI 0x0
-#define AT803X_SFC_SQE_TEST BIT(2)
-#define AT803X_SFC_POLARITY_REVERSAL BIT(1)
-#define AT803X_SFC_DISABLE_JABBER BIT(0)
-
-#define AT803X_SPECIFIC_STATUS 0x11
-#define AT803X_SS_SPEED_MASK GENMASK(15, 14)
-#define AT803X_SS_SPEED_1000 2
-#define AT803X_SS_SPEED_100 1
-#define AT803X_SS_SPEED_10 0
-#define AT803X_SS_DUPLEX BIT(13)
-#define AT803X_SS_SPEED_DUPLEX_RESOLVED BIT(11)
-#define AT803X_SS_MDIX BIT(6)
-
-#define QCA808X_SS_SPEED_MASK GENMASK(9, 7)
-#define QCA808X_SS_SPEED_2500 4
-
-#define AT803X_INTR_ENABLE 0x12
-#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
-#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14)
-#define AT803X_INTR_ENABLE_DUPLEX_CHANGED BIT(13)
-#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12)
-#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11)
-#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10)
-#define AT803X_INTR_ENABLE_LINK_FAIL_BX BIT(8)
-#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX BIT(7)
-#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5)
-#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1)
-#define AT803X_INTR_ENABLE_WOL BIT(0)
-
-#define AT803X_INTR_STATUS 0x13
-
-#define AT803X_SMART_SPEED 0x14
-#define AT803X_SMART_SPEED_ENABLE BIT(5)
-#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK GENMASK(4, 2)
-#define AT803X_SMART_SPEED_BYPASS_TIMER BIT(1)
-#define AT803X_CDT 0x16
-#define AT803X_CDT_MDI_PAIR_MASK GENMASK(9, 8)
-#define AT803X_CDT_ENABLE_TEST BIT(0)
-#define AT803X_CDT_STATUS 0x1c
-#define AT803X_CDT_STATUS_STAT_NORMAL 0
-#define AT803X_CDT_STATUS_STAT_SHORT 1
-#define AT803X_CDT_STATUS_STAT_OPEN 2
-#define AT803X_CDT_STATUS_STAT_FAIL 3
-#define AT803X_CDT_STATUS_STAT_MASK GENMASK(9, 8)
-#define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0)
-#define AT803X_LED_CONTROL 0x18
-
-#define AT803X_PHY_MMD3_WOL_CTRL 0x8012
-#define AT803X_WOL_EN BIT(5)
-#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
-#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
-#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
-#define AT803X_REG_CHIP_CONFIG 0x1f
-#define AT803X_BT_BX_REG_SEL 0x8000
-
-#define AT803X_DEBUG_ADDR 0x1D
-#define AT803X_DEBUG_DATA 0x1E
-
-#define AT803X_MODE_CFG_MASK 0x0F
-#define AT803X_MODE_CFG_BASET_RGMII 0x00
-#define AT803X_MODE_CFG_BASET_SGMII 0x01
-#define AT803X_MODE_CFG_BX1000_RGMII_50OHM 0x02
-#define AT803X_MODE_CFG_BX1000_RGMII_75OHM 0x03
-#define AT803X_MODE_CFG_BX1000_CONV_50OHM 0x04
-#define AT803X_MODE_CFG_BX1000_CONV_75OHM 0x05
-#define AT803X_MODE_CFG_FX100_RGMII_50OHM 0x06
-#define AT803X_MODE_CFG_FX100_CONV_50OHM 0x07
-#define AT803X_MODE_CFG_RGMII_AUTO_MDET 0x0B
-#define AT803X_MODE_CFG_FX100_RGMII_75OHM 0x0E
-#define AT803X_MODE_CFG_FX100_CONV_75OHM 0x0F
-
-#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
-#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
-
-#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
-#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
-#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
-#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
-
-#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
-#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
-
-#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
-#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
-#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
-#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
-
-#define AT803X_DEBUG_REG_3C 0x3C
-
-#define AT803X_DEBUG_REG_GREEN 0x3D
-#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
-
-#define AT803X_DEBUG_REG_1F 0x1F
-#define AT803X_DEBUG_PLL_ON BIT(2)
-#define AT803X_DEBUG_RGMII_1V8 BIT(3)
-
-#define MDIO_AZ_DEBUG 0x800D
-
-/* AT803x supports either the XTAL input pad, an internal PLL or the
- * DSP as clock reference for the clock output pad. The XTAL reference
- * is only used for 25 MHz output, all other frequencies need the PLL.
- * The DSP as a clock reference is used in synchronous ethernet
- * applications.
- *
- * By default the PLL is only enabled if there is a link. Otherwise
- * the PHY will go into low power state and disabled the PLL. You can
- * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
- * enabled.
- */
-#define AT803X_MMD7_CLK25M 0x8016
-#define AT803X_CLK_OUT_MASK GENMASK(4, 2)
-#define AT803X_CLK_OUT_25MHZ_XTAL 0
-#define AT803X_CLK_OUT_25MHZ_DSP 1
-#define AT803X_CLK_OUT_50MHZ_PLL 2
-#define AT803X_CLK_OUT_50MHZ_DSP 3
-#define AT803X_CLK_OUT_62_5MHZ_PLL 4
-#define AT803X_CLK_OUT_62_5MHZ_DSP 5
-#define AT803X_CLK_OUT_125MHZ_PLL 6
-#define AT803X_CLK_OUT_125MHZ_DSP 7
-
-/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
- * but doesn't support choosing between XTAL/PLL and DSP.
- */
-#define AT8035_CLK_OUT_MASK GENMASK(4, 3)
-
-#define AT803X_CLK_OUT_STRENGTH_MASK GENMASK(8, 7)
-#define AT803X_CLK_OUT_STRENGTH_FULL 0
-#define AT803X_CLK_OUT_STRENGTH_HALF 1
-#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
-
-#define AT803X_DEFAULT_DOWNSHIFT 5
-#define AT803X_MIN_DOWNSHIFT 2
-#define AT803X_MAX_DOWNSHIFT 9
-
-#define AT803X_MMD3_SMARTEEE_CTL1 0x805b
-#define AT803X_MMD3_SMARTEEE_CTL2 0x805c
-#define AT803X_MMD3_SMARTEEE_CTL3 0x805d
-#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN BIT(8)
-
-#define ATH9331_PHY_ID 0x004dd041
-#define ATH8030_PHY_ID 0x004dd076
-#define ATH8031_PHY_ID 0x004dd074
-#define ATH8032_PHY_ID 0x004dd023
-#define ATH8035_PHY_ID 0x004dd072
-#define AT8030_PHY_ID_MASK 0xffffffef
-
-#define QCA8081_PHY_ID 0x004dd101
-
-#define QCA8327_A_PHY_ID 0x004dd033
-#define QCA8327_B_PHY_ID 0x004dd034
-#define QCA8337_PHY_ID 0x004dd036
-#define QCA9561_PHY_ID 0x004dd042
-#define QCA8K_PHY_ID_MASK 0xffffffff
-
-#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
-
-#define AT803X_PAGE_FIBER 0
-#define AT803X_PAGE_COPPER 1
-
-/* don't turn off internal PLL */
-#define AT803X_KEEP_PLL_ENABLED BIT(0)
-#define AT803X_DISABLE_SMARTEEE BIT(1)
-
-/* disable hibernation mode */
-#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
-
-/* ADC threshold */
-#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80
-#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0)
-#define QCA808X_ADC_THRESHOLD_80MV 0
-#define QCA808X_ADC_THRESHOLD_100MV 0xf0
-#define QCA808X_ADC_THRESHOLD_200MV 0x0f
-#define QCA808X_ADC_THRESHOLD_300MV 0xff
-
-/* CLD control */
-#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7 0x8007
-#define QCA808X_8023AZ_AFE_CTRL_MASK GENMASK(8, 4)
-#define QCA808X_8023AZ_AFE_EN 0x90
-
-/* AZ control */
-#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL 0x8008
-#define QCA808X_MMD3_AZ_TRAINING_VAL 0x1c32
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB 0x8014
-#define QCA808X_MSE_THRESHOLD_20DB_VALUE 0x529
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB 0x800E
-#define QCA808X_MSE_THRESHOLD_17DB_VALUE 0x341
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB 0x801E
-#define QCA808X_MSE_THRESHOLD_27DB_VALUE 0x419
-
-#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB 0x8020
-#define QCA808X_MSE_THRESHOLD_28DB_VALUE 0x341
-
-#define QCA808X_PHY_MMD7_TOP_OPTION1 0x901c
-#define QCA808X_TOP_OPTION1_DATA 0x0
-
-#define QCA808X_PHY_MMD3_DEBUG_1 0xa100
-#define QCA808X_MMD3_DEBUG_1_VALUE 0x9203
-#define QCA808X_PHY_MMD3_DEBUG_2 0xa101
-#define QCA808X_MMD3_DEBUG_2_VALUE 0x48ad
-#define QCA808X_PHY_MMD3_DEBUG_3 0xa103
-#define QCA808X_MMD3_DEBUG_3_VALUE 0x1698
-#define QCA808X_PHY_MMD3_DEBUG_4 0xa105
-#define QCA808X_MMD3_DEBUG_4_VALUE 0x8001
-#define QCA808X_PHY_MMD3_DEBUG_5 0xa106
-#define QCA808X_MMD3_DEBUG_5_VALUE 0x1111
-#define QCA808X_PHY_MMD3_DEBUG_6 0xa011
-#define QCA808X_MMD3_DEBUG_6_VALUE 0x5f85
-
-/* master/slave seed config */
-#define QCA808X_PHY_DEBUG_LOCAL_SEED 9
-#define QCA808X_MASTER_SLAVE_SEED_ENABLE BIT(1)
-#define QCA808X_MASTER_SLAVE_SEED_CFG GENMASK(12, 2)
-#define QCA808X_MASTER_SLAVE_SEED_RANGE 0x32
-
-/* Hibernation yields lower power consumpiton in contrast with normal operation mode.
- * when the copper cable is unplugged, the PHY enters into hibernation mode in about 10s.
- */
-#define QCA808X_DBG_AN_TEST 0xb
-#define QCA808X_HIBERNATION_EN BIT(15)
-
-#define QCA808X_CDT_ENABLE_TEST BIT(15)
-#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
-#define QCA808X_CDT_STATUS BIT(11)
-#define QCA808X_CDT_LENGTH_UNIT BIT(10)
-
-#define QCA808X_MMD3_CDT_STATUS 0x8064
-#define QCA808X_MMD3_CDT_DIAG_PAIR_A 0x8065
-#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
-#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
-#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
-#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT GENMASK(15, 8)
-#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT GENMASK(7, 0)
-
-#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
-#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
-#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
-#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
-
-#define QCA808X_CDT_STATUS_STAT_TYPE GENMASK(1, 0)
-#define QCA808X_CDT_STATUS_STAT_FAIL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
-#define QCA808X_CDT_STATUS_STAT_NORMAL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
-#define QCA808X_CDT_STATUS_STAT_SAME_OPEN FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
-#define QCA808X_CDT_STATUS_STAT_SAME_SHORT FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
-
-#define QCA808X_CDT_STATUS_STAT_MDI GENMASK(3, 2)
-#define QCA808X_CDT_STATUS_STAT_MDI1 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
-#define QCA808X_CDT_STATUS_STAT_MDI2 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
-#define QCA808X_CDT_STATUS_STAT_MDI3 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
-
-/* NORMAL are MDI with type set to 0 */
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI1
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI1)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI1)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI2
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI2)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI2)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI3
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
- QCA808X_CDT_STATUS_STAT_MDI3)
-#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
- QCA808X_CDT_STATUS_STAT_MDI3)
-
-/* Added for reference of existence but should be handled by wait_for_completion already */
-#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
-
-/* QCA808X 1G chip type */
-#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d
-#define QCA808X_PHY_CHIP_TYPE_1G BIT(0)
-
-#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL 0x9072
-#define QCA8081_PHY_FIFO_RSTN BIT(11)
-
-MODULE_DESCRIPTION("Qualcomm Atheros AR803x and QCA808X PHY driver");
-MODULE_AUTHOR("Matus Ujhelyi");
-MODULE_LICENSE("GPL");
-
-enum stat_access_type {
- PHY,
- MMD
-};
-
-struct at803x_hw_stat {
- const char *string;
- u8 reg;
- u32 mask;
- enum stat_access_type access_type;
-};
-
-static struct at803x_hw_stat qca83xx_hw_stats[] = {
- { "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
- { "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
- { "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
-};
-
-struct at803x_ss_mask {
- u16 speed_mask;
- u8 speed_shift;
-};
-
-struct at803x_priv {
- int flags;
- u16 clk_25m_reg;
- u16 clk_25m_mask;
- u8 smarteee_lpi_tw_1g;
- u8 smarteee_lpi_tw_100m;
- bool is_fiber;
- bool is_1000basex;
- struct regulator_dev *vddio_rdev;
- struct regulator_dev *vddh_rdev;
- u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
-};
-
-struct at803x_context {
- u16 bmcr;
- u16 advertise;
- u16 control1000;
- u16 int_enable;
- u16 smart_speed;
- u16 led_control;
-};
-
-static int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
-{
- int ret;
-
- ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
- if (ret < 0)
- return ret;
-
- return phy_write(phydev, AT803X_DEBUG_DATA, data);
-}
-
-static int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
-{
- int ret;
-
- ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
- if (ret < 0)
- return ret;
-
- return phy_read(phydev, AT803X_DEBUG_DATA);
-}
-
-static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
- u16 clear, u16 set)
-{
- u16 val;
- int ret;
-
- ret = at803x_debug_reg_read(phydev, reg);
- if (ret < 0)
- return ret;
-
- val = ret & 0xffff;
- val &= ~clear;
- val |= set;
-
- return phy_write(phydev, AT803X_DEBUG_DATA, val);
-}
-
-static int at803x_write_page(struct phy_device *phydev, int page)
-{
- int mask;
- int set;
-
- if (page == AT803X_PAGE_COPPER) {
- set = AT803X_BT_BX_REG_SEL;
- mask = 0;
- } else {
- set = 0;
- mask = AT803X_BT_BX_REG_SEL;
- }
-
- return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set);
-}
-
-static int at803x_read_page(struct phy_device *phydev)
-{
- int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG);
-
- if (ccr < 0)
- return ccr;
-
- if (ccr & AT803X_BT_BX_REG_SEL)
- return AT803X_PAGE_COPPER;
-
- return AT803X_PAGE_FIBER;
-}
-
-static int at803x_enable_rx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
- AT803X_DEBUG_RX_CLK_DLY_EN);
-}
-
-static int at803x_enable_tx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
- AT803X_DEBUG_TX_CLK_DLY_EN);
-}
-
-static int at803x_disable_rx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- AT803X_DEBUG_RX_CLK_DLY_EN, 0);
-}
-
-static int at803x_disable_tx_delay(struct phy_device *phydev)
-{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
- AT803X_DEBUG_TX_CLK_DLY_EN, 0);
-}
-
-/* save relevant PHY registers to private copy */
-static void at803x_context_save(struct phy_device *phydev,
- struct at803x_context *context)
-{
- context->bmcr = phy_read(phydev, MII_BMCR);
- context->advertise = phy_read(phydev, MII_ADVERTISE);
- context->control1000 = phy_read(phydev, MII_CTRL1000);
- context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
- context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
- context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
-}
-
-/* restore relevant PHY registers from private copy */
-static void at803x_context_restore(struct phy_device *phydev,
- const struct at803x_context *context)
-{
- phy_write(phydev, MII_BMCR, context->bmcr);
- phy_write(phydev, MII_ADVERTISE, context->advertise);
- phy_write(phydev, MII_CTRL1000, context->control1000);
- phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
- phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
- phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
-}
-
-static int at803x_set_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int ret, irq_enabled;
-
- if (wol->wolopts & WAKE_MAGIC) {
- struct net_device *ndev = phydev->attached_dev;
- const u8 *mac;
- unsigned int i;
- static const unsigned int offsets[] = {
- AT803X_LOC_MAC_ADDR_32_47_OFFSET,
- AT803X_LOC_MAC_ADDR_16_31_OFFSET,
- AT803X_LOC_MAC_ADDR_0_15_OFFSET,
- };
-
- if (!ndev)
- return -ENODEV;
-
- mac = (const u8 *)ndev->dev_addr;
-
- if (!is_valid_ether_addr(mac))
- return -EINVAL;
-
- for (i = 0; i < 3; i++)
- phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
- mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
-
- /* Enable WOL interrupt */
- ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
- if (ret)
- return ret;
- } else {
- /* Disable WOL interrupt */
- ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
- if (ret)
- return ret;
- }
-
- /* Clear WOL status */
- ret = phy_read(phydev, AT803X_INTR_STATUS);
- if (ret < 0)
- return ret;
-
- /* Check if there are other interrupts except for WOL triggered when PHY is
- * in interrupt mode, only the interrupts enabled by AT803X_INTR_ENABLE can
- * be passed up to the interrupt PIN.
- */
- irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
- if (irq_enabled < 0)
- return irq_enabled;
-
- irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
- if (ret & irq_enabled && !phy_polling_mode(phydev))
- phy_trigger_machine(phydev);
-
- return 0;
-}
-
-static void at803x_get_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int value;
-
- wol->supported = WAKE_MAGIC;
- wol->wolopts = 0;
-
- value = phy_read(phydev, AT803X_INTR_ENABLE);
- if (value < 0)
- return;
-
- if (value & AT803X_INTR_ENABLE_WOL)
- wol->wolopts |= WAKE_MAGIC;
-}
-
-static int qca83xx_get_sset_count(struct phy_device *phydev)
-{
- return ARRAY_SIZE(qca83xx_hw_stats);
-}
-
-static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
- strscpy(data + i * ETH_GSTRING_LEN,
- qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
- }
-}
-
-static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
-{
- struct at803x_hw_stat stat = qca83xx_hw_stats[i];
- struct at803x_priv *priv = phydev->priv;
- int val;
- u64 ret;
-
- if (stat.access_type == MMD)
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
- else
- val = phy_read(phydev, stat.reg);
-
- if (val < 0) {
- ret = U64_MAX;
- } else {
- val = val & stat.mask;
- priv->stats[i] += val;
- ret = priv->stats[i];
- }
-
- return ret;
-}
-
-static void qca83xx_get_stats(struct phy_device *phydev,
- struct ethtool_stats *stats, u64 *data)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
- data[i] = qca83xx_get_stat(phydev, i);
-}
-
-static int at803x_suspend(struct phy_device *phydev)
-{
- int value;
- int wol_enabled;
-
- value = phy_read(phydev, AT803X_INTR_ENABLE);
- wol_enabled = value & AT803X_INTR_ENABLE_WOL;
-
- if (wol_enabled)
- value = BMCR_ISOLATE;
- else
- value = BMCR_PDOWN;
-
- phy_modify(phydev, MII_BMCR, 0, value);
-
- return 0;
-}
-
-static int at803x_resume(struct phy_device *phydev)
-{
- return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
-}
-
-static int at803x_parse_dt(struct phy_device *phydev)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- struct at803x_priv *priv = phydev->priv;
- u32 freq, strength, tw;
- unsigned int sel;
- int ret;
-
- if (!IS_ENABLED(CONFIG_OF_MDIO))
- return 0;
-
- if (of_property_read_bool(node, "qca,disable-smarteee"))
- priv->flags |= AT803X_DISABLE_SMARTEEE;
-
- if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
- priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
-
- if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
- if (!tw || tw > 255) {
- phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
- return -EINVAL;
- }
- priv->smarteee_lpi_tw_1g = tw;
- }
-
- if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
- if (!tw || tw > 255) {
- phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
- return -EINVAL;
- }
- priv->smarteee_lpi_tw_100m = tw;
- }
-
- ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
- if (!ret) {
- switch (freq) {
- case 25000000:
- sel = AT803X_CLK_OUT_25MHZ_XTAL;
- break;
- case 50000000:
- sel = AT803X_CLK_OUT_50MHZ_PLL;
- break;
- case 62500000:
- sel = AT803X_CLK_OUT_62_5MHZ_PLL;
- break;
- case 125000000:
- sel = AT803X_CLK_OUT_125MHZ_PLL;
- break;
- default:
- phydev_err(phydev, "invalid qca,clk-out-frequency\n");
- return -EINVAL;
- }
-
- priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
- priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
- }
-
- ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
- if (!ret) {
- priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
- switch (strength) {
- case AR803X_STRENGTH_FULL:
- priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
- break;
- case AR803X_STRENGTH_HALF:
- priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
- break;
- case AR803X_STRENGTH_QUARTER:
- priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
- break;
- default:
- phydev_err(phydev, "invalid qca,clk-out-strength\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int at803x_probe(struct phy_device *phydev)
-{
- struct device *dev = &phydev->mdio.dev;
- struct at803x_priv *priv;
- int ret;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- phydev->priv = priv;
-
- ret = at803x_parse_dt(phydev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int at803x_get_features(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int err;
-
- err = genphy_read_abilities(phydev);
- if (err)
- return err;
-
- if (phydev->drv->phy_id != ATH8031_PHY_ID)
- return 0;
-
- /* AR8031/AR8033 have different status registers
- * for copper and fiber operation. However, the
- * extended status register is the same for both
- * operation modes.
- *
- * As a result of that, ESTATUS_1000_XFULL is set
- * to 1 even when operating in copper TP mode.
- *
- * Remove this mode from the supported link modes
- * when not operating in 1000BaseX mode.
- */
- if (!priv->is_1000basex)
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- phydev->supported);
-
- return 0;
-}
-
-static int at803x_smarteee_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- u16 mask = 0, val = 0;
- int ret;
-
- if (priv->flags & AT803X_DISABLE_SMARTEEE)
- return phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_MMD3_SMARTEEE_CTL3,
- AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
-
- if (priv->smarteee_lpi_tw_1g) {
- mask |= 0xff00;
- val |= priv->smarteee_lpi_tw_1g << 8;
- }
- if (priv->smarteee_lpi_tw_100m) {
- mask |= 0x00ff;
- val |= priv->smarteee_lpi_tw_100m;
- }
- if (!mask)
- return 0;
-
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
- mask, val);
- if (ret)
- return ret;
-
- return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
- AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
- AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
-}
-
-static int at803x_clk_out_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- if (!priv->clk_25m_mask)
- return 0;
-
- return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
- priv->clk_25m_mask, priv->clk_25m_reg);
-}
-
-static int at8031_pll_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- /* The default after hardware reset is PLL OFF. After a soft reset, the
- * values are retained.
- */
- if (priv->flags & AT803X_KEEP_PLL_ENABLED)
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- 0, AT803X_DEBUG_PLL_ON);
- else
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- AT803X_DEBUG_PLL_ON, 0);
-}
-
-static int at803x_hibernation_mode_config(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- /* The default after hardware reset is hibernation mode enabled. After
- * software reset, the value is retained.
- */
- if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
- return 0;
-
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
- AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
-}
-
-static int at803x_config_init(struct phy_device *phydev)
-{
- int ret;
-
- /* The RX and TX delay default is:
- * after HW reset: RX delay enabled and TX delay disabled
- * after SW reset: RX delay enabled, while TX delay retains the
- * value before reset.
- */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- ret = at803x_enable_rx_delay(phydev);
- else
- ret = at803x_disable_rx_delay(phydev);
- if (ret < 0)
- return ret;
-
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- ret = at803x_enable_tx_delay(phydev);
- else
- ret = at803x_disable_tx_delay(phydev);
- if (ret < 0)
- return ret;
-
- ret = at803x_smarteee_config(phydev);
- if (ret < 0)
- return ret;
-
- ret = at803x_clk_out_config(phydev);
- if (ret < 0)
- return ret;
-
- ret = at803x_hibernation_mode_config(phydev);
- if (ret < 0)
- return ret;
-
- /* Ar803x extended next page bit is enabled by default. Cisco
- * multigig switches read this bit and attempt to negotiate 10Gbps
- * rates even if the next page bit is disabled. This is incorrect
- * behaviour but we still need to accommodate it. XNP is only needed
- * for 10Gbps support, so disable XNP.
- */
- return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
-}
-
-static int at803x_ack_interrupt(struct phy_device *phydev)
-{
- int err;
-
- err = phy_read(phydev, AT803X_INTR_STATUS);
-
- return (err < 0) ? err : 0;
-}
-
-static int at803x_config_intr(struct phy_device *phydev)
-{
- int err;
- int value;
-
- value = phy_read(phydev, AT803X_INTR_ENABLE);
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* Clear any pending interrupts */
- err = at803x_ack_interrupt(phydev);
- if (err)
- return err;
-
- value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
- value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
- value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
- value |= AT803X_INTR_ENABLE_LINK_FAIL;
- value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
-
- err = phy_write(phydev, AT803X_INTR_ENABLE, value);
- } else {
- err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
- if (err)
- return err;
-
- /* Clear any pending interrupts */
- err = at803x_ack_interrupt(phydev);
- }
-
- return err;
-}
-
-static irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
-{
- int irq_status, int_enabled;
-
- irq_status = phy_read(phydev, AT803X_INTR_STATUS);
- if (irq_status < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- /* Read the current enabled interrupts */
- int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
- if (int_enabled < 0) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- /* See if this was one of our enabled interrupts */
- if (!(irq_status & int_enabled))
- return IRQ_NONE;
-
- phy_trigger_machine(phydev);
-
- return IRQ_HANDLED;
-}
-
-static void at803x_link_change_notify(struct phy_device *phydev)
-{
- /*
- * Conduct a hardware reset for AT8030 every time a link loss is
- * signalled. This is necessary to circumvent a hardware bug that
- * occurs when the cable is unplugged while TX packets are pending
- * in the FIFO. In such cases, the FIFO enters an error mode it
- * cannot recover from by software.
- */
- if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) {
- struct at803x_context context;
-
- at803x_context_save(phydev, &context);
-
- phy_device_reset(phydev, 1);
- usleep_range(1000, 2000);
- phy_device_reset(phydev, 0);
- usleep_range(1000, 2000);
-
- at803x_context_restore(phydev, &context);
-
- phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
- }
-}
-
-static int at803x_read_specific_status(struct phy_device *phydev,
- struct at803x_ss_mask ss_mask)
-{
- int ss;
-
- /* Read the AT8035 PHY-Specific Status register, which indicates the
- * speed and duplex that the PHY is actually using, irrespective of
- * whether we are in autoneg mode or not.
- */
- ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
- if (ss < 0)
- return ss;
-
- if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
- int sfc, speed;
-
- sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
- if (sfc < 0)
- return sfc;
-
- speed = ss & ss_mask.speed_mask;
- speed >>= ss_mask.speed_shift;
-
- switch (speed) {
- case AT803X_SS_SPEED_10:
- phydev->speed = SPEED_10;
- break;
- case AT803X_SS_SPEED_100:
- phydev->speed = SPEED_100;
- break;
- case AT803X_SS_SPEED_1000:
- phydev->speed = SPEED_1000;
- break;
- case QCA808X_SS_SPEED_2500:
- phydev->speed = SPEED_2500;
- break;
- }
- if (ss & AT803X_SS_DUPLEX)
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
-
- if (ss & AT803X_SS_MDIX)
- phydev->mdix = ETH_TP_MDI_X;
- else
- phydev->mdix = ETH_TP_MDI;
-
- switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
- case AT803X_SFC_MANUAL_MDI:
- phydev->mdix_ctrl = ETH_TP_MDI;
- break;
- case AT803X_SFC_MANUAL_MDIX:
- phydev->mdix_ctrl = ETH_TP_MDI_X;
- break;
- case AT803X_SFC_AUTOMATIC_CROSSOVER:
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
- break;
- }
- }
-
- return 0;
-}
-
-static int at803x_read_status(struct phy_device *phydev)
-{
- struct at803x_ss_mask ss_mask = { 0 };
- int err, old_link = phydev->link;
-
- /* Update the link, but return if there was an error */
- err = genphy_update_link(phydev);
- if (err)
- return err;
-
- /* why bother the PHY if nothing can have changed */
- if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
- return 0;
-
- phydev->speed = SPEED_UNKNOWN;
- phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
- err = genphy_read_lpa(phydev);
- if (err < 0)
- return err;
-
- ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
- ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
- err = at803x_read_specific_status(phydev, ss_mask);
- if (err < 0)
- return err;
-
- if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
- phy_resolve_aneg_pause(phydev);
-
- return 0;
-}
-
-static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
-{
- u16 val;
-
- switch (ctrl) {
- case ETH_TP_MDI:
- val = AT803X_SFC_MANUAL_MDI;
- break;
- case ETH_TP_MDI_X:
- val = AT803X_SFC_MANUAL_MDIX;
- break;
- case ETH_TP_MDI_AUTO:
- val = AT803X_SFC_AUTOMATIC_CROSSOVER;
- break;
- default:
- return 0;
- }
-
- return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
- AT803X_SFC_MDI_CROSSOVER_MODE_M,
- FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
-}
-
-static int at803x_prepare_config_aneg(struct phy_device *phydev)
-{
- int ret;
-
- ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
- if (ret < 0)
- return ret;
-
-	/* Changes of the mdix bits are disruptive to the normal operation;
-	 * therefore any changes to these registers must be followed by a
-	 * software reset to take effect.
-	 */
- if (ret == 1) {
- ret = genphy_soft_reset(phydev);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
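
An aside on the helper used above: phy_modify_changed() returns a negative
errno on MDIO failure, 0 when the register already held the requested value,
and 1 when it actually wrote a new value. That tri-state return is what lets
at803x_prepare_config_aneg() issue the disruptive soft reset only when the
MDI/MDIX setting really changed. A minimal caller-side sketch of the same
pattern (error handling condensed):

	ret = phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
				 AT803X_SFC_MDI_CROSSOVER_MODE_M,
				 FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M,
					    AT803X_SFC_AUTOMATIC_CROSSOVER));
	if (ret < 0)
		return ret;				/* MDIO error */
	if (ret == 1)
		return genphy_soft_reset(phydev);	/* changed: apply it */
	return 0;					/* unchanged: no reset */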
-
-static int at803x_config_aneg(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int ret;
-
- ret = at803x_prepare_config_aneg(phydev);
- if (ret)
- return ret;
-
- if (priv->is_1000basex)
- return genphy_c37_config_aneg(phydev);
-
- return genphy_config_aneg(phydev);
-}
-
-static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
-{
- int val;
-
- val = phy_read(phydev, AT803X_SMART_SPEED);
- if (val < 0)
- return val;
-
- if (val & AT803X_SMART_SPEED_ENABLE)
- *d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
- else
- *d = DOWNSHIFT_DEV_DISABLE;
-
- return 0;
-}
-
-static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
-{
- u16 mask, set;
- int ret;
-
- switch (cnt) {
- case DOWNSHIFT_DEV_DEFAULT_COUNT:
- cnt = AT803X_DEFAULT_DOWNSHIFT;
- fallthrough;
- case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
- set = AT803X_SMART_SPEED_ENABLE |
- AT803X_SMART_SPEED_BYPASS_TIMER |
- FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
- mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
- break;
- case DOWNSHIFT_DEV_DISABLE:
- set = 0;
- mask = AT803X_SMART_SPEED_ENABLE |
- AT803X_SMART_SPEED_BYPASS_TIMER;
- break;
- default:
- return -EINVAL;
- }
-
- ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
-
-	/* After changing the smart speed settings, we need to perform a
-	 * software reset. Use phy_init_hw() to run it and to reapply any
-	 * values which might have been lost during the software reset.
-	 */
- if (ret == 1)
- ret = phy_init_hw(phydev);
-
- return ret;
-}
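
The retry-limit field above stores the downshift count minus two. The exact
AT803X_MIN/MAX/DEFAULT_DOWNSHIFT values are not visible in this hunk; the
standalone sketch below assumes a 2..9 count range purely to illustrate the
encode/decode round trip:

	#include <stdio.h>

	/* Assumed encoding: field = count - 2, count = field + 2 */
	static unsigned int ds_encode(unsigned int cnt) { return cnt - 2; }
	static unsigned int ds_decode(unsigned int fld) { return fld + 2; }

	int main(void)
	{
		for (unsigned int cnt = 2; cnt <= 9; cnt++)
			printf("count %u -> field %u -> count %u\n",
			       cnt, ds_encode(cnt), ds_decode(ds_encode(cnt)));
		return 0;
	}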
-
-static int at803x_get_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, void *data)
-{
- switch (tuna->id) {
- case ETHTOOL_PHY_DOWNSHIFT:
- return at803x_get_downshift(phydev, data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int at803x_set_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, const void *data)
-{
- switch (tuna->id) {
- case ETHTOOL_PHY_DOWNSHIFT:
- return at803x_set_downshift(phydev, *(const u8 *)data);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int at803x_cable_test_result_trans(u16 status)
-{
- switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
- case AT803X_CDT_STATUS_STAT_NORMAL:
- return ETHTOOL_A_CABLE_RESULT_CODE_OK;
- case AT803X_CDT_STATUS_STAT_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
- case AT803X_CDT_STATUS_STAT_OPEN:
- return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
- case AT803X_CDT_STATUS_STAT_FAIL:
- default:
- return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
- }
-}
-
-static bool at803x_cdt_test_failed(u16 status)
-{
- return FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status) ==
- AT803X_CDT_STATUS_STAT_FAIL;
-}
-
-static bool at803x_cdt_fault_length_valid(u16 status)
-{
- switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
- case AT803X_CDT_STATUS_STAT_OPEN:
- case AT803X_CDT_STATUS_STAT_SHORT:
- return true;
- }
- return false;
-}
-
-static int at803x_cdt_fault_length(int dt)
-{
- /* According to the datasheet the distance to the fault is
- * DELTA_TIME * 0.824 meters.
- *
-	 * The author suspects the correct formula is:
-	 *
-	 * fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
-	 *
-	 * where c is the speed of light, VF is the velocity factor of
-	 * the twisted pair cable and 125MHz is the counter frequency;
-	 * we need to divide by 2 because the hardware measures the
-	 * round trip time to the fault and back to the PHY.
- *
- * With a VF of 0.69 we get the factor 0.824 mentioned in the
- * datasheet.
- */
- return (dt * 824) / 10;
-}
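
A quick worked example of the integer arithmetic above, as a standalone
program: the result is a length in centimeters (0.824 m per time-delta unit
equals 824/10 cm, the unit ethnl_cable_test_fault_length() reports), so a
delta time of 100 units places the fault at 82.4 m:

	#include <stdio.h>

	static int fault_length_cm(int dt)
	{
		return (dt * 824) / 10;		/* same formula as the driver */
	}

	int main(void)
	{
		printf("%d cm\n", fault_length_cm(100));	/* prints 8240 */
		return 0;
	}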
-
-static int at803x_cdt_start(struct phy_device *phydev,
- u32 cdt_start)
-{
- return phy_write(phydev, AT803X_CDT, cdt_start);
-}
-
-static int at803x_cdt_wait_for_completion(struct phy_device *phydev,
- u32 cdt_en)
-{
- int val, ret;
-
- /* One test run takes about 25ms */
- ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
- !(val & cdt_en),
- 30000, 100000, true);
-
- return ret < 0 ? ret : 0;
-}
-
-static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
-{
- static const int ethtool_pair[] = {
- ETHTOOL_A_CABLE_PAIR_A,
- ETHTOOL_A_CABLE_PAIR_B,
- ETHTOOL_A_CABLE_PAIR_C,
- ETHTOOL_A_CABLE_PAIR_D,
- };
- int ret, val;
-
- val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
- AT803X_CDT_ENABLE_TEST;
- ret = at803x_cdt_start(phydev, val);
- if (ret)
- return ret;
-
- ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
- if (ret)
- return ret;
-
- val = phy_read(phydev, AT803X_CDT_STATUS);
- if (val < 0)
- return val;
-
- if (at803x_cdt_test_failed(val))
- return 0;
-
- ethnl_cable_test_result(phydev, ethtool_pair[pair],
- at803x_cable_test_result_trans(val));
-
- if (at803x_cdt_fault_length_valid(val)) {
- val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
- ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
- at803x_cdt_fault_length(val));
- }
-
- return 1;
-}
-
-static int at803x_cable_test_get_status(struct phy_device *phydev,
- bool *finished, unsigned long pair_mask)
-{
- int retries = 20;
- int pair, ret;
-
- *finished = false;
-
- /* According to the datasheet the CDT can be performed when
- * there is no link partner or when the link partner is
- * auto-negotiating. Starting the test will restart the AN
-	 * automatically. It seems that by retrying repeatedly we
-	 * eventually hit a slot where our link partner doesn't disturb
-	 * our measurement.
- */
- while (pair_mask && retries--) {
- for_each_set_bit(pair, &pair_mask, 4) {
- ret = at803x_cable_test_one_pair(phydev, pair);
- if (ret < 0)
- return ret;
- if (ret)
- clear_bit(pair, &pair_mask);
- }
- if (pair_mask)
- msleep(250);
- }
-
- *finished = true;
-
- return 0;
-}
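
Here pair_mask carries one bit per MDI pair, bit 0 being pair A; that is why
the wrappers further below pass 0xf on gigabit parts (all four pairs) and 0x3
on 10/100-only parts (pairs A and B). A caller-side usage sketch:

	bool finished;
	int ret;

	/* Gigabit PHYs (e.g. AR8031/AR8035): test pairs A-D */
	ret = at803x_cable_test_get_status(phydev, &finished, 0xf);

	/* 10/100 PHYs (e.g. AR8032/AR9331/QCA9561): pairs A and B only */
	ret = at803x_cable_test_get_status(phydev, &finished, 0x3);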
-
-static void at803x_cable_test_autoneg(struct phy_device *phydev)
-{
- /* Enable auto-negotiation, but advertise no capabilities, no link
- * will be established. A restart of the auto-negotiation is not
- * required, because the cable test will automatically break the link.
- */
- phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
- phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
-}
-
-static int at803x_cable_test_start(struct phy_device *phydev)
-{
- at803x_cable_test_autoneg(phydev);
- /* we do all the (time consuming) work later */
- return 0;
-}
-
-static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
- unsigned int selector)
-{
- struct phy_device *phydev = rdev_get_drvdata(rdev);
-
- if (selector)
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- 0, AT803X_DEBUG_RGMII_1V8);
- else
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
- AT803X_DEBUG_RGMII_1V8, 0);
-}
-
-static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct phy_device *phydev = rdev_get_drvdata(rdev);
- int val;
-
- val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
- if (val < 0)
- return val;
-
- return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
-}
-
-static const struct regulator_ops vddio_regulator_ops = {
- .list_voltage = regulator_list_voltage_table,
- .set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
- .get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
-};
-
-static const unsigned int vddio_voltage_table[] = {
- 1500000,
- 1800000,
-};
-
-static const struct regulator_desc vddio_desc = {
- .name = "vddio",
- .of_match = of_match_ptr("vddio-regulator"),
- .n_voltages = ARRAY_SIZE(vddio_voltage_table),
- .volt_table = vddio_voltage_table,
- .ops = &vddio_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
-};
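
The regulator core hands set_voltage_sel an index into vddio_voltage_table,
and the driver folds that index into a single debug-register bit. A
standalone sketch of the resulting mapping:

	#include <stdio.h>

	static const unsigned int vddio_voltage_table[] = { 1500000, 1800000 };

	int main(void)
	{
		/* selector 0 -> 1.5 V, RGMII_1V8 bit clear;
		 * selector 1 -> 1.8 V, RGMII_1V8 bit set
		 */
		for (unsigned int sel = 0; sel < 2; sel++)
			printf("selector %u -> %u uV (RGMII_1V8 %s)\n", sel,
			       vddio_voltage_table[sel], sel ? "set" : "clear");
		return 0;
	}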
-
-static const struct regulator_ops vddh_regulator_ops = {
-};
-
-static const struct regulator_desc vddh_desc = {
- .name = "vddh",
- .of_match = of_match_ptr("vddh-regulator"),
- .n_voltages = 1,
- .fixed_uV = 2500000,
- .ops = &vddh_regulator_ops,
- .type = REGULATOR_VOLTAGE,
- .owner = THIS_MODULE,
-};
-
-static int at8031_register_regulators(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- struct device *dev = &phydev->mdio.dev;
- struct regulator_config config = { };
-
- config.dev = dev;
- config.driver_data = phydev;
-
- priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
- if (IS_ERR(priv->vddio_rdev)) {
- phydev_err(phydev, "failed to register VDDIO regulator\n");
- return PTR_ERR(priv->vddio_rdev);
- }
-
- priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
- if (IS_ERR(priv->vddh_rdev)) {
- phydev_err(phydev, "failed to register VDDH regulator\n");
- return PTR_ERR(priv->vddh_rdev);
- }
-
- return 0;
-}
-
-static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
-{
- struct phy_device *phydev = upstream;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
- DECLARE_PHY_INTERFACE_MASK(interfaces);
- phy_interface_t iface;
-
- linkmode_zero(phy_support);
- phylink_set(phy_support, 1000baseX_Full);
- phylink_set(phy_support, 1000baseT_Full);
- phylink_set(phy_support, Autoneg);
- phylink_set(phy_support, Pause);
- phylink_set(phy_support, Asym_Pause);
-
- linkmode_zero(sfp_support);
- sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
-	/* Some modules advertise 10G modes in addition to the modes we
-	 * support. Mask out the unsupported modes so the correct interface
-	 * is picked.
-	 */
- linkmode_and(sfp_support, phy_support, sfp_support);
-
- if (linkmode_empty(sfp_support)) {
- dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
- return -EINVAL;
- }
-
- iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
-
- /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
- * interface for use with SFP modules.
- * However, some copper modules detected as having a preferred SGMII
- * interface do default to and function in 1000Base-X mode, so just
- * print a warning and allow such modules, as they may have some chance
- * of working.
- */
- if (iface == PHY_INTERFACE_MODE_SGMII)
- dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
- else if (iface != PHY_INTERFACE_MODE_1000BASEX)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct sfp_upstream_ops at8031_sfp_ops = {
- .attach = phy_sfp_attach,
- .detach = phy_sfp_detach,
- .module_insert = at8031_sfp_insert,
-};
-
-static int at8031_parse_dt(struct phy_device *phydev)
-{
- struct device_node *node = phydev->mdio.dev.of_node;
- struct at803x_priv *priv = phydev->priv;
- int ret;
-
- if (of_property_read_bool(node, "qca,keep-pll-enabled"))
- priv->flags |= AT803X_KEEP_PLL_ENABLED;
-
- ret = at8031_register_regulators(phydev);
- if (ret < 0)
- return ret;
-
- ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
- "vddio");
- if (ret) {
- phydev_err(phydev, "failed to get VDDIO regulator\n");
- return ret;
- }
-
- /* Only AR8031/8033 support 1000Base-X for SFP modules */
- return phy_sfp_probe(phydev, &at8031_sfp_ops);
-}
-
-static int at8031_probe(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int mode_cfg;
- int ccr;
- int ret;
-
- ret = at803x_probe(phydev);
- if (ret)
- return ret;
-
- /* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
- * options.
- */
- ret = at8031_parse_dt(phydev);
- if (ret)
- return ret;
-
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- if (ccr < 0)
- return ccr;
- mode_cfg = ccr & AT803X_MODE_CFG_MASK;
-
- switch (mode_cfg) {
- case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
- case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
- priv->is_1000basex = true;
- fallthrough;
- case AT803X_MODE_CFG_FX100_RGMII_50OHM:
- case AT803X_MODE_CFG_FX100_RGMII_75OHM:
- priv->is_fiber = true;
- break;
- }
-
-	/* Disable WoL in the 1588 register, which is enabled
-	 * by default
-	 */
- return phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
-}
-
-static int at8031_config_init(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int ret;
-
- /* Some bootloaders leave the fiber page selected.
- * Switch to the appropriate page (fiber or copper), as otherwise we
- * read the PHY capabilities from the wrong page.
- */
- phy_lock_mdio_bus(phydev);
- ret = at803x_write_page(phydev,
- priv->is_fiber ? AT803X_PAGE_FIBER :
- AT803X_PAGE_COPPER);
- phy_unlock_mdio_bus(phydev);
- if (ret)
- return ret;
-
- ret = at8031_pll_config(phydev);
- if (ret < 0)
- return ret;
-
- return at803x_config_init(phydev);
-}
-
-static int at8031_set_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
-{
- int ret;
-
-	/* First set up the MAC address and enable the WOL interrupt */
- ret = at803x_set_wol(phydev, wol);
- if (ret)
- return ret;
-
- if (wol->wolopts & WAKE_MAGIC)
- /* Enable WOL function for 1588 */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- 0, AT803X_WOL_EN);
- else
- /* Disable WoL function for 1588 */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
- AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
-
- return ret;
-}
-
-static int at8031_config_intr(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
- int err, value = 0;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
- priv->is_fiber) {
- /* Clear any pending interrupts */
- err = at803x_ack_interrupt(phydev);
- if (err)
- return err;
-
- value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
- value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
-
- err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
- if (err)
- return err;
- }
-
- return at803x_config_intr(phydev);
-}
-
-/* AR8031 and AR8033 share the same read status logic */
-static int at8031_read_status(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
- if (priv->is_1000basex)
- return genphy_c37_read_status(phydev);
-
- return at803x_read_status(phydev);
-}
-
-/* AR8031 and AR8035 share the same cable test get status reg */
-static int at8031_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
-{
- return at803x_cable_test_get_status(phydev, finished, 0xf);
-}
-
-/* AR8031 and AR8035 share the same cable test start logic */
-static int at8031_cable_test_start(struct phy_device *phydev)
-{
- at803x_cable_test_autoneg(phydev);
- phy_write(phydev, MII_CTRL1000, 0);
- /* we do all the (time consuming) work later */
- return 0;
-}
-
-/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
-static int at8032_cable_test_get_status(struct phy_device *phydev,
- bool *finished)
-{
- return at803x_cable_test_get_status(phydev, finished, 0x3);
-}
-
-static int at8035_parse_dt(struct phy_device *phydev)
-{
- struct at803x_priv *priv = phydev->priv;
-
-	/* The mask is set by the generic at803x_parse_dt() if the
-	 * property is present. Assume the property is present when
-	 * the mask is non-zero.
-	 */
- if (priv->clk_25m_mask) {
-		/* Fixup for the AR8030/AR8035. These chips use a different
-		 * mask and don't support the DSP reference, which is selected
-		 * by the lowest bit of the mask. The upper two bits select
-		 * the same frequencies, so mask out the lowest bit here.
-		 *
-		 * Warning:
-		 * No datasheet was available for the AR8030, so this is just
-		 * a guess. But the AR8035 is listed as pin compatible to the
-		 * AR8030, so there is a good chance it works on the AR8030
-		 * too.
-		 */
- priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
- priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
- }
-
- return 0;
-}
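
The value of AT8035_CLK_OUT_MASK is not visible in this hunk; assuming it
keeps the two upper select bits of a three-bit field and drops the lowest
(DSP-reference) bit, e.g. 0x18 against a field at bits 4:2, the fixup behaves
as in this standalone sketch:

	#include <stdio.h>

	int main(void)
	{
		unsigned int at8035_mask = 0x18;	/* assumed value */
		unsigned int reg = 0x1c, mask = 0x1c;	/* generic parse result */

		/* The lowest select bit is cleared, the upper two survive */
		printf("clk_25m_reg:  0x%02x -> 0x%02x\n", reg, reg & at8035_mask);
		printf("clk_25m_mask: 0x%02x -> 0x%02x\n", mask, mask & at8035_mask);
		return 0;
	}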
-
-/* AR8030 and AR8035 share the same special mask for clk_25m */
-static int at8035_probe(struct phy_device *phydev)
-{
- int ret;
-
- ret = at803x_probe(phydev);
- if (ret)
- return ret;
-
- return at8035_parse_dt(phydev);
-}
-
-static int qca83xx_config_init(struct phy_device *phydev)
-{
- u8 switch_revision;
-
- switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
-
- switch (switch_revision) {
- case 1:
- /* For 100M waveform */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
- /* Turn on Gigabit clock */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
- break;
-
- case 2:
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
- fallthrough;
- case 4:
- phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
- break;
- }
-
-	/* Following the original QCA source code, set the port to prefer master */
- phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
-
- return 0;
-}
-
-static int qca8327_config_init(struct phy_device *phydev)
-{
-	/* The QCA8327 requires a DAC amplitude adjustment of +6% for 100M.
-	 * Disable it on init and enable it only at 100M speed, following
-	 * the original QCA source code.
-	 */
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN, 0);
-
- return qca83xx_config_init(phydev);
-}
-
-static void qca83xx_link_change_notify(struct phy_device *phydev)
-{
-	/* Set the DAC amplitude adjustment to +6% while a 100M link is running */
- if (phydev->state == PHY_RUNNING) {
- if (phydev->speed == SPEED_100)
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN,
- QCA8327_DEBUG_MANU_CTRL_EN);
- } else {
- /* Reset DAC Amplitude adjustment */
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
- QCA8327_DEBUG_MANU_CTRL_EN, 0);
- }
-}
-
-static int qca83xx_resume(struct phy_device *phydev)
-{
- int ret, val;
-
- /* Skip reset if not suspended */
- if (!phydev->suspended)
- return 0;
-
- /* Reinit the port, reset values set by suspend */
- qca83xx_config_init(phydev);
-
- /* Reset the port on port resume */
- phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
-
-	/* On resume from suspend the switch executes a reset and
-	 * restarts auto-negotiation. Wait for the reset to complete.
-	 */
- ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
- 50000, 600000, true);
- if (ret)
- return ret;
-
- usleep_range(1000, 2000);
-
- return 0;
-}
-
-static int qca83xx_suspend(struct phy_device *phydev)
-{
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
- AT803X_DEBUG_GATE_CLK_IN1000, 0);
-
- at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
- AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
- AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
-
- return 0;
-}
-
-static int qca8337_suspend(struct phy_device *phydev)
-{
-	/* Only the QCA8337 supports actual suspend. */
- genphy_suspend(phydev);
-
- return qca83xx_suspend(phydev);
-}
-
-static int qca8327_suspend(struct phy_device *phydev)
-{
- u16 mask = 0;
-
-	/* The QCA8327 causes port unreliability when PHY suspend
-	 * is set.
-	 */
- mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
- phy_modify(phydev, MII_BMCR, mask, 0);
-
- return qca83xx_suspend(phydev);
-}
-
-static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
-{
- int ret;
-
- /* Enable fast retrain */
- ret = genphy_c45_fast_retrain(phydev, true);
- if (ret)
- return ret;
-
- phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
- QCA808X_TOP_OPTION1_DATA);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
- QCA808X_MSE_THRESHOLD_20DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
- QCA808X_MSE_THRESHOLD_17DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
- QCA808X_MSE_THRESHOLD_27DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
- QCA808X_MSE_THRESHOLD_28DB_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
- QCA808X_MMD3_DEBUG_1_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
- QCA808X_MMD3_DEBUG_4_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
- QCA808X_MMD3_DEBUG_5_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
- QCA808X_MMD3_DEBUG_3_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
- QCA808X_MMD3_DEBUG_6_VALUE);
- phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
- QCA808X_MMD3_DEBUG_2_VALUE);
-
- return 0;
-}
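
The run of phy_write_mmd() calls above ignores individual write failures and
could equally be expressed table-driven, much like the struct mmd_val
sequences the Marvell 88Q2xxx driver uses further below. A hypothetical
refactor sketch (the struct and helper names are illustrative, not part of
the driver):

	struct qca_mmd_write {
		int devad;
		u32 regnum;
		u16 val;
	};

	static int qca808x_write_mmd_table(struct phy_device *phydev,
					   const struct qca_mmd_write *tbl,
					   size_t count)
	{
		size_t i;
		int ret;

		for (i = 0; i < count; i++) {
			ret = phy_write_mmd(phydev, tbl[i].devad,
					    tbl[i].regnum, tbl[i].val);
			if (ret < 0)
				return ret;
		}
		return 0;
	}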
-
-static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
-{
- u16 seed_value;
-
- if (!enable)
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
- QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
-
- seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
- QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
- FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
- QCA808X_MASTER_SLAVE_SEED_ENABLE);
-}
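
The reduced seed range matters because 1000BASE-T/2.5GBASE-T master/slave
resolution compares random seeds, the larger seed typically winning the
master role. A standalone toy model of that bias (not the actual IEEE
arbitration, just the intuition):

	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	int main(void)
	{
		srand(time(NULL));

		/* Drawing from a reduced range, as the driver does, makes
		 * losing the comparison (resolving as slave) the likely
		 * outcome against a full-range peer.
		 */
		unsigned int local = rand() % 16;	/* reduced range */
		unsigned int peer = rand() % 1024;	/* full range */

		printf("local %u vs peer %u -> local resolves as %s\n",
		       local, peer, local > peer ? "master" : "slave");
		return 0;
	}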
-
-static bool qca808x_is_prefer_master(struct phy_device *phydev)
-{
- return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
- (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
-}
-
-static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
-{
- return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
-}
-
-static int qca808x_config_init(struct phy_device *phydev)
-{
- int ret;
-
-	/* Activate ADC and VGA on 802.3az for 1000M and 100M links */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
- QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
- if (ret)
- return ret;
-
-	/* Adjust the 802.3az threshold for 1000M links */
- ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
- QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
- QCA808X_MMD3_AZ_TRAINING_VAL);
- if (ret)
- return ret;
-
- if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
-		/* Configure fast retrain for 2500M links */
- ret = qca808x_phy_fast_retrain_config(phydev);
- if (ret)
- return ret;
-
- ret = genphy_read_master_slave(phydev);
- if (ret < 0)
- return ret;
-
- if (!qca808x_is_prefer_master(phydev)) {
-			/* Enable the seed and configure a lower random seed
-			 * to make the PHY link up in slave mode.
-			 */
- ret = qca808x_phy_ms_seed_enable(phydev, true);
- if (ret)
- return ret;
- }
- }
-
-	/* Configure the ADC threshold to 100 mV for 10M links */
- return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
- QCA808X_ADC_THRESHOLD_MASK,
- QCA808X_ADC_THRESHOLD_100MV);
-}
-
-static int qca808x_read_status(struct phy_device *phydev)
-{
- struct at803x_ss_mask ss_mask = { 0 };
- int ret;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
- if (ret < 0)
- return ret;
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
- ret & MDIO_AN_10GBT_STAT_LP2_5G);
-
- ret = genphy_read_status(phydev);
- if (ret)
- return ret;
-
-	/* The qca8081 uses different bits for the speed value than the at803x */
- ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
- ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
- ret = at803x_read_specific_status(phydev, ss_mask);
- if (ret < 0)
- return ret;
-
- if (phydev->link) {
- if (phydev->speed == SPEED_2500)
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
- else
- phydev->interface = PHY_INTERFACE_MODE_SGMII;
- } else {
-		/* Generate the seed as a lower random value so the PHY easily
-		 * links up as SLAVE, except when a master/slave configuration
-		 * fault was detected or master mode is preferred.
-		 *
-		 * This code is not placed in link_change_notify because of the
-		 * corner case where the link partner is also a qca8081 PHY and
-		 * both seeds are configured to the same value: the link can't
-		 * come up, so no link change ever occurs.
-		 */
- if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
- if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
- qca808x_is_prefer_master(phydev)) {
- qca808x_phy_ms_seed_enable(phydev, false);
- } else {
- qca808x_phy_ms_seed_enable(phydev, true);
- }
- }
- }
-
- return 0;
-}
-
-static int qca808x_soft_reset(struct phy_device *phydev)
-{
- int ret;
-
- ret = genphy_soft_reset(phydev);
- if (ret < 0)
- return ret;
-
- if (qca808x_has_fast_retrain_or_slave_seed(phydev))
- ret = qca808x_phy_ms_seed_enable(phydev, true);
-
- return ret;
-}
-
-static bool qca808x_cdt_fault_length_valid(int cdt_code)
-{
- switch (cdt_code) {
- case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
- return true;
- default:
- return false;
- }
-}
-
-static int qca808x_cable_test_result_trans(int cdt_code)
-{
- switch (cdt_code) {
- case QCA808X_CDT_STATUS_STAT_NORMAL:
- return ETHTOOL_A_CABLE_RESULT_CODE_OK;
- case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
- case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
- return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
- case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
- return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
- case QCA808X_CDT_STATUS_STAT_FAIL:
- default:
- return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
- }
-}
-
-static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
- int result)
-{
- int val;
- u32 cdt_length_reg = 0;
-
- switch (pair) {
- case ETHTOOL_A_CABLE_PAIR_A:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
- break;
- case ETHTOOL_A_CABLE_PAIR_B:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
- break;
- case ETHTOOL_A_CABLE_PAIR_C:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
- break;
- case ETHTOOL_A_CABLE_PAIR_D:
- cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
- break;
- default:
- return -EINVAL;
- }
-
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
- if (val < 0)
- return val;
-
- if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
- val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
- else
- val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
-
- return at803x_cdt_fault_length(val);
-}
-
-static int qca808x_cable_test_start(struct phy_device *phydev)
-{
- int ret;
-
-	/* Perform CDT with the following configuration:
-	 * 1. disable hibernation.
-	 * 2. force the PHY into MDI mode.
-	 * 3. force the PHY into 1000BaseT operation.
-	 * 4. configure the test thresholds.
-	 */
-
- ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
- if (ret < 0)
- return ret;
-
- ret = at803x_config_mdix(phydev, ETH_TP_MDI);
- if (ret < 0)
- return ret;
-
-	/* Forcing 1000Base-T requires configuring both PMA/PMD and MII_BMCR */
- phydev->duplex = DUPLEX_FULL;
- phydev->speed = SPEED_1000;
- ret = genphy_c45_pma_setup_forced(phydev);
- if (ret < 0)
- return ret;
-
- ret = genphy_setup_forced(phydev);
- if (ret < 0)
- return ret;
-
- /* configure the thresholds for open, short, pair ok test */
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
- phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
-
- return 0;
-}
-
-static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
- u16 status)
-{
- int length, result;
- u16 pair_code;
-
- switch (pair) {
- case ETHTOOL_A_CABLE_PAIR_A:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_B:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_C:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
- break;
- case ETHTOOL_A_CABLE_PAIR_D:
- pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
- break;
- default:
- return -EINVAL;
- }
-
- result = qca808x_cable_test_result_trans(pair_code);
- ethnl_cable_test_result(phydev, pair, result);
-
- if (qca808x_cdt_fault_length_valid(pair_code)) {
- length = qca808x_cdt_fault_length(phydev, pair, result);
- ethnl_cable_test_fault_length(phydev, pair, length);
- }
-
- return 0;
-}
-
-static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
-{
- int ret, val;
-
- *finished = false;
-
- val = QCA808X_CDT_ENABLE_TEST |
- QCA808X_CDT_LENGTH_UNIT;
- ret = at803x_cdt_start(phydev, val);
- if (ret)
- return ret;
-
- ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
- if (ret)
- return ret;
-
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
- if (val < 0)
- return val;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
- if (ret)
- return ret;
-
- ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
- if (ret)
- return ret;
-
- *finished = true;
-
- return 0;
-}
-
-static int qca808x_get_features(struct phy_device *phydev)
-{
- int ret;
-
- ret = genphy_c45_pma_read_abilities(phydev);
- if (ret)
- return ret;
-
-	/* The autoneg ability is not reported in bit 3 of MMD7.1,
-	 * but it is supported by the qca808x PHY, so we add it here
-	 * manually.
-	 */
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
-
-	/* On the qca8081 1G version chip, the 2500baseT ability is still
-	 * reported in bit 0 of MMD1.21, so remove it manually when bit 0
-	 * of MMD7.0x901d identifies the chip as the 1G variant.
-	 */
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
- if (ret < 0)
- return ret;
-
- if (QCA808X_PHY_CHIP_TYPE_1G & ret)
- linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
-
- return 0;
-}
-
-static int qca808x_config_aneg(struct phy_device *phydev)
-{
- int phy_ctrl = 0;
- int ret;
-
- ret = at803x_prepare_config_aneg(phydev);
- if (ret)
- return ret;
-
-	/* In forced mode the MII_BMCR register also needs to be
-	 * configured, so the genphy configuration path is still needed.
-	 */
- if (phydev->autoneg == AUTONEG_DISABLE)
- genphy_c45_pma_setup_forced(phydev);
-
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
- phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
-
- ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
- if (ret < 0)
- return ret;
-
- return __genphy_config_aneg(phydev, ret);
-}
-
-static void qca808x_link_change_notify(struct phy_device *phydev)
-{
-	/* Assert the SGMII FIFO reset on link down and deassert it on link
-	 * up; the SerDes device address is always the PHY address plus 1.
-	 */
- mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
- MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
- QCA8081_PHY_FIFO_RSTN,
- phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
-}
-
-static struct phy_driver at803x_driver[] = {
-{
- /* Qualcomm Atheros AR8035 */
- PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
- .name = "Qualcomm Atheros AR8035",
- .flags = PHY_POLL_CABLE_TEST,
- .probe = at8035_probe,
- .config_aneg = at803x_config_aneg,
- .config_init = at803x_config_init,
- .soft_reset = genphy_soft_reset,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- /* PHY_GBIT_FEATURES */
- .read_status = at803x_read_status,
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .get_tunable = at803x_get_tunable,
- .set_tunable = at803x_set_tunable,
- .cable_test_start = at8031_cable_test_start,
- .cable_test_get_status = at8031_cable_test_get_status,
-}, {
- /* Qualcomm Atheros AR8030 */
- .phy_id = ATH8030_PHY_ID,
- .name = "Qualcomm Atheros AR8030",
- .phy_id_mask = AT8030_PHY_ID_MASK,
- .probe = at8035_probe,
- .config_init = at803x_config_init,
- .link_change_notify = at803x_link_change_notify,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
-}, {
- /* Qualcomm Atheros AR8031/AR8033 */
- PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
- .name = "Qualcomm Atheros AR8031/AR8033",
- .flags = PHY_POLL_CABLE_TEST,
- .probe = at8031_probe,
- .config_init = at8031_config_init,
- .config_aneg = at803x_config_aneg,
- .soft_reset = genphy_soft_reset,
- .set_wol = at8031_set_wol,
- .get_wol = at803x_get_wol,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .read_page = at803x_read_page,
- .write_page = at803x_write_page,
- .get_features = at803x_get_features,
- .read_status = at8031_read_status,
- .config_intr = at8031_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .get_tunable = at803x_get_tunable,
- .set_tunable = at803x_set_tunable,
- .cable_test_start = at8031_cable_test_start,
- .cable_test_get_status = at8031_cable_test_get_status,
-}, {
- /* Qualcomm Atheros AR8032 */
- PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
- .name = "Qualcomm Atheros AR8032",
- .probe = at803x_probe,
- .flags = PHY_POLL_CABLE_TEST,
- .config_init = at803x_config_init,
- .link_change_notify = at803x_link_change_notify,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at8032_cable_test_get_status,
-}, {
- /* ATHEROS AR9331 */
- PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
- .name = "Qualcomm Atheros AR9331 built-in PHY",
- .probe = at803x_probe,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .flags = PHY_POLL_CABLE_TEST,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at8032_cable_test_get_status,
- .read_status = at803x_read_status,
- .soft_reset = genphy_soft_reset,
- .config_aneg = at803x_config_aneg,
-}, {
- /* Qualcomm Atheros QCA9561 */
- PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
- .name = "Qualcomm Atheros QCA9561 built-in PHY",
- .probe = at803x_probe,
- .suspend = at803x_suspend,
- .resume = at803x_resume,
- .flags = PHY_POLL_CABLE_TEST,
- /* PHY_BASIC_FEATURES */
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .cable_test_start = at803x_cable_test_start,
- .cable_test_get_status = at8032_cable_test_get_status,
- .read_status = at803x_read_status,
- .soft_reset = genphy_soft_reset,
- .config_aneg = at803x_config_aneg,
-}, {
- /* QCA8337 */
- .phy_id = QCA8337_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8337 internal PHY",
- /* PHY_GBIT_FEATURES */
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca83xx_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8337_suspend,
- .resume = qca83xx_resume,
-}, {
- /* QCA8327-A from switch QCA8327-AL1A */
- .phy_id = QCA8327_A_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8327-A internal PHY",
- /* PHY_GBIT_FEATURES */
- .link_change_notify = qca83xx_link_change_notify,
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca8327_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8327_suspend,
- .resume = qca83xx_resume,
-}, {
- /* QCA8327-B from switch QCA8327-BL1A */
- .phy_id = QCA8327_B_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "Qualcomm Atheros 8327-B internal PHY",
- /* PHY_GBIT_FEATURES */
- .link_change_notify = qca83xx_link_change_notify,
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca8327_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = qca83xx_get_sset_count,
- .get_strings = qca83xx_get_strings,
- .get_stats = qca83xx_get_stats,
- .suspend = qca8327_suspend,
- .resume = qca83xx_resume,
-}, {
- /* Qualcomm QCA8081 */
- PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
- .name = "Qualcomm QCA8081",
- .flags = PHY_POLL_CABLE_TEST,
- .probe = at803x_probe,
- .config_intr = at803x_config_intr,
- .handle_interrupt = at803x_handle_interrupt,
- .get_tunable = at803x_get_tunable,
- .set_tunable = at803x_set_tunable,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
- .get_features = qca808x_get_features,
- .config_aneg = qca808x_config_aneg,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
- .read_status = qca808x_read_status,
- .config_init = qca808x_config_init,
- .soft_reset = qca808x_soft_reset,
- .cable_test_start = qca808x_cable_test_start,
- .cable_test_get_status = qca808x_cable_test_get_status,
- .link_change_notify = qca808x_link_change_notify,
-}, };
-
-module_phy_driver(at803x_driver);
-
-static struct mdio_device_id __maybe_unused atheros_tbl[] = {
- { ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
- { PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
- { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
- { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
- { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
- { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
- { }
-};
-
-MODULE_DEVICE_TABLE(mdio, atheros_tbl);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 312a8bb35d78..370e4ed45098 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -665,10 +665,11 @@ static int bcm54616s_config_aneg(struct phy_device *phydev)
static int bcm54616s_read_status(struct phy_device *phydev)
{
struct bcm54616s_phy_priv *priv = phydev->priv;
+ bool changed;
int err;
if (priv->mode_1000bx_en)
- err = genphy_c37_read_status(phydev);
+ err = genphy_c37_read_status(phydev, &changed);
else
err = genphy_read_status(phydev);
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index b7cb71817780..c3426a17e6d0 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
#define DP83822_PHY_ID 0x2000a240
#define DP83825S_PHY_ID 0x2000a140
@@ -34,6 +35,10 @@
#define MII_DP83822_GENCFG 0x465
#define MII_DP83822_SOR1 0x467
+/* DP83826 specific registers */
+#define MII_DP83826_VOD_CFG1 0x30b
+#define MII_DP83826_VOD_CFG2 0x30c
+
/* GENCFG */
#define DP83822_SIG_DET_LOW BIT(0)
@@ -95,6 +100,8 @@
#define DP83822_WOL_CLR_INDICATION BIT(11)
/* RCSR bits */
+#define DP83822_RMII_MODE_EN BIT(5)
+#define DP83822_RMII_MODE_SEL BIT(7)
#define DP83822_RGMII_MODE_EN BIT(9)
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
@@ -110,6 +117,19 @@
#define DP83822_RX_ER_STR_MASK GENMASK(9, 8)
#define DP83822_RX_ER_SHIFT 8
+/* DP83826: VOD_CFG1 & VOD_CFG2 */
+#define DP83826_VOD_CFG1_MINUS_MDIX_MASK GENMASK(13, 12)
+#define DP83826_VOD_CFG1_MINUS_MDI_MASK GENMASK(11, 6)
+#define DP83826_VOD_CFG2_MINUS_MDIX_MASK GENMASK(15, 12)
+#define DP83826_VOD_CFG2_PLUS_MDIX_MASK GENMASK(11, 6)
+#define DP83826_VOD_CFG2_PLUS_MDI_MASK GENMASK(5, 0)
+#define DP83826_CFG_DAC_MINUS_MDIX_5_TO_4 GENMASK(5, 4)
+#define DP83826_CFG_DAC_MINUS_MDIX_3_TO_0 GENMASK(3, 0)
+#define DP83826_CFG_DAC_PERCENT_PER_STEP 625
+#define DP83826_CFG_DAC_PERCENT_DEFAULT 10000
+#define DP83826_CFG_DAC_MINUS_DEFAULT 0x30
+#define DP83826_CFG_DAC_PLUS_DEFAULT 0x10
+
#define MII_DP83822_FIBER_ADVERTISE (ADVERTISED_TP | ADVERTISED_MII | \
ADVERTISED_FIBRE | \
ADVERTISED_Pause | ADVERTISED_Asym_Pause)
@@ -118,6 +138,8 @@ struct dp83822_private {
bool fx_signal_det_low;
int fx_enabled;
u16 fx_sd_enable;
+ u8 cfg_dac_minus;
+ u8 cfg_dac_plus;
};
static int dp83822_set_wol(struct phy_device *phydev,
@@ -233,7 +255,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
- /* Private data pointer is NULL on DP83825/26 */
+ /* Private data pointer is NULL on DP83825 */
if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
DP83822_DUP_MODE_CHANGE_INT_EN |
@@ -254,7 +276,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_PAGE_RX_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
- /* Private data pointer is NULL on DP83825/26 */
+ /* Private data pointer is NULL on DP83825 */
if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
@@ -380,7 +402,7 @@ static int dp83822_config_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
- int rgmii_delay;
+ int rgmii_delay = 0;
s32 rx_int_delay;
s32 tx_int_delay;
int err = 0;
@@ -390,30 +412,33 @@ static int dp83822_config_init(struct phy_device *phydev)
rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
true);
- if (rx_int_delay <= 0)
- rgmii_delay = 0;
- else
- rgmii_delay = DP83822_RX_CLK_SHIFT;
+ /* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */
+ if (rx_int_delay > 0)
+ rgmii_delay |= DP83822_RX_CLK_SHIFT;
tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
false);
+
+ /* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */
if (tx_int_delay <= 0)
- rgmii_delay &= ~DP83822_TX_CLK_SHIFT;
- else
rgmii_delay |= DP83822_TX_CLK_SHIFT;
- if (rgmii_delay) {
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_RCSR, rgmii_delay);
- if (err)
- return err;
- }
+ err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay);
+ if (err)
+ return err;
+
+ err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
- phy_set_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+ if (err)
+ return err;
} else {
- phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+ err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+
+ if (err)
+ return err;
}
if (dp83822->fx_enabled) {
@@ -474,6 +499,85 @@ static int dp83822_config_init(struct phy_device *phydev)
return dp8382x_disable_wol(phydev);
}
+static int dp83826_config_rmii_mode(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ const char *of_val;
+ int ret;
+
+ if (!device_property_read_string(dev, "ti,rmii-mode", &of_val)) {
+ if (strcmp(of_val, "master") == 0) {
+ ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_SEL);
+ } else if (strcmp(of_val, "slave") == 0) {
+ ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_SEL);
+ } else {
+ phydev_err(phydev, "Invalid value for ti,rmii-mode property (%s)\n",
+ of_val);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dp83826_config_init(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ u16 val, mask;
+ int ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ ret = phy_set_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_EN);
+ if (ret)
+ return ret;
+
+ ret = dp83826_config_rmii_mode(phydev);
+ if (ret)
+ return ret;
+ } else {
+ ret = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RMII_MODE_EN);
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->cfg_dac_minus != DP83826_CFG_DAC_MINUS_DEFAULT) {
+ val = FIELD_PREP(DP83826_VOD_CFG1_MINUS_MDI_MASK, dp83822->cfg_dac_minus) |
+ FIELD_PREP(DP83826_VOD_CFG1_MINUS_MDIX_MASK,
+ FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_5_TO_4,
+ dp83822->cfg_dac_minus));
+ mask = DP83826_VOD_CFG1_MINUS_MDIX_MASK | DP83826_VOD_CFG1_MINUS_MDI_MASK;
+ ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG1, mask, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(DP83826_VOD_CFG2_MINUS_MDIX_MASK,
+ FIELD_GET(DP83826_CFG_DAC_MINUS_MDIX_3_TO_0,
+ dp83822->cfg_dac_minus));
+ mask = DP83826_VOD_CFG2_MINUS_MDIX_MASK;
+ ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ if (ret)
+ return ret;
+ }
+
+ if (dp83822->cfg_dac_plus != DP83826_CFG_DAC_PLUS_DEFAULT) {
+ val = FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDIX_MASK, dp83822->cfg_dac_plus) |
+ FIELD_PREP(DP83826_VOD_CFG2_PLUS_MDI_MASK, dp83822->cfg_dac_plus);
+ mask = DP83826_VOD_CFG2_PLUS_MDIX_MASK | DP83826_VOD_CFG2_PLUS_MDI_MASK;
+ ret = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83826_VOD_CFG2, mask, val);
+ if (ret)
+ return ret;
+ }
+
+ return dp8382x_disable_wol(phydev);
+}
+
static int dp8382x_config_init(struct phy_device *phydev)
{
return dp8382x_disable_wol(phydev);
@@ -509,11 +613,44 @@ static int dp83822_of_init(struct phy_device *phydev)
return 0;
}
+
+static int dp83826_to_dac_minus_one_regval(int percent)
+{
+ int tmp = DP83826_CFG_DAC_PERCENT_DEFAULT - percent;
+
+ return tmp / DP83826_CFG_DAC_PERCENT_PER_STEP;
+}
+
+static int dp83826_to_dac_plus_one_regval(int percent)
+{
+ int tmp = percent - DP83826_CFG_DAC_PERCENT_DEFAULT;
+
+ return tmp / DP83826_CFG_DAC_PERCENT_PER_STEP;
+}
+
+static void dp83826_of_init(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ u32 val;
+
+ dp83822->cfg_dac_minus = DP83826_CFG_DAC_MINUS_DEFAULT;
+ if (!device_property_read_u32(dev, "ti,cfg-dac-minus-one-bp", &val))
+ dp83822->cfg_dac_minus += dp83826_to_dac_minus_one_regval(val);
+
+ dp83822->cfg_dac_plus = DP83826_CFG_DAC_PLUS_DEFAULT;
+ if (!device_property_read_u32(dev, "ti,cfg-dac-plus-one-bp", &val))
+ dp83822->cfg_dac_plus += dp83826_to_dac_plus_one_regval(val);
+}
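
The two helpers convert a device-tree value in basis points (hundredths of a
percent, 10000 == 100.00 %) into register steps of 6.25 % (625 bp). A worked
standalone example with assumed property values:

	#include <stdio.h>

	#define PER_STEP	625	/* 6.25 % per register step, in bp */
	#define DEFAULT_BP	10000	/* 100.00 % */
	#define MINUS_DEFAULT	0x30
	#define PLUS_DEFAULT	0x10

	int main(void)
	{
		int minus_bp = 9375;	/* ti,cfg-dac-minus-one-bp = <9375> */
		int plus_bp = 10625;	/* ti,cfg-dac-plus-one-bp = <10625> */

		printf("cfg_dac_minus = 0x%02x\n",	/* prints 0x31 */
		       MINUS_DEFAULT + (DEFAULT_BP - minus_bp) / PER_STEP);
		printf("cfg_dac_plus  = 0x%02x\n",	/* prints 0x11 */
		       PLUS_DEFAULT + (plus_bp - DEFAULT_BP) / PER_STEP);
		return 0;
	}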
#else
static int dp83822_of_init(struct phy_device *phydev)
{
return 0;
}
+
+static void dp83826_of_init(struct phy_device *phydev)
+{
+}
#endif /* CONFIG_OF_MDIO */
static int dp83822_read_straps(struct phy_device *phydev)
@@ -567,6 +704,22 @@ static int dp83822_probe(struct phy_device *phydev)
return 0;
}
+static int dp83826_probe(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822;
+
+ dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
+ GFP_KERNEL);
+ if (!dp83822)
+ return -ENOMEM;
+
+ phydev->priv = dp83822;
+
+ dp83826_of_init(phydev);
+
+ return 0;
+}
+
static int dp83822_suspend(struct phy_device *phydev)
{
int value;
@@ -610,6 +763,22 @@ static int dp83822_resume(struct phy_device *phydev)
.resume = dp83822_resume, \
}
+#define DP83826_PHY_DRIVER(_id, _name) \
+ { \
+ PHY_ID_MATCH_MODEL(_id), \
+ .name = (_name), \
+ /* PHY_BASIC_FEATURES */ \
+ .probe = dp83826_probe, \
+ .soft_reset = dp83822_phy_reset, \
+ .config_init = dp83826_config_init, \
+ .get_wol = dp83822_get_wol, \
+ .set_wol = dp83822_set_wol, \
+ .config_intr = dp83822_config_intr, \
+ .handle_interrupt = dp83822_handle_interrupt, \
+ .suspend = dp83822_suspend, \
+ .resume = dp83822_resume, \
+ }
+
#define DP8382X_PHY_DRIVER(_id, _name) \
{ \
PHY_ID_MATCH_MODEL(_id), \
@@ -628,8 +797,8 @@ static int dp83822_resume(struct phy_device *phydev)
static struct phy_driver dp83822_driver[] = {
DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
DP8382X_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
- DP8382X_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
- DP8382X_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
+ DP83826_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
+ DP83826_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
DP8382X_PHY_DRIVER(DP83825S_PHY_ID, "TI DP83825S"),
DP8382X_PHY_DRIVER(DP83825CM_PHY_ID, "TI DP83825M"),
DP8382X_PHY_DRIVER(DP83825CS_PHY_ID, "TI DP83825CS"),
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 5f08f9d38bd7..4120385c5a79 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -158,6 +158,7 @@
/* LED_DRV bits */
#define DP83867_LED_DRV_EN(x) BIT((x) * 4)
#define DP83867_LED_DRV_VAL(x) BIT((x) * 4 + 1)
+#define DP83867_LED_POLARITY(x) BIT((x) * 4 + 2)
#define DP83867_LED_FN(idx, val) (((val) & 0xf) << ((idx) * 4))
#define DP83867_LED_FN_MASK(idx) (0xf << ((idx) * 4))
@@ -1152,6 +1153,26 @@ static int dp83867_led_hw_control_get(struct phy_device *phydev, u8 index,
return 0;
}
+static int dp83867_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ /* Default active high */
+ u16 polarity = DP83867_LED_POLARITY(index);
+ u32 mode;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ polarity = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return phy_modify(phydev, DP83867_LEDCR2,
+ DP83867_LED_POLARITY(index), polarity);
+}
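
Each LED owns a nibble in LEDCR2: drive-enable at bit x*4, drive-value at
x*4+1 and, with this patch, polarity at x*4+2. A standalone check of the bit
layout for LED 2:

	#include <stdio.h>

	#define LED_DRV_EN(x)	(1u << ((x) * 4))
	#define LED_DRV_VAL(x)	(1u << ((x) * 4 + 1))
	#define LED_POLARITY(x)	(1u << ((x) * 4 + 2))

	int main(void)
	{
		/* LED 2 -> bits 8, 9, 10 -> 0x0100, 0x0200, 0x0400 */
		printf("LED2: en=0x%04x val=0x%04x pol=0x%04x\n",
		       LED_DRV_EN(2), LED_DRV_VAL(2), LED_POLARITY(2));
		return 0;
	}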
+
static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,
@@ -1184,6 +1205,7 @@ static struct phy_driver dp83867_driver[] = {
.led_hw_is_supported = dp83867_led_hw_is_supported,
.led_hw_control_set = dp83867_led_hw_control_set,
.led_hw_control_get = dp83867_led_hw_control_get,
+ .led_polarity_set = dp83867_led_polarity_set,
},
};
module_phy_driver(dp83867_driver);
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 1c3ff77de56b..6b4bd9883304 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -1,10 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Marvell 88Q2XXX automotive 100BASE-T1/1000BASE-T1 PHY driver
+ *
+ * Derived from Marvell Q222x API
+ *
+ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
*/
#include <linux/ethtool_netlink.h>
#include <linux/marvell_phy.h>
#include <linux/phy.h>
+#include <linux/hwmon.h>
+
+#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
#define MDIO_MMD_AN_MV_STAT 32769
#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
@@ -13,8 +20,38 @@
#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000
#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000
+#define MDIO_MMD_AN_MV_STAT2 32794
+#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800
+#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000
+#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000
+
+#define MDIO_MMD_PCS_MV_INT_EN 32784
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040
+#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080
+#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000
+
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT 32785
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP 0x0040
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_DOWN 0x0080
+#define MDIO_MMD_PCS_MV_GPIO_INT_STAT_100BT1_GEN 0x1000
+
+#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL 32787
+#define MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS 0x0800
+
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1 32833
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT 0x0001
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT 0x0040
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT_EN 0x0080
+
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR2 32834
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK 0xc000
+
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR3 32835
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK 0xff00
+#define MDIO_MMD_PCS_MV_TEMP_SENSOR3_MASK 0x00ff
+
#define MDIO_MMD_PCS_MV_100BT1_STAT1 33032
-#define MDIO_MMD_PCS_MV_100BT1_STAT1_IDLE_ERROR 0x00FF
+#define MDIO_MMD_PCS_MV_100BT1_STAT1_IDLE_ERROR 0x00ff
#define MDIO_MMD_PCS_MV_100BT1_STAT1_JABBER 0x0100
#define MDIO_MMD_PCS_MV_100BT1_STAT1_LINK 0x0200
#define MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_RX 0x1000
@@ -27,6 +64,71 @@
#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004
#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008
+#define MDIO_MMD_PCS_MV_100BT1_INT_EN 33042
+#define MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT 0x0400
+
+#define MDIO_MMD_PCS_MV_COPPER_INT_STAT 33043
+#define MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT 0x0400
+
+#define MDIO_MMD_PCS_MV_RX_STAT 33328
+
+#define MDIO_MMD_PCS_MV_TDR_RESET 65226
+#define MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST 0x1000
+
+#define MDIO_MMD_PCS_MV_TDR_OFF_SHORT_CABLE 65241
+
+#define MDIO_MMD_PCS_MV_TDR_OFF_LONG_CABLE 65242
+
+#define MDIO_MMD_PCS_MV_TDR_STATUS 65245
+#define MDIO_MMD_PCS_MV_TDR_STATUS_MASK 0x0003
+#define MDIO_MMD_PCS_MV_TDR_STATUS_OFF 0x0001
+#define MDIO_MMD_PCS_MV_TDR_STATUS_ON 0x0002
+#define MDIO_MMD_PCS_MV_TDR_STATUS_DIST_MASK 0xff00
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_MASK 0x00f0
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_SHORT 0x0030
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OPEN 0x00e0
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OK 0x0070
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_IN_PROGR 0x0080
+#define MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_NOISE 0x0050
+
+#define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF 65246
+
+struct mmd_val {
+ int devad;
+ u32 regnum;
+ u16 val;
+};
+
+static const struct mmd_val mv88q222x_revb0_init_seq0[] = {
+ { MDIO_MMD_PCS, 0x8033, 0x6801 },
+ { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER | MDIO_PMA_CTRL1_SPEED1000 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x48 },
+ { MDIO_MMD_PCS, 0xffe4, 0x6b6 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0 },
+ { MDIO_MMD_PCS, MDIO_CTRL1, 0x0 },
+};
+
+static const struct mmd_val mv88q222x_revb0_init_seq1[] = {
+ { MDIO_MMD_PCS, 0xfe79, 0x0 },
+ { MDIO_MMD_PCS, 0xfe07, 0x125a },
+ { MDIO_MMD_PCS, 0xfe09, 0x1288 },
+ { MDIO_MMD_PCS, 0xfe08, 0x2588 },
+ { MDIO_MMD_PCS, 0xfe11, 0x1105 },
+ { MDIO_MMD_PCS, 0xfe72, 0x042c },
+ { MDIO_MMD_PCS, 0xfbba, 0xcb2 },
+ { MDIO_MMD_PCS, 0xfbbb, 0xc4a },
+ { MDIO_MMD_AN, 0x8032, 0x2020 },
+ { MDIO_MMD_AN, 0x8031, 0xa28 },
+ { MDIO_MMD_AN, 0x8031, 0xc28 },
+ { MDIO_MMD_PCS, 0xffdb, 0xfc10 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x58 },
+ { MDIO_MMD_PCS, 0xfe79, 0x4 },
+ { MDIO_MMD_PCS, 0xfe5f, 0xe8 },
+ { MDIO_MMD_PCS, 0xfe05, 0x755c },
+};
+
static int mv88q2xxx_soft_reset(struct phy_device *phydev)
{
int ret;
@@ -50,20 +152,23 @@ static int mv88q2xxx_read_link_gbit(struct phy_device *phydev)
/* Read vendor specific Auto-Negotiation status register to get local
* and remote receiver status according to software initialization
- * guide.
+	 * guide. However, when not in polling mode, the local and remote
+	 * receiver status are not evaluated, in line with the Marvell
+	 * 88Q2xxx API.
*/
ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT);
if (ret < 0) {
return ret;
- } else if ((ret & MDIO_MMD_AN_MV_STAT_LOCAL_RX) &&
- (ret & MDIO_MMD_AN_MV_STAT_REMOTE_RX)) {
+ } else if (((ret & MDIO_MMD_AN_MV_STAT_LOCAL_RX) &&
+ (ret & MDIO_MMD_AN_MV_STAT_REMOTE_RX)) ||
+ !phy_polling_mode(phydev)) {
/* The link state is latched low so that momentary link
	 * drops can be detected. Do not double-read the status
	 * in polling mode, so that such short link drops can be detected,
	 * unless the link was already down.
*/
if (!phy_polling_mode(phydev) || !phydev->link) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_PCS_1000BT1_STAT);
if (ret < 0)
return ret;
else if (ret & MDIO_PCS_1000BT1_STAT_LINK)
@@ -71,7 +176,8 @@ static int mv88q2xxx_read_link_gbit(struct phy_device *phydev)
}
if (!link) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_STAT);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_PCS_1000BT1_STAT);
if (ret < 0)
return ret;
else if (ret & MDIO_PCS_1000BT1_STAT_LINK)
@@ -94,8 +200,20 @@ static int mv88q2xxx_read_link_100m(struct phy_device *phydev)
* the link was already down. In case we are not polling,
* we always read the realtime status.
*/
- if (!phy_polling_mode(phydev) || !phydev->link) {
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_100BT1_STAT1);
+ if (!phy_polling_mode(phydev)) {
+ phydev->link = false;
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_MMD_PCS_MV_100BT1_STAT2_LINK)
+ phydev->link = true;
+
+ return 0;
+ } else if (!phydev->link) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_STAT1);
if (ret < 0)
return ret;
else if (ret & MDIO_MMD_PCS_MV_100BT1_STAT1_LINK)
@@ -120,24 +238,90 @@ out:
static int mv88q2xxx_read_link(struct phy_device *phydev)
{
- int ret;
-
/* The 88Q2XXX PHYs do not have the PMA/PMD status register available,
* therefore we need to read the link status from the vendor specific
* registers depending on the speed.
*/
+
if (phydev->speed == SPEED_1000)
- ret = mv88q2xxx_read_link_gbit(phydev);
+ return mv88q2xxx_read_link_gbit(phydev);
+ else if (phydev->speed == SPEED_100)
+ return mv88q2xxx_read_link_100m(phydev);
+
+ phydev->link = false;
+ return 0;
+}
+
+static int mv88q2xxx_read_master_slave_state(struct phy_device *phydev)
+{
+ int ret;
+
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MDIO_MMD_AN_MV_STAT_LOCAL_MASTER)
+ phydev->master_slave_state = MASTER_SLAVE_STATE_MASTER;
else
- ret = mv88q2xxx_read_link_100m(phydev);
+ phydev->master_slave_state = MASTER_SLAVE_STATE_SLAVE;
+
+ return 0;
+}
+
+static int mv88q2xxx_read_aneg_speed(struct phy_device *phydev)
+{
+ int ret;
+
+ phydev->speed = SPEED_UNKNOWN;
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_MMD_AN_MV_STAT2);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & MDIO_MMD_AN_MV_STAT2_AN_RESOLVED))
+ return 0;
- return ret;
+ if (ret & MDIO_MMD_AN_MV_STAT2_100BT1)
+ phydev->speed = SPEED_100;
+ else if (ret & MDIO_MMD_AN_MV_STAT2_1000BT1)
+ phydev->speed = SPEED_1000;
+
+ return 0;
}
static int mv88q2xxx_read_status(struct phy_device *phydev)
{
int ret;
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ /* We have to get the negotiated speed first, otherwise we are
+ * not able to read the link.
+ */
+ ret = mv88q2xxx_read_aneg_speed(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = mv88q2xxx_read_link(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_c45_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_c45_baset1_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = mv88q2xxx_read_master_slave_state(phydev);
+ if (ret < 0)
+ return ret;
+
+ phy_resolve_aneg_linkmode(phydev);
+
+ return 0;
+ }
+
ret = mv88q2xxx_read_link(phydev);
if (ret < 0)
return ret;
@@ -166,7 +350,9 @@ static int mv88q2xxx_get_features(struct phy_device *phydev)
* sequence provided by Marvell. Disable it for now until a proper
* workaround is found or a new PHY revision is released.
*/
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+ if (phydev->drv->phy_id == MARVELL_PHY_ID_88Q2110)
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported);
return 0;
}
@@ -179,28 +365,29 @@ static int mv88q2xxx_config_aneg(struct phy_device *phydev)
if (ret)
return ret;
- return mv88q2xxx_soft_reset(phydev);
+ return phydev->drv->soft_reset(phydev);
}
static int mv88q2xxx_config_init(struct phy_device *phydev)
{
- int ret;
-
/* The 88Q2XXX PHYs do have the extended ability register available, but
	 * the register MDIO_PMA_EXTABLE, where they should signal it, does not
	 * work according to the specification. Therefore, we force it here.
*/
phydev->pma_extable = MDIO_PMA_EXTABLE_BT1;
- /* Read the current PHY configuration */
- ret = genphy_c45_read_pma(phydev);
- if (ret)
- return ret;
+	/* Configure the interrupt with default settings: the output is driven
+	 * low for an active interrupt and high when inactive.
+ */
+ if (phy_interrupt_is_valid(phydev))
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL,
+ MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS);
- return mv88q2xxx_config_aneg(phydev);
+ return 0;
}
-static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
+static int mv88q2xxx_get_sqi(struct phy_device *phydev)
{
int ret;
@@ -208,7 +395,8 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
/* Read the SQI from the vendor specific receiver status
* register
*/
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8230);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_RX_STAT);
if (ret < 0)
return ret;
@@ -218,7 +406,7 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
* but can be found in the Software Initialization Guide. Only
* revisions >= A0 are supported.
*/
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, 0xFC5D, 0x00FF, 0x00AC);
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, 0xfc5d, 0xff, 0xac);
if (ret < 0)
return ret;
@@ -227,14 +415,386 @@ static int mv88q2xxxx_get_sqi(struct phy_device *phydev)
return ret;
}
- return ret & 0x0F;
+ return ret & 0x0f;
}
-static int mv88q2xxxx_get_sqi_max(struct phy_device *phydev)
+static int mv88q2xxx_get_sqi_max(struct phy_device *phydev)
{
return 15;
}
+static int mv88q2xxx_config_intr(struct phy_device *phydev)
+{
+ int ret;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Enable interrupts for 1000BASE-T1 link up and down events
+ * and enable general interrupts for 100BASE-T1.
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_INT_EN,
+ MDIO_MMD_PCS_MV_INT_EN_LINK_UP |
+ MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN |
+ MDIO_MMD_PCS_MV_INT_EN_100BT1);
+ if (ret < 0)
+ return ret;
+
+ /* Enable interrupts for 100BASE-T1 link events */
+ return phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_INT_EN,
+ MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT);
+ } else {
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_INT_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ return phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_100BT1_INT_EN, 0);
+ }
+}
+
+static irqreturn_t mv88q2xxx_handle_interrupt(struct phy_device *phydev)
+{
+ bool trigger_machine = false;
+ int irq;
+
+	/* Before we can acknowledge the 100BT1 general interrupt, which is in
+	 * the 1000BT1 interrupt status register, we have to acknowledge any
+	 * interrupts that are related to it. Therefore we first read the
+	 * 100BT1 interrupt status register and then the 1000BT1 interrupt
+	 * status register.
+ */
+
+ irq = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_COPPER_INT_STAT);
+ if (irq < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Check link status for 100BT1 */
+ if (irq & MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT)
+ trigger_machine = true;
+
+ irq = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_GPIO_INT_STAT);
+ if (irq < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Check link status for 1000BT1 */
+ if ((irq & MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP) ||
+ (irq & MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_DOWN))
+ trigger_machine = true;
+
+ if (!trigger_machine)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static int mv88q2xxx_suspend(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Disable PHY interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_DISABLED;
+ ret = mv88q2xxx_config_intr(phydev);
+ if (ret)
+ return ret;
+ }
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+static int mv88q2xxx_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable PHY interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ ret = mv88q2xxx_config_intr(phydev);
+ if (ret)
+ return ret;
+ }
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+#if IS_ENABLED(CONFIG_HWMON)
+static const struct hwmon_channel_info * const mv88q2xxx_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_ALARM),
+ NULL
+};
+
+static umode_t mv88q2xxx_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_max:
+ return 0644;
+ case hwmon_temp_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int mv88q2xxx_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int ret;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(MDIO_MMD_PCS_MV_TEMP_SENSOR3_MASK, ret);
+ *val = (ret - 75) * 1000;
+ return 0;
+ case hwmon_temp_max:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
+ ret);
+ *val = (ret - 75) * 1000;
+ return 0;
+ case hwmon_temp_alarm:
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR1);
+ if (ret < 0)
+ return ret;
+
+ *val = !!(ret & MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mv88q2xxx_hwmon_write(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_temp_max:
+		val = clamp_val(val, -75000, 180000);
+ val = (val / 1000) + 75;
+ val = FIELD_PREP(MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
+ val);
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR3_INT_THRESH_MASK,
+ val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops mv88q2xxx_hwmon_hwmon_ops = {
+ .is_visible = mv88q2xxx_hwmon_is_visible,
+ .read = mv88q2xxx_hwmon_read,
+ .write = mv88q2xxx_hwmon_write,
+};
+
+static const struct hwmon_chip_info mv88q2xxx_hwmon_chip_info = {
+ .ops = &mv88q2xxx_hwmon_hwmon_ops,
+ .info = mv88q2xxx_hwmon_info,
+};
+
+static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct device *hwmon;
+ char *hwmon_name;
+ int ret;
+
+ /* Enable temperature sense */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TEMP_SENSOR2,
+ MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0);
+ if (ret < 0)
+ return ret;
+
+ hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev));
+ if (IS_ERR(hwmon_name))
+ return PTR_ERR(hwmon_name);
+
+ hwmon = devm_hwmon_device_register_with_info(dev,
+ hwmon_name,
+ phydev,
+ &mv88q2xxx_hwmon_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon);
+}
+
+#else
+static int mv88q2xxx_hwmon_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
+
+static int mv88q2xxx_probe(struct phy_device *phydev)
+{
+ return mv88q2xxx_hwmon_probe(phydev);
+}
+
+static int mv88q222x_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable RESET of DCL */
+ if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000) {
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x48);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_1000BT1_CTRL,
+ MDIO_PCS_1000BT1_CTRL_RESET);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, 0xffe4, 0xc);
+ if (ret < 0)
+ return ret;
+
+ /* Disable RESET of DCL */
+ if (phydev->autoneg == AUTONEG_ENABLE || phydev->speed == SPEED_1000)
+ return phy_write_mmd(phydev, MDIO_MMD_PCS, 0xfe1b, 0x58);
+
+ return 0;
+}
+
+static int mv88q222x_revb0_config_init(struct phy_device *phydev)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq0); i++) {
+ ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq0[i].devad,
+ mv88q222x_revb0_init_seq0[i].regnum,
+ mv88q222x_revb0_init_seq0[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ usleep_range(5000, 10000);
+
+ for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq1); i++) {
+ ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq1[i].devad,
+ mv88q222x_revb0_init_seq1[i].regnum,
+ mv88q222x_revb0_init_seq1[i].val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return mv88q2xxx_config_init(phydev);
+}
+
+static int mv88q222x_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF, 0x0058);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TDR_OFF_LONG_CABLE, 0x00eb);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ MDIO_MMD_PCS_MV_TDR_OFF_SHORT_CABLE, 0x010e);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_RESET,
+ 0x0d90);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_STATUS,
+ MDIO_MMD_PCS_MV_TDR_STATUS_ON);
+ if (ret < 0)
+ return ret;
+
+ /* According to the Marvell API the test is finished within 500 ms */
+ msleep(500);
+
+ return 0;
+}
+
+static int mv88q222x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret, status;
+ u32 dist;
+
+ status = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_STATUS);
+ if (status < 0)
+ return status;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_TDR_RESET,
+ MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST | 0xd90);
+ if (ret < 0)
+ return ret;
+
+ /* Test could not be finished */
+ if (FIELD_GET(MDIO_MMD_PCS_MV_TDR_STATUS_MASK, status) !=
+ MDIO_MMD_PCS_MV_TDR_STATUS_OFF)
+ return -ETIMEDOUT;
+
+ *finished = true;
+ /* Fault length reported in meters, convert to centimeters */
+ dist = FIELD_GET(MDIO_MMD_PCS_MV_TDR_STATUS_DIST_MASK, status) * 100;
+ switch (status & MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_MASK) {
+ case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OPEN:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ dist);
+ break;
+ case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_SHORT:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ dist);
+ break;
+ case MDIO_MMD_PCS_MV_TDR_STATUS_VCT_STAT_OK:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_OK);
+ break;
+ default:
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
+ }
+
+ return 0;
+}
+
static struct phy_driver mv88q2xxx_driver[] = {
{
.phy_id = MARVELL_PHY_ID_88Q2110,
@@ -246,8 +806,29 @@ static struct phy_driver mv88q2xxx_driver[] = {
.read_status = mv88q2xxx_read_status,
.soft_reset = mv88q2xxx_soft_reset,
.set_loopback = genphy_c45_loopback,
- .get_sqi = mv88q2xxxx_get_sqi,
- .get_sqi_max = mv88q2xxxx_get_sqi_max,
+ .get_sqi = mv88q2xxx_get_sqi,
+ .get_sqi_max = mv88q2xxx_get_sqi_max,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0),
+ .name = "mv88q2220",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = mv88q2xxx_probe,
+ .get_features = mv88q2xxx_get_features,
+ .config_aneg = mv88q2xxx_config_aneg,
+ .aneg_done = genphy_c45_aneg_done,
+ .config_init = mv88q222x_revb0_config_init,
+ .read_status = mv88q2xxx_read_status,
+ .soft_reset = mv88q222x_soft_reset,
+ .config_intr = mv88q2xxx_config_intr,
+ .handle_interrupt = mv88q2xxx_handle_interrupt,
+ .set_loopback = genphy_c45_loopback,
+ .cable_test_start = mv88q222x_cable_test_start,
+ .cable_test_get_status = mv88q222x_cable_test_get_status,
+ .get_sqi = mv88q2xxx_get_sqi,
+ .get_sqi_max = mv88q2xxx_get_sqi_max,
+ .suspend = mv88q2xxx_suspend,
+ .resume = mv88q2xxx_resume,
},
};
@@ -255,6 +836,7 @@ module_phy_driver(mv88q2xxx_driver);
static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
{ MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0), },
{ /*sentinel*/ }
};
MODULE_DEVICE_TABLE(mdio, mv88q2xxx_tbl);
diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c
index e3aa30dad2e6..b88398e6872b 100644
--- a/drivers/net/phy/marvell-88x2222.c
+++ b/drivers/net/phy/marvell-88x2222.c
@@ -9,12 +9,10 @@
*/
#include <linux/module.h>
#include <linux/phy.h>
-#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/mdio.h>
#include <linux/marvell_phy.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/sfp.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index eba652a4c1d8..42ed013385bf 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -919,7 +919,10 @@ static int m88e1111_config_init_1000basex(struct phy_device *phydev)
if (extsr < 0)
return extsr;
- /* If using copper mode, ensure 1000BaseX auto-negotiation is enabled */
+ /* If using copper mode, ensure 1000BaseX auto-negotiation is enabled.
+ * FIXME: this does not actually enable 1000BaseX auto-negotiation if
+ * it was previously disabled in the Fiber BMCR!
+ */
mode = extsr & MII_M1111_HWCFG_MODE_MASK;
if (mode == MII_M1111_HWCFG_MODE_COPPER_1000X_NOAN) {
err = phy_modify(phydev, MII_M1111_PHY_EXT_SR,
@@ -1461,7 +1464,7 @@ static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs)
static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
{
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
int val, ret;
if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index afbad1ad8683..8b9ead76e40e 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -13,7 +13,6 @@
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -25,7 +24,6 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/reset.h>
@@ -459,19 +457,34 @@ EXPORT_SYMBOL(of_mdio_find_bus);
* found, set the of_node pointer for the mdio device. This allows
* auto-probed phy devices to be supplied with information passed in
* via DT.
+ * If a PHY package node is found, the PHY is also searched for within it.
*/
-static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
- struct mdio_device *mdiodev)
+static int of_mdiobus_find_phy(struct device *dev, struct mdio_device *mdiodev,
+ struct device_node *np)
{
- struct device *dev = &mdiodev->dev;
struct device_node *child;
- if (dev->of_node || !bus->dev.of_node)
- return;
-
- for_each_available_child_of_node(bus->dev.of_node, child) {
+ for_each_available_child_of_node(np, child) {
int addr;
+ if (of_node_name_eq(child, "ethernet-phy-package")) {
+ /* Validate PHY package reg presence */
+ if (!of_property_present(child, "reg")) {
+ of_node_put(child);
+ return -EINVAL;
+ }
+
+ if (!of_mdiobus_find_phy(dev, mdiodev, child)) {
+ /* The refcount for the PHY package will be
+			 * incremented later when the PHY joins the package.
+ */
+ of_node_put(child);
+ return 0;
+ }
+
+ continue;
+ }
+
addr = of_mdio_parse_addr(dev, child);
if (addr < 0)
continue;
@@ -481,9 +494,22 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
/* The refcount on "child" is passed to the mdio
* device. Do _not_ use of_node_put(child) here.
*/
- return;
+ return 0;
}
}
+
+ return -ENODEV;
+}
+
+static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
+ struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+
+ if (dev->of_node || !bus->dev.of_node)
+ return;
+
+ of_mdiobus_find_phy(dev, mdiodev, bus->dev.of_node);
}
#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,
@@ -1398,7 +1424,7 @@ static const struct attribute_group *mdio_bus_dev_groups[] = {
NULL,
};
-struct bus_type mdio_bus_type = {
+const struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.dev_groups = mdio_bus_dev_groups,
.match = mdio_bus_match,
diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c
index 69b829e6ab35..7fd3377dbd79 100644
--- a/drivers/net/phy/mdio_devres.c
+++ b/drivers/net/phy/mdio_devres.c
@@ -131,4 +131,5 @@ int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
EXPORT_SYMBOL(__devm_of_mdiobus_register);
#endif /* CONFIG_OF_MDIO */
+MODULE_DESCRIPTION("Network MDIO bus devres helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index dad720138baa..8b8634600c51 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -114,12 +114,25 @@
#define LAN8814_INTR_CTRL_REG_POLARITY BIT(1)
#define LAN8814_INTR_CTRL_REG_INTR_ENABLE BIT(0)
+#define LAN8814_EEE_STATE 0x38
+#define LAN8814_EEE_STATE_MASK2P5P BIT(10)
+
+#define LAN8814_PD_CONTROLS 0x9d
+#define LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK GENMASK(3, 0)
+#define LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL 0xb
+
/* Represents 1ppm adjustment in 2^32 format, where
 * each nsec contains 4 clock cycles.
 * The value is calculated as follows: (1/1000000)/((2^-32)/4)
*/
#define LAN8814_1PPM_FORMAT 17179
+/* Represents 1ppm adjustment in 2^32 format, where
+ * each nsec contains 8 clock cycles.
+ * The value is calculated as follows: (1/1000000)/((2^-32)/8)
+ */
+#define LAN8841_1PPM_FORMAT 34360
+
#define PTP_RX_VERSION 0x0248
#define PTP_TX_VERSION 0x0288
#define PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8)
@@ -154,11 +167,13 @@
#define PTP_CMD_CTL_PTP_LTC_STEP_SEC_ BIT(5)
#define PTP_CMD_CTL_PTP_LTC_STEP_NSEC_ BIT(6)
+#define PTP_CLOCK_SET_SEC_HI 0x0205
#define PTP_CLOCK_SET_SEC_MID 0x0206
#define PTP_CLOCK_SET_SEC_LO 0x0207
#define PTP_CLOCK_SET_NS_HI 0x0208
#define PTP_CLOCK_SET_NS_LO 0x0209
+#define PTP_CLOCK_READ_SEC_HI 0x0229
#define PTP_CLOCK_READ_SEC_MID 0x022A
#define PTP_CLOCK_READ_SEC_LO 0x022B
#define PTP_CLOCK_READ_NS_HI 0x022C
@@ -2592,35 +2607,31 @@ static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb
}
static void lan8814_ptp_clock_set(struct phy_device *phydev,
- u32 seconds, u32 nano_seconds)
+ time64_t sec, u32 nsec)
{
- u32 sec_low, sec_high, nsec_low, nsec_high;
-
- sec_low = seconds & 0xffff;
- sec_high = (seconds >> 16) & 0xffff;
- nsec_low = nano_seconds & 0xffff;
- nsec_high = (nano_seconds >> 16) & 0x3fff;
-
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, sec_low);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, sec_high);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, nsec_low);
- lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, nsec_high);
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_LO, lower_16_bits(sec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_MID, upper_16_bits(sec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_SEC_HI, upper_32_bits(sec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_LO, lower_16_bits(nsec));
+ lanphy_write_page_reg(phydev, 4, PTP_CLOCK_SET_NS_HI, upper_16_bits(nsec));
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_LOAD_);
}
static void lan8814_ptp_clock_get(struct phy_device *phydev,
- u32 *seconds, u32 *nano_seconds)
+ time64_t *sec, u32 *nsec)
{
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_CLOCK_READ_);
- *seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
- *seconds = (*seconds << 16) |
- lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
+ *sec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_HI);
+ *sec <<= 16;
+ *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_MID);
+ *sec <<= 16;
+ *sec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_SEC_LO);
- *nano_seconds = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
- *nano_seconds = ((*nano_seconds & 0x3fff) << 16) |
- lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
+ *nsec = lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_HI);
+ *nsec <<= 16;
+ *nsec |= lanphy_read_page_reg(phydev, 4, PTP_CLOCK_READ_NS_LO);
}
static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
@@ -2630,7 +2641,7 @@ static int lan8814_ptpci_gettime64(struct ptp_clock_info *ptpci,
ptp_clock_info);
struct phy_device *phydev = shared->phydev;
u32 nano_seconds;
- u32 seconds;
+ time64_t seconds;
mutex_lock(&shared->shared_lock);
lan8814_ptp_clock_get(phydev, &seconds, &nano_seconds);
@@ -2660,38 +2671,37 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
{
u32 nano_seconds_step;
u64 abs_time_step_ns;
- u32 unsigned_seconds;
+ time64_t set_seconds;
u32 nano_seconds;
u32 remainder;
s32 seconds;
if (time_step_ns > 15000000000LL) {
/* convert to clock set */
- lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
- unsigned_seconds += div_u64_rem(time_step_ns, 1000000000LL,
- &remainder);
+ lan8814_ptp_clock_get(phydev, &set_seconds, &nano_seconds);
+ set_seconds += div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
nano_seconds += remainder;
if (nano_seconds >= 1000000000) {
- unsigned_seconds++;
+ set_seconds++;
nano_seconds -= 1000000000;
}
- lan8814_ptp_clock_set(phydev, unsigned_seconds, nano_seconds);
+ lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
return;
} else if (time_step_ns < -15000000000LL) {
/* convert to clock set */
time_step_ns = -time_step_ns;
- lan8814_ptp_clock_get(phydev, &unsigned_seconds, &nano_seconds);
- unsigned_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
- &remainder);
+ lan8814_ptp_clock_get(phydev, &set_seconds, &nano_seconds);
+ set_seconds -= div_u64_rem(time_step_ns, 1000000000LL,
+ &remainder);
nano_seconds_step = remainder;
if (nano_seconds < nano_seconds_step) {
- unsigned_seconds--;
+ set_seconds--;
nano_seconds += 1000000000;
}
nano_seconds -= nano_seconds_step;
- lan8814_ptp_clock_set(phydev, unsigned_seconds,
- nano_seconds);
+ lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
return;
}
@@ -3285,6 +3295,33 @@ static int lan8814_release_coma_mode(struct phy_device *phydev)
return 0;
}
+static void lan8814_clear_2psp_bit(struct phy_device *phydev)
+{
+ u16 val;
+
+	/* It was noticed that when traffic is passing through the PHY and the
+	 * cable is removed, the LED stays on even though there is no
+	 * link.
+ */
+ val = lanphy_read_page_reg(phydev, 2, LAN8814_EEE_STATE);
+ val &= ~LAN8814_EEE_STATE_MASK2P5P;
+ lanphy_write_page_reg(phydev, 2, LAN8814_EEE_STATE, val);
+}
+
+static void lan8814_update_meas_time(struct phy_device *phydev)
+{
+ u16 val;
+
+	/* Setting the measurement time to a value of 0xb allows cables
+	 * longer than 100m to be used. This configuration can be used
+	 * regardless of the mode of operation of the PHY.
+ */
+ val = lanphy_read_page_reg(phydev, 1, LAN8814_PD_CONTROLS);
+ val &= ~LAN8814_PD_CONTROLS_PD_MEAS_TIME_MASK;
+ val |= LAN8814_PD_CONTROLS_PD_MEAS_TIME_VAL;
+ lanphy_write_page_reg(phydev, 1, LAN8814_PD_CONTROLS, val);
+}
+
static int lan8814_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -3321,6 +3358,10 @@ static int lan8814_probe(struct phy_device *phydev)
lan8814_ptp_init(phydev);
+ /* Errata workarounds */
+ lan8814_clear_2psp_bit(phydev);
+ lan8814_update_meas_time(phydev);
+
return 0;
}
@@ -4118,8 +4159,8 @@ static int lan8841_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
faster = false;
}
- rate = LAN8814_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
- rate += (LAN8814_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
+ rate = LAN8841_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
+ rate += (LAN8841_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
mutex_lock(&ptp_priv->ptp_lock);
phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_RATE_ADJ_HI,
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index ea1073adc5a1..b2d36a3a96f1 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -274,6 +274,14 @@ static int gpy_config_init(struct phy_device *phydev)
return ret < 0 ? ret : 0;
}
+static int gpy21x_config_init(struct phy_device *phydev)
+{
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, phydev->possible_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII, phydev->possible_interfaces);
+
+ return gpy_config_init(phydev);
+}
+
static int gpy_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
@@ -867,7 +875,7 @@ static struct phy_driver gpy_drivers[] = {
.phy_id_mask = PHY_ID_GPY21xB_MASK,
.name = "Maxlinear Ethernet GPY211B",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -884,7 +892,7 @@ static struct phy_driver gpy_drivers[] = {
PHY_ID_MATCH_MODEL(PHY_ID_GPY211C),
.name = "Maxlinear Ethernet GPY211C",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -902,7 +910,7 @@ static struct phy_driver gpy_drivers[] = {
.phy_id_mask = PHY_ID_GPY21xB_MASK,
.name = "Maxlinear Ethernet GPY212B",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -919,7 +927,7 @@ static struct phy_driver gpy_drivers[] = {
PHY_ID_MATCH_MODEL(PHY_ID_GPY212C),
.name = "Maxlinear Ethernet GPY212C",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -937,7 +945,7 @@ static struct phy_driver gpy_drivers[] = {
.phy_id_mask = PHY_ID_GPYx15B_MASK,
.name = "Maxlinear Ethernet GPY215B",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -954,7 +962,7 @@ static struct phy_driver gpy_drivers[] = {
PHY_ID_MATCH_MODEL(PHY_ID_GPY215C),
.name = "Maxlinear Ethernet GPY215C",
.get_features = genphy_c45_pma_read_abilities,
- .config_init = gpy_config_init,
+ .config_init = gpy21x_config_init,
.probe = gpy_probe,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 747d14bf152c..5695935fdce9 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -208,7 +208,8 @@ static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
adv_l_mask = MDIO_AN_T1_ADV_L_FORCE_MS | MDIO_AN_T1_ADV_L_PAUSE_CAP |
MDIO_AN_T1_ADV_L_PAUSE_ASYM;
- adv_m_mask = MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
+ adv_m_mask = MDIO_AN_T1_ADV_M_1000BT1 | MDIO_AN_T1_ADV_M_100BT1 |
+ MDIO_AN_T1_ADV_M_MST | MDIO_AN_T1_ADV_M_B10L;
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
@@ -706,6 +707,22 @@ int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv)
changed = 1;
}
+ if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
+ val = linkmode_to_mii_eee_cap2_t(adv);
+
+ /* IEEE 802.3-2022 45.2.7.16 EEE advertisement 2
+ * (Register 7.62)
+ */
+ val = phy_modify_mmd_changed(phydev, MDIO_MMD_AN,
+ MDIO_AN_EEE_ADV2,
+ MDIO_EEE_2_5GT | MDIO_EEE_5GT,
+ val);
+ if (val < 0)
+ return val;
+ if (val > 0)
+ changed = 1;
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported_eee)) {
val = linkmode_adv_to_mii_10base_t1_t(adv);
@@ -745,6 +762,17 @@ int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv)
mii_eee_cap1_mod_linkmode_t(adv, val);
}
+ if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
+ /* IEEE 802.3-2022 45.2.7.16 EEE advertisement 2
+ * (Register 7.62)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV2);
+ if (val < 0)
+ return val;
+
+ mii_eee_cap2_mod_linkmode_adv_t(adv, val);
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported_eee)) {
/* IEEE 802.3cg-2019 45.2.7.25 10BASE-T1 AN control register
@@ -781,6 +809,17 @@ static int genphy_c45_read_eee_lpa(struct phy_device *phydev,
mii_eee_cap1_mod_linkmode_t(lpa, val);
}
+ if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) {
+ /* IEEE 802.3-2022 45.2.7.17 EEE link partner ability 2
+ * (Register 7.63)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE2);
+ if (val < 0)
+ return val;
+
+ mii_eee_cap2_mod_linkmode_adv_t(lpa, val);
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported_eee)) {
/* IEEE 802.3cg-2019 45.2.7.26 10BASE-T1 AN status register
@@ -831,6 +870,30 @@ static int genphy_c45_read_eee_cap1(struct phy_device *phydev)
}
/**
+ * genphy_c45_read_eee_cap2 - read supported EEE link modes from register 3.21
+ * @phydev: target phy_device struct
+ */
+static int genphy_c45_read_eee_cap2(struct phy_device *phydev)
+{
+ int val;
+
+ /* IEEE 802.3-2022 45.2.3.11 EEE control and capability 2
+ * (Register 3.21)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE2);
+ if (val < 0)
+ return val;
+
+	/* IEEE 802.3-2022 45.2.3.11 says 9 bits are reserved, so a read of
+	 * 0xffff means the register is not implemented; skip it.
+	 */
+ if (val == 0xffff)
+ return 0;
+
+ mii_eee_cap2_mod_linkmode_sup_t(phydev->supported_eee, val);
+
+ return 0;
+}
+
+/**
* genphy_c45_read_eee_abilities - read supported EEE link modes
* @phydev: target phy_device struct
*/
@@ -848,6 +911,13 @@ int genphy_c45_read_eee_abilities(struct phy_device *phydev)
return val;
}
+ /* Same for cap2 (3.21) */
+ if (linkmode_intersects(phydev->supported, PHY_EEE_CAP2_FEATURES)) {
+ val = genphy_c45_read_eee_cap2(phydev);
+ if (val)
+ return val;
+ }
+
if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
phydev->supported)) {
/* IEEE 802.3cg-2019 45.2.1.186b 10BASE-T1L PMA status register
@@ -1443,17 +1513,17 @@ EXPORT_SYMBOL(genphy_c45_eee_is_active);
/**
* genphy_c45_ethtool_get_eee - get EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
* Description: it reports the Supported/Advertisement/LP Advertisement
* capabilities.
*/
int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
- struct ethtool_eee *data)
+ struct ethtool_keee *data)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp) = {};
- bool overflow = false, is_enabled;
+ bool is_enabled;
int ret;
ret = genphy_c45_eee_is_active(phydev, adv, lp, &is_enabled);
@@ -1462,17 +1532,9 @@ int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
data->eee_enabled = is_enabled;
data->eee_active = ret;
-
- if (!ethtool_convert_link_mode_to_legacy_u32(&data->supported,
- phydev->supported_eee))
- overflow = true;
- if (!ethtool_convert_link_mode_to_legacy_u32(&data->advertised, adv))
- overflow = true;
- if (!ethtool_convert_link_mode_to_legacy_u32(&data->lp_advertised, lp))
- overflow = true;
-
- if (overflow)
- phydev_warn(phydev, "Not all supported or advertised EEE link modes were passed to the user space\n");
+ linkmode_copy(data->supported, phydev->supported_eee);
+ linkmode_copy(data->advertised, adv);
+ linkmode_copy(data->lp_advertised, lp);
return 0;
}
@@ -1481,50 +1543,53 @@ EXPORT_SYMBOL(genphy_c45_ethtool_get_eee);
/**
* genphy_c45_ethtool_set_eee - set EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
* Description: sets the Supported/Advertisement/LP Advertisement
* capabilities. If eee_enabled is false, no links modes are
* advertised, but the previously advertised link modes are
* retained. This allows EEE to be enabled/disabled in a
* non-destructive way.
+ * Returns either an error code, 0 if there was no change, or a positive
+ * value if there was a change that triggered auto-negotiation.
*/
int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
- struct ethtool_eee *data)
+ struct ethtool_keee *data)
{
int ret;
if (data->eee_enabled) {
- if (data->advertised) {
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
+ unsigned long *adv = data->advertised;
- ethtool_convert_legacy_u32_to_link_mode(adv,
- data->advertised);
- linkmode_andnot(adv, adv, phydev->supported_eee);
- if (!linkmode_empty(adv)) {
+ if (!linkmode_empty(adv)) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
+
+ if (linkmode_andnot(tmp, adv, phydev->supported_eee)) {
phydev_warn(phydev, "At least some EEE link modes are not supported.\n");
return -EINVAL;
}
-
- ethtool_convert_legacy_u32_to_link_mode(phydev->advertising_eee,
- data->advertised);
} else {
- linkmode_copy(phydev->advertising_eee,
- phydev->supported_eee);
+ adv = phydev->supported_eee;
}
- phydev->eee_enabled = true;
- } else {
- phydev->eee_enabled = false;
+ linkmode_copy(phydev->advertising_eee, adv);
}
+ phydev->eee_enabled = data->eee_enabled;
+
ret = genphy_c45_an_config_eee_aneg(phydev);
- if (ret < 0)
- return ret;
- if (ret > 0)
- return phy_restart_aneg(phydev);
+ if (ret > 0) {
+ ret = phy_restart_aneg(phydev);
+ if (ret < 0)
+ return ret;
- return 0;
+		/* explicitly return 1, otherwise the (ret > 0) value would be
+		 * overwritten by phy_restart_aneg().
+ */
+ return 1;
+ }
+
+ return ret;
}
EXPORT_SYMBOL(genphy_c45_ethtool_set_eee);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3376e58e2b88..c4236564c1cd 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -983,9 +983,17 @@ static int phy_check_link_status(struct phy_device *phydev)
if (phydev->link && phydev->state != PHY_RUNNING) {
phy_check_downshift(phydev);
phydev->state = PHY_RUNNING;
+ err = genphy_c45_eee_is_active(phydev,
+ NULL, NULL, NULL);
+ if (err <= 0)
+ phydev->enable_tx_lpi = false;
+ else
+ phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled;
+
phy_link_up(phydev);
} else if (!phydev->link && phydev->state != PHY_NOLINK) {
phydev->state = PHY_NOLINK;
+ phydev->enable_tx_lpi = false;
phy_link_down(phydev);
}
@@ -1290,7 +1298,6 @@ int phy_disable_interrupts(struct phy_device *phydev)
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
struct phy_device *phydev = phy_dat;
- struct phy_driver *drv = phydev->drv;
irqreturn_t ret;
/* Wakeup interrupts may occur during a system sleep transition.
@@ -1316,7 +1323,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
}
mutex_lock(&phydev->lock);
- ret = drv->handle_interrupt(phydev);
+ ret = phydev->drv->handle_interrupt(phydev);
mutex_unlock(&phydev->lock);
return ret;
@@ -1632,12 +1639,12 @@ EXPORT_SYMBOL(phy_get_eee_err);
/**
* phy_ethtool_get_eee - get EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
- * Description: it reportes the Supported/Advertisement/LP Advertisement
- * capabilities.
+ * Description: reports the Supported/Advertisement/LP Advertisement
+ * capabilities, etc.
*/
-int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
int ret;
@@ -1646,6 +1653,7 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
mutex_lock(&phydev->lock);
ret = genphy_c45_ethtool_get_eee(phydev, data);
+ eeecfg_to_eee(data, &phydev->eee_cfg);
mutex_unlock(&phydev->lock);
return ret;
@@ -1653,13 +1661,43 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
EXPORT_SYMBOL(phy_ethtool_get_eee);
/**
+ * phy_ethtool_set_eee_noneg - Adjusts MAC LPI configuration without PHY
+ * renegotiation
+ * @phydev: pointer to the target PHY device structure
+ * @data: pointer to the ethtool_keee structure containing the new EEE settings
+ *
+ * This function updates the Energy Efficient Ethernet (EEE) configuration
+ * for cases where only the MAC's Low Power Idle (LPI) configuration changes,
+ * without triggering PHY renegotiation. It ensures that the MAC is properly
+ * informed of the new LPI settings by cycling the link down and up, which
+ * is necessary for the MAC to adopt the new configuration. This adjustment
+ * is done only if there is a change in the tx_lpi_enabled or tx_lpi_timer
+ * configuration.
+ */
+static void phy_ethtool_set_eee_noneg(struct phy_device *phydev,
+ struct ethtool_keee *data)
+{
+ if (phydev->eee_cfg.tx_lpi_enabled != data->tx_lpi_enabled ||
+ phydev->eee_cfg.tx_lpi_timer != data->tx_lpi_timer) {
+ eee_to_eeecfg(&phydev->eee_cfg, data);
+ phydev->enable_tx_lpi = eeecfg_mac_can_tx_lpi(&phydev->eee_cfg);
+ if (phydev->link) {
+ phydev->link = false;
+ phy_link_down(phydev);
+ phydev->link = true;
+ phy_link_up(phydev);
+ }
+ }
+}
+
+/**
* phy_ethtool_set_eee - set EEE supported and status
* @phydev: target phy_device struct
- * @data: ethtool_eee data
+ * @data: ethtool_keee data
*
 * Description: programs the EEE Advertisement register.
*/
-int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data)
{
int ret;
@@ -1668,9 +1706,14 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
mutex_lock(&phydev->lock);
ret = genphy_c45_ethtool_set_eee(phydev, data);
+ if (ret >= 0) {
+ if (ret == 0)
+ phy_ethtool_set_eee_noneg(phydev, data);
+ eee_to_eeecfg(&phydev->eee_cfg, data);
+ }
mutex_unlock(&phydev->lock);
- return ret;
+ return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 3611ea64875e..8297ef681bf5 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -148,6 +148,14 @@ static const int phy_eee_cap1_features_array[] = {
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_eee_cap1_features);
+static const int phy_eee_cap2_features_array[] = {
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+};
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_eee_cap2_features);
+
static void features_init(void)
{
/* 10/100 half/full*/
@@ -232,6 +240,9 @@ static void features_init(void)
linkmode_set_bit_array(phy_eee_cap1_features_array,
ARRAY_SIZE(phy_eee_cap1_features_array),
phy_eee_cap1_features);
+ linkmode_set_bit_array(phy_eee_cap2_features_array,
+ ARRAY_SIZE(phy_eee_cap2_features_array),
+ phy_eee_cap2_features);
}
@@ -780,7 +791,7 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr,
* and identifiers in @c45_ids.
*
* Returns zero on success, %-EIO on bus access error, or %-ENODEV if
- * the "devices in package" is invalid.
+ * the "devices in package" is invalid or no device responds.
*/
static int get_phy_c45_ids(struct mii_bus *bus, int addr,
struct phy_c45_device_ids *c45_ids)
@@ -803,7 +814,11 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr,
*/
ret = phy_c45_probe_present(bus, addr, i);
if (ret < 0)
- return -EIO;
+ /* returning -ENODEV doesn't stop bus
+ * scanning
+ */
+				return (ret == -EIO ||
+					ret == -ENODEV) ? -ENODEV : -EIO;
if (!ret)
continue;
@@ -1413,6 +1428,11 @@ int phy_sfp_probe(struct phy_device *phydev,
}
EXPORT_SYMBOL(phy_sfp_probe);
+static bool phy_drv_supports_irq(const struct phy_driver *phydrv)
+{
+ return phydrv->config_intr && phydrv->handle_interrupt;
+}
+
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
@@ -1527,6 +1547,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (phydev->dev_flags & PHY_F_NO_IRQ)
phydev->irq = PHY_POLL;
+ if (!phy_drv_supports_irq(phydev->drv) && phy_interrupt_is_valid(phydev))
+ phydev->irq = PHY_POLL;
+
/* Port is set to PORT_TP by default and the actual PHY driver will set
* it to different value depending on the PHY configuration. If we have
* the generic PHY driver we can't figure it out, thus set the old
@@ -1592,7 +1615,6 @@ EXPORT_SYMBOL(phy_attach_direct);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface)
{
- struct bus_type *bus = &mdio_bus_type;
struct phy_device *phydev;
struct device *d;
int rc;
@@ -1603,7 +1625,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
/* Search the list of PHY devices on the mdio bus for the
* PHY with the requested name
*/
- d = bus_find_device_by_name(bus, NULL, bus_id);
+ d = bus_find_device_by_name(&mdio_bus_type, NULL, bus_id);
if (!d) {
pr_err("PHY %s not found\n", bus_id);
return ERR_PTR(-ENODEV);
@@ -1700,6 +1722,7 @@ int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
shared->priv_size = priv_size;
}
shared->base_addr = base_addr;
+ shared->np = NULL;
refcount_set(&shared->refcnt, 1);
bus->shared[base_addr] = shared;
} else {
@@ -1723,6 +1746,63 @@ err_unlock:
EXPORT_SYMBOL_GPL(phy_package_join);
/**
+ * of_phy_package_join - join a common PHY group in PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * This is a variant of phy_package_join for PHY package defined in DT.
+ *
+ * The parent node of the @phydev is checked as a valid PHY package node
+ * structure (by matching the node name "ethernet-phy-package") and the
+ * base_addr for the PHY package is passed to phy_package_join.
+ *
+ * With this configuration, the shared struct will also have its np member
+ * set, so that additional DT-defined properties can be used in PHY-specific
+ * probe_once and config_init_once PHY package OPs.
+ *
+ * Returns < 0 on error, 0 on success. In particular, calling
+ * phy_package_join() with the same cookie but a different priv_size is an
+ * error, as is a parent node that is missing, invalid, or does not match
+ * the expected PHY package node name.
+ */
+int of_phy_package_join(struct phy_device *phydev, size_t priv_size)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct device_node *package_node;
+ u32 base_addr;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ package_node = of_get_parent(node);
+ if (!package_node)
+ return -EINVAL;
+
+ if (!of_node_name_eq(package_node, "ethernet-phy-package")) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (of_property_read_u32(package_node, "reg", &base_addr)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = phy_package_join(phydev, base_addr, priv_size);
+ if (ret)
+ goto exit;
+
+ phydev->shared->np = package_node;
+
+ return 0;
+exit:
+ of_node_put(package_node);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_phy_package_join);
+
+/**
* phy_package_leave - leave a common PHY group
* @phydev: target phy_device struct
*
@@ -1738,6 +1818,10 @@ void phy_package_leave(struct phy_device *phydev)
if (!shared)
return;
+ /* Decrease the node refcount on leave if present */
+ if (shared->np)
+ of_node_put(shared->np);
+
if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
bus->shared[shared->base_addr] = NULL;
mutex_unlock(&bus->shared_lock);
@@ -1791,6 +1875,40 @@ int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
EXPORT_SYMBOL_GPL(devm_phy_package_join);
/**
+ * devm_of_phy_package_join - resource managed of_phy_package_join()
+ * @dev: device that is registering this PHY package
+ * @phydev: target phy_device struct
+ * @priv_size: if non-zero allocate this amount of bytes for private data
+ *
+ * Managed of_phy_package_join(). Shared storage is fetched by this function,
+ * and phy_package_leave() is automatically called on driver detach. See
+ * of_phy_package_join() for more information.
+ */
+int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev,
+ size_t priv_size)
+{
+ struct phy_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = of_phy_package_join(phydev, priv_size);
+
+ if (!ret) {
+ *ptr = phydev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_of_phy_package_join);
+
+/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
*
@@ -1859,7 +1977,7 @@ int phy_suspend(struct phy_device *phydev)
{
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct net_device *netdev = phydev->attached_dev;
- struct phy_driver *phydrv = phydev->drv;
+ const struct phy_driver *phydrv = phydev->drv;
int ret;
if (phydev->suspended)
@@ -1884,7 +2002,7 @@ EXPORT_SYMBOL(phy_suspend);
int __phy_resume(struct phy_device *phydev)
{
- struct phy_driver *phydrv = phydev->drv;
+ const struct phy_driver *phydrv = phydev->drv;
int ret;
lockdep_assert_held(&phydev->lock);
@@ -2513,12 +2631,15 @@ EXPORT_SYMBOL(genphy_read_status);
/**
* genphy_c37_read_status - check the link status and update current link state
* @phydev: target phy_device struct
+ * @changed: pointer to storage for whether the link state changed
*
* Description: Check the link, then figure out the current state
* by comparing what we advertise with what the link partner
* advertises. This function is for Clause 37 1000Base-X mode.
+ *
+ * If the link has changed, @changed is set to true, otherwise to false.
*/
-int genphy_c37_read_status(struct phy_device *phydev)
+int genphy_c37_read_status(struct phy_device *phydev, bool *changed)
{
int lpa, err, old_link = phydev->link;
@@ -2528,9 +2649,13 @@ int genphy_c37_read_status(struct phy_device *phydev)
return err;
/* why bother the PHY if nothing can have changed */
- if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link) {
+ *changed = false;
return 0;
+ }
+ /* Signal link has changed */
+ *changed = true;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
@@ -2770,6 +2895,50 @@ void phy_advertise_supported(struct phy_device *phydev)
EXPORT_SYMBOL(phy_advertise_supported);
/**
+ * phy_advertise_eee_all - Advertise all supported EEE modes
+ * @phydev: target phy_device struct
+ *
+ * Description: By default, phylib preserves the EEE advertising at the time of
+ * phy probing, which might be a subset of the supported EEE modes. Use this
+ * function when all supported EEE modes should be advertised. This does not
+ * trigger auto-negotiation, so it must be called before phy_start()/
+ * phylink_start(), which will start auto-negotiation.
+ */
+void phy_advertise_eee_all(struct phy_device *phydev)
+{
+ linkmode_copy(phydev->advertising_eee, phydev->supported_eee);
+}
+EXPORT_SYMBOL_GPL(phy_advertise_eee_all);
+
+/**
+ * phy_support_eee - Set initial EEE policy configuration
+ * @phydev: Target phy_device struct
+ *
+ * This function configures the initial policy for Energy Efficient Ethernet
+ * (EEE) on the specified PHY device, ensuring that EEE capabilities are
+ * advertised before the link is established. It should be called during PHY
+ * registration by the MAC driver and/or the PHY driver (for SmartEEE PHYs)
+ * if the MAC supports LPI or the PHY is able to compensate for missing LPI
+ * functionality in the MAC.
+ *
+ * The function sets default EEE policy parameters, including preparing the PHY
+ * to advertise EEE capabilities based on hardware support.
+ *
+ * It also sets the expected configuration for Low Power Idle (LPI) in the MAC
+ * driver. If the PHY framework determines that both local and remote
+ * advertisements support EEE, and the negotiated link mode is compatible with
+ * EEE, it will set enable_tx_lpi = true. The MAC driver is expected to act on
+ * this setting by enabling the LPI timer if enable_tx_lpi is set.
+ */
+void phy_support_eee(struct phy_device *phydev)
+{
+ linkmode_copy(phydev->advertising_eee, phydev->supported_eee);
+ phydev->eee_cfg.tx_lpi_enabled = true;
+ phydev->eee_cfg.eee_enabled = true;
+}
+EXPORT_SYMBOL(phy_support_eee);
+
+/**
* phy_support_sym_pause - Enable support of symmetrical pause
* @phydev: target phy_device struct
*
@@ -2959,7 +3128,7 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
if (delay < 0)
return delay;
- if (delay && size == 0)
+ if (size == 0)
return delay;
if (delay < delay_values[0] || delay > delay_values[size - 1]) {
@@ -2992,11 +3161,6 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
}
EXPORT_SYMBOL(phy_get_internal_delay);
-static bool phy_drv_supports_irq(struct phy_driver *phydrv)
-{
- return phydrv->config_intr && phydrv->handle_interrupt;
-}
-
static int phy_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -3097,6 +3261,7 @@ static int of_phy_led(struct phy_device *phydev,
struct device *dev = &phydev->mdio.dev;
struct led_init_data init_data = {};
struct led_classdev *cdev;
+ unsigned long modes = 0;
struct phy_led *phyled;
u32 index;
int err;
@@ -3114,6 +3279,21 @@ static int of_phy_led(struct phy_device *phydev,
if (index > U8_MAX)
return -EINVAL;
+ if (of_property_read_bool(led, "active-low"))
+ set_bit(PHY_LED_ACTIVE_LOW, &modes);
+ if (of_property_read_bool(led, "inactive-high-impedance"))
+ set_bit(PHY_LED_INACTIVE_HIGH_IMPEDANCE, &modes);
+
+ if (modes) {
+ /* Return error if asked to set polarity modes but not supported */
+ if (!phydev->drv->led_polarity_set)
+ return -EINVAL;
+
+ err = phydev->drv->led_polarity_set(phydev, index, modes);
+ if (err)
+ return err;
+ }
+
phyled->index = index;
if (phydev->drv->led_brightness_set)
cdev->brightness_set_blocking = phy_led_set_brightness;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index ed0b4ccaa6a6..503fd7c40523 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -2764,9 +2764,9 @@ EXPORT_SYMBOL_GPL(phylink_init_eee);
/**
* phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
* @pl: a pointer to a &struct phylink returned from phylink_create()
- * @eee: a pointer to a &struct ethtool_eee for the read parameters
+ * @eee: a pointer to a &struct ethtool_keee for the read parameters
*/
-int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
+int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_keee *eee)
{
int ret = -EOPNOTSUPP;
@@ -2782,9 +2782,9 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee);
/**
* phylink_ethtool_set_eee() - set the energy efficient ethernet parameters
* @pl: a pointer to a &struct phylink returned from phylink_create()
- * @eee: a pointer to a &struct ethtool_eee for the desired parameters
+ * @eee: a pointer to a &struct ethtool_keee for the desired parameters
*/
-int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee)
+int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_keee *eee)
{
int ret = -EOPNOTSUPP;
diff --git a/drivers/net/phy/qcom/Kconfig b/drivers/net/phy/qcom/Kconfig
new file mode 100644
index 000000000000..570626cc8e14
--- /dev/null
+++ b/drivers/net/phy/qcom/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config QCOM_NET_PHYLIB
+ tristate
+
+config AT803X_PHY
+ tristate "Qualcomm Atheros AR803X PHYs"
+ select QCOM_NET_PHYLIB
+ depends on REGULATOR
+ help
+ Currently supports the AR8030, AR8031, AR8033 and AR8035 models
+
+config QCA83XX_PHY
+ tristate "Qualcomm Atheros QCA833x PHYs"
+ select QCOM_NET_PHYLIB
+ help
+ Currently supports the internal QCA8337 (internal qca8k PHY) model
+
+config QCA808X_PHY
+ tristate "Qualcomm QCA808x PHYs"
+ select QCOM_NET_PHYLIB
+ help
+ Currently supports the QCA8081 model
+
+config QCA807X_PHY
+ tristate "Qualcomm QCA807x PHYs"
+ select QCOM_NET_PHYLIB
+ depends on OF_MDIO
+ help
+ Currently supports the Qualcomm QCA8072, QCA8075 and the PSGMII
+ control PHY.
diff --git a/drivers/net/phy/qcom/Makefile b/drivers/net/phy/qcom/Makefile
new file mode 100644
index 000000000000..f24fb550babd
--- /dev/null
+++ b/drivers/net/phy/qcom/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_NET_PHYLIB) += qcom-phy-lib.o
+obj-$(CONFIG_AT803X_PHY) += at803x.o
+obj-$(CONFIG_QCA83XX_PHY) += qca83xx.o
+obj-$(CONFIG_QCA808X_PHY) += qca808x.o
+obj-$(CONFIG_QCA807X_PHY) += qca807x.o
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
new file mode 100644
index 000000000000..4717c59d51d0
--- /dev/null
+++ b/drivers/net/phy/qcom/at803x.c
@@ -0,0 +1,1106 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/net/phy/qcom/at803x.c
+ *
+ * Driver for Qualcomm Atheros AR803x PHY
+ *
+ * Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
+ */
+
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+#include <linux/bitfield.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/phylink.h>
+#include <linux/sfp.h>
+#include <dt-bindings/net/qca-ar803x.h>
+
+#include "qcom.h"
+
+#define AT803X_LED_CONTROL 0x18
+
+#define AT803X_PHY_MMD3_WOL_CTRL 0x8012
+#define AT803X_WOL_EN BIT(5)
+
+#define AT803X_REG_CHIP_CONFIG 0x1f
+#define AT803X_BT_BX_REG_SEL 0x8000
+
+#define AT803X_MODE_CFG_MASK 0x0F
+#define AT803X_MODE_CFG_BASET_RGMII 0x00
+#define AT803X_MODE_CFG_BASET_SGMII 0x01
+#define AT803X_MODE_CFG_BX1000_RGMII_50OHM 0x02
+#define AT803X_MODE_CFG_BX1000_RGMII_75OHM 0x03
+#define AT803X_MODE_CFG_BX1000_CONV_50OHM 0x04
+#define AT803X_MODE_CFG_BX1000_CONV_75OHM 0x05
+#define AT803X_MODE_CFG_FX100_RGMII_50OHM 0x06
+#define AT803X_MODE_CFG_FX100_CONV_50OHM 0x07
+#define AT803X_MODE_CFG_RGMII_AUTO_MDET 0x0B
+#define AT803X_MODE_CFG_FX100_RGMII_75OHM 0x0E
+#define AT803X_MODE_CFG_FX100_CONV_75OHM 0x0F
+
+#define AT803X_PSSR 0x11 /* PHY-Specific Status Register */
+#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
+
+#define AT803X_DEBUG_REG_1F 0x1F
+#define AT803X_DEBUG_PLL_ON BIT(2)
+#define AT803X_DEBUG_RGMII_1V8 BIT(3)
+
+/* AT803x supports either the XTAL input pad, an internal PLL or the
+ * DSP as clock reference for the clock output pad. The XTAL reference
+ * is only used for the 25 MHz output; all other frequencies need the PLL.
+ * The DSP as a clock reference is used in synchronous ethernet
+ * applications.
+ *
+ * By default the PLL is only enabled if there is a link. Otherwise
+ * the PHY will go into a low-power state and disable the PLL. You can
+ * set the PLL_ON bit (see debug register 0x1f) to keep the PLL always
+ * enabled.
+ */
+#define AT803X_MMD7_CLK25M 0x8016
+#define AT803X_CLK_OUT_MASK GENMASK(4, 2)
+#define AT803X_CLK_OUT_25MHZ_XTAL 0
+#define AT803X_CLK_OUT_25MHZ_DSP 1
+#define AT803X_CLK_OUT_50MHZ_PLL 2
+#define AT803X_CLK_OUT_50MHZ_DSP 3
+#define AT803X_CLK_OUT_62_5MHZ_PLL 4
+#define AT803X_CLK_OUT_62_5MHZ_DSP 5
+#define AT803X_CLK_OUT_125MHZ_PLL 6
+#define AT803X_CLK_OUT_125MHZ_DSP 7
+
+/* The AR8035 has another mask which is compatible with the AR8031/AR8033 mask
+ * but doesn't support choosing between XTAL/PLL and DSP.
+ */
+#define AT8035_CLK_OUT_MASK GENMASK(4, 3)
+
+#define AT803X_CLK_OUT_STRENGTH_MASK GENMASK(8, 7)
+#define AT803X_CLK_OUT_STRENGTH_FULL 0
+#define AT803X_CLK_OUT_STRENGTH_HALF 1
+#define AT803X_CLK_OUT_STRENGTH_QUARTER 2
+
+#define AT803X_MMD3_SMARTEEE_CTL1 0x805b
+#define AT803X_MMD3_SMARTEEE_CTL2 0x805c
+#define AT803X_MMD3_SMARTEEE_CTL3 0x805d
+#define AT803X_MMD3_SMARTEEE_CTL3_LPI_EN BIT(8)
+
+#define ATH9331_PHY_ID 0x004dd041
+#define ATH8030_PHY_ID 0x004dd076
+#define ATH8031_PHY_ID 0x004dd074
+#define ATH8032_PHY_ID 0x004dd023
+#define ATH8035_PHY_ID 0x004dd072
+#define AT8030_PHY_ID_MASK 0xffffffef
+
+#define QCA9561_PHY_ID 0x004dd042
+
+#define AT803X_PAGE_FIBER 0
+#define AT803X_PAGE_COPPER 1
+
+/* don't turn off internal PLL */
+#define AT803X_KEEP_PLL_ENABLED BIT(0)
+#define AT803X_DISABLE_SMARTEEE BIT(1)
+
+/* disable hibernation mode */
+#define AT803X_DISABLE_HIBERNATION_MODE BIT(2)
+
+MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_LICENSE("GPL");
+
+struct at803x_priv {
+ int flags;
+ u16 clk_25m_reg;
+ u16 clk_25m_mask;
+ u8 smarteee_lpi_tw_1g;
+ u8 smarteee_lpi_tw_100m;
+ bool is_fiber;
+ bool is_1000basex;
+ struct regulator_dev *vddio_rdev;
+ struct regulator_dev *vddh_rdev;
+};
+
+struct at803x_context {
+ u16 bmcr;
+ u16 advertise;
+ u16 control1000;
+ u16 int_enable;
+ u16 smart_speed;
+ u16 led_control;
+};
+
+static int at803x_write_page(struct phy_device *phydev, int page)
+{
+ int mask;
+ int set;
+
+ if (page == AT803X_PAGE_COPPER) {
+ set = AT803X_BT_BX_REG_SEL;
+ mask = 0;
+ } else {
+ set = 0;
+ mask = AT803X_BT_BX_REG_SEL;
+ }
+
+ return __phy_modify(phydev, AT803X_REG_CHIP_CONFIG, mask, set);
+}
+
+static int at803x_read_page(struct phy_device *phydev)
+{
+ int ccr = __phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+
+ if (ccr < 0)
+ return ccr;
+
+ if (ccr & AT803X_BT_BX_REG_SEL)
+ return AT803X_PAGE_COPPER;
+
+ return AT803X_PAGE_FIBER;
+}
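
With .read_page/.write_page wired into the driver ops further down, the generic paged helpers become usable; a minimal sketch (not part of this patch), assuming the copper page is the default:

static int at803x_read_fiber_bmsr(struct phy_device *phydev)
{
	/* phy_read_paged() selects the fiber page, reads the register and
	 * restores the previous page, all under the MDIO bus lock.
	 */
	return phy_read_paged(phydev, AT803X_PAGE_FIBER, MII_BMSR);
}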
+
+static int at803x_enable_rx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
+ AT803X_DEBUG_RX_CLK_DLY_EN);
+}
+
+static int at803x_enable_tx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
+ AT803X_DEBUG_TX_CLK_DLY_EN);
+}
+
+static int at803x_disable_rx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ AT803X_DEBUG_RX_CLK_DLY_EN, 0);
+}
+
+static int at803x_disable_tx_delay(struct phy_device *phydev)
+{
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
+ AT803X_DEBUG_TX_CLK_DLY_EN, 0);
+}
+
+/* save relevant PHY registers to private copy */
+static void at803x_context_save(struct phy_device *phydev,
+ struct at803x_context *context)
+{
+ context->bmcr = phy_read(phydev, MII_BMCR);
+ context->advertise = phy_read(phydev, MII_ADVERTISE);
+ context->control1000 = phy_read(phydev, MII_CTRL1000);
+ context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
+ context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
+ context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
+}
+
+/* restore relevant PHY registers from private copy */
+static void at803x_context_restore(struct phy_device *phydev,
+ const struct at803x_context *context)
+{
+ phy_write(phydev, MII_BMCR, context->bmcr);
+ phy_write(phydev, MII_ADVERTISE, context->advertise);
+ phy_write(phydev, MII_CTRL1000, context->control1000);
+ phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
+ phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
+ phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
+}
+
+static int at803x_suspend(struct phy_device *phydev)
+{
+ int value;
+ int wol_enabled;
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ wol_enabled = value & AT803X_INTR_ENABLE_WOL;
+
+ if (wol_enabled)
+ value = BMCR_ISOLATE;
+ else
+ value = BMCR_PDOWN;
+
+ phy_modify(phydev, MII_BMCR, 0, value);
+
+ return 0;
+}
+
+static int at803x_resume(struct phy_device *phydev)
+{
+ return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0);
+}
+
+static int at803x_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ u32 freq, strength, tw;
+ unsigned int sel;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_OF_MDIO))
+ return 0;
+
+ if (of_property_read_bool(node, "qca,disable-smarteee"))
+ priv->flags |= AT803X_DISABLE_SMARTEEE;
+
+ if (of_property_read_bool(node, "qca,disable-hibernation-mode"))
+ priv->flags |= AT803X_DISABLE_HIBERNATION_MODE;
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-1g", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-1g\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_1g = tw;
+ }
+
+ if (!of_property_read_u32(node, "qca,smarteee-tw-us-100m", &tw)) {
+ if (!tw || tw > 255) {
+ phydev_err(phydev, "invalid qca,smarteee-tw-us-100m\n");
+ return -EINVAL;
+ }
+ priv->smarteee_lpi_tw_100m = tw;
+ }
+
+ ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
+ if (!ret) {
+ switch (freq) {
+ case 25000000:
+ sel = AT803X_CLK_OUT_25MHZ_XTAL;
+ break;
+ case 50000000:
+ sel = AT803X_CLK_OUT_50MHZ_PLL;
+ break;
+ case 62500000:
+ sel = AT803X_CLK_OUT_62_5MHZ_PLL;
+ break;
+ case 125000000:
+ sel = AT803X_CLK_OUT_125MHZ_PLL;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-frequency\n");
+ return -EINVAL;
+ }
+
+ priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
+ priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
+ }
+
+ ret = of_property_read_u32(node, "qca,clk-out-strength", &strength);
+ if (!ret) {
+ priv->clk_25m_mask |= AT803X_CLK_OUT_STRENGTH_MASK;
+ switch (strength) {
+ case AR803X_STRENGTH_FULL:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_FULL;
+ break;
+ case AR803X_STRENGTH_HALF:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_HALF;
+ break;
+ case AR803X_STRENGTH_QUARTER:
+ priv->clk_25m_reg |= AT803X_CLK_OUT_STRENGTH_QUARTER;
+ break;
+ default:
+ phydev_err(phydev, "invalid qca,clk-out-strength\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int at803x_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct at803x_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ ret = at803x_parse_dt(phydev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int at803x_get_features(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int err;
+
+ err = genphy_read_abilities(phydev);
+ if (err)
+ return err;
+
+ if (phydev->drv->phy_id != ATH8031_PHY_ID)
+ return 0;
+
+ /* AR8031/AR8033 have different status registers
+ * for copper and fiber operation. However, the
+ * extended status register is the same for both
+ * operation modes.
+ *
+ * As a result of that, ESTATUS_1000_XFULL is set
+ * to 1 even when operating in copper TP mode.
+ *
+ * Remove this mode from the supported link modes
+ * when not operating in 1000BaseX mode.
+ */
+ if (!priv->is_1000basex)
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported);
+
+ return 0;
+}
+
+static int at803x_smarteee_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ u16 mask = 0, val = 0;
+ int ret;
+
+ if (priv->flags & AT803X_DISABLE_SMARTEEE)
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN, 0);
+
+ if (priv->smarteee_lpi_tw_1g) {
+ mask |= 0xff00;
+ val |= priv->smarteee_lpi_tw_1g << 8;
+ }
+ if (priv->smarteee_lpi_tw_100m) {
+ mask |= 0x00ff;
+ val |= priv->smarteee_lpi_tw_100m;
+ }
+ if (!mask)
+ return 0;
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL1,
+ mask, val);
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_MMD3_SMARTEEE_CTL3,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN,
+ AT803X_MMD3_SMARTEEE_CTL3_LPI_EN);
+}
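
A worked example of the CTL1 packing above: assuming device-tree values qca,smarteee-tw-us-1g = <120> and qca,smarteee-tw-us-100m = <80>, the modify writes the 1G buffer time to the high byte and the 100M one to the low byte:

	u16 mask = 0xff00 | 0x00ff;	/* = 0xffff, both Tw fields selected */
	u16 val  = (120 << 8) | 80;	/* = 0x7850: Tw_1g = 120us, Tw_100m = 80us */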
+
+static int at803x_clk_out_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ if (!priv->clk_25m_mask)
+ return 0;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, AT803X_MMD7_CLK25M,
+ priv->clk_25m_mask, priv->clk_25m_reg);
+}
+
+static int at8031_pll_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is PLL OFF. After a soft reset, the
+ * values are retained.
+ */
+ if (priv->flags & AT803X_KEEP_PLL_ENABLED)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_PLL_ON);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_PLL_ON, 0);
+}
+
+static int at803x_hibernation_mode_config(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The default after hardware reset is hibernation mode enabled. After
+ * software reset, the value is retained.
+ */
+ if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
+ return 0;
+
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_PS_HIB_EN, 0);
+}
+
+static int at803x_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* The RX and TX delay default is:
+ * after HW reset: RX delay enabled and TX delay disabled
+ * after SW reset: RX delay enabled, while TX delay retains the
+ * value before reset.
+ */
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ ret = at803x_enable_rx_delay(phydev);
+ else
+ ret = at803x_disable_rx_delay(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ ret = at803x_enable_tx_delay(phydev);
+ else
+ ret = at803x_disable_tx_delay(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_smarteee_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_clk_out_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_hibernation_mode_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* The AR803x extended next page bit is enabled by default. Cisco
+ * multigig switches read this bit and attempt to negotiate 10Gbps
+ * rates even if the next page bit is disabled. This is incorrect
+ * behaviour but we still need to accommodate it. XNP is only needed
+ * for 10Gbps support, so disable XNP.
+ */
+ return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
+}
+
+static void at803x_link_change_notify(struct phy_device *phydev)
+{
+ /*
+ * Conduct a hardware reset for AT8030 every time a link loss is
+ * signalled. This is necessary to circumvent a hardware bug that
+ * occurs when the cable is unplugged while TX packets are pending
+ * in the FIFO. In such cases, the FIFO enters an error mode from
+ * which it cannot recover by software.
+ */
+ if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) {
+ struct at803x_context context;
+
+ at803x_context_save(phydev, &context);
+
+ phy_device_reset(phydev, 1);
+ usleep_range(1000, 2000);
+ phy_device_reset(phydev, 0);
+ usleep_range(1000, 2000);
+
+ at803x_context_restore(phydev, &context);
+
+ phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
+ }
+}
+
+static int at803x_config_aneg(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ ret = at803x_prepare_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ if (priv->is_1000basex)
+ return genphy_c37_config_aneg(phydev);
+
+ return genphy_config_aneg(phydev);
+}
+
+static int at803x_cable_test_result_trans(u16 status)
+{
+ switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+ case AT803X_CDT_STATUS_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case AT803X_CDT_STATUS_STAT_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case AT803X_CDT_STATUS_STAT_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case AT803X_CDT_STATUS_STAT_FAIL:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool at803x_cdt_test_failed(u16 status)
+{
+ return FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status) ==
+ AT803X_CDT_STATUS_STAT_FAIL;
+}
+
+static bool at803x_cdt_fault_length_valid(u16 status)
+{
+ switch (FIELD_GET(AT803X_CDT_STATUS_STAT_MASK, status)) {
+ case AT803X_CDT_STATUS_STAT_OPEN:
+ case AT803X_CDT_STATUS_STAT_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int at803x_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ static const int ethtool_pair[] = {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ ETHTOOL_A_CABLE_PAIR_C,
+ ETHTOOL_A_CABLE_PAIR_D,
+ };
+ int ret, val;
+
+ val = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
+ AT803X_CDT_ENABLE_TEST;
+ ret = at803x_cdt_start(phydev, val);
+ if (ret)
+ return ret;
+
+ ret = at803x_cdt_wait_for_completion(phydev, AT803X_CDT_ENABLE_TEST);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, AT803X_CDT_STATUS);
+ if (val < 0)
+ return val;
+
+ if (at803x_cdt_test_failed(val))
+ return 0;
+
+ ethnl_cable_test_result(phydev, ethtool_pair[pair],
+ at803x_cable_test_result_trans(val));
+
+ if (at803x_cdt_fault_length_valid(val)) {
+ val = FIELD_GET(AT803X_CDT_STATUS_DELTA_TIME_MASK, val);
+ ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
+ at803x_cdt_fault_length(val));
+ }
+
+ return 1;
+}
+
+static int at803x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished, unsigned long pair_mask)
+{
+ int retries = 20;
+ int pair, ret;
+
+ *finished = false;
+
+ /* According to the datasheet the CDT can be performed when
+ * there is no link partner or when the link partner is
+ * auto-negotiating. Starting the test will restart the AN
+ * automatically. It seems that by doing this repeatedly we will
+ * eventually get a slot where our link partner won't disturb our
+ * measurement.
+ */
+ while (pair_mask && retries--) {
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = at803x_cable_test_one_pair(phydev, pair);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ clear_bit(pair, &pair_mask);
+ }
+ if (pair_mask)
+ msleep(250);
+ }
+
+ *finished = true;
+
+ return 0;
+}
+
+static void at803x_cable_test_autoneg(struct phy_device *phydev)
+{
+ /* Enable auto-negotiation but advertise no capabilities, so no link
+ * will be established. A restart of the auto-negotiation is not
+ * required, because the cable test will automatically break the link.
+ */
+ phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
+ phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
+}
+
+static int at803x_cable_test_start(struct phy_device *phydev)
+{
+ at803x_cable_test_autoneg(phydev);
+ /* we do all the (time-consuming) work later */
+ return 0;
+}
+
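For context, a hedged paraphrase of how the PHY core consumes this callback pair: after ethtool triggers the test, the core repeatedly invokes cable_test_get_status() from its state machine until the driver reports completion. This is a simplified loop for illustration, not the literal core implementation; the foo_ name is an assumption.

static int foo_poll_cable_test(struct phy_device *phydev)
{
	bool finished = false;
	int err;

	do {
		err = phydev->drv->cable_test_get_status(phydev, &finished);
		if (err)
			return err;
		if (!finished)
			msleep(1000);	/* the core re-polls on its state machine tick */
	} while (!finished);

	return 0;	/* results were already reported via ethnl_cable_test_*() */
}
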
+static int at8031_rgmii_reg_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+
+ if (selector)
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ 0, AT803X_DEBUG_RGMII_1V8);
+ else
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_1F,
+ AT803X_DEBUG_RGMII_1V8, 0);
+}
+
+static int at8031_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct phy_device *phydev = rdev_get_drvdata(rdev);
+ int val;
+
+ val = at803x_debug_reg_read(phydev, AT803X_DEBUG_REG_1F);
+ if (val < 0)
+ return val;
+
+ return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
+}
+
+static const struct regulator_ops vddio_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = at8031_rgmii_reg_set_voltage_sel,
+ .get_voltage_sel = at8031_rgmii_reg_get_voltage_sel,
+};
+
+static const unsigned int vddio_voltage_table[] = {
+ 1500000,
+ 1800000,
+};
+
+static const struct regulator_desc vddio_desc = {
+ .name = "vddio",
+ .of_match = of_match_ptr("vddio-regulator"),
+ .n_voltages = ARRAY_SIZE(vddio_voltage_table),
+ .volt_table = vddio_voltage_table,
+ .ops = &vddio_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static const struct regulator_ops vddh_regulator_ops = {
+};
+
+static const struct regulator_desc vddh_desc = {
+ .name = "vddh",
+ .of_match = of_match_ptr("vddh-regulator"),
+ .n_voltages = 1,
+ .fixed_uV = 2500000,
+ .ops = &vddh_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+};
+
+static int at8031_register_regulators(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct regulator_config config = { };
+
+ config.dev = dev;
+ config.driver_data = phydev;
+
+ priv->vddio_rdev = devm_regulator_register(dev, &vddio_desc, &config);
+ if (IS_ERR(priv->vddio_rdev)) {
+ phydev_err(phydev, "failed to register VDDIO regulator\n");
+ return PTR_ERR(priv->vddio_rdev);
+ }
+
+ priv->vddh_rdev = devm_regulator_register(dev, &vddh_desc, &config);
+ if (IS_ERR(priv->vddh_rdev)) {
+ phydev_err(phydev, "failed to register VDDH regulator\n");
+ return PTR_ERR(priv->vddh_rdev);
+ }
+
+ return 0;
+}
+
+static int at8031_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_support);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(sfp_support);
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ phy_interface_t iface;
+
+ linkmode_zero(phy_support);
+ phylink_set(phy_support, 1000baseX_Full);
+ phylink_set(phy_support, 1000baseT_Full);
+ phylink_set(phy_support, Autoneg);
+ phylink_set(phy_support, Pause);
+ phylink_set(phy_support, Asym_Pause);
+
+ linkmode_zero(sfp_support);
+ sfp_parse_support(phydev->sfp_bus, id, sfp_support, interfaces);
+ /* Some modules support 10G modes in addition to the modes we support.
+ * Mask out the unsupported modes so the correct interface is picked.
+ */
+ linkmode_and(sfp_support, phy_support, sfp_support);
+
+ if (linkmode_empty(sfp_support)) {
+ dev_err(&phydev->mdio.dev, "incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ iface = sfp_select_interface(phydev->sfp_bus, sfp_support);
+
+ /* Only 1000Base-X is supported by AR8031/8033 as the downstream SerDes
+ * interface for use with SFP modules.
+ * However, some copper modules detected as having a preferred SGMII
+ * interface do default to and function in 1000Base-X mode, so just
+ * print a warning and allow such modules, as they may have some chance
+ * of working.
+ */
+ if (iface == PHY_INTERFACE_MODE_SGMII)
+ dev_warn(&phydev->mdio.dev, "module may not function if 1000Base-X not supported\n");
+ else if (iface != PHY_INTERFACE_MODE_1000BASEX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct sfp_upstream_ops at8031_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = at8031_sfp_insert,
+};
+
+static int at8031_parse_dt(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ if (of_property_read_bool(node, "qca,keep-pll-enabled"))
+ priv->flags |= AT803X_KEEP_PLL_ENABLED;
+
+ ret = at8031_register_regulators(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
+ "vddio");
+ if (ret) {
+ phydev_err(phydev, "failed to get VDDIO regulator\n");
+ return ret;
+ }
+
+ /* Only AR8031/8033 support 1000Base-X for SFP modules */
+ return phy_sfp_probe(phydev, &at8031_sfp_ops);
+}
+
+static int at8031_probe(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int mode_cfg;
+ int ccr;
+ int ret;
+
+ ret = at803x_probe(phydev);
+ if (ret)
+ return ret;
+
+ /* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping
+ * options.
+ */
+ ret = at8031_parse_dt(phydev);
+ if (ret)
+ return ret;
+
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ if (ccr < 0)
+ return ccr;
+ mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+
+ switch (mode_cfg) {
+ case AT803X_MODE_CFG_BX1000_RGMII_50OHM:
+ case AT803X_MODE_CFG_BX1000_RGMII_75OHM:
+ priv->is_1000basex = true;
+ fallthrough;
+ case AT803X_MODE_CFG_FX100_RGMII_50OHM:
+ case AT803X_MODE_CFG_FX100_RGMII_75OHM:
+ priv->is_fiber = true;
+ break;
+ }
+
+ /* Disable WoL in the 1588 register, which is enabled
+ * by default
+ */
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+}
+
+static int at8031_config_init(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int ret;
+
+ /* Some bootloaders leave the fiber page selected.
+ * Switch to the appropriate page (fiber or copper), as otherwise we
+ * read the PHY capabilities from the wrong page.
+ */
+ phy_lock_mdio_bus(phydev);
+ ret = at803x_write_page(phydev,
+ priv->is_fiber ? AT803X_PAGE_FIBER :
+ AT803X_PAGE_COPPER);
+ phy_unlock_mdio_bus(phydev);
+ if (ret)
+ return ret;
+
+ ret = at8031_pll_config(phydev);
+ if (ret < 0)
+ return ret;
+
+ return at803x_config_init(phydev);
+}
+
+static int at8031_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret;
+
+ /* First setup MAC address and enable WOL interrupt */
+ ret = at803x_set_wol(phydev, wol);
+ if (ret)
+ return ret;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ /* Enable WOL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ 0, AT803X_WOL_EN);
+ else
+ /* Disable WoL function for 1588 */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+
+ return ret;
+}
+
+static int at8031_config_intr(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ int err, value = 0;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED &&
+ priv->is_fiber) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ value |= AT803X_INTR_ENABLE_LINK_FAIL_BX;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS_BX;
+
+ err = phy_set_bits(phydev, AT803X_INTR_ENABLE, value);
+ if (err)
+ return err;
+ }
+
+ return at803x_config_intr(phydev);
+}
+
+/* AR8031 and AR8033 share the same read status logic */
+static int at8031_read_status(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+ bool changed;
+
+ if (priv->is_1000basex)
+ return genphy_c37_read_status(phydev, &changed);
+
+ return at803x_read_status(phydev);
+}
+
+/* AR8031 and AR8035 share the same cable test get status reg */
+static int at8031_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ return at803x_cable_test_get_status(phydev, finished, 0xf);
+}
+
+/* AR8031 and AR8035 share the same cable test start logic */
+static int at8031_cable_test_start(struct phy_device *phydev)
+{
+ at803x_cable_test_autoneg(phydev);
+ phy_write(phydev, MII_CTRL1000, 0);
+ /* we do all the (time-consuming) work later */
+ return 0;
+}
+
+/* AR8032, AR9331 and QCA9561 share the same cable test get status reg */
+static int at8032_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ return at803x_cable_test_get_status(phydev, finished, 0x3);
+}
+
+static int at8035_parse_dt(struct phy_device *phydev)
+{
+ struct at803x_priv *priv = phydev->priv;
+
+ /* The mask is set by the generic at803x_parse_dt()
+ * if the property is present. Assume the property is
+ * present when the mask is non-zero.
+ */
+ if (priv->clk_25m_mask) {
+ /* Fixup for the AR8030/AR8035. These chips have a different mask
+ * and don't support the DSP reference, i.e. the lowest bit of the
+ * mask. The upper two bits select the same frequencies. Mask
+ * out the lowest bit here.
+ *
+ * Warning:
+ * There was no datasheet for the AR8030 available so this is
+ * just a guess. But the AR8035 is listed as pin compatible
+ * to the AR8030 so there might be a good chance it works on
+ * the AR8030 too.
+ */
+ priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
+ priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
+ }
+
+ return 0;
+}
+
+/* AR8030 and AR8035 share the same special mask for clk_25m */
+static int at8035_probe(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = at803x_probe(phydev);
+ if (ret)
+ return ret;
+
+ return at8035_parse_dt(phydev);
+}
+
+static struct phy_driver at803x_driver[] = {
+{
+ /* Qualcomm Atheros AR8035 */
+ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID),
+ .name = "Qualcomm Atheros AR8035",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at8035_probe,
+ .config_aneg = at803x_config_aneg,
+ .config_init = at803x_config_init,
+ .soft_reset = genphy_soft_reset,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_GBIT_FEATURES */
+ .read_status = at803x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .cable_test_start = at8031_cable_test_start,
+ .cable_test_get_status = at8031_cable_test_get_status,
+}, {
+ /* Qualcomm Atheros AR8030 */
+ .phy_id = ATH8030_PHY_ID,
+ .name = "Qualcomm Atheros AR8030",
+ .phy_id_mask = AT8030_PHY_ID_MASK,
+ .probe = at8035_probe,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+}, {
+ /* Qualcomm Atheros AR8031/AR8033 */
+ PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
+ .name = "Qualcomm Atheros AR8031/AR8033",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at8031_probe,
+ .config_init = at8031_config_init,
+ .config_aneg = at803x_config_aneg,
+ .soft_reset = genphy_soft_reset,
+ .set_wol = at8031_set_wol,
+ .get_wol = at803x_get_wol,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .read_page = at803x_read_page,
+ .write_page = at803x_write_page,
+ .get_features = at803x_get_features,
+ .read_status = at8031_read_status,
+ .config_intr = at8031_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .cable_test_start = at8031_cable_test_start,
+ .cable_test_get_status = at8031_cable_test_get_status,
+}, {
+ /* Qualcomm Atheros AR8032 */
+ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
+ .name = "Qualcomm Atheros AR8032",
+ .probe = at803x_probe,
+ .flags = PHY_POLL_CABLE_TEST,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at8032_cable_test_get_status,
+}, {
+ /* ATHEROS AR9331 */
+ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+ .name = "Qualcomm Atheros AR9331 built-in PHY",
+ .probe = at803x_probe,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at8032_cable_test_get_status,
+ .read_status = at803x_read_status,
+ .soft_reset = genphy_soft_reset,
+ .config_aneg = at803x_config_aneg,
+}, {
+ /* Qualcomm Atheros QCA9561 */
+ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
+ .name = "Qualcomm Atheros QCA9561 built-in PHY",
+ .probe = at803x_probe,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at8032_cable_test_get_status,
+ .read_status = at803x_read_status,
+ .soft_reset = genphy_soft_reset,
+ .config_aneg = at803x_config_aneg,
+}, };
+
+module_phy_driver(at803x_driver);
+
+static struct mdio_device_id __maybe_unused atheros_tbl[] = {
+ { ATH8030_PHY_ID, AT8030_PHY_ID_MASK },
+ { PHY_ID_MATCH_EXACT(ATH8031_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, atheros_tbl);
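
For readers unfamiliar with the helper: module_phy_driver(at803x_driver) generates the module init/exit boilerplate, roughly equivalent to the following (paraphrased from the phy.h macros, not copied from this patch):

static int __init phy_module_init(void)
{
	return phy_drivers_register(at803x_driver,
				    ARRAY_SIZE(at803x_driver), THIS_MODULE);
}
module_init(phy_module_init);

static void __exit phy_module_exit(void)
{
	phy_drivers_unregister(at803x_driver, ARRAY_SIZE(at803x_driver));
}
module_exit(phy_module_exit);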
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
new file mode 100644
index 000000000000..672c6929119a
--- /dev/null
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Sartura Ltd.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ * Christian Marangi <ansuelsmth@gmail.com>
+ *
+ * Qualcomm QCA8072 and QCA8075 PHY driver
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/bitfield.h>
+#include <linux/gpio/driver.h>
+#include <linux/sfp.h>
+
+#include "qcom.h"
+
+#define QCA807X_CHIP_CONFIGURATION 0x1f
+#define QCA807X_BT_BX_REG_SEL BIT(15)
+#define QCA807X_BT_BX_REG_SEL_FIBER 0
+#define QCA807X_BT_BX_REG_SEL_COPPER 1
+#define QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK GENMASK(3, 0)
+#define QCA807X_CHIP_CONFIGURATION_MODE_QSGMII_SGMII 4
+#define QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER 3
+#define QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_ALL_COPPER 0
+
+#define QCA807X_MEDIA_SELECT_STATUS 0x1a
+#define QCA807X_MEDIA_DETECTED_COPPER BIT(5)
+#define QCA807X_MEDIA_DETECTED_1000_BASE_X BIT(4)
+#define QCA807X_MEDIA_DETECTED_100_BASE_FX BIT(3)
+
+#define QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION 0x807e
+#define QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN BIT(0)
+
+#define QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH 0x801a
+#define QCA807X_CONTROL_DAC_MASK GENMASK(2, 0)
+/* List of tweaks enabled by this bit:
+ * - With both FULL amplitude and FULL bias current: bias current
+ * is set to half.
+ * - With only DSP amplitude: bias current is set to half and
+ * is set to 1/4 with cable < 10m.
+ * - With DSP bias current (which includes both DSP amplitude and
+ * DSP bias current): bias current is half the detected current
+ * with cable < 10m.
+ */
+#define QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK BIT(2)
+#define QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT BIT(1)
+#define QCA807X_CONTROL_DAC_DSP_AMPLITUDE BIT(0)
+
+#define QCA807X_MMD7_LED_100N_1 0x8074
+#define QCA807X_MMD7_LED_100N_2 0x8075
+#define QCA807X_MMD7_LED_1000N_1 0x8076
+#define QCA807X_MMD7_LED_1000N_2 0x8077
+
+#define QCA807X_MMD7_LED_CTRL(x) (0x8074 + ((x) * 2))
+#define QCA807X_MMD7_LED_FORCE_CTRL(x) (0x8075 + ((x) * 2))
+
+/* LED hw control pattern for fiber port */
+#define QCA807X_LED_FIBER_PATTERN_MASK GENMASK(11, 1)
+#define QCA807X_LED_FIBER_TXACT_BLK_EN BIT(10)
+#define QCA807X_LED_FIBER_RXACT_BLK_EN BIT(9)
+#define QCA807X_LED_FIBER_FDX_ON_EN BIT(6)
+#define QCA807X_LED_FIBER_HDX_ON_EN BIT(5)
+#define QCA807X_LED_FIBER_1000BX_ON_EN BIT(2)
+#define QCA807X_LED_FIBER_100FX_ON_EN BIT(1)
+
+/* Some devices repurpose the LEDs as GPIO outputs */
+#define QCA807X_GPIO_FORCE_EN QCA808X_LED_FORCE_EN
+#define QCA807X_GPIO_FORCE_MODE_MASK QCA808X_LED_FORCE_MODE_MASK
+
+#define QCA807X_FUNCTION_CONTROL 0x10
+#define QCA807X_FC_MDI_CROSSOVER_MODE_MASK GENMASK(6, 5)
+#define QCA807X_FC_MDI_CROSSOVER_AUTO 3
+#define QCA807X_FC_MDI_CROSSOVER_MANUAL_MDIX 1
+#define QCA807X_FC_MDI_CROSSOVER_MANUAL_MDI 0
+
+/* PQSGMII Analog PHY specific */
+#define PQSGMII_CTRL_REG 0x0
+#define PQSGMII_ANALOG_SW_RESET BIT(6)
+#define PQSGMII_DRIVE_CONTROL_1 0xb
+#define PQSGMII_TX_DRIVER_MASK GENMASK(7, 4)
+#define PQSGMII_TX_DRIVER_140MV 0x0
+#define PQSGMII_TX_DRIVER_160MV 0x1
+#define PQSGMII_TX_DRIVER_180MV 0x2
+#define PQSGMII_TX_DRIVER_200MV 0x3
+#define PQSGMII_TX_DRIVER_220MV 0x4
+#define PQSGMII_TX_DRIVER_240MV 0x5
+#define PQSGMII_TX_DRIVER_260MV 0x6
+#define PQSGMII_TX_DRIVER_280MV 0x7
+#define PQSGMII_TX_DRIVER_300MV 0x8
+#define PQSGMII_TX_DRIVER_320MV 0x9
+#define PQSGMII_TX_DRIVER_400MV 0xa
+#define PQSGMII_TX_DRIVER_500MV 0xb
+#define PQSGMII_TX_DRIVER_600MV 0xc
+#define PQSGMII_MODE_CTRL 0x6d
+#define PQSGMII_MODE_CTRL_AZ_WORKAROUND_MASK BIT(0)
+#define PQSGMII_MMD3_SERDES_CONTROL 0x805a
+
+#define PHY_ID_QCA8072 0x004dd0b2
+#define PHY_ID_QCA8075 0x004dd0b1
+
+#define QCA807X_COMBO_ADDR_OFFSET 4
+#define QCA807X_PQSGMII_ADDR_OFFSET 5
+#define SERDES_RESET_SLEEP 100
+
+enum qca807x_global_phy {
+ QCA807X_COMBO_ADDR = 4,
+ QCA807X_PQSGMII_ADDR = 5,
+};
+
+struct qca807x_shared_priv {
+ unsigned int package_mode;
+ u32 tx_drive_strength;
+};
+
+struct qca807x_gpio_priv {
+ struct phy_device *phy;
+};
+
+struct qca807x_priv {
+ bool dac_full_amplitude;
+ bool dac_full_bias_current;
+ bool dac_disable_bias_current_tweak;
+};
+
+static int qca807x_cable_test_start(struct phy_device *phydev)
+{
+ /* we do all the (time-consuming) work later */
+ return 0;
+}
+
+static int qca807x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
+ u16 *offload_trigger)
+{
+ /* Parsing specific to netdev trigger */
+ switch (phydev->port) {
+ case PORT_TP:
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA808X_LED_TX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA808X_LED_RX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED10_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED100_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED1000_ON;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
+ break;
+ case PORT_FIBRE:
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_TXACT_BLK_EN;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_RXACT_BLK_EN;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_100FX_ON_EN;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_1000BX_ON_EN;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_HDX_ON_EN;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA807X_LED_FIBER_FDX_ON_EN;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (rules && !*offload_trigger)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int qca807x_led_hw_control_enable(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 1)
+ return -EINVAL;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_enable(phydev, reg);
+}
+
+static int qca807x_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 offload_trigger = 0;
+
+ if (index > 1)
+ return -EINVAL;
+
+ return qca807x_led_parse_netdev(phydev, rules, &offload_trigger);
+}
+
+static int qca807x_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 reg, mask, offload_trigger = 0;
+ int ret;
+
+ if (index > 1)
+ return -EINVAL;
+
+ ret = qca807x_led_parse_netdev(phydev, rules, &offload_trigger);
+ if (ret)
+ return ret;
+
+ ret = qca807x_led_hw_control_enable(phydev, index);
+ if (ret)
+ return ret;
+
+ switch (phydev->port) {
+ case PORT_TP:
+ reg = QCA807X_MMD7_LED_CTRL(index);
+ mask = QCA808X_LED_PATTERN_MASK;
+ break;
+ case PORT_FIBRE:
+ /* HW control pattern bits are in LED FORCE reg */
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ mask = QCA807X_LED_FIBER_PATTERN_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg, mask,
+ offload_trigger);
+}
+
+static bool qca807x_led_hw_control_status(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 1)
+ return false;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_status(phydev, reg);
+}
+
+static int qca807x_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ u16 reg;
+ int val;
+
+ if (index > 1)
+ return -EINVAL;
+
+ /* Check if we have hw control enabled */
+ if (qca807x_led_hw_control_status(phydev, index))
+ return -EINVAL;
+
+ /* Parsing specific to netdev trigger */
+ switch (phydev->port) {
+ case PORT_TP:
+ reg = QCA807X_MMD7_LED_CTRL(index);
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA808X_LED_TX_BLINK)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA808X_LED_RX_BLINK)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA808X_LED_SPEED10_ON)
+ set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ if (val & QCA808X_LED_SPEED100_ON)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA808X_LED_SPEED1000_ON)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA808X_LED_HALF_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA808X_LED_FULL_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+ break;
+ case PORT_FIBRE:
+ /* HW control pattern bits are in LED FORCE reg */
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA807X_LED_FIBER_TXACT_BLK_EN)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA807X_LED_FIBER_RXACT_BLK_EN)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA807X_LED_FIBER_100FX_ON_EN)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA807X_LED_FIBER_1000BX_ON_EN)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA807X_LED_FIBER_HDX_ON_EN)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA807X_LED_FIBER_FDX_ON_EN)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qca807x_led_hw_control_reset(struct phy_device *phydev, u8 index)
+{
+ u16 reg, mask;
+
+ if (index > 1)
+ return -EINVAL;
+
+ switch (phydev->port) {
+ case PORT_TP:
+ reg = QCA807X_MMD7_LED_CTRL(index);
+ mask = QCA808X_LED_PATTERN_MASK;
+ break;
+ case PORT_FIBRE:
+ /* HW control pattern bits are in LED FORCE reg */
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ mask = QCA807X_LED_FIBER_PATTERN_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg, mask);
+}
+
+static int qca807x_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 reg;
+ int ret;
+
+ if (index > 1)
+ return -EINVAL;
+
+ /* If we are turning the LED off, reset any HW control rule */
+ if (!value) {
+ ret = qca807x_led_hw_control_reset(phydev, index);
+ if (ret)
+ return ret;
+ }
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_brightness_set(phydev, reg, value);
+}
+
+static int qca807x_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u16 reg;
+
+ if (index > 1)
+ return -EINVAL;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_blink_set(phydev, reg, delay_on, delay_off);
+}
+
+#ifdef CONFIG_GPIOLIB
+static int qca807x_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int qca807x_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
+ u16 reg;
+ int val;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(offset);
+ val = phy_read_mmd(priv->phy, MDIO_MMD_AN, reg);
+
+ return FIELD_GET(QCA807X_GPIO_FORCE_MODE_MASK, val);
+}
+
+static void qca807x_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct qca807x_gpio_priv *priv = gpiochip_get_data(gc);
+ u16 reg;
+ int val;
+
+ reg = QCA807X_MMD7_LED_FORCE_CTRL(offset);
+
+ val = phy_read_mmd(priv->phy, MDIO_MMD_AN, reg);
+ val &= ~QCA807X_GPIO_FORCE_MODE_MASK;
+ val |= QCA807X_GPIO_FORCE_EN;
+ val |= FIELD_PREP(QCA807X_GPIO_FORCE_MODE_MASK, value);
+
+ phy_write_mmd(priv->phy, MDIO_MMD_AN, reg, val);
+}
+
+static int qca807x_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ qca807x_gpio_set(gc, offset, value);
+
+ return 0;
+}
+
+static int qca807x_gpio(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca807x_gpio_priv *priv;
+ struct gpio_chip *gc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->phy = phydev;
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
+ gc->label = dev_name(dev);
+ gc->base = -1;
+ gc->ngpio = 2;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->can_sleep = true;
+ gc->get_direction = qca807x_gpio_get_direction;
+ gc->direction_output = qca807x_gpio_dir_out;
+ gc->get = qca807x_gpio_get;
+ gc->set = qca807x_gpio_set;
+
+ return devm_gpiochip_add_data(dev, gc, priv);
+}
+#endif
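
A hypothetical consumer sketch for the two LED pins the chip above exposes as GPIOs; the "phy-out" consumer name and the foo_ function are assumptions, only the gpiod API is standard:

static int foo_drive_phy_pin(struct device *dev)
{
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get(dev, "phy-out", GPIOD_OUT_LOW);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);

	/* Ends up writing the LED_FORCE bits via qca807x_gpio_set() */
	gpiod_set_value_cansleep(gpio, 1);

	return 0;
}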
+
+static int qca807x_read_fiber_status(struct phy_device *phydev)
+{
+ bool changed;
+ int ss, err;
+
+ err = genphy_c37_read_status(phydev, &changed);
+ if (err || !changed)
+ return err;
+
+ /* Read the QCA807x PHY-Specific Status register on the fiber page,
+ * which indicates the speed and duplex that the PHY is actually
+ * using, irrespective of whether we are in autoneg mode or not.
+ */
+ ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+ if (ss < 0)
+ return ss;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+ switch (FIELD_GET(AT803X_SS_SPEED_MASK, ss)) {
+ case AT803X_SS_SPEED_100:
+ phydev->speed = SPEED_100;
+ break;
+ case AT803X_SS_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ }
+
+ if (ss & AT803X_SS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+static int qca807x_read_status(struct phy_device *phydev)
+{
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported)) {
+ switch (phydev->port) {
+ case PORT_FIBRE:
+ return qca807x_read_fiber_status(phydev);
+ case PORT_TP:
+ return at803x_read_status(phydev);
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return at803x_read_status(phydev);
+}
+
+static int qca807x_phy_package_probe_once(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct qca807x_shared_priv *priv = shared->priv;
+ unsigned int tx_drive_strength;
+ const char *package_mode_name;
+
+ /* Default to 600 mW if not defined */
+ if (of_property_read_u32(shared->np, "qcom,tx-drive-strength-milliwatt",
+ &tx_drive_strength))
+ tx_drive_strength = 600;
+
+ switch (tx_drive_strength) {
+ case 140:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_140MV;
+ break;
+ case 160:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_160MV;
+ break;
+ case 180:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_180MV;
+ break;
+ case 200:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_200MV;
+ break;
+ case 220:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_220MV;
+ break;
+ case 240:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_240MV;
+ break;
+ case 260:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_260MV;
+ break;
+ case 280:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_280MV;
+ break;
+ case 300:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_300MV;
+ break;
+ case 320:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_320MV;
+ break;
+ case 400:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_400MV;
+ break;
+ case 500:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_500MV;
+ break;
+ case 600:
+ priv->tx_drive_strength = PQSGMII_TX_DRIVER_600MV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ priv->package_mode = PHY_INTERFACE_MODE_NA;
+ if (!of_property_read_string(shared->np, "qcom,package-mode",
+ &package_mode_name)) {
+ if (!strcasecmp(package_mode_name,
+ phy_modes(PHY_INTERFACE_MODE_PSGMII)))
+ priv->package_mode = PHY_INTERFACE_MODE_PSGMII;
+ else if (!strcasecmp(package_mode_name,
+ phy_modes(PHY_INTERFACE_MODE_QSGMII)))
+ priv->package_mode = PHY_INTERFACE_MODE_QSGMII;
+ else
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qca807x_phy_package_config_init_once(struct phy_device *phydev)
+{
+ struct phy_package_shared *shared = phydev->shared;
+ struct qca807x_shared_priv *priv = shared->priv;
+ int val, ret;
+
+ /* Make sure the PHY follows the PHY package mode if one is enforced */
+ if (priv->package_mode != PHY_INTERFACE_MODE_NA &&
+ phydev->interface != priv->package_mode)
+ return -EINVAL;
+
+ phy_lock_mdio_bus(phydev);
+
+ /* Set correct PHY package mode */
+ val = __phy_package_read(phydev, QCA807X_COMBO_ADDR,
+ QCA807X_CHIP_CONFIGURATION);
+ val &= ~QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK;
+ /* package_mode can be QSGMII or PSGMII; we validate
+ * this in probe_once.
+ * With package_mode set to NA, we default to PSGMII.
+ */
+ switch (priv->package_mode) {
+ case PHY_INTERFACE_MODE_QSGMII:
+ val |= QCA807X_CHIP_CONFIGURATION_MODE_QSGMII_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_PSGMII:
+ default:
+ val |= QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_ALL_COPPER;
+ }
+ ret = __phy_package_write(phydev, QCA807X_COMBO_ADDR,
+ QCA807X_CHIP_CONFIGURATION, val);
+ if (ret)
+ goto exit;
+
+ /* After mode change Serdes reset is required */
+ val = __phy_package_read(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG);
+ val &= ~PQSGMII_ANALOG_SW_RESET;
+ ret = __phy_package_write(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG, val);
+ if (ret)
+ goto exit;
+
+ msleep(SERDES_RESET_SLEEP);
+
+ val = __phy_package_read(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG);
+ val |= PQSGMII_ANALOG_SW_RESET;
+ ret = __phy_package_write(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_CTRL_REG, val);
+ if (ret)
+ goto exit;
+
+ /* Workaround to enable AZ transmitting ability */
+ val = __phy_package_read_mmd(phydev, QCA807X_PQSGMII_ADDR,
+ MDIO_MMD_PMAPMD, PQSGMII_MODE_CTRL);
+ val &= ~PQSGMII_MODE_CTRL_AZ_WORKAROUND_MASK;
+ ret = __phy_package_write_mmd(phydev, QCA807X_PQSGMII_ADDR,
+ MDIO_MMD_PMAPMD, PQSGMII_MODE_CTRL, val);
+ if (ret)
+ goto exit;
+
+ /* Set PQSGMII TX AMP strength */
+ val = __phy_package_read(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_DRIVE_CONTROL_1);
+ val &= ~PQSGMII_TX_DRIVER_MASK;
+ val |= FIELD_PREP(PQSGMII_TX_DRIVER_MASK, priv->tx_drive_strength);
+ ret = __phy_package_write(phydev, QCA807X_PQSGMII_ADDR,
+ PQSGMII_DRIVE_CONTROL_1, val);
+ if (ret)
+ goto exit;
+
+ /* Prevent PSGMII going into hibernation via PSGMII self test */
+ val = __phy_package_read_mmd(phydev, QCA807X_COMBO_ADDR,
+ MDIO_MMD_PCS, PQSGMII_MMD3_SERDES_CONTROL);
+ val &= ~BIT(1);
+ ret = __phy_package_write_mmd(phydev, QCA807X_COMBO_ADDR,
+ MDIO_MMD_PCS, PQSGMII_MMD3_SERDES_CONTROL, val);
+
+exit:
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+static int qca807x_sfp_insert(void *upstream, const struct sfp_eeprom_id *id)
+{
+ struct phy_device *phydev = upstream;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ phy_interface_t iface;
+ int ret;
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+
+ sfp_parse_support(phydev->sfp_bus, id, support, interfaces);
+ iface = sfp_select_interface(phydev->sfp_bus, support);
+
+ dev_info(&phydev->mdio.dev, "%s SFP module inserted\n", phy_modes(iface));
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_100BASEX:
+ /* Set PHY mode to PSGMII combo (1/4 copper + combo ports) mode */
+ ret = phy_modify(phydev,
+ QCA807X_CHIP_CONFIGURATION,
+ QCA807X_CHIP_CONFIGURATION_MODE_CFG_MASK,
+ QCA807X_CHIP_CONFIGURATION_MODE_PSGMII_FIBER);
+ /* Enable fiber mode autodetection (1000Base-X or 100Base-FX) */
+ ret = phy_set_bits_mmd(phydev,
+ MDIO_MMD_AN,
+ QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION,
+ QCA807X_MMD7_FIBER_MODE_AUTO_DETECTION_EN);
+ /* Select fiber page */
+ ret = phy_clear_bits(phydev,
+ QCA807X_CHIP_CONFIGURATION,
+ QCA807X_BT_BX_REG_SEL);
+
+ phydev->port = PORT_FIBRE;
+ break;
+ default:
+ dev_err(&phydev->mdio.dev, "Incompatible SFP module inserted\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void qca807x_sfp_remove(void *upstream)
+{
+ struct phy_device *phydev = upstream;
+
+ /* Select copper page */
+ phy_set_bits(phydev,
+ QCA807X_CHIP_CONFIGURATION,
+ QCA807X_BT_BX_REG_SEL);
+
+ phydev->port = PORT_TP;
+}
+
+static const struct sfp_upstream_ops qca807x_sfp_ops = {
+ .attach = phy_sfp_attach,
+ .detach = phy_sfp_detach,
+ .module_insert = qca807x_sfp_insert,
+ .module_remove = qca807x_sfp_remove,
+};
+
+static int qca807x_probe(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ struct qca807x_shared_priv *shared_priv;
+ struct device *dev = &phydev->mdio.dev;
+ struct phy_package_shared *shared;
+ struct qca807x_priv *priv;
+ int ret;
+
+ ret = devm_of_phy_package_join(dev, phydev, sizeof(*shared_priv));
+ if (ret)
+ return ret;
+
+ if (phy_package_probe_once(phydev)) {
+ ret = qca807x_phy_package_probe_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ shared = phydev->shared;
+ shared_priv = shared->priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dac_full_amplitude = of_property_read_bool(node, "qcom,dac-full-amplitude");
+ priv->dac_full_bias_current = of_property_read_bool(node, "qcom,dac-full-bias-current");
+ priv->dac_disable_bias_current_tweak = of_property_read_bool(node,
+ "qcom,dac-disable-bias-current-tweak");
+
+#if IS_ENABLED(CONFIG_GPIOLIB)
+ /* Make sure the leds node and the gpio-controller property are not
+ * both present, to prevent registering LEDs whose use would conflict
+ * with the GPIO controller.
+ */
+ if (of_find_property(node, "leds", NULL) &&
+ of_find_property(node, "gpio-controller", NULL)) {
+ phydev_err(phydev, "Invalid property detected. LEDs and gpio-controller are mutually exclusive.");
+ return -EINVAL;
+ }
+
+ /* Do not register a GPIO controller unless flagged for it */
+ if (of_property_read_bool(node, "gpio-controller")) {
+ ret = qca807x_gpio(phydev);
+ if (ret)
+ return ret;
+ }
+#endif
+
+ /* Attach the SFP bus on the combo port */
+ if (phy_read(phydev, QCA807X_CHIP_CONFIGURATION)) {
+ ret = phy_sfp_probe(phydev, &qca807x_sfp_ops);
+ if (ret)
+ return ret;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->advertising);
+ }
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca807x_config_init(struct phy_device *phydev)
+{
+ struct qca807x_priv *priv = phydev->priv;
+ u16 control_dac;
+ int ret;
+
+ if (phy_package_init_once(phydev)) {
+ ret = qca807x_phy_package_config_init_once(phydev);
+ if (ret)
+ return ret;
+ }
+
+ control_dac = phy_read_mmd(phydev, MDIO_MMD_AN,
+ QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH);
+ control_dac &= ~QCA807X_CONTROL_DAC_MASK;
+ if (!priv->dac_full_amplitude)
+ control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE;
+ if (!priv->dac_full_bias_current)
+ control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT;
+ if (!priv->dac_disable_bias_current_tweak)
+ control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK;
+ return phy_write_mmd(phydev, MDIO_MMD_AN,
+ QCA807X_MMD7_1000BASE_T_POWER_SAVE_PER_CABLE_LENGTH,
+ control_dac);
+}
+
+static struct phy_driver qca807x_drivers[] = {
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_QCA8072),
+ .name = "Qualcomm QCA8072",
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_GBIT_FEATURES */
+ .probe = qca807x_probe,
+ .config_init = qca807x_config_init,
+ .read_status = qca807x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .soft_reset = genphy_soft_reset,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .cable_test_start = qca807x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ },
+ {
+ PHY_ID_MATCH_EXACT(PHY_ID_QCA8075),
+ .name = "Qualcomm QCA8075",
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_GBIT_FEATURES */
+ .probe = qca807x_probe,
+ .config_init = qca807x_config_init,
+ .read_status = qca807x_read_status,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .soft_reset = genphy_soft_reset,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .cable_test_start = qca807x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ .led_brightness_set = qca807x_led_brightness_set,
+ .led_blink_set = qca807x_led_blink_set,
+ .led_hw_is_supported = qca807x_led_hw_is_supported,
+ .led_hw_control_set = qca807x_led_hw_control_set,
+ .led_hw_control_get = qca807x_led_hw_control_get,
+ },
+};
+module_phy_driver(qca807x_drivers);
+
+static struct mdio_device_id __maybe_unused qca807x_tbl[] = {
+ { PHY_ID_MATCH_EXACT(PHY_ID_QCA8072) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_QCA8075) },
+ { }
+};
+
+MODULE_AUTHOR("Robert Marko <robert.marko@sartura.hr>");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Qualcomm QCA807x PHY driver");
+MODULE_DEVICE_TABLE(mdio, qca807x_tbl);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c
new file mode 100644
index 000000000000..5048304ccc9e
--- /dev/null
+++ b/drivers/net/phy/qcom/qca808x.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include "qcom.h"
+
+/* ADC threshold */
+#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80
+#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0)
+#define QCA808X_ADC_THRESHOLD_80MV 0
+#define QCA808X_ADC_THRESHOLD_100MV 0xf0
+#define QCA808X_ADC_THRESHOLD_200MV 0x0f
+#define QCA808X_ADC_THRESHOLD_300MV 0xff
+
+/* CLD control */
+#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7 0x8007
+#define QCA808X_8023AZ_AFE_CTRL_MASK GENMASK(8, 4)
+#define QCA808X_8023AZ_AFE_EN 0x90
+
+/* AZ control */
+#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL 0x8008
+#define QCA808X_MMD3_AZ_TRAINING_VAL 0x1c32
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB 0x8014
+#define QCA808X_MSE_THRESHOLD_20DB_VALUE 0x529
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB 0x800E
+#define QCA808X_MSE_THRESHOLD_17DB_VALUE 0x341
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB 0x801E
+#define QCA808X_MSE_THRESHOLD_27DB_VALUE 0x419
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB 0x8020
+#define QCA808X_MSE_THRESHOLD_28DB_VALUE 0x341
+
+#define QCA808X_PHY_MMD7_TOP_OPTION1 0x901c
+#define QCA808X_TOP_OPTION1_DATA 0x0
+
+#define QCA808X_PHY_MMD3_DEBUG_1 0xa100
+#define QCA808X_MMD3_DEBUG_1_VALUE 0x9203
+#define QCA808X_PHY_MMD3_DEBUG_2 0xa101
+#define QCA808X_MMD3_DEBUG_2_VALUE 0x48ad
+#define QCA808X_PHY_MMD3_DEBUG_3 0xa103
+#define QCA808X_MMD3_DEBUG_3_VALUE 0x1698
+#define QCA808X_PHY_MMD3_DEBUG_4 0xa105
+#define QCA808X_MMD3_DEBUG_4_VALUE 0x8001
+#define QCA808X_PHY_MMD3_DEBUG_5 0xa106
+#define QCA808X_MMD3_DEBUG_5_VALUE 0x1111
+#define QCA808X_PHY_MMD3_DEBUG_6 0xa011
+#define QCA808X_MMD3_DEBUG_6_VALUE 0x5f85
+
+/* master/slave seed config */
+#define QCA808X_PHY_DEBUG_LOCAL_SEED 9
+#define QCA808X_MASTER_SLAVE_SEED_ENABLE BIT(1)
+#define QCA808X_MASTER_SLAVE_SEED_CFG GENMASK(12, 2)
+#define QCA808X_MASTER_SLAVE_SEED_RANGE 0x32
+
+/* Hibernation yields lower power consumption compared to normal operation.
+ * When the copper cable is unplugged, the PHY enters hibernation mode in
+ * about 10s.
+ */
+#define QCA808X_DBG_AN_TEST 0xb
+#define QCA808X_HIBERNATION_EN BIT(15)
+
+#define QCA808X_MMD7_LED2_CTRL 0x8074
+#define QCA808X_MMD7_LED2_FORCE_CTRL 0x8075
+#define QCA808X_MMD7_LED1_CTRL 0x8076
+#define QCA808X_MMD7_LED1_FORCE_CTRL 0x8077
+#define QCA808X_MMD7_LED0_CTRL 0x8078
+#define QCA808X_MMD7_LED_CTRL(x) (0x8078 - ((x) * 2))
+
+#define QCA808X_MMD7_LED0_FORCE_CTRL 0x8079
+#define QCA808X_MMD7_LED_FORCE_CTRL(x) (0x8079 - ((x) * 2))
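+/* Illustrative decode of the two macros above (for LED index 0..2):
+ *   QCA808X_MMD7_LED_CTRL(0/1/2)       -> 0x8078/0x8076/0x8074
+ *   QCA808X_MMD7_LED_FORCE_CTRL(0/1/2) -> 0x8079/0x8077/0x8075
+ * matching the per-LED defines listed explicitly above.
+ */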
+
+#define QCA808X_MMD7_LED_POLARITY_CTRL 0x901a
+/* QSDK writes 0x46 to this reg by default, which sets BIT 6 to make
+ * the LEDs active high. It's not clear what BIT 3 and BIT 4 do.
+ */
+#define QCA808X_LED_ACTIVE_HIGH BIT(6)
+
+/* QCA808X 1G chip type */
+#define QCA808X_PHY_MMD7_CHIP_TYPE 0x901d
+#define QCA808X_PHY_CHIP_TYPE_1G BIT(0)
+
+#define QCA8081_PHY_SERDES_MMD1_FIFO_CTRL 0x9072
+#define QCA8081_PHY_FIFO_RSTN BIT(11)
+
+#define QCA8081_PHY_ID 0x004dd101
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA808X PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_LICENSE("GPL");
+
+struct qca808x_priv {
+ int led_polarity_mode;
+};
+
+static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable fast retrain */
+ ret = genphy_c45_fast_retrain(phydev, true);
+ if (ret)
+ return ret;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
+ QCA808X_TOP_OPTION1_DATA);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
+ QCA808X_MSE_THRESHOLD_20DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
+ QCA808X_MSE_THRESHOLD_17DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
+ QCA808X_MSE_THRESHOLD_27DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
+ QCA808X_MSE_THRESHOLD_28DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
+ QCA808X_MMD3_DEBUG_1_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
+ QCA808X_MMD3_DEBUG_4_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
+ QCA808X_MMD3_DEBUG_5_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
+ QCA808X_MMD3_DEBUG_3_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
+ QCA808X_MMD3_DEBUG_6_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
+ QCA808X_MMD3_DEBUG_2_VALUE);
+
+ return 0;
+}
+
+static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
+{
+ u16 seed_value;
+
+ if (!enable)
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+ QCA808X_MASTER_SLAVE_SEED_ENABLE, 0);
+
+ seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
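+	/* Note: seed_value is a random number below 0x32 (50); the write
+	 * below places it in the SEED_CFG field (bits 12:2) and sets the
+	 * enable bit at the same time.
+	 */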
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+ QCA808X_MASTER_SLAVE_SEED_CFG | QCA808X_MASTER_SLAVE_SEED_ENABLE,
+ FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value) |
+ QCA808X_MASTER_SLAVE_SEED_ENABLE);
+}
+
+static bool qca808x_is_prefer_master(struct phy_device *phydev)
+{
+ return (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_FORCE) ||
+ (phydev->master_slave_get == MASTER_SLAVE_CFG_MASTER_PREFERRED);
+}
+
+static bool qca808x_has_fast_retrain_or_slave_seed(struct phy_device *phydev)
+{
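+	/* Fast retrain and the slave seed logic only apply to the 2.5G
+	 * capable chips, so 2500baseT support is used as the indicator.
+	 */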
+ return linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+}
+
+static bool qca808x_is_1g_only(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_CHIP_TYPE);
+ if (ret < 0)
+ return true;
+
+ return !!(QCA808X_PHY_CHIP_TYPE_1G & ret);
+}
+
+static void qca808x_fill_possible_interfaces(struct phy_device *phydev)
+{
+ unsigned long *possible = phydev->possible_interfaces;
+
+ __set_bit(PHY_INTERFACE_MODE_SGMII, possible);
+
+ if (!qca808x_is_1g_only(phydev))
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, possible);
+}
+
+static int qca808x_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca808x_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Init LED polarity mode to -1 */
+ priv->led_polarity_mode = -1;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca808x_config_init(struct phy_device *phydev)
+{
+ struct qca808x_priv *priv = phydev->priv;
+ int ret;
+
+ /* Default to LED Active High if active-low not in DT */
+ if (priv->led_polarity_mode == -1) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN,
+ QCA808X_MMD7_LED_POLARITY_CTRL,
+ QCA808X_LED_ACTIVE_HIGH);
+ if (ret)
+ return ret;
+ }
+
+	/* Activate ADC and VGA on 802.3az for the 1000M and 100M links */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
+ QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
+ if (ret)
+ return ret;
+
+	/* Adjust the threshold on 802.3az for the 1000M link */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ QCA808X_PHY_MMD3_AZ_TRAINING_CTRL,
+ QCA808X_MMD3_AZ_TRAINING_VAL);
+ if (ret)
+ return ret;
+
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+		/* Configure the fast retrain for the 2500M link */
+ ret = qca808x_phy_fast_retrain_config(phydev);
+ if (ret)
+ return ret;
+
+ ret = genphy_read_master_slave(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (!qca808x_is_prefer_master(phydev)) {
+			/* Enable the seed and configure a lower random seed
+			 * so the PHY links in slave mode.
+			 */
+ ret = qca808x_phy_ms_seed_enable(phydev, true);
+ if (ret)
+ return ret;
+ }
+ }
+
+ qca808x_fill_possible_interfaces(phydev);
+
+	/* Configure the ADC threshold as 100mV for the 10M link */
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
+ QCA808X_ADC_THRESHOLD_MASK,
+ QCA808X_ADC_THRESHOLD_100MV);
+}
+
+static int qca808x_read_status(struct phy_device *phydev)
+{
+ struct at803x_ss_mask ss_mask = { 0 };
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+ if (ret < 0)
+ return ret;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
+ ret & MDIO_AN_10GBT_STAT_LP2_5G);
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+	/* qca8081 uses different bits for the speed value than at803x */
+ ss_mask.speed_mask = QCA808X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(QCA808X_SS_SPEED_MASK);
+ ret = at803x_read_specific_status(phydev, ss_mask);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->link) {
+ if (phydev->speed == SPEED_2500)
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ else
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ } else {
+		/* Generate the seed as a lower random value so the PHY links
+		 * as SLAVE easily, except when a master/slave configuration
+		 * fault is detected or master mode is preferred.
+		 *
+		 * The reason for not putting this code into the function
+		 * link_change_notify is the corner case where the link partner
+		 * is also a qca8081 PHY and both seeds are configured to the
+		 * same value: the link can't come up and no link change
+		 * occurs.
+		 */
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev)) {
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR ||
+ qca808x_is_prefer_master(phydev)) {
+ qca808x_phy_ms_seed_enable(phydev, false);
+ } else {
+ qca808x_phy_ms_seed_enable(phydev, true);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int qca808x_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (qca808x_has_fast_retrain_or_slave_seed(phydev))
+ ret = qca808x_phy_ms_seed_enable(phydev, true);
+
+ return ret;
+}
+
+static int qca808x_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+	/* Perform CDT with the following configuration:
+	 * 1. disable hibernation.
+	 * 2. force the PHY to work in MDI mode.
+	 * 3. force the PHY to work in 1000BaseT.
+	 * 4. configure the thresholds.
+	 */
+
+ ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_config_mdix(phydev, ETH_TP_MDI);
+ if (ret < 0)
+ return ret;
+
+	/* Forcing 1000base-T requires configuring both PMA/PMD and MII_BMCR */
+ phydev->duplex = DUPLEX_FULL;
+ phydev->speed = SPEED_1000;
+ ret = genphy_c45_pma_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* configure the thresholds for open, short, pair ok test */
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
+
+ return 0;
+}
+
+static int qca808x_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_c45_pma_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+	/* The autoneg ability is not reported in bit 3 of MMD7.1,
+	 * but it is supported by the qca808x PHY, so we add it here
+	 * manually.
+	 */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+
+	/* On the qca8081 1G version of the chip, the 2500baseT ability is
+	 * still reported in bit 0 of MMD1.21, so we need to remove it
+	 * manually if bit 0 of MMD7.0x901d indicates the 1G chip.
+	 */
+ if (qca808x_is_1g_only(phydev))
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported);
+
+ return 0;
+}
+
+static int qca808x_config_aneg(struct phy_device *phydev)
+{
+ int phy_ctrl = 0;
+ int ret;
+
+ ret = at803x_prepare_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+	/* In force mode the MII_BMCR reg also needs to be configured,
+	 * so the genphy_config_aneg is needed as well.
+	 */
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ genphy_c45_pma_setup_forced(phydev);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
+ phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
+ if (ret < 0)
+ return ret;
+
+ return __genphy_config_aneg(phydev, ret);
+}
+
+static void qca808x_link_change_notify(struct phy_device *phydev)
+{
+	/* Assert the SGMII interface FIFO on link down and deassert it on
+	 * link up; the interface device address is always the PHY address
+	 * plus 1.
+	 */
+ mdiobus_c45_modify_changed(phydev->mdio.bus, phydev->mdio.addr + 1,
+ MDIO_MMD_PMAPMD, QCA8081_PHY_SERDES_MMD1_FIFO_CTRL,
+ QCA8081_PHY_FIFO_RSTN,
+ phydev->link ? QCA8081_PHY_FIFO_RSTN : 0);
+}
+
+static int qca808x_led_parse_netdev(struct phy_device *phydev, unsigned long rules,
+ u16 *offload_trigger)
+{
+ /* Parsing specific to netdev trigger */
+ if (test_bit(TRIGGER_NETDEV_TX, &rules))
+ *offload_trigger |= QCA808X_LED_TX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_RX, &rules))
+ *offload_trigger |= QCA808X_LED_RX_BLINK;
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED10_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED100_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED1000_ON;
+ if (test_bit(TRIGGER_NETDEV_LINK_2500, &rules))
+ *offload_trigger |= QCA808X_LED_SPEED2500_ON;
+ if (test_bit(TRIGGER_NETDEV_HALF_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_HALF_DUPLEX_ON;
+ if (test_bit(TRIGGER_NETDEV_FULL_DUPLEX, &rules))
+ *offload_trigger |= QCA808X_LED_FULL_DUPLEX_ON;
+
+ if (rules && !*offload_trigger)
+ return -EOPNOTSUPP;
+
+	/* Enable BLINK_CHECK_BYPASS by default to make the LED blink even
+	 * if the duplex or speed condition doesn't match.
+	 */
+ *offload_trigger |= QCA808X_LED_BLINK_CHECK_BYPASS;
+
+ return 0;
+}
+
+static int qca808x_led_hw_control_enable(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_enable(phydev, reg);
+}
+
+static int qca808x_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 offload_trigger = 0;
+
+ if (index > 2)
+ return -EINVAL;
+
+ return qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+}
+
+static int qca808x_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 reg, offload_trigger = 0;
+ int ret;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ ret = qca808x_led_parse_netdev(phydev, rules, &offload_trigger);
+ if (ret)
+ return ret;
+
+ ret = qca808x_led_hw_control_enable(phydev, index);
+ if (ret)
+ return ret;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_PATTERN_MASK,
+ offload_trigger);
+}
+
+static bool qca808x_led_hw_control_status(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return false;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_hw_control_status(phydev, reg);
+}
+
+static int qca808x_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ u16 reg;
+ int val;
+
+ if (index > 2)
+ return -EINVAL;
+
+ /* Check if we have hw control enabled */
+ if (qca808x_led_hw_control_status(phydev, index))
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ if (val & QCA808X_LED_TX_BLINK)
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ if (val & QCA808X_LED_RX_BLINK)
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ if (val & QCA808X_LED_SPEED10_ON)
+ set_bit(TRIGGER_NETDEV_LINK_10, rules);
+ if (val & QCA808X_LED_SPEED100_ON)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+ if (val & QCA808X_LED_SPEED1000_ON)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+ if (val & QCA808X_LED_SPEED2500_ON)
+ set_bit(TRIGGER_NETDEV_LINK_2500, rules);
+ if (val & QCA808X_LED_HALF_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_HALF_DUPLEX, rules);
+ if (val & QCA808X_LED_FULL_DUPLEX_ON)
+ set_bit(TRIGGER_NETDEV_FULL_DUPLEX, rules);
+
+ return 0;
+}
+
+static int qca808x_led_hw_control_reset(struct phy_device *phydev, u8 index)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_CTRL(index);
+
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_PATTERN_MASK);
+}
+
+static int qca808x_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ u16 reg;
+ int ret;
+
+ if (index > 2)
+ return -EINVAL;
+
+ if (!value) {
+ ret = qca808x_led_hw_control_reset(phydev, index);
+ if (ret)
+ return ret;
+ }
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_brightness_set(phydev, reg, value);
+}
+
+static int qca808x_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u16 reg;
+
+ if (index > 2)
+ return -EINVAL;
+
+ reg = QCA808X_MMD7_LED_FORCE_CTRL(index);
+ return qca808x_led_reg_blink_set(phydev, reg, delay_on, delay_off);
+}
+
+static int qca808x_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes)
+{
+ struct qca808x_priv *priv = phydev->priv;
+ bool active_low = false;
+ u32 mode;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ active_low = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+	/* PHY polarity is global and can't be set per LED.
+	 * To detect this, check whether the last requested polarity mode
+	 * matches the new one.
+	 */
+ if (priv->led_polarity_mode >= 0 &&
+ priv->led_polarity_mode != active_low) {
+ phydev_err(phydev, "PHY polarity is global. Mismatched polarity on different LED\n");
+ return -EINVAL;
+ }
+
+ /* Save the last PHY polarity mode */
+ priv->led_polarity_mode = active_low;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN,
+ QCA808X_MMD7_LED_POLARITY_CTRL,
+ QCA808X_LED_ACTIVE_HIGH,
+ active_low ? 0 : QCA808X_LED_ACTIVE_HIGH);
+}
+
+static struct phy_driver qca808x_driver[] = {
+{
+ /* Qualcomm QCA8081 */
+ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
+ .name = "Qualcomm QCA8081",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = qca808x_probe,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .get_features = qca808x_get_features,
+ .config_aneg = qca808x_config_aneg,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_status = qca808x_read_status,
+ .config_init = qca808x_config_init,
+ .soft_reset = qca808x_soft_reset,
+ .cable_test_start = qca808x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
+ .link_change_notify = qca808x_link_change_notify,
+ .led_brightness_set = qca808x_led_brightness_set,
+ .led_blink_set = qca808x_led_blink_set,
+ .led_hw_is_supported = qca808x_led_hw_is_supported,
+ .led_hw_control_set = qca808x_led_hw_control_set,
+ .led_hw_control_get = qca808x_led_hw_control_get,
+ .led_polarity_set = qca808x_led_polarity_set,
+}, };
+
+module_phy_driver(qca808x_driver);
+
+static struct mdio_device_id __maybe_unused qca808x_tbl[] = {
+ { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qca808x_tbl);
diff --git a/drivers/net/phy/qcom/qca83xx.c b/drivers/net/phy/qcom/qca83xx.c
new file mode 100644
index 000000000000..5d083ef0250e
--- /dev/null
+++ b/drivers/net/phy/qcom/qca83xx.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include "qcom.h"
+
+#define AT803X_DEBUG_REG_3C 0x3C
+
+#define AT803X_DEBUG_REG_GREEN 0x3D
+#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
+
+#define MDIO_AZ_DEBUG 0x800D
+
+#define QCA8327_A_PHY_ID 0x004dd033
+#define QCA8327_B_PHY_ID 0x004dd034
+#define QCA8337_PHY_ID 0x004dd036
+#define QCA8K_PHY_ID_MASK 0xffffffff
+
+#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
+
+static struct at803x_hw_stat qca83xx_hw_stats[] = {
+ { "phy_idle_errors", 0xa, GENMASK(7, 0), PHY},
+ { "phy_receive_errors", 0x15, GENMASK(15, 0), PHY},
+ { "eee_wake_errors", 0x16, GENMASK(15, 0), MMD},
+};
+
+struct qca83xx_priv {
+ u64 stats[ARRAY_SIZE(qca83xx_hw_stats)];
+};
+
+MODULE_DESCRIPTION("Qualcomm Atheros QCA83XX PHY driver");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
+
+static int qca83xx_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(qca83xx_hw_stats);
+}
+
+static void qca83xx_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++) {
+ strscpy(data + i * ETH_GSTRING_LEN,
+ qca83xx_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static u64 qca83xx_get_stat(struct phy_device *phydev, int i)
+{
+ struct at803x_hw_stat stat = qca83xx_hw_stats[i];
+ struct qca83xx_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
+ if (stat.access_type == MMD)
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, stat.reg);
+ else
+ val = phy_read(phydev, stat.reg);
+
+ if (val < 0) {
+ ret = U64_MAX;
+ } else {
+ val = val & stat.mask;
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
+
+static void qca83xx_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qca83xx_hw_stats); i++)
+ data[i] = qca83xx_get_stat(phydev, i);
+}
+
+static int qca83xx_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct qca83xx_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static int qca83xx_config_init(struct phy_device *phydev)
+{
+ u8 switch_revision;
+
+ switch_revision = phydev->dev_flags & QCA8K_DEVFLAGS_REVISION_MASK;
+
+ switch (switch_revision) {
+ case 1:
+ /* For 100M waveform */
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
+ /* Turn on Gigabit clock */
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
+ break;
+
+ case 2:
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0x0);
+ fallthrough;
+ case 4:
+ phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
+ break;
+ }
+
+	/* Following the original QCA source code, set the port to prefer master */
+ phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+
+ return 0;
+}
+
+static int qca8327_config_init(struct phy_device *phydev)
+{
+	/* QCA8327 requires the DAC amplitude adjustment for 100m to be set
+	 * to +6%. Disable it on init and enable it only at 100m speed,
+	 * following the original QCA source code.
+	 */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+
+ return qca83xx_config_init(phydev);
+}
+
+static void qca83xx_link_change_notify(struct phy_device *phydev)
+{
+	/* Set DAC amplitude adjustment to +6% for 100m while the link is running */
+ if (phydev->state == PHY_RUNNING) {
+ if (phydev->speed == SPEED_100)
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN,
+ QCA8327_DEBUG_MANU_CTRL_EN);
+ } else {
+ /* Reset DAC Amplitude adjustment */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+ }
+}
+
+static int qca83xx_resume(struct phy_device *phydev)
+{
+ int ret, val;
+
+ /* Skip reset if not suspended */
+ if (!phydev->suspended)
+ return 0;
+
+	/* Reinit the port, resetting the values set by suspend */
+ qca83xx_config_init(phydev);
+
+ /* Reset the port on port resume */
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+
+	/* On resume from suspend the switch executes a reset and
+	 * restarts auto-negotiation. Wait for the reset to complete.
+	 */
+ ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 50000, 600000, true);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static int qca83xx_suspend(struct phy_device *phydev)
+{
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
+ AT803X_DEBUG_GATE_CLK_IN1000, 0);
+
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
+ AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
+
+ return 0;
+}
+
+static int qca8337_suspend(struct phy_device *phydev)
+{
+	/* Only QCA8337 supports actual suspend. */
+ genphy_suspend(phydev);
+
+ return qca83xx_suspend(phydev);
+}
+
+static int qca8327_suspend(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+	/* QCA8327 causes port unreliability when PHY suspend
+	 * is set.
+	 */
+ mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+ phy_modify(phydev, MII_BMCR, mask, 0);
+
+ return qca83xx_suspend(phydev);
+}
+
+static struct phy_driver qca83xx_driver[] = {
+{
+ /* QCA8337 */
+ .phy_id = QCA8337_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8337 internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca83xx_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8337_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-A from switch QCA8327-AL1A */
+ .phy_id = QCA8327_A_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-A internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca8327_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8327_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-B from switch QCA8327-BL1A */
+ .phy_id = QCA8327_B_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-B internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = qca83xx_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca8327_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = qca83xx_get_sset_count,
+ .get_strings = qca83xx_get_strings,
+ .get_stats = qca83xx_get_stats,
+ .suspend = qca8327_suspend,
+ .resume = qca83xx_resume,
+}, };
+
+module_phy_driver(qca83xx_driver);
+
+static struct mdio_device_id __maybe_unused qca83xx_tbl[] = {
+ { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, qca83xx_tbl);
diff --git a/drivers/net/phy/qcom/qcom-phy-lib.c b/drivers/net/phy/qcom/qcom-phy-lib.c
new file mode 100644
index 000000000000..d28815ef56bb
--- /dev/null
+++ b/drivers/net/phy/qcom/qcom-phy-lib.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/phy.h>
+#include <linux/module.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool_netlink.h>
+
+#include "qcom.h"
+
+MODULE_DESCRIPTION("Qualcomm PHY driver Common Functions");
+MODULE_AUTHOR("Matus Ujhelyi");
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_LICENSE("GPL");
+
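+/* The "debug" registers are accessed indirectly: the register number is
+ * first written to AT803X_DEBUG_ADDR (0x1D), then the data is read from
+ * or written to AT803X_DEBUG_DATA (0x1E), as the three helpers below do.
+ */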
+int at803x_debug_reg_read(struct phy_device *phydev, u16 reg)
+{
+ int ret;
+
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ return phy_read(phydev, AT803X_DEBUG_DATA);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_read);
+
+int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+ u16 clear, u16 set)
+{
+ u16 val;
+ int ret;
+
+ ret = at803x_debug_reg_read(phydev, reg);
+ if (ret < 0)
+ return ret;
+
+ val = ret & 0xffff;
+ val &= ~clear;
+ val |= set;
+
+ return phy_write(phydev, AT803X_DEBUG_DATA, val);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_mask);
+
+int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data)
+{
+ int ret;
+
+ ret = phy_write(phydev, AT803X_DEBUG_ADDR, reg);
+ if (ret < 0)
+ return ret;
+
+ return phy_write(phydev, AT803X_DEBUG_DATA, data);
+}
+EXPORT_SYMBOL_GPL(at803x_debug_reg_write);
+
+int at803x_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int ret, irq_enabled;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ struct net_device *ndev = phydev->attached_dev;
+ const u8 *mac;
+ unsigned int i;
+ static const unsigned int offsets[] = {
+ AT803X_LOC_MAC_ADDR_32_47_OFFSET,
+ AT803X_LOC_MAC_ADDR_16_31_OFFSET,
+ AT803X_LOC_MAC_ADDR_0_15_OFFSET,
+ };
+
+ if (!ndev)
+ return -ENODEV;
+
+ mac = (const u8 *)ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ for (i = 0; i < 3; i++)
+ phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
+ mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
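+		/* The loop above writes the MAC high-to-low: e.g. for
+		 * 00:11:22:33:44:55, offset 32_47 gets 0x0011, 16_31 gets
+		 * 0x2233 and 0_15 gets 0x4455.
+		 */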
+
+ /* Enable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
+ if (ret)
+ return ret;
+ } else {
+ /* Disable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
+ if (ret)
+ return ret;
+ }
+
+ /* Clear WOL status */
+ ret = phy_read(phydev, AT803X_INTR_STATUS);
+ if (ret < 0)
+ return ret;
+
+	/* Check if interrupts other than WOL were triggered while the PHY is
+	 * in interrupt mode; only the interrupts enabled by AT803X_INTR_ENABLE
+	 * can be passed up to the interrupt pin.
+	 */
+ irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (irq_enabled < 0)
+ return irq_enabled;
+
+ irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
+ if (ret & irq_enabled && !phy_polling_mode(phydev))
+ phy_trigger_machine(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_set_wol);
+
+void at803x_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int value;
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (value < 0)
+ return;
+
+ if (value & AT803X_INTR_ENABLE_WOL)
+ wol->wolopts |= WAKE_MAGIC;
+}
+EXPORT_SYMBOL_GPL(at803x_get_wol);
+
+int at803x_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, AT803X_INTR_STATUS);
+
+ return (err < 0) ? err : 0;
+}
+EXPORT_SYMBOL_GPL(at803x_ack_interrupt);
+
+int at803x_config_intr(struct phy_device *phydev)
+{
+ int err;
+ int value;
+
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
+ value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
+ value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
+ value |= AT803X_INTR_ENABLE_LINK_FAIL;
+ value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
+
+ err = phy_write(phydev, AT803X_INTR_ENABLE, value);
+ } else {
+ err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
+ if (err)
+ return err;
+
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(at803x_config_intr);
+
+irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, int_enabled;
+
+ irq_status = phy_read(phydev, AT803X_INTR_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Read the current enabled interrupts */
+ int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (int_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* See if this was one of our enabled interrupts */
+ if (!(irq_status & int_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(at803x_handle_interrupt);
+
+int at803x_read_specific_status(struct phy_device *phydev,
+ struct at803x_ss_mask ss_mask)
+{
+ int ss;
+
+ /* Read the AT8035 PHY-Specific Status register, which indicates the
+ * speed and duplex that the PHY is actually using, irrespective of
+ * whether we are in autoneg mode or not.
+ */
+ ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+ if (ss < 0)
+ return ss;
+
+ if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+ int sfc, speed;
+
+ sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
+ if (sfc < 0)
+ return sfc;
+
+ speed = ss & ss_mask.speed_mask;
+ speed >>= ss_mask.speed_shift;
+
+ switch (speed) {
+ case AT803X_SS_SPEED_10:
+ phydev->speed = SPEED_10;
+ break;
+ case AT803X_SS_SPEED_100:
+ phydev->speed = SPEED_100;
+ break;
+ case AT803X_SS_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case QCA808X_SS_SPEED_2500:
+ phydev->speed = SPEED_2500;
+ break;
+ }
+ if (ss & AT803X_SS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (ss & AT803X_SS_MDIX)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+
+ switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
+ case AT803X_SFC_MANUAL_MDI:
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ break;
+ case AT803X_SFC_MANUAL_MDIX:
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ break;
+ case AT803X_SFC_AUTOMATIC_CROSSOVER:
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_read_specific_status);
+
+int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
+{
+ u16 val;
+
+ switch (ctrl) {
+ case ETH_TP_MDI:
+ val = AT803X_SFC_MANUAL_MDI;
+ break;
+ case ETH_TP_MDI_X:
+ val = AT803X_SFC_MANUAL_MDIX;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = AT803X_SFC_AUTOMATIC_CROSSOVER;
+ break;
+ default:
+ return 0;
+ }
+
+ return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
+ AT803X_SFC_MDI_CROSSOVER_MODE_M,
+ FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
+}
+EXPORT_SYMBOL_GPL(at803x_config_mdix);
+
+int at803x_prepare_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
+ if (ret < 0)
+ return ret;
+
+	/* Changes of the mdix bits are disruptive to normal operation;
+	 * therefore any changes to these registers must be followed by a
+	 * software reset to take effect.
+	 */
+ if (ret == 1) {
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_prepare_config_aneg);
+
+int at803x_read_status(struct phy_device *phydev)
+{
+ struct at803x_ss_mask ss_mask = { 0 };
+ int err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ err = genphy_read_lpa(phydev);
+ if (err < 0)
+ return err;
+
+ ss_mask.speed_mask = AT803X_SS_SPEED_MASK;
+ ss_mask.speed_shift = __bf_shf(AT803X_SS_SPEED_MASK);
+ err = at803x_read_specific_status(phydev, ss_mask);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+ phy_resolve_aneg_pause(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(at803x_read_status);
+
+static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
+{
+ int val;
+
+ val = phy_read(phydev, AT803X_SMART_SPEED);
+ if (val < 0)
+ return val;
+
+ if (val & AT803X_SMART_SPEED_ENABLE)
+ *d = FIELD_GET(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, val) + 2;
+ else
+ *d = DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int at803x_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 mask, set;
+ int ret;
+
+ switch (cnt) {
+ case DOWNSHIFT_DEV_DEFAULT_COUNT:
+ cnt = AT803X_DEFAULT_DOWNSHIFT;
+ fallthrough;
+ case AT803X_MIN_DOWNSHIFT ... AT803X_MAX_DOWNSHIFT:
+ set = AT803X_SMART_SPEED_ENABLE |
+ AT803X_SMART_SPEED_BYPASS_TIMER |
+ FIELD_PREP(AT803X_SMART_SPEED_RETRY_LIMIT_MASK, cnt - 2);
+ mask = AT803X_SMART_SPEED_RETRY_LIMIT_MASK;
+ break;
+ case DOWNSHIFT_DEV_DISABLE:
+ set = 0;
+ mask = AT803X_SMART_SPEED_ENABLE |
+ AT803X_SMART_SPEED_BYPASS_TIMER;
+ break;
+ default:
+ return -EINVAL;
+ }
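+	/* The 3-bit retry limit field stores cnt - 2, so the supported
+	 * downshift counts 2..9 map to register values 0..7 (the inverse
+	 * of the + 2 in at803x_get_downshift() above).
+	 */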
+
+ ret = phy_modify_changed(phydev, AT803X_SMART_SPEED, mask, set);
+
+	/* After changing the smart speed settings, we need to perform a
+	 * software reset; use phy_init_hw() to make sure we reapply any
+	 * values which might have been lost during the software reset.
+	 */
+ if (ret == 1)
+ ret = phy_init_hw(phydev);
+
+ return ret;
+}
+
+int at803x_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return at803x_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(at803x_get_tunable);
+
+int at803x_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return at803x_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(at803x_set_tunable);
+
+int at803x_cdt_fault_length(int dt)
+{
+	/* According to the datasheet the distance to the fault is
+	 * DELTA_TIME * 0.824 meters.
+	 *
+	 * The author suspects the correct formula is:
+	 *
+	 * fault_distance = DELTA_TIME * (c * VF) / 125MHz / 2
+	 *
+	 * where c is the speed of light, VF is the velocity factor of
+	 * the twisted pair cable, 125MHz is the counter frequency and
+	 * we need to divide by 2 because the hardware measures the
+	 * round trip time to the fault and back to the PHY.
+	 *
+	 * With a VF of 0.69 we get the factor 0.824 mentioned in the
+	 * datasheet.
+	 */
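+	/* Worked example (illustrative): DELTA_TIME = 100 yields
+	 * (100 * 824) / 10 = 8240, i.e. 82.4m expressed in the
+	 * centimeters that the ethtool cable test API expects.
+	 */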
+ return (dt * 824) / 10;
+}
+EXPORT_SYMBOL_GPL(at803x_cdt_fault_length);
+
+int at803x_cdt_start(struct phy_device *phydev, u32 cdt_start)
+{
+ return phy_write(phydev, AT803X_CDT, cdt_start);
+}
+EXPORT_SYMBOL_GPL(at803x_cdt_start);
+
+int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+ u32 cdt_en)
+{
+ int val, ret;
+
+ /* One test run takes about 25ms */
+ ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
+ !(val & cdt_en),
+ 30000, 100000, true);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(at803x_cdt_wait_for_completion);
+
+static bool qca808x_cdt_fault_length_valid(int cdt_code)
+{
+ switch (cdt_code) {
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int qca808x_cable_test_result_trans(int cdt_code)
+{
+ switch (cdt_code) {
+ case QCA808X_CDT_STATUS_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case QCA808X_CDT_STATUS_STAT_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case QCA808X_CDT_STATUS_STAT_SAME_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN:
+ case QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+ case QCA808X_CDT_STATUS_STAT_FAIL:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair,
+ int result)
+{
+ int val;
+ u32 cdt_length_reg = 0;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
+ break;
+ default:
+ return -EINVAL;
+ }
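+	/* The diag register reports the same-pair short length in its high
+	 * byte and the cross-pair short length in its low byte (see the
+	 * QCA808X_CDT_DIAG_LENGTH_* masks).
+	 */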
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
+ if (val < 0)
+ return val;
+
+ if (result == ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT)
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_SAME_SHORT, val);
+ else
+ val = FIELD_GET(QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT, val);
+
+ return at803x_cdt_fault_length(val);
+}
+
+static int qca808x_cable_test_get_pair_status(struct phy_device *phydev, u8 pair,
+ u16 status)
+{
+ int length, result;
+ u16 pair_code;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, status);
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ pair_code = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, status);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ result = qca808x_cable_test_result_trans(pair_code);
+ ethnl_cable_test_result(phydev, pair, result);
+
+ if (qca808x_cdt_fault_length_valid(pair_code)) {
+ length = qca808x_cdt_fault_length(phydev, pair, result);
+ ethnl_cable_test_fault_length(phydev, pair, length);
+ }
+
+ return 0;
+}
+
+int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
+{
+ int ret, val;
+
+ *finished = false;
+
+ val = QCA808X_CDT_ENABLE_TEST |
+ QCA808X_CDT_LENGTH_UNIT;
+ ret = at803x_cdt_start(phydev, val);
+ if (ret)
+ return ret;
+
+ ret = at803x_cdt_wait_for_completion(phydev, QCA808X_CDT_ENABLE_TEST);
+ if (ret)
+ return ret;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
+ if (val < 0)
+ return val;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_A, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_B, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_C, val);
+ if (ret)
+ return ret;
+
+ ret = qca808x_cable_test_get_pair_status(phydev, ETHTOOL_A_CABLE_PAIR_D, val);
+ if (ret)
+ return ret;
+
+ *finished = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca808x_cable_test_get_status);
+
+int qca808x_led_reg_hw_control_enable(struct phy_device *phydev, u16 reg)
+{
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN);
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_hw_control_enable);
+
+bool qca808x_led_reg_hw_control_status(struct phy_device *phydev, u16 reg)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, reg);
+ return !(val & QCA808X_LED_FORCE_EN);
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_hw_control_status);
+
+int qca808x_led_reg_brightness_set(struct phy_device *phydev,
+ u16 reg, enum led_brightness value)
+{
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+ QCA808X_LED_FORCE_EN | (value ? QCA808X_LED_FORCE_ON :
+ QCA808X_LED_FORCE_OFF));
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_brightness_set);
+
+int qca808x_led_reg_blink_set(struct phy_device *phydev, u16 reg,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ int ret;
+
+ /* Set blink to 50% off, 50% on at 4Hz by default */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, QCA808X_MMD7_LED_GLOBAL,
+ QCA808X_LED_BLINK_FREQ_MASK | QCA808X_LED_BLINK_DUTY_MASK,
+ QCA808X_LED_BLINK_FREQ_4HZ | QCA808X_LED_BLINK_DUTY_50_50);
+ if (ret)
+ return ret;
+
+ /* We use BLINK_1 for normal blinking */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_AN, reg,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_MODE_MASK,
+ QCA808X_LED_FORCE_EN | QCA808X_LED_FORCE_BLINK_1);
+ if (ret)
+ return ret;
+
+	/* 4Hz means a 250ms full period; with the 50/50 duty cycle set
+	 * above, each on/off phase lasts 125ms.
+	 */
+ *delay_on = 250 / 2;
+ *delay_off = 250 / 2;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qca808x_led_reg_blink_set);
diff --git a/drivers/net/phy/qcom/qcom.h b/drivers/net/phy/qcom/qcom.h
new file mode 100644
index 000000000000..4bb541728846
--- /dev/null
+++ b/drivers/net/phy/qcom/qcom.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10
+#define AT803X_SFC_ASSERT_CRS BIT(11)
+#define AT803X_SFC_FORCE_LINK BIT(10)
+#define AT803X_SFC_MDI_CROSSOVER_MODE_M GENMASK(6, 5)
+#define AT803X_SFC_AUTOMATIC_CROSSOVER 0x3
+#define AT803X_SFC_MANUAL_MDIX 0x1
+#define AT803X_SFC_MANUAL_MDI 0x0
+#define AT803X_SFC_SQE_TEST BIT(2)
+#define AT803X_SFC_POLARITY_REVERSAL BIT(1)
+#define AT803X_SFC_DISABLE_JABBER BIT(0)
+
+#define AT803X_SPECIFIC_STATUS 0x11
+#define AT803X_SS_SPEED_MASK GENMASK(15, 14)
+#define AT803X_SS_SPEED_1000 2
+#define AT803X_SS_SPEED_100 1
+#define AT803X_SS_SPEED_10 0
+#define AT803X_SS_DUPLEX BIT(13)
+#define AT803X_SS_SPEED_DUPLEX_RESOLVED BIT(11)
+#define AT803X_SS_MDIX BIT(6)
+
+#define QCA808X_SS_SPEED_MASK GENMASK(9, 7)
+#define QCA808X_SS_SPEED_2500 4
+
+#define AT803X_INTR_ENABLE 0x12
+#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
+#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14)
+#define AT803X_INTR_ENABLE_DUPLEX_CHANGED BIT(13)
+#define AT803X_INTR_ENABLE_PAGE_RECEIVED BIT(12)
+#define AT803X_INTR_ENABLE_LINK_FAIL BIT(11)
+#define AT803X_INTR_ENABLE_LINK_SUCCESS BIT(10)
+#define AT803X_INTR_ENABLE_LINK_FAIL_BX BIT(8)
+#define AT803X_INTR_ENABLE_LINK_SUCCESS_BX BIT(7)
+#define AT803X_INTR_ENABLE_WIRESPEED_DOWNGRADE BIT(5)
+#define AT803X_INTR_ENABLE_POLARITY_CHANGED BIT(1)
+#define AT803X_INTR_ENABLE_WOL BIT(0)
+
+#define AT803X_INTR_STATUS 0x13
+
+#define AT803X_SMART_SPEED 0x14
+#define AT803X_SMART_SPEED_ENABLE BIT(5)
+#define AT803X_SMART_SPEED_RETRY_LIMIT_MASK GENMASK(4, 2)
+#define AT803X_SMART_SPEED_BYPASS_TIMER BIT(1)
+
+#define AT803X_CDT 0x16
+#define AT803X_CDT_MDI_PAIR_MASK GENMASK(9, 8)
+#define AT803X_CDT_ENABLE_TEST BIT(0)
+#define AT803X_CDT_STATUS 0x1c
+#define AT803X_CDT_STATUS_STAT_NORMAL 0
+#define AT803X_CDT_STATUS_STAT_SHORT 1
+#define AT803X_CDT_STATUS_STAT_OPEN 2
+#define AT803X_CDT_STATUS_STAT_FAIL 3
+#define AT803X_CDT_STATUS_STAT_MASK GENMASK(9, 8)
+#define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0)
+
+#define QCA808X_CDT_ENABLE_TEST BIT(15)
+#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
+#define QCA808X_CDT_STATUS BIT(11)
+#define QCA808X_CDT_LENGTH_UNIT BIT(10)
+
+#define QCA808X_MMD3_CDT_STATUS 0x8064
+#define QCA808X_MMD3_CDT_DIAG_PAIR_A 0x8065
+#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
+#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
+#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
+#define QCA808X_CDT_DIAG_LENGTH_SAME_SHORT GENMASK(15, 8)
+#define QCA808X_CDT_DIAG_LENGTH_CROSS_SHORT GENMASK(7, 0)
+
+#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
+#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
+#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
+#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
+
+#define QCA808X_CDT_STATUS_STAT_TYPE GENMASK(1, 0)
+#define QCA808X_CDT_STATUS_STAT_FAIL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 0)
+#define QCA808X_CDT_STATUS_STAT_NORMAL FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 1)
+#define QCA808X_CDT_STATUS_STAT_SAME_OPEN FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 2)
+#define QCA808X_CDT_STATUS_STAT_SAME_SHORT FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_TYPE, 3)
+
+#define QCA808X_CDT_STATUS_STAT_MDI GENMASK(3, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI1 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 1)
+#define QCA808X_CDT_STATUS_STAT_MDI2 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 2)
+#define QCA808X_CDT_STATUS_STAT_MDI3 FIELD_PREP_CONST(QCA808X_CDT_STATUS_STAT_MDI, 3)
+
+/* The SAME_NORMAL codes are the MDI values with the type field set to 0 */
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI1
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI1)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI2
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI2_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI2)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_NORMAL QCA808X_CDT_STATUS_STAT_MDI3
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_OPEN (QCA808X_CDT_STATUS_STAT_SAME_OPEN |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+#define QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI3_SAME_SHORT (QCA808X_CDT_STATUS_STAT_SAME_SHORT |\
+ QCA808X_CDT_STATUS_STAT_MDI3)
+
+/* Added for reference; the busy state should already be handled by wait_for_completion */
+#define QCA808X_CDT_STATUS_STAT_BUSY (BIT(1) | BIT(3))
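+/* Decoding example (illustrative): a 4-bit pair code of 0x7 is
+ * STAT_MDI1 (0x4) | STAT_SAME_SHORT (0x3), i.e.
+ * QCA808X_CDT_STATUS_STAT_CROSS_SHORT_WITH_MDI1_SAME_SHORT.
+ */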
+
+#define QCA808X_MMD7_LED_GLOBAL 0x8073
+#define QCA808X_LED_BLINK_1 GENMASK(11, 6)
+#define QCA808X_LED_BLINK_2 GENMASK(5, 0)
+/* Values are the same for both BLINK_1 and BLINK_2 */
+#define QCA808X_LED_BLINK_FREQ_MASK GENMASK(5, 3)
+#define QCA808X_LED_BLINK_FREQ_2HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x0)
+#define QCA808X_LED_BLINK_FREQ_4HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x1)
+#define QCA808X_LED_BLINK_FREQ_8HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x2)
+#define QCA808X_LED_BLINK_FREQ_16HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x3)
+#define QCA808X_LED_BLINK_FREQ_32HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x4)
+#define QCA808X_LED_BLINK_FREQ_64HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x5)
+#define QCA808X_LED_BLINK_FREQ_128HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x6)
+#define QCA808X_LED_BLINK_FREQ_256HZ FIELD_PREP(QCA808X_LED_BLINK_FREQ_MASK, 0x7)
+#define QCA808X_LED_BLINK_DUTY_MASK GENMASK(2, 0)
+#define QCA808X_LED_BLINK_DUTY_50_50 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x0)
+#define QCA808X_LED_BLINK_DUTY_75_25 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x1)
+#define QCA808X_LED_BLINK_DUTY_25_75 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x2)
+#define QCA808X_LED_BLINK_DUTY_33_67 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x3)
+#define QCA808X_LED_BLINK_DUTY_67_33 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x4)
+#define QCA808X_LED_BLINK_DUTY_17_83 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x5)
+#define QCA808X_LED_BLINK_DUTY_83_17 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x6)
+#define QCA808X_LED_BLINK_DUTY_8_92 FIELD_PREP(QCA808X_LED_BLINK_DUTY_MASK, 0x7)
+
+/* LED hw control pattern is the same for every LED */
+#define QCA808X_LED_PATTERN_MASK GENMASK(15, 0)
+#define QCA808X_LED_SPEED2500_ON BIT(15)
+#define QCA808X_LED_SPEED2500_BLINK BIT(14)
+/* Follow blink trigger even if duplex or speed condition doesn't match */
+#define QCA808X_LED_BLINK_CHECK_BYPASS BIT(13)
+#define QCA808X_LED_FULL_DUPLEX_ON BIT(12)
+#define QCA808X_LED_HALF_DUPLEX_ON BIT(11)
+#define QCA808X_LED_TX_BLINK BIT(10)
+#define QCA808X_LED_RX_BLINK BIT(9)
+#define QCA808X_LED_TX_ON_10MS BIT(8)
+#define QCA808X_LED_RX_ON_10MS BIT(7)
+#define QCA808X_LED_SPEED1000_ON BIT(6)
+#define QCA808X_LED_SPEED100_ON BIT(5)
+#define QCA808X_LED_SPEED10_ON BIT(4)
+#define QCA808X_LED_COLLISION_BLINK BIT(3)
+#define QCA808X_LED_SPEED1000_BLINK BIT(2)
+#define QCA808X_LED_SPEED100_BLINK BIT(1)
+#define QCA808X_LED_SPEED10_BLINK BIT(0)
+
+/* LED force ctrl is the same for every LED.
+ * No documentation exists for this, not even an internal one
+ * under NDA, as QCOM only gives info about configuring the
+ * hw control pattern rules and doesn't indicate any way
+ * to force the LED into a specific mode.
+ * These defines come from reverse engineering and testing and may
+ * lack some info, or some info may not be entirely correct.
+ * For basic LED control and hw control these findings are
+ * enough to support LED control in all the required APIs.
+ *
+ * On comparing this with the qca807x implementation, it was
+ * found to be a 1:1 match, confirming all the reverse engineering
+ * done. Further specification of the force mode and the blink
+ * modes was also found.
+ */
+#define QCA808X_LED_FORCE_EN BIT(15)
+#define QCA808X_LED_FORCE_MODE_MASK GENMASK(14, 13)
+#define QCA808X_LED_FORCE_BLINK_1 FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x3)
+#define QCA808X_LED_FORCE_BLINK_2 FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x2)
+#define QCA808X_LED_FORCE_ON FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x1)
+#define QCA808X_LED_FORCE_OFF FIELD_PREP(QCA808X_LED_FORCE_MODE_MASK, 0x0)
+
+#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
+#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
+#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
+
+#define AT803X_DEBUG_ADDR 0x1D
+#define AT803X_DEBUG_DATA 0x1E
+
+#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
+#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
+#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
+#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
+
+#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
+#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+
+#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
+#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
+#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
+#define AT803X_DEBUG_HIB_CTRL_PS_HIB_EN BIT(15)
+
+#define AT803X_DEFAULT_DOWNSHIFT 5
+#define AT803X_MIN_DOWNSHIFT 2
+#define AT803X_MAX_DOWNSHIFT 9
+
+enum stat_access_type {
+ PHY,
+ MMD
+};
+
+struct at803x_hw_stat {
+ const char *string;
+ u8 reg;
+ u32 mask;
+ enum stat_access_type access_type;
+};
+
+struct at803x_ss_mask {
+ u16 speed_mask;
+ u8 speed_shift;
+};
+
+int at803x_debug_reg_read(struct phy_device *phydev, u16 reg);
+int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg,
+ u16 clear, u16 set);
+int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data);
+int at803x_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol);
+void at803x_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol);
+int at803x_ack_interrupt(struct phy_device *phydev);
+int at803x_config_intr(struct phy_device *phydev);
+irqreturn_t at803x_handle_interrupt(struct phy_device *phydev);
+int at803x_read_specific_status(struct phy_device *phydev,
+ struct at803x_ss_mask ss_mask);
+int at803x_config_mdix(struct phy_device *phydev, u8 ctrl);
+int at803x_prepare_config_aneg(struct phy_device *phydev);
+int at803x_read_status(struct phy_device *phydev);
+int at803x_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data);
+int at803x_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data);
+int at803x_cdt_fault_length(int dt);
+int at803x_cdt_start(struct phy_device *phydev, u32 cdt_start);
+int at803x_cdt_wait_for_completion(struct phy_device *phydev,
+ u32 cdt_en);
+int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished);
+int qca808x_led_reg_hw_control_enable(struct phy_device *phydev, u16 reg);
+bool qca808x_led_reg_hw_control_status(struct phy_device *phydev, u16 reg);
+int qca808x_led_reg_brightness_set(struct phy_device *phydev,
+ u16 reg, enum led_brightness value);
+int qca808x_led_reg_blink_set(struct phy_device *phydev, u16 reg,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 894172a3e15f..1fa70427b2a2 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -57,14 +57,6 @@
#define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12)
-#define RTL_SUPPORTS_5000FULL BIT(14)
-#define RTL_SUPPORTS_2500FULL BIT(13)
-#define RTL_SUPPORTS_10000FULL BIT(0)
-#define RTL_ADV_2500FULL BIT(7)
-#define RTL_LPADV_10000FULL BIT(11)
-#define RTL_LPADV_5000FULL BIT(6)
-#define RTL_LPADV_2500FULL BIT(5)
-
#define RTL9000A_GINMR 0x14
#define RTL9000A_GINMR_LINK_STATUS BIT(4)
@@ -421,9 +413,11 @@ static int rtl8211f_config_init(struct phy_device *phydev)
ERR_PTR(ret));
return ret;
}
+
+ return genphy_soft_reset(phydev);
}
- return genphy_soft_reset(phydev);
+ return 0;
}
static int rtl821x_suspend(struct phy_device *phydev)
@@ -674,11 +668,11 @@ static int rtl822x_get_features(struct phy_device *phydev)
return val;
linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->supported, val & RTL_SUPPORTS_2500FULL);
+ phydev->supported, val & MDIO_PMA_SPEED_2_5G);
linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
- phydev->supported, val & RTL_SUPPORTS_5000FULL);
+ phydev->supported, val & MDIO_PMA_SPEED_5G);
linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- phydev->supported, val & RTL_SUPPORTS_10000FULL);
+ phydev->supported, val & MDIO_SPEED_10G);
return genphy_read_abilities(phydev);
}
@@ -688,14 +682,12 @@ static int rtl822x_config_aneg(struct phy_device *phydev)
int ret = 0;
if (phydev->autoneg == AUTONEG_ENABLE) {
- u16 adv2500 = 0;
-
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->advertising))
- adv2500 = RTL_ADV_2500FULL;
+ u16 adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising);
ret = phy_modify_paged_changed(phydev, 0xa5d, 0x12,
- RTL_ADV_2500FULL, adv2500);
+ MDIO_AN_10GBT_CTRL_ADV2_5G |
+ MDIO_AN_10GBT_CTRL_ADV5G,
+ adv);
if (ret < 0)
return ret;
}
@@ -713,12 +705,8 @@ static int rtl822x_read_status(struct phy_device *phydev)
if (lpadv < 0)
return lpadv;
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- phydev->lp_advertising, lpadv & RTL_LPADV_10000FULL);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
- phydev->lp_advertising, lpadv & RTL_LPADV_5000FULL);
- linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- phydev->lp_advertising, lpadv & RTL_LPADV_2500FULL);
+ mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising,
+ lpadv);
}
ret = genphy_read_status(phydev);
@@ -736,7 +724,7 @@ static bool rtlgen_supports_2_5gbps(struct phy_device *phydev)
val = phy_read(phydev, 0x13);
phy_write(phydev, RTL821x_PAGE_SELECT, 0);
- return val >= 0 && val & RTL_SUPPORTS_2500FULL;
+ return val >= 0 && val & MDIO_PMA_SPEED_2_5G;
}
static int rtlgen_match_phy_device(struct phy_device *phydev)
@@ -1048,6 +1036,16 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ PHY_ID_MATCH_EXACT(0x001cc862),
+ .name = "RTL8251B 5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
.config_init = &rtl8366rb_config_init,
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 7fd9fe6a602b..7b1bc5fcef9b 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -22,7 +22,7 @@
struct gmii2rgmii {
struct phy_device *phy_dev;
- struct phy_driver *phy_drv;
+ const struct phy_driver *phy_drv;
struct phy_driver conv_phy_drv;
struct mdio_device *mdio;
};
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 40ce8abe6999..cc7d1113ece0 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1437,4 +1437,5 @@ static int __init plip_init (void)
module_init(plip_init);
module_exit(plip_cleanup_module);
+MODULE_DESCRIPTION("PLIP (parallel port) network module");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ppp/bsd_comp.c b/drivers/net/ppp/bsd_comp.c
index db0dc36d12e3..55954594e157 100644
--- a/drivers/net/ppp/bsd_comp.c
+++ b/drivers/net/ppp/bsd_comp.c
@@ -1166,5 +1166,6 @@ static void __exit bsdcomp_cleanup(void)
module_init(bsdcomp_init);
module_exit(bsdcomp_cleanup);
+MODULE_DESCRIPTION("PPP BSD-Compress compression module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_BSD_COMPRESS));
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 840da924708b..c33c3db3cc08 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -87,6 +87,7 @@ struct asyncppp {
static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
+MODULE_DESCRIPTION("PPP async serial channel module");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);
@@ -460,6 +461,10 @@ ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
case PPPIOCSMRU:
if (get_user(val, p))
break;
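+ /* Reject huge MRUs: the receive buffer is sized from this value */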
+ if (val > U16_MAX) {
+ err = -EINVAL;
+ break;
+ }
if (val < PPP_MRU)
val = PPP_MRU;
ap->mru = val;
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index e6d48e5c65a3..4d2ff63f2ee2 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -630,6 +630,7 @@ static void __exit deflate_cleanup(void)
module_init(deflate_init);
module_exit(deflate_cleanup);
+MODULE_DESCRIPTION("PPP Deflate compression module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE));
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT));
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0193af2d31c9..fe380fe196e7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -295,7 +295,9 @@ static void ppp_setup(struct net_device *dev);
static const struct net_device_ops ppp_netdev_ops;
-static struct class *ppp_class;
+static const struct class ppp_class = {
+ .name = "ppp",
+};
/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
@@ -1394,11 +1396,9 @@ static int __init ppp_init(void)
goto out_net;
}
- ppp_class = class_create("ppp");
- if (IS_ERR(ppp_class)) {
- err = PTR_ERR(ppp_class);
+ err = class_register(&ppp_class);
+ if (err)
goto out_chrdev;
- }
err = rtnl_link_register(&ppp_link_ops);
if (err) {
@@ -1407,12 +1407,12 @@ static int __init ppp_init(void)
}
/* not a big deal if we fail here :-) */
- device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
+ device_create(&ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
return 0;
out_class:
- class_destroy(ppp_class);
+ class_unregister(&ppp_class);
out_chrdev:
unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
@@ -1607,7 +1607,7 @@ static const struct net_device_ops ppp_netdev_ops = {
.ndo_fill_forward_path = ppp_fill_forward_path,
};
-static struct device_type ppp_type = {
+static const struct device_type ppp_type = {
.name = "ppp",
};
@@ -3549,8 +3549,8 @@ static void __exit ppp_cleanup(void)
pr_err("PPP: removing module but units remain!\n");
rtnl_link_unregister(&ppp_link_ops);
unregister_chrdev(PPP_MAJOR, "ppp");
- device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
- class_destroy(ppp_class);
+ device_destroy(&ppp_class, MKDEV(PPP_MAJOR, 0));
+ class_unregister(&ppp_class);
unregister_pernet_device(&ppp_net_ops);
}
@@ -3604,6 +3604,7 @@ EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
+MODULE_DESCRIPTION("Generic PPP layer driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
MODULE_ALIAS_RTNL_LINK("ppp");
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 52d05ce4a281..45bf59ac8f57 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -724,5 +724,6 @@ ppp_sync_cleanup(void)
module_init(ppp_sync_init);
module_exit(ppp_sync_cleanup);
+MODULE_DESCRIPTION("PPP synchronous TTY channel module");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_SYNC_PPP);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 8e7238e97d0a..2ea4f4890d23 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1007,26 +1007,21 @@ static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
struct sk_buff *skb;
int error = 0;
- if (sk->sk_state & PPPOX_BOUND) {
- error = -EIO;
- goto end;
- }
+ if (sk->sk_state & PPPOX_BOUND)
+ return -EIO;
skb = skb_recv_datagram(sk, flags, &error);
- if (error < 0)
- goto end;
+ if (!skb)
+ return error;
- if (skb) {
- total_len = min_t(size_t, total_len, skb->len);
- error = skb_copy_datagram_msg(skb, 0, m, total_len);
- if (error == 0) {
- consume_skb(skb);
- return total_len;
- }
+ total_len = min_t(size_t, total_len, skb->len);
+ error = skb_copy_datagram_msg(skb, 0, m, total_len);
+ if (error == 0) {
+ consume_skb(skb);
+ return total_len;
}
kfree_skb(skb);
-end:
return error;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f575f225d417..0a44bbdcfb7b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -25,7 +25,6 @@
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
-#include <generated/utsrelease.h>
#include <linux/if_team.h>
#define DRV_NAME "team"
@@ -2074,7 +2073,6 @@ static void team_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static int team_ethtool_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4a4f8c8e79fa..0b3f21cba552 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -54,6 +54,7 @@
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
+#include <linux/math.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
@@ -77,6 +78,7 @@
#include <net/ax25.h>
#include <net/rose.h>
#include <net/6lowpan.h>
+#include <net/rps.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
@@ -523,8 +525,7 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
struct tun_flow_entry *e;
- u32 txq = 0;
- u32 numqueues = 0;
+ u32 txq, numqueues;
numqueues = READ_ONCE(tun->numqueues);
@@ -534,8 +535,7 @@ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
tun_flow_save_rps_rxhash(e, txq);
txq = e->queue_index;
} else {
- /* use multiply and shift instead of expensive divide */
- txq = ((u64)txq * numqueues) >> 32;
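+ /* reciprocal_scale() keeps the multiply-and-shift mapping onto [0, numqueues) */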
+ txq = reciprocal_scale(txq, numqueues);
}
return txq;
@@ -653,6 +653,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun->tfiles[tun->numqueues - 1]);
ntfile = rtnl_dereference(tun->tfiles[index]);
ntfile->queue_index = index;
+ ntfile->xdp_rxq.queue_index = index;
rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
NULL);
@@ -977,20 +978,15 @@ static int tun_net_init(struct net_device *dev)
struct ifreq *ifr = tun->ifr;
int err;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- return -ENOMEM;
-
spin_lock_init(&tun->lock);
err = security_tun_dev_alloc_security(&tun->security);
- if (err < 0) {
- free_percpu(dev->tstats);
+ if (err < 0)
return err;
- }
tun_flow_init(tun);
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
@@ -1008,7 +1004,6 @@ static int tun_net_init(struct net_device *dev)
if (err < 0) {
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
- free_percpu(dev->tstats);
return err;
}
return 0;
@@ -1344,7 +1339,6 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_select_queue = tun_select_queue,
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_bpf = tun_xdp,
.ndo_xdp_xmit = tun_xdp_xmit,
.ndo_change_carrier = tun_net_change_carrier,
@@ -1927,7 +1921,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
- ret = do_xdp_generic(xdp_prog, skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
if (ret != XDP_PASS) {
rcu_read_unlock();
local_bh_enable();
@@ -2317,7 +2311,6 @@ static void tun_free_netdev(struct net_device *dev)
BUG_ON(!(list_empty(&tun->disabled)));
- free_percpu(dev->tstats);
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
__tun_set_ebpf(tun, &tun->steering_prog, NULL);
@@ -2517,7 +2510,7 @@ build:
skb_record_rx_queue(skb, tfile->queue_index);
if (skb_xdp) {
- ret = do_xdp_generic(xdp_prog, skb);
+ ret = do_xdp_generic(xdp_prog, &skb);
if (ret != XDP_PASS) {
ret = 0;
goto out;
@@ -3644,12 +3637,22 @@ static int tun_set_coalesce(struct net_device *dev,
return 0;
}
+static void tun_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ channels->combined_count = tun->numqueues;
+ channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1;
+}
+
static const struct ethtool_ops tun_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_drvinfo = tun_get_drvinfo,
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_channels = tun_get_channels,
.get_ts_info = ethtool_op_get_ts_info,
.get_coalesce = tun_get_coalesce,
.set_coalesce = tun_set_coalesce,
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3fd7dccf0f9c..3c360d4f0635 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -99,6 +99,7 @@ config USB_RTL8150
config USB_RTL8152
tristate "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
select MII
+ select PHYLIB
select CRC32
select CRYPTO
select CRYPTO_HASH
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d837c1887416..88e084534853 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -667,7 +667,7 @@ static int ax88179_set_link_ksettings(struct net_device *net,
}
static int
-ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_eee *data)
+ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_keee *data)
{
int val;
@@ -676,29 +676,29 @@ ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_eee *data)
MDIO_MMD_PCS);
if (val < 0)
return val;
- data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
+ mii_eee_cap1_mod_linkmode_t(data->supported, val);
/* Get advertisement EEE */
val = ax88179_phy_read_mmd_indirect(dev, MDIO_AN_EEE_ADV,
MDIO_MMD_AN);
if (val < 0)
return val;
- data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(data->advertised, val);
/* Get LP advertisement EEE */
val = ax88179_phy_read_mmd_indirect(dev, MDIO_AN_EEE_LPABLE,
MDIO_MMD_AN);
if (val < 0)
return val;
- data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(data->lp_advertised, val);
return 0;
}
static int
-ax88179_ethtool_set_eee(struct usbnet *dev, struct ethtool_eee *data)
+ax88179_ethtool_set_eee(struct usbnet *dev, struct ethtool_keee *data)
{
- u16 tmp16 = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
+ u16 tmp16 = linkmode_to_mii_eee_cap1_t(data->advertised);
return ax88179_phy_write_mmd_indirect(dev, MDIO_AN_EEE_ADV,
MDIO_MMD_AN, tmp16);
@@ -807,7 +807,7 @@ static void ax88179_enable_eee(struct usbnet *dev)
GMII_PHY_PAGE_SELECT, 2, &tmp16);
}
-static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
+static int ax88179_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct usbnet *dev = netdev_priv(net);
struct ax88179_data *priv = dev->driver_priv;
@@ -818,7 +818,7 @@ static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
return ax88179_ethtool_get_eee(dev, edata);
}
-static int ax88179_set_eee(struct net_device *net, struct ethtool_eee *edata)
+static int ax88179_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct usbnet *dev = netdev_priv(net);
struct ax88179_data *priv = dev->driver_priv;
@@ -1587,7 +1587,7 @@ static int ax88179_reset(struct usbnet *dev)
u16 *tmp16;
u8 *tmp;
struct ax88179_data *ax179_data = dev->driver_priv;
- struct ethtool_eee eee_data;
+ struct ethtool_keee eee_data;
tmp16 = (u16 *)buf;
tmp = (u8 *)buf;
@@ -1663,7 +1663,7 @@ static int ax88179_reset(struct usbnet *dev)
ax88179_disable_eee(dev);
ax88179_ethtool_get_eee(dev, &eee_data);
- eee_data.advertised = 0;
+ linkmode_zero(eee_data.advertised);
ax88179_ethtool_set_eee(dev, &eee_data);
/* Restart autoneg */
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index cd4083e0b3b9..e13e4920ee9b 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -339,7 +339,7 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
in6_dev = in6_dev_get(netdev);
if (!in6_dev)
goto out;
- is_router = !!in6_dev->cnf.forwarding;
+ is_router = !!READ_ONCE(in6_dev->cnf.forwarding);
in6_dev_put(in6_dev);
/* ipv6_stub != NULL if in6_dev_get returned an inet6_dev */
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 99ec1d4a972d..8b6d6a1b3c2e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -232,7 +232,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
err = dm_read_shared_word(dev, 1, loc, &res);
if (err < 0) {
netdev_err(dev->net, "MDIO read error: %d\n", err);
- return err;
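+ /* Do not propagate the negative error: callers treat the result as register data */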
+ return 0;
}
netdev_dbg(dev->net,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f088ea2ba6f3..1aeb36119d3f 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2465,7 +2465,7 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
}
}
-static struct device_type hso_type = {
+static const struct device_type hso_type = {
.name = "wwan",
};
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a6d653ff552a..80ee4fcdfb36 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1501,7 +1501,9 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
lan78xx_rx_urb_submit_all(dev);
+ local_bh_disable();
napi_schedule(&dev->napi);
+ local_bh_enable();
}
return 0;
@@ -1673,7 +1675,7 @@ static int lan78xx_set_wol(struct net_device *netdev,
return ret;
}
-static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
+static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct lan78xx_net *dev = netdev_priv(net);
struct phy_device *phydev = net->phydev;
@@ -1709,7 +1711,7 @@ exit:
return ret;
}
-static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
+static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct lan78xx_net *dev = netdev_priv(net);
int ret;
@@ -3033,7 +3035,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
if (dev->chipid == ID_REV_CHIP_ID_7801_)
buf &= ~MAC_CR_GMII_EN_;
- if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+ if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
+ dev->chipid == ID_REV_CHIP_ID_7850_) {
ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
if (!ret && sig != EEPROM_INDICATOR) {
/* Implies there is no external eeprom. Set mac speed */
@@ -3132,7 +3135,8 @@ static int lan78xx_open(struct net_device *net)
done:
mutex_unlock(&dev->dev_mutex);
- usb_autopm_put_interface(dev->intf);
+ if (ret < 0)
+ usb_autopm_put_interface(dev->intf);
return ret;
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0d0672d2a654..5d6aeb086fc7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -10,6 +10,7 @@
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
+#include <linux/phy.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
@@ -891,8 +892,8 @@ struct r8152 {
void (*up)(struct r8152 *tp);
void (*down)(struct r8152 *tp);
void (*unload)(struct r8152 *tp);
- int (*eee_get)(struct r8152 *tp, struct ethtool_eee *eee);
- int (*eee_set)(struct r8152 *tp, struct ethtool_eee *eee);
+ int (*eee_get)(struct r8152 *tp, struct ethtool_keee *eee);
+ int (*eee_set)(struct r8152 *tp, struct ethtool_keee *eee);
bool (*in_nway)(struct r8152 *tp);
void (*hw_phy_cfg)(struct r8152 *tp);
void (*autosuspend_en)(struct r8152 *tp, bool enable);
@@ -8922,32 +8923,31 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
-static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
+static int r8152_get_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
- u32 lp, adv, supported = 0;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u16 val;
val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- supported = mmd_eee_cap_to_ethtool_sup_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->supported, val);
val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- adv = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->advertised, val);
val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
- lp = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val);
eee->eee_enabled = tp->eee_en;
- eee->eee_active = !!(supported & adv & lp);
- eee->supported = supported;
- eee->advertised = tp->eee_adv;
- eee->lp_advertised = lp;
+
+ linkmode_and(common, eee->advertised, eee->lp_advertised);
+ eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common);
return 0;
}
-static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
+static int r8152_set_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
- u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
+ u16 val = linkmode_to_mii_eee_cap1_t(eee->advertised);
tp->eee_en = eee->eee_enabled;
tp->eee_adv = val;
@@ -8957,31 +8957,30 @@ static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
return 0;
}
-static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
+static int r8153_get_eee(struct r8152 *tp, struct ethtool_keee *eee)
{
- u32 lp, adv, supported = 0;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
u16 val;
val = ocp_reg_read(tp, OCP_EEE_ABLE);
- supported = mmd_eee_cap_to_ethtool_sup_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->supported, val);
val = ocp_reg_read(tp, OCP_EEE_ADV);
- adv = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->advertised, val);
val = ocp_reg_read(tp, OCP_EEE_LPABLE);
- lp = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(eee->lp_advertised, val);
eee->eee_enabled = tp->eee_en;
- eee->eee_active = !!(supported & adv & lp);
- eee->supported = supported;
- eee->advertised = tp->eee_adv;
- eee->lp_advertised = lp;
+
+ linkmode_and(common, eee->advertised, eee->lp_advertised);
+ eee->eee_active = phy_check_valid(tp->speed, tp->duplex, common);
return 0;
}
static int
-rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata)
+rtl_ethtool_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct r8152 *tp = netdev_priv(net);
int ret;
@@ -9008,7 +9007,7 @@ out:
}
static int
-rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *edata)
+rtl_ethtool_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
struct r8152 *tp = netdev_priv(net);
int ret;
@@ -10078,7 +10077,7 @@ static int rtl8152_cfgselector_choose_configuration(struct usb_device *udev)
* driver supports it.
*/
if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN)
- return 0;
+ return -ENODEV;
/* The vendor mode is not always config #1, so we need to find it out. */
c = udev->config;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index a530f20ee257..2fa46baa589e 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -2105,6 +2105,11 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long) &smsc95xx_info,
},
{
+ /* SYSTEC USB-SPEmodule1 10BASE-T1L Ethernet Device */
+ USB_DEVICE(0x0878, 0x1400),
+ .driver_info = (unsigned long)&smsc95xx_info,
+ },
+ {
/* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */
USB_DEVICE(0x184F, 0x0051),
.driver_info = (unsigned long)&smsc95xx_info,
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 143bd4ab160d..57947a5590cc 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -737,7 +737,9 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
data->eeprom_len = SR9800_EEPROM_LEN;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;
/* LED Setting Rule :
* AABB:CCDD
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 2d14b0d78541..e84efa661589 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1633,7 +1633,6 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_free_urb(dev->interrupt);
kfree(dev->padding_pkt);
- free_percpu(net->tstats);
free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@ -1645,7 +1644,6 @@ static const struct net_device_ops usbnet_netdev_ops = {
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_rx_mode = usbnet_set_rx_mode,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1654,11 +1652,11 @@ static const struct net_device_ops usbnet_netdev_ops = {
// precondition: never called in_interrupt
-static struct device_type wlan_type = {
+static const struct device_type wlan_type = {
.name = "wlan",
};
-static struct device_type wwan_type = {
+static const struct device_type wwan_type = {
.name = "wwan",
};
@@ -1710,10 +1708,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->rx_speed = SPEED_UNSET;
dev->tx_speed = SPEED_UNSET;
- net->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!net->tstats)
- goto out0;
-
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
init_waitqueue_head(&dev->wait);
@@ -1743,6 +1737,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
net->netdev_ops = &usbnet_netdev_ops;
net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
net->ethtool_ops = &usbnet_ethtool_ops;
+ net->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
// allow device-specific bind/init procedures
// NOTE net->name still not usable ...
@@ -1861,8 +1856,6 @@ out1:
*/
cancel_work_sync(&dev->kevent);
del_timer_sync(&dev->delay);
- free_percpu(net->tstats);
-out0:
free_netdev(net);
out:
return status;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 578e36ea1589..13d902462d8e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -729,80 +729,10 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
if (skb_shared(skb) || skb_head_is_locked(skb) ||
skb_shinfo(skb)->nr_frags ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
- u32 size, len, max_head_size, off, truesize, page_offset;
- struct sk_buff *nskb;
- struct page *page;
- int i, head_off;
- void *va;
-
- /* We need a private copy of the skb and data buffers since
- * the ebpf program can modify it. We segment the original skb
- * into order-0 pages without linearize it.
- *
- * Make sure we have enough space for linear and paged area
- */
- max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
- VETH_XDP_HEADROOM);
- if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
- goto drop;
-
- size = min_t(u32, skb->len, max_head_size);
- truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;
-
- /* Allocate skb head */
- va = page_pool_dev_alloc_va(rq->page_pool, &truesize);
- if (!va)
- goto drop;
-
- nskb = napi_build_skb(va, truesize);
- if (!nskb) {
- page_pool_free_va(rq->page_pool, va, true);
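+ /* skb_pp_cow_data() copies the skb into page-pool buffers with XDP headroom */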
+ if (skb_pp_cow_data(rq->page_pool, pskb, XDP_PACKET_HEADROOM))
goto drop;
- }
-
- skb_reserve(nskb, VETH_XDP_HEADROOM);
- skb_copy_header(nskb, skb);
- skb_mark_for_recycle(nskb);
-
- if (skb_copy_bits(skb, 0, nskb->data, size)) {
- consume_skb(nskb);
- goto drop;
- }
- skb_put(nskb, size);
- head_off = skb_headroom(nskb) - skb_headroom(skb);
- skb_headers_offset_update(nskb, head_off);
-
- /* Allocate paged area of new skb */
- off = size;
- len = skb->len - off;
-
- for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
- size = min_t(u32, len, PAGE_SIZE);
- truesize = size;
-
- page = page_pool_dev_alloc(rq->page_pool, &page_offset,
- &truesize);
- if (!page) {
- consume_skb(nskb);
- goto drop;
- }
-
- skb_add_rx_frag(nskb, i, page, page_offset, size,
- truesize);
- if (skb_copy_bits(skb, off,
- page_address(page) + page_offset,
- size)) {
- consume_skb(nskb);
- goto drop;
- }
-
- len -= size;
- off += size;
- }
-
- consume_skb(skb);
- skb = nskb;
+ skb = *pskb;
}
/* SKB "head" area always have tailroom for skb_shared_info */
@@ -1208,14 +1138,6 @@ static int veth_enable_xdp(struct net_device *dev)
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
return err;
}
-
- if (!veth_gro_requested(dev)) {
- /* user-space did not require GRO, but adding XDP
- * is supposed to get GRO working
- */
- dev->features |= NETIF_F_GRO;
- netdev_features_change(dev);
- }
}
}
@@ -1235,18 +1157,9 @@ static void veth_disable_xdp(struct net_device *dev)
for (i = 0; i < dev->real_num_rx_queues; i++)
rcu_assign_pointer(priv->rq[i].xdp_prog, NULL);
- if (!netif_running(dev) || !veth_gro_requested(dev)) {
+ if (!netif_running(dev) || !veth_gro_requested(dev))
veth_napi_del(dev);
- /* if user-space did not require GRO, since adding XDP
- * enabled it, clear it now
- */
- if (!veth_gro_requested(dev) && netif_running(dev)) {
- dev->features &= ~NETIF_F_GRO;
- netdev_features_change(dev);
- }
- }
-
veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}
@@ -1478,7 +1391,8 @@ static int veth_alloc_queues(struct net_device *dev)
struct veth_priv *priv = netdev_priv(dev);
int i;
- priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL_ACCOUNT);
+ priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq),
+ GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
if (!priv->rq)
return -ENOMEM;
@@ -1494,11 +1408,12 @@ static void veth_free_queues(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
- kfree(priv->rq);
+ kvfree(priv->rq);
}
static int veth_dev_init(struct net_device *dev)
{
+ netdev_lockdep_set_classes(dev);
return veth_alloc_queues(dev);
}
@@ -1530,7 +1445,7 @@ static int veth_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(priv->peer);
- iflink = peer ? peer->ifindex : 0;
+ iflink = peer ? READ_ONCE(peer->ifindex) : 0;
rcu_read_unlock();
return iflink;
@@ -1654,6 +1569,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
if (!old_prog) {
+ if (!veth_gro_requested(dev)) {
+ /* user-space did not require GRO, but adding
+ * XDP is supposed to get GRO working
+ */
+ dev->features |= NETIF_F_GRO;
+ netdev_features_change(dev);
+ }
+
peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
peer->max_mtu = max_mtu;
}
@@ -1669,6 +1592,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (dev->flags & IFF_UP)
veth_disable_xdp(dev);
+ /* adding XDP implicitly enabled GRO; if user-space did not
+ * request GRO itself, clear it now
+ */
+ if (!veth_gro_requested(dev)) {
+ dev->features &= ~NETIF_F_GRO;
+ netdev_features_change(dev);
+ }
+
if (peer) {
peer->hw_features |= NETIF_F_GSO_SOFTWARE;
peer->max_mtu = ETH_MAX_MTU;
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index b1bb1b04b664..a1ba5169ed5d 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -13,19 +13,6 @@
#define DEFAULT_MTU (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + \
sizeof(struct af_vsockmon_hdr))
-static int vsockmon_dev_init(struct net_device *dev)
-{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
- return 0;
-}
-
-static void vsockmon_dev_uninit(struct net_device *dev)
-{
- free_percpu(dev->lstats);
-}
-
struct vsockmon {
struct vsock_tap vt;
};
@@ -59,9 +46,6 @@ static void
vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
dev_lstats_read(dev, &stats->rx_packets, &stats->rx_bytes);
-
- stats->tx_packets = 0;
- stats->tx_bytes = 0;
}
static int vsockmon_is_valid_mtu(int new_mtu)
@@ -79,8 +63,6 @@ static int vsockmon_change_mtu(struct net_device *dev, int new_mtu)
}
static const struct net_device_ops vsockmon_ops = {
- .ndo_init = vsockmon_dev_init,
- .ndo_uninit = vsockmon_dev_uninit,
.ndo_open = vsockmon_open,
.ndo_stop = vsockmon_close,
.ndo_start_xmit = vsockmon_xmit,
@@ -112,6 +94,7 @@ static void vsockmon_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->mtu = DEFAULT_MTU;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
}
static struct rtnl_link_ops vsockmon_link_ops __read_mostly = {
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 16106e088c63..3495591a5c29 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2841,26 +2841,19 @@ static int vxlan_init(struct net_device *dev)
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_init(vxlan);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats) {
- err = -ENOMEM;
- goto err_vnigroup_uninit;
- }
-
err = gro_cells_init(&vxlan->gro_cells, dev);
if (err)
- goto err_free_percpu;
+ goto err_vnigroup_uninit;
err = vxlan_mdb_init(vxlan);
if (err)
goto err_gro_cells_destroy;
+ netdev_lockdep_set_classes(dev);
return 0;
err_gro_cells_destroy:
gro_cells_destroy(&vxlan->gro_cells);
-err_free_percpu:
- free_percpu(dev->tstats);
err_vnigroup_uninit:
if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
vxlan_vnigroup_uninit(vxlan);
@@ -2891,8 +2884,6 @@ static void vxlan_uninit(struct net_device *dev)
gro_cells_destroy(&vxlan->gro_cells);
vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
-
- free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
@@ -3223,7 +3214,6 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_rx_mode = vxlan_set_multicast_list,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -3247,13 +3237,12 @@ static const struct net_device_ops vxlan_netdev_raw_ops = {
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
/* Info for udev, that this is a virtual tunnel endpoint */
-static struct device_type vxlan_type = {
+static const struct device_type vxlan_type = {
.name = "vxlan",
};
@@ -3315,6 +3304,7 @@ static void vxlan_setup(struct net_device *dev)
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
INIT_LIST_HEAD(&vxlan->next);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
@@ -4826,55 +4816,43 @@ static __net_init int vxlan_init_net(struct net *net)
NULL);
}
-static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
+static void __net_exit vxlan_destroy_tunnels(struct vxlan_net *vn,
+ struct list_head *dev_to_kill)
{
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan, *next;
- struct net_device *dev, *aux;
-
- for_each_netdev_safe(net, dev, aux)
- if (dev->rtnl_link_ops == &vxlan_link_ops)
- unregister_netdevice_queue(dev, head);
-
- list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next) {
- /* If vxlan->dev is in the same netns, it has already been added
- * to the list by the previous loop.
- */
- if (!net_eq(dev_net(vxlan->dev), net))
- unregister_netdevice_queue(vxlan->dev, head);
- }
+ list_for_each_entry_safe(vxlan, next, &vn->vxlan_list, next)
+ vxlan_dellink(vxlan->dev, dev_to_kill);
}
-static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+static void __net_exit vxlan_exit_batch_rtnl(struct list_head *net_list,
+ struct list_head *dev_to_kill)
{
struct net *net;
- LIST_HEAD(list);
- unsigned int h;
+ ASSERT_RTNL();
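+ /* RTNL is held here; the core will unregister every device queued on dev_to_kill */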
list_for_each_entry(net, net_list, exit_list) {
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
- }
- rtnl_lock();
- list_for_each_entry(net, net_list, exit_list)
- vxlan_destroy_tunnels(net, &list);
+ __unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ vxlan_destroy_tunnels(vn, dev_to_kill);
+ }
+}
- list_for_each_entry(net, net_list, exit_list) {
- struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+static void __net_exit vxlan_exit_net(struct net *net)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ unsigned int h;
- for (h = 0; h < PORT_HASH_SIZE; ++h)
- WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
- }
+ for (h = 0; h < PORT_HASH_SIZE; ++h)
+ WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
}
static struct pernet_operations vxlan_net_ops = {
.init = vxlan_init_net,
- .exit_batch = vxlan_exit_batch_net,
+ .exit_batch_rtnl = vxlan_exit_batch_rtnl,
+ .exit = vxlan_exit_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
};
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 7dda87756d3f..31ab2136cdf1 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -197,6 +197,18 @@ config FARSYNC
To compile this driver as a module, choose M here: the
module will be called farsync.
+config FSL_QMC_HDLC
+ tristate "Freescale QMC HDLC support"
+ depends on HDLC
+ depends on CPM_QMC
+ help
+ HDLC support using the Freescale QUICC Multichannel Controller (QMC).
+
+ To compile this driver as a module, choose M here: the
+ module will be called fsl_qmc_hdlc.
+
+ If unsure, say N.
+
config FSL_UCC_HDLC
tristate "Freescale QUICC Engine HDLC support"
depends on HDLC
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 8119b49d1da9..00e9b7ee1e01 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_WANXL) += wanxl.o
obj-$(CONFIG_PCI200SYN) += pci200syn.o
obj-$(CONFIG_PC300TOO) += pc300too.o
obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o
+obj-$(CONFIG_FSL_QMC_HDLC) += fsl_qmc_hdlc.o
obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o
obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o
diff --git a/drivers/net/wan/framer/framer-core.c b/drivers/net/wan/framer/framer-core.c
index c04dc88bda6c..f547c22e26ac 100644
--- a/drivers/net/wan/framer/framer-core.c
+++ b/drivers/net/wan/framer/framer-core.c
@@ -18,7 +18,12 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-static struct class *framer_class;
+static void framer_release(struct device *dev);
+static const struct class framer_class = {
+ .name = "framer",
+ .dev_release = framer_release,
+};
+
static DEFINE_MUTEX(framer_provider_mutex);
static LIST_HEAD(framer_provider_list);
static DEFINE_IDA(framer_ida);
@@ -384,7 +389,7 @@ static struct framer_provider *framer_provider_of_lookup(const struct device_nod
return ERR_PTR(-EPROBE_DEFER);
}
-static struct framer *framer_of_get_from_provider(struct of_phandle_args *args)
+static struct framer *framer_of_get_from_provider(const struct of_phandle_args *args)
{
struct framer_provider *framer_provider;
struct framer *framer;
@@ -627,7 +632,7 @@ struct framer *framer_create(struct device *dev, struct device_node *node,
INIT_DELAYED_WORK(&framer->polling_work, framer_polling_work);
BLOCKING_INIT_NOTIFIER_HEAD(&framer->notifier_list);
- framer->dev.class = framer_class;
+ framer->dev.class = &framer_class;
framer->dev.parent = dev;
framer->dev.of_node = node ? node : dev->of_node;
framer->id = id;
@@ -735,12 +740,13 @@ EXPORT_SYMBOL_GPL(devm_framer_create);
* should provide a custom of_xlate function that reads the *args* and returns
* the appropriate framer.
*/
-struct framer *framer_provider_simple_of_xlate(struct device *dev, struct of_phandle_args *args)
+struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
{
struct class_dev_iter iter;
struct framer *framer;
- class_dev_iter_init(&iter, framer_class, NULL, NULL);
+ class_dev_iter_init(&iter, &framer_class, NULL, NULL);
while ((dev = class_dev_iter_next(&iter))) {
framer = dev_to_framer(dev);
if (args->np != framer->dev.of_node)
@@ -768,7 +774,7 @@ EXPORT_SYMBOL_GPL(framer_provider_simple_of_xlate);
struct framer_provider *
__framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
struct framer_provider *framer_provider;
@@ -830,7 +836,7 @@ static void devm_framer_provider_of_unregister(struct device *dev, void *res)
struct framer_provider *
__devm_framer_provider_of_register(struct device *dev, struct module *owner,
struct framer *(*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
struct framer_provider **ptr, *framer_provider;
@@ -869,14 +875,6 @@ static void framer_release(struct device *dev)
static int __init framer_core_init(void)
{
- framer_class = class_create("framer");
- if (IS_ERR(framer_class)) {
- pr_err("failed to create framer class (%pe)\n", framer_class);
- return PTR_ERR(framer_class);
- }
-
- framer_class->dev_release = framer_release;
-
- return 0;
+ return class_register(&framer_class);
}
device_initcall(framer_core_init);
diff --git a/drivers/net/wan/framer/pef2256/pef2256.c b/drivers/net/wan/framer/pef2256/pef2256.c
index 4f81053ee4f0..413a3c1d15bb 100644
--- a/drivers/net/wan/framer/pef2256/pef2256.c
+++ b/drivers/net/wan/framer/pef2256/pef2256.c
@@ -838,7 +838,7 @@ static int pef2256_probe(struct platform_device *pdev)
return 0;
}
-static int pef2256_remove(struct platform_device *pdev)
+static void pef2256_remove(struct platform_device *pdev)
{
struct pef2256 *pef2256 = platform_get_drvdata(pdev);
@@ -849,8 +849,6 @@ static int pef2256_remove(struct platform_device *pdev)
pef2256_write8(pef2256, PEF2256_IMR3, 0xff);
pef2256_write8(pef2256, PEF2256_IMR4, 0xff);
pef2256_write8(pef2256, PEF2256_IMR5, 0xff);
-
- return 0;
}
static const struct of_device_id pef2256_id_table[] = {
@@ -865,7 +863,7 @@ static struct platform_driver pef2256_driver = {
.of_match_table = pef2256_id_table,
},
.probe = pef2256_probe,
- .remove = pef2256_remove,
+ .remove_new = pef2256_remove,
};
module_platform_driver(pef2256_driver);
diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
new file mode 100644
index 000000000000..960371df470a
--- /dev/null
+++ b/drivers/net/wan/fsl_qmc_hdlc.c
@@ -0,0 +1,797 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Freescale QMC HDLC Device Driver
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/bitmap.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/framer/framer.h>
+#include <linux/hdlc.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <soc/fsl/qe/qmc.h>
+
+struct qmc_hdlc_desc {
+ struct net_device *netdev;
+ struct sk_buff *skb; /* NULL if the descriptor is not in use */
+ dma_addr_t dma_addr;
+ size_t dma_size;
+};
+
+struct qmc_hdlc {
+ struct device *dev;
+ struct qmc_chan *qmc_chan;
+ struct net_device *netdev;
+ struct framer *framer;
+ spinlock_t carrier_lock; /* Protect carrier detection */
+ struct notifier_block nb;
+ bool is_crc32;
+ spinlock_t tx_lock; /* Protect tx descriptors */
+ struct qmc_hdlc_desc tx_descs[8];
+ unsigned int tx_out;
+ struct qmc_hdlc_desc rx_descs[4];
+ u32 slot_map;
+};
+
+static struct qmc_hdlc *netdev_to_qmc_hdlc(struct net_device *netdev)
+{
+ return dev_to_hdlc(netdev)->priv;
+}
+
+static int qmc_hdlc_framer_set_carrier(struct qmc_hdlc *qmc_hdlc)
+{
+ struct framer_status framer_status;
+ int ret;
+
+ if (!qmc_hdlc->framer)
+ return 0;
+
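+ /* guard() releases carrier_lock automatically on every return path */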
+ guard(spinlock_irqsave)(&qmc_hdlc->carrier_lock);
+
+ ret = framer_get_status(qmc_hdlc->framer, &framer_status);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret);
+ return ret;
+ }
+ if (framer_status.link_is_on)
+ netif_carrier_on(qmc_hdlc->netdev);
+ else
+ netif_carrier_off(qmc_hdlc->netdev);
+
+ return 0;
+}
+
+static int qmc_hdlc_framer_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct qmc_hdlc *qmc_hdlc = container_of(nb, struct qmc_hdlc, nb);
+ int ret;
+
+ if (action != FRAMER_EVENT_STATUS)
+ return NOTIFY_DONE;
+
+ ret = qmc_hdlc_framer_set_carrier(qmc_hdlc);
+ return ret ? NOTIFY_DONE : NOTIFY_OK;
+}
+
+static int qmc_hdlc_framer_start(struct qmc_hdlc *qmc_hdlc)
+{
+ struct framer_status framer_status;
+ int ret;
+
+ if (!qmc_hdlc->framer)
+ return 0;
+
+ ret = framer_power_on(qmc_hdlc->framer);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "framer power-on failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Be sure that get_status is supported */
+ ret = framer_get_status(qmc_hdlc->framer, &framer_status);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "get framer status failed (%d)\n", ret);
+ goto framer_power_off;
+ }
+
+ qmc_hdlc->nb.notifier_call = qmc_hdlc_framer_notifier;
+ ret = framer_notifier_register(qmc_hdlc->framer, &qmc_hdlc->nb);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "framer notifier register failed (%d)\n", ret);
+ goto framer_power_off;
+ }
+
+ return 0;
+
+framer_power_off:
+ framer_power_off(qmc_hdlc->framer);
+ return ret;
+}
+
+static void qmc_hdlc_framer_stop(struct qmc_hdlc *qmc_hdlc)
+{
+ if (!qmc_hdlc->framer)
+ return;
+
+ framer_notifier_unregister(qmc_hdlc->framer, &qmc_hdlc->nb);
+ framer_power_off(qmc_hdlc->framer);
+}
+
+static int qmc_hdlc_framer_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface,
+ const te1_settings *te1)
+{
+ struct framer_config config;
+ int ret;
+
+ if (!qmc_hdlc->framer)
+ return 0;
+
+ ret = framer_get_config(qmc_hdlc->framer, &config);
+ if (ret)
+ return ret;
+
+ switch (if_iface) {
+ case IF_IFACE_E1:
+ config.iface = FRAMER_IFACE_E1;
+ break;
+ case IF_IFACE_T1:
+ config.iface = FRAMER_IFACE_T1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (te1->clock_type) {
+ case CLOCK_DEFAULT:
+ /* Keep current value */
+ break;
+ case CLOCK_EXT:
+ config.clock_type = FRAMER_CLOCK_EXT;
+ break;
+ case CLOCK_INT:
+ config.clock_type = FRAMER_CLOCK_INT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ config.line_clock_rate = te1->clock_rate;
+
+ return framer_set_config(qmc_hdlc->framer, &config);
+}
+
+static int qmc_hdlc_framer_get_iface(struct qmc_hdlc *qmc_hdlc, int *if_iface, te1_settings *te1)
+{
+ struct framer_config config;
+ int ret;
+
+ if (!qmc_hdlc->framer) {
+ *if_iface = IF_IFACE_E1;
+ return 0;
+ }
+
+ ret = framer_get_config(qmc_hdlc->framer, &config);
+ if (ret)
+ return ret;
+
+ switch (config.iface) {
+ case FRAMER_IFACE_E1:
+ *if_iface = IF_IFACE_E1;
+ break;
+ case FRAMER_IFACE_T1:
+ *if_iface = IF_IFACE_T1;
+ break;
+ }
+
+ if (!te1)
+ return 0; /* Only iface type requested */
+
+ switch (config.clock_type) {
+ case FRAMER_CLOCK_EXT:
+ te1->clock_type = CLOCK_EXT;
+ break;
+ case FRAMER_CLOCK_INT:
+ te1->clock_type = CLOCK_INT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ te1->clock_rate = config.line_clock_rate;
+ return 0;
+}
+
+static int qmc_hdlc_framer_init(struct qmc_hdlc *qmc_hdlc)
+{
+ int ret;
+
+ if (!qmc_hdlc->framer)
+ return 0;
+
+ ret = framer_init(qmc_hdlc->framer);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "framer init failed (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qmc_hdlc_framer_exit(struct qmc_hdlc *qmc_hdlc)
+{
+ if (!qmc_hdlc->framer)
+ return;
+
+ framer_exit(qmc_hdlc->framer);
+}
+
+static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size);
+
+#define QMC_HDLC_RX_ERROR_FLAGS \
+ (QMC_RX_FLAG_HDLC_OVF | QMC_RX_FLAG_HDLC_UNA | \
+ QMC_RX_FLAG_HDLC_CRC | QMC_RX_FLAG_HDLC_ABORT)
+
+static void qmc_hdlc_recv_complete(void *context, size_t length, unsigned int flags)
+{
+ struct qmc_hdlc_desc *desc = context;
+ struct net_device *netdev;
+ struct qmc_hdlc *qmc_hdlc;
+ int ret;
+
+ netdev = desc->netdev;
+ qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);
+
+ if (flags & QMC_HDLC_RX_ERROR_FLAGS) {
+ netdev->stats.rx_errors++;
+ if (flags & QMC_RX_FLAG_HDLC_OVF) /* Data overflow */
+ netdev->stats.rx_over_errors++;
+ if (flags & QMC_RX_FLAG_HDLC_UNA) /* Received bits not a multiple of 8 */
+ netdev->stats.rx_frame_errors++;
+ if (flags & QMC_RX_FLAG_HDLC_ABORT) /* Received an abort sequence */
+ netdev->stats.rx_frame_errors++;
+ if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */
+ netdev->stats.rx_crc_errors++;
+ kfree_skb(desc->skb);
+ } else {
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += length;
+
+ skb_put(desc->skb, length);
+ desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
+ netif_rx(desc->skb);
+ }
+
+ /* Re-queue a transfer using the same descriptor */
+ ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "queue recv desc failed (%d)\n", ret);
+ netdev->stats.rx_errors++;
+ }
+}
+
+static int qmc_hdlc_recv_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc, size_t size)
+{
+ int ret;
+
+ desc->skb = dev_alloc_skb(size);
+ if (!desc->skb)
+ return -ENOMEM;
+
+ desc->dma_size = size;
+ desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
+ desc->dma_size, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
+ if (ret)
+ goto free_skb;
+
+ ret = qmc_chan_read_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
+ qmc_hdlc_recv_complete, desc);
+ if (ret)
+ goto dma_unmap;
+
+ return 0;
+
+dma_unmap:
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_FROM_DEVICE);
+free_skb:
+ kfree_skb(desc->skb);
+ desc->skb = NULL;
+ return ret;
+}
+
+static void qmc_hdlc_xmit_complete(void *context)
+{
+ struct qmc_hdlc_desc *desc = context;
+ struct net_device *netdev;
+ struct qmc_hdlc *qmc_hdlc;
+ struct sk_buff *skb;
+
+ netdev = desc->netdev;
+ qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+
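+ /* tx_lock is held only for the scoped block below and dropped on exit */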
+ scoped_guard(spinlock_irqsave, &qmc_hdlc->tx_lock) {
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
+ skb = desc->skb;
+ desc->skb = NULL; /* Release the descriptor */
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+ }
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ dev_consume_skb_any(skb);
+}
+
+static int qmc_hdlc_xmit_queue(struct qmc_hdlc *qmc_hdlc, struct qmc_hdlc_desc *desc)
+{
+ int ret;
+
+ desc->dma_addr = dma_map_single(qmc_hdlc->dev, desc->skb->data,
+ desc->dma_size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(qmc_hdlc->dev, desc->dma_addr);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "failed to map skb\n");
+ return ret;
+ }
+
+ ret = qmc_chan_write_submit(qmc_hdlc->qmc_chan, desc->dma_addr, desc->dma_size,
+ qmc_hdlc_xmit_complete, desc);
+ if (ret) {
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size, DMA_TO_DEVICE);
+ dev_err(qmc_hdlc->dev, "qmc chan write returns %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t qmc_hdlc_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ struct qmc_hdlc_desc *desc;
+ int err;
+
+ guard(spinlock_irqsave)(&qmc_hdlc->tx_lock);
+
+ desc = &qmc_hdlc->tx_descs[qmc_hdlc->tx_out];
+ if (WARN_ONCE(desc->skb, "No tx descriptors available\n")) {
+ /* Should never happen.
+ * Previous xmit should have already stopped the queue.
+ */
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ desc->netdev = netdev;
+ desc->dma_size = skb->len;
+ desc->skb = skb;
+ err = qmc_hdlc_xmit_queue(qmc_hdlc, desc);
+ if (err) {
+ desc->skb = NULL; /* Release the descriptor */
+ if (err == -EBUSY) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+ dev_kfree_skb(skb);
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
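+ /* Advance the ring; stop the queue if the next descriptor is still in use */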
+ qmc_hdlc->tx_out = (qmc_hdlc->tx_out + 1) % ARRAY_SIZE(qmc_hdlc->tx_descs);
+
+ if (qmc_hdlc->tx_descs[qmc_hdlc->tx_out].skb)
+ netif_stop_queue(netdev);
+
+ return NETDEV_TX_OK;
+}
+
+static int qmc_hdlc_xlate_slot_map(struct qmc_hdlc *qmc_hdlc,
+ u32 slot_map, struct qmc_chan_ts_info *ts_info)
+{
+ DECLARE_BITMAP(ts_mask_avail, 64);
+ DECLARE_BITMAP(ts_mask, 64);
+ DECLARE_BITMAP(map, 64);
+
+ /* Tx and Rx available masks must be identical */
+ if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
+ dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
+ ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
+ return -EINVAL;
+ }
+
+ bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
+ bitmap_from_u64(map, slot_map);
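+ /* Scatter the dense slot_map bits onto the positions of the available timeslots */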
+ bitmap_scatter(ts_mask, map, ts_mask_avail, 64);
+
+ if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
+ dev_err(qmc_hdlc->dev, "Cannot translate timeslots %64pb -> (%64pb, %64pb)\n",
+ map, ts_mask_avail, ts_mask);
+ return -EINVAL;
+ }
+
+ bitmap_to_arr64(&ts_info->tx_ts_mask, ts_mask, 64);
+ ts_info->rx_ts_mask = ts_info->tx_ts_mask;
+ return 0;
+}
+
+static int qmc_hdlc_xlate_ts_info(struct qmc_hdlc *qmc_hdlc,
+ const struct qmc_chan_ts_info *ts_info, u32 *slot_map)
+{
+ DECLARE_BITMAP(ts_mask_avail, 64);
+ DECLARE_BITMAP(ts_mask, 64);
+ DECLARE_BITMAP(map, 64);
+ u32 slot_array[2];
+
+ /* Tx and Rx masks and available masks must be identical */
+ if (ts_info->rx_ts_mask_avail != ts_info->tx_ts_mask_avail) {
+ dev_err(qmc_hdlc->dev, "tx and rx available timeslots mismatch (0x%llx, 0x%llx)\n",
+ ts_info->rx_ts_mask_avail, ts_info->tx_ts_mask_avail);
+ return -EINVAL;
+ }
+ if (ts_info->rx_ts_mask != ts_info->tx_ts_mask) {
+ dev_err(qmc_hdlc->dev, "tx and rx timeslots mismatch (0x%llx, 0x%llx)\n",
+ ts_info->rx_ts_mask, ts_info->tx_ts_mask);
+ return -EINVAL;
+ }
+
+ bitmap_from_u64(ts_mask_avail, ts_info->rx_ts_mask_avail);
+ bitmap_from_u64(ts_mask, ts_info->rx_ts_mask);
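+ /* Gather the used timeslot bits back into a dense slot map */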
+ bitmap_gather(map, ts_mask, ts_mask_avail, 64);
+
+ if (bitmap_weight(ts_mask, 64) != bitmap_weight(map, 64)) {
+ dev_err(qmc_hdlc->dev, "Cannot translate timeslots (%64pb, %64pb) -> %64pb\n",
+ ts_mask_avail, ts_mask, map);
+ return -EINVAL;
+ }
+
+ bitmap_to_arr32(slot_array, map, 64);
+ if (slot_array[1]) {
+ dev_err(qmc_hdlc->dev, "Slot map out of 32bit (%64pb, %64pb) -> %64pb\n",
+ ts_mask_avail, ts_mask, map);
+ return -EINVAL;
+ }
+
+ *slot_map = slot_array[0];
+ return 0;
+}
+
+static int qmc_hdlc_set_iface(struct qmc_hdlc *qmc_hdlc, int if_iface, const te1_settings *te1)
+{
+ struct qmc_chan_ts_info ts_info;
+ int ret;
+
+ ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "get QMC channel ts info failed %d\n", ret);
+ return ret;
+ }
+ ret = qmc_hdlc_xlate_slot_map(qmc_hdlc, te1->slot_map, &ts_info);
+ if (ret)
+ return ret;
+
+ ret = qmc_chan_set_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "set QMC channel ts info failed %d\n", ret);
+ return ret;
+ }
+
+ qmc_hdlc->slot_map = te1->slot_map;
+
+ ret = qmc_hdlc_framer_set_iface(qmc_hdlc, if_iface, te1);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "framer set iface failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qmc_hdlc_ioctl(struct net_device *netdev, struct if_settings *ifs)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ te1_settings te1;
+ int ret;
+
+ switch (ifs->type) {
+ case IF_GET_IFACE:
+ if (ifs->size < sizeof(te1)) {
+ /* Retrieve type only */
+ ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, NULL);
+ if (ret)
+ return ret;
+
+ if (!ifs->size)
+ return 0; /* only type requested */
+
+ ifs->size = sizeof(te1); /* data size wanted */
+ return -ENOBUFS;
+ }
+
+ memset(&te1, 0, sizeof(te1));
+
+ /* Retrieve info from framer */
+ ret = qmc_hdlc_framer_get_iface(qmc_hdlc, &ifs->type, &te1);
+ if (ret)
+ return ret;
+
+ /* Update slot_map */
+ te1.slot_map = qmc_hdlc->slot_map;
+
+ if (copy_to_user(ifs->ifs_ifsu.te1, &te1, sizeof(te1)))
+ return -EFAULT;
+ return 0;
+
+ case IF_IFACE_E1:
+ case IF_IFACE_T1:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (netdev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (copy_from_user(&te1, ifs->ifs_ifsu.te1, sizeof(te1)))
+ return -EFAULT;
+
+ return qmc_hdlc_set_iface(qmc_hdlc, ifs->type, &te1);
+
+ default:
+ return hdlc_ioctl(netdev, ifs);
+ }
+}
+
+static int qmc_hdlc_open(struct net_device *netdev)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ struct qmc_chan_param chan_param;
+ struct qmc_hdlc_desc *desc;
+ int ret;
+ int i;
+
+ ret = qmc_hdlc_framer_start(qmc_hdlc);
+ if (ret)
+ return ret;
+
+ ret = hdlc_open(netdev);
+ if (ret)
+ goto framer_stop;
+
+ /* Update carrier */
+ qmc_hdlc_framer_set_carrier(qmc_hdlc);
+
+ chan_param.mode = QMC_HDLC;
+ /* max_rx_frame_size: HDLC_MAX_MRU + 4 for the CRC
+ * max_rx_buf_size: HDLC_MAX_MRU + 4 + 8 for the CRC and some extra space needed by the QMC
+ */
+ chan_param.hdlc.max_rx_buf_size = HDLC_MAX_MRU + 4 + 8;
+ chan_param.hdlc.max_rx_frame_size = HDLC_MAX_MRU + 4;
+ chan_param.hdlc.is_crc32 = qmc_hdlc->is_crc32;
+ ret = qmc_chan_set_param(qmc_hdlc->qmc_chan, &chan_param);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "failed to set param (%d)\n", ret);
+ goto hdlc_close;
+ }
+
+ /* Queue as many recv descriptors as possible */
+ for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
+ desc = &qmc_hdlc->rx_descs[i];
+
+ desc->netdev = netdev;
+ ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, chan_param.hdlc.max_rx_buf_size);
+ if (ret == -EBUSY && i != 0)
+ break; /* The QMC channel is full; run with the descriptors already queued */
+ if (ret)
+ goto free_desc;
+ }
+
+ ret = qmc_chan_start(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+ if (ret) {
+ dev_err(qmc_hdlc->dev, "qmc chan start failed (%d)\n", ret);
+ goto free_desc;
+ }
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+free_desc:
+ qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+ while (i--) {
+ desc = &qmc_hdlc->rx_descs[i];
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
+ DMA_FROM_DEVICE);
+ kfree_skb(desc->skb);
+ desc->skb = NULL;
+ }
+hdlc_close:
+ hdlc_close(netdev);
+framer_stop:
+ qmc_hdlc_framer_stop(qmc_hdlc);
+ return ret;
+}
+
+static int qmc_hdlc_close(struct net_device *netdev)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+ struct qmc_hdlc_desc *desc;
+ int i;
+
+ qmc_chan_stop(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+ qmc_chan_reset(qmc_hdlc->qmc_chan, QMC_CHAN_ALL);
+
+ netif_stop_queue(netdev);
+
+ for (i = 0; i < ARRAY_SIZE(qmc_hdlc->tx_descs); i++) {
+ desc = &qmc_hdlc->tx_descs[i];
+ if (!desc->skb)
+ continue;
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
+ DMA_TO_DEVICE);
+ kfree_skb(desc->skb);
+ desc->skb = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qmc_hdlc->rx_descs); i++) {
+ desc = &qmc_hdlc->rx_descs[i];
+ if (!desc->skb)
+ continue;
+ dma_unmap_single(qmc_hdlc->dev, desc->dma_addr, desc->dma_size,
+ DMA_FROM_DEVICE);
+ kfree_skb(desc->skb);
+ desc->skb = NULL;
+ }
+
+ hdlc_close(netdev);
+ qmc_hdlc_framer_stop(qmc_hdlc);
+ return 0;
+}
+
+static int qmc_hdlc_attach(struct net_device *netdev, unsigned short encoding,
+ unsigned short parity)
+{
+ struct qmc_hdlc *qmc_hdlc = netdev_to_qmc_hdlc(netdev);
+
+ if (encoding != ENCODING_NRZ)
+ return -EINVAL;
+
+ switch (parity) {
+ case PARITY_CRC16_PR1_CCITT:
+ qmc_hdlc->is_crc32 = false;
+ break;
+ case PARITY_CRC32_PR1_CCITT:
+ qmc_hdlc->is_crc32 = true;
+ break;
+ default:
+ dev_err(qmc_hdlc->dev, "unsupported parity %u\n", parity);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops qmc_hdlc_netdev_ops = {
+ .ndo_open = qmc_hdlc_open,
+ .ndo_stop = qmc_hdlc_close,
+ .ndo_start_xmit = hdlc_start_xmit,
+ .ndo_siocwandev = qmc_hdlc_ioctl,
+};
+
+static int qmc_hdlc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qmc_chan_ts_info ts_info;
+ struct qmc_hdlc *qmc_hdlc;
+ struct qmc_chan_info info;
+ hdlc_device *hdlc;
+ int ret;
+
+ qmc_hdlc = devm_kzalloc(dev, sizeof(*qmc_hdlc), GFP_KERNEL);
+ if (!qmc_hdlc)
+ return -ENOMEM;
+
+ qmc_hdlc->dev = dev;
+ spin_lock_init(&qmc_hdlc->tx_lock);
+ spin_lock_init(&qmc_hdlc->carrier_lock);
+
+ qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node);
+ if (IS_ERR(qmc_hdlc->qmc_chan))
+ return dev_err_probe(dev, PTR_ERR(qmc_hdlc->qmc_chan),
+ "get QMC channel failed\n");
+
+ ret = qmc_chan_get_info(qmc_hdlc->qmc_chan, &info);
+ if (ret)
+ return dev_err_probe(dev, ret, "get QMC channel info failed\n");
+
+ if (info.mode != QMC_HDLC)
+ return dev_err_probe(dev, -EINVAL, "QMC chan mode %d is not QMC_HDLC\n",
+ info.mode);
+
+ ret = qmc_chan_get_ts_info(qmc_hdlc->qmc_chan, &ts_info);
+ if (ret)
+ return dev_err_probe(dev, ret, "get QMC channel ts info failed\n");
+
+ ret = qmc_hdlc_xlate_ts_info(qmc_hdlc, &ts_info, &qmc_hdlc->slot_map);
+ if (ret)
+ return ret;
+
+ qmc_hdlc->framer = devm_framer_optional_get(dev, "fsl,framer");
+ if (IS_ERR(qmc_hdlc->framer))
+ return PTR_ERR(qmc_hdlc->framer);
+
+ ret = qmc_hdlc_framer_init(qmc_hdlc);
+ if (ret)
+ return ret;
+
+ qmc_hdlc->netdev = alloc_hdlcdev(qmc_hdlc);
+ if (!qmc_hdlc->netdev) {
+ ret = -ENOMEM;
+ goto framer_exit;
+ }
+
+ hdlc = dev_to_hdlc(qmc_hdlc->netdev);
+ hdlc->attach = qmc_hdlc_attach;
+ hdlc->xmit = qmc_hdlc_xmit;
+ SET_NETDEV_DEV(qmc_hdlc->netdev, dev);
+ qmc_hdlc->netdev->tx_queue_len = ARRAY_SIZE(qmc_hdlc->tx_descs);
+ qmc_hdlc->netdev->netdev_ops = &qmc_hdlc_netdev_ops;
+ ret = register_hdlc_device(qmc_hdlc->netdev);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to register hdlc device\n");
+ goto free_netdev;
+ }
+
+ platform_set_drvdata(pdev, qmc_hdlc);
+ return 0;
+
+free_netdev:
+ free_netdev(qmc_hdlc->netdev);
+framer_exit:
+ qmc_hdlc_framer_exit(qmc_hdlc);
+ return ret;
+}
+
+static int qmc_hdlc_remove(struct platform_device *pdev)
+{
+ struct qmc_hdlc *qmc_hdlc = platform_get_drvdata(pdev);
+
+ unregister_hdlc_device(qmc_hdlc->netdev);
+ free_netdev(qmc_hdlc->netdev);
+ qmc_hdlc_framer_exit(qmc_hdlc);
+
+ return 0;
+}
+
+static const struct of_device_id qmc_hdlc_id_table[] = {
+ { .compatible = "fsl,qmc-hdlc" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, qmc_hdlc_id_table);
+
+static struct platform_driver qmc_hdlc_driver = {
+ .driver = {
+ .name = "fsl-qmc-hdlc",
+ .of_match_table = qmc_hdlc_id_table,
+ },
+ .probe = qmc_hdlc_probe,
+ .remove = qmc_hdlc_remove,
+};
+module_platform_driver(qmc_hdlc_driver);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("QMC HDLC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index a176653c8861..df275b4fccb6 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -263,7 +263,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
* call skb_cow_data, so that there's no chance that data is removed
* from the skb, so that later we can extract the original endpoint.
*/
- offset = skb->data - skb_network_header(skb);
+ offset = -skb_network_offset(skb);
skb_push(skb, offset);
num_frags = skb_cow_data(skb, 0, &trailer);
offset += sizeof(struct message_data);
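The replacement relies on a pointer identity rather than new behavior: skb_network_offset() is defined as skb_network_header(skb) - skb->data, so its negation equals the old expression. A trivial sketch of the identity, with plain pointers standing in for the skb fields:

#include <assert.h>
#include <stddef.h>

int main(void)
{
	unsigned char buf[64];
	unsigned char *data = buf + 16;          /* skb->data after header pulls */
	unsigned char *network_header = buf + 2; /* skb_network_header(skb) */

	/* what skb_network_offset() computes */
	ptrdiff_t network_offset = network_header - data;

	assert(data - network_header == -network_offset);
	return 0;
}
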
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 2fceea9f6550..e3fd48dd3909 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1759,6 +1759,10 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
}
static const struct ieee80211_ops adm8211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = adm8211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = adm8211_start,
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 43e0db78d42b..815f8f599f5d 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1358,6 +1358,10 @@ static void ar5523_configure_filter(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops ar5523_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = ar5523_start,
.stop = ar5523_stop,
.tx = ar5523_tx,
@@ -1803,5 +1807,6 @@ static struct usb_driver ar5523_driver = {
module_usb_driver(ar5523_driver);
+MODULE_DESCRIPTION("Atheros AR5523 wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(AR5523_FIRMWARE_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 0032f8aa892f..9ce6f49ab261 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -3613,7 +3613,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
hw_rev);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
goto err_free_mac;
}
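The ENOTSUPP -> EOPNOTSUPP conversions repeated through this series matter beyond style: ENOTSUPP (524) is kernel-internal and has no userspace definition, while EOPNOTSUPP is a real errno. A quick check of the difference, assuming glibc's strerror():

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* EOPNOTSUPP (95) has a proper message */
	printf("EOPNOTSUPP (%d): %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));

	/* kernel-internal ENOTSUPP prints as "Unknown error 524" */
	printf("524: %s\n", strerror(524));
	return 0;
}
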
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index e5ef0352e319..8d274e0f374b 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _COREDUMP_H_
@@ -13,7 +13,11 @@
/**
* enum ath10k_fw_crash_dump_type - types of data in the dump file
- * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
+ * @ATH10K_FW_CRASH_DUMP_CE_DATA: Copy Engine crash dump data
+ * @ATH10K_FW_CRASH_DUMP_RAM_DATA: RAM crash dump data, contains multiple
+ * struct ath10k_dump_ram_data_hdr
+ * @ATH10K_FW_CRASH_DUMP_MAX: Maximum enumeration
*/
enum ath10k_fw_crash_dump_type {
ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 907e1e13871a..dbaf262cd7c1 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
@@ -381,7 +382,7 @@ static int ath10k_htt_verify_version(struct ath10k_htt *htt)
htt->target_version_major != 3) {
ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
htt->target_version_major);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 4a9270e2a4c8..603f6de62b0a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HTT_H_
@@ -906,7 +906,7 @@ struct htt_data_tx_completion_ext {
__le16 msdus_rssi[];
} __packed;
-/**
+/*
* @brief target -> host TX completion indication message definition
*
* @details
@@ -1474,15 +1474,19 @@ enum htt_q_depth_type {
#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0
/**
- * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
+ * struct htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
*
* Defines host q state format and behavior. See htt_q_state.
*
+ * @paddr: Queue physical address
+ * @num_peers: Number of supported peers
+ * @num_tids: Number of supported TIDs
* @record_size: Defines the size of each host q entry in bytes. In practice
* however firmware (at least 10.4.3-00191) ignores this host
* configuration value and uses hardcoded value of 1.
* @record_multiplier: This is valid only when q depth type is MSDUs. It
* defines the exponent for the power of 2 multiplication.
+ * @pad: struct padding for 32-bit alignment
*/
struct htt_q_state_conf {
__le32 paddr;
@@ -1518,7 +1522,7 @@ struct htt_frag_desc_bank_cfg64 {
#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6
/**
- * htt_q_state - shared between host and firmware via DMA
+ * struct htt_q_state - shared between host and firmware via DMA
*
This structure is used for the host to expose its software queue state to
* firmware so that its rate control can schedule fetch requests for optimized
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 090bcf148d0c..e322b528baaf 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "mac.h"
@@ -2034,8 +2034,8 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
if (!arvif->is_up)
return;
- if (!ieee80211_beacon_cntdwn_is_complete(vif)) {
- ieee80211_beacon_update_cntdwn(vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
+ ieee80211_beacon_update_cntdwn(vif, 0);
ret = ath10k_mac_setup_bcn_tmpl(arvif);
if (ret)
@@ -2047,7 +2047,7 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
ret);
} else {
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
}
@@ -4056,7 +4056,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
!(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) {
WARN_ON_ONCE(1);
ieee80211_free_txskb(hw, skb);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -7065,7 +7065,7 @@ static int ath10k_mac_set_tid_config(struct ath10k *ar, struct ieee80211_sta *st
if (sta) {
if (!sta->wme)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
arsta = (struct ath10k_sta *)sta->drv_priv;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 3de2de6d44bc..5c34b156b4ff 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/pci.h>
@@ -889,7 +889,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}
@@ -2668,7 +2668,7 @@ static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (!ar_pci->pci_soft_reset)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return ar_pci->pci_soft_reset(ar);
}
@@ -2808,7 +2808,7 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (WARN_ON(!ar_pci->pci_hard_reset))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return ar_pci->pci_hard_reset(ar);
}
@@ -3594,7 +3594,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
break;
default:
WARN_ON(1);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 6b6aa3c36744..aed97fd121ba 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3,6 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "debug.h"
@@ -851,6 +852,10 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
}
ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
arg->desc_id = ev->desc_id;
arg->status = ev->status;
@@ -1347,7 +1352,7 @@ static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
__le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
__le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
__le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
arg->min_tx_power = ev->hw_min_tx_power;
@@ -2119,9 +2124,9 @@ static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
case WMI_VDEV_SUBTYPE_MESH_11S:
return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static struct sk_buff *
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 83a8f07a687f..8a2f87d0a3a3 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _WMI_TLV_H
#define _WMI_TLV_H
@@ -2343,7 +2343,7 @@ struct wmi_tlv_adaptive_qcs {
} __packed;
/**
- * wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types
*
* @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
* Only vdev_map is valid.
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 88befe92f95d..2e9661f4bea8 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
@@ -3884,8 +3884,8 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
* actual channel switch is done
*/
if (arvif->vif->bss_conf.csa_active &&
- ieee80211_beacon_cntdwn_is_complete(arvif->vif)) {
- ieee80211_csa_finish(arvif->vif);
+ ieee80211_beacon_cntdwn_is_complete(arvif->vif, 0)) {
+ ieee80211_csa_finish(arvif->vif, 0);
continue;
}
@@ -6927,14 +6927,14 @@ void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
}
static void
-ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
+ath10k_wmi_put_start_scan_tlvs(u8 *tlvs,
const struct wmi_start_scan_arg *arg)
{
struct wmi_ie_data *ie;
struct wmi_chan_list *channels;
struct wmi_ssid_list *ssids;
struct wmi_bssid_list *bssids;
- void *ptr = tlvs->tlvs;
+ void *ptr = tlvs;
int i;
if (arg->n_channels) {
@@ -7012,7 +7012,7 @@ ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
cmd = (struct wmi_start_scan_cmd *)skb->data;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+ ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
cmd->burst_duration_ms = __cpu_to_le32(0);
@@ -7041,7 +7041,7 @@ ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
- ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+ ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
return skb;
@@ -8733,9 +8733,9 @@ int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
case WMI_VDEV_SUBTYPE_MESH_11S:
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
@@ -8755,9 +8755,9 @@ static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
case WMI_VDEV_SUBTYPE_MESH_11S:
return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
@@ -8779,7 +8779,7 @@ static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
case WMI_VDEV_SUBTYPE_MESH_NON_11S:
return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static struct sk_buff *
@@ -8918,8 +8918,6 @@ ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
if (!skb)
return ERR_PTR(-ENOMEM);
- memset(skb->data, 0, sizeof(*cmd));
-
cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 9146df98fcee..2379501225a4 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3,7 +3,7 @@
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _WMI_H_
@@ -3008,8 +3008,11 @@ enum wmi_coex_version {
* @WMI_10_4_TDLS_UAPSD_SLEEP_STA: TDLS sleep sta support enable/disable
* @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host
* enable/disable
- * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY:Explicit TDLS mode enable/disable
+ * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY: Explicit TDLS mode enable/disable
* @WMI_10_4_TX_DATA_ACK_RSSI: Enable DATA ACK RSSI if firmware is capable
+ * @WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT: Firmware supports Extended Peer
+ * TID configuration for QoS related settings
+ * @WMI_10_4_REPORT_AIRTIME: Firmware supports transmit airtime reporting
*/
enum wmi_10_4_feature_mask {
WMI_10_4_LTEU_SUPPORT = BIT(0),
@@ -3069,7 +3072,10 @@ struct host_memory_chunk {
struct wmi_host_mem_chunks {
__le32 count;
/* some fw revisions require at least 1 chunk regardless of count */
- struct host_memory_chunk items[1];
+ union {
+ struct host_memory_chunk item;
+ DECLARE_FLEX_ARRAY(struct host_memory_chunk, items);
+ };
} __packed;
struct wmi_init_cmd {
@@ -3215,23 +3221,16 @@ struct wmi_start_scan_common {
__le32 scan_ctrl_flags;
} __packed;
-struct wmi_start_scan_tlvs {
- /* TLV parameters. These includes channel list, ssid list, bssid list,
- * extra ies.
- */
- u8 tlvs[0];
-} __packed;
-
struct wmi_start_scan_cmd {
struct wmi_start_scan_common common;
__le32 burst_duration_ms;
- struct wmi_start_scan_tlvs tlvs;
+ u8 tlvs[];
} __packed;
/* This is the definition from 10.X firmware branch */
struct wmi_10x_start_scan_cmd {
struct wmi_start_scan_common common;
- struct wmi_start_scan_tlvs tlvs;
+ u8 tlvs[];
} __packed;
struct wmi_ssid_arg {
@@ -4260,13 +4259,6 @@ struct wmi_peer_sta_ps_state_chg_event {
__le32 peer_ps_state;
} __packed;
-struct wmi_pdev_chanlist_update_event {
- /* number of channels */
- __le32 num_chan;
- /* array of channels */
- struct wmi_channel channel_list[1];
-} __packed;
-
#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)
struct wmi_debug_mesg_event {
@@ -5793,30 +5785,6 @@ struct wmi_bcn_prb_info {
/* app IE */
} __packed;
-struct wmi_bcn_tmpl_cmd {
- /* unique id identifying the VDEV, generated by the caller */
- __le32 vdev_id;
- /* TIM IE offset from the beginning of the template. */
- __le32 tim_ie_offset;
- /* beacon probe capabilities and IEs */
- struct wmi_bcn_prb_info bcn_prb_info;
- /* beacon buffer length */
- __le32 buf_len;
- /* variable length data */
- u8 data[1];
-} __packed;
-
-struct wmi_prb_tmpl_cmd {
- /* unique id identifying the VDEV, generated by the caller */
- __le32 vdev_id;
- /* beacon probe capabilities and IEs */
- struct wmi_bcn_prb_info bcn_prb_info;
- /* beacon buffer length */
- __le32 buf_len;
- /* Variable length data */
- u8 data[1];
-} __packed;
-
enum wmi_sta_ps_mode {
/* enable power save for the given STA VDEV */
WMI_STA_PS_MODE_DISABLED = 0,
@@ -7197,7 +7165,13 @@ struct wmi_tdls_peer_capabilities {
__le32 is_peer_responder;
__le32 pref_offchan_num;
__le32 pref_offchan_bw;
- struct wmi_channel peer_chan_list[1];
+ union {
+ /* to match legacy implementation allocate room for
+ * at least one record even if peer_chan_len is 0
+ */
+ struct wmi_channel peer_chan_min_allocation;
+ DECLARE_FLEX_ARRAY(struct wmi_channel, peer_chan_list);
+ };
} __packed;
struct wmi_10_4_tdls_peer_update_cmd {
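Both unions introduced above follow the kernel's DECLARE_FLEX_ARRAY() pattern: sizeof() still covers one record, so firmware that requires at least one entry keeps its minimum allocation, while indexing goes through a real flexible array instead of a one-element array that trips array-bounds checking. A userspace sketch of the layout, using a GNU zero-length array as a stand-in for DECLARE_FLEX_ARRAY():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct chunk {
	uint32_t paddr;
	uint32_t len;
};

struct chunks {
	uint32_t count;
	union {
		struct chunk item;                 /* keeps room for one record */
		struct { struct chunk items[0]; }; /* indexable flexible array */
	};
};

int main(void)
{
	unsigned int n = 4;
	/* the kernel would size this with struct_size() */
	struct chunks *c = calloc(1, sizeof(*c) + (n - 1) * sizeof(struct chunk));

	if (!c)
		return 1;
	c->count = n;
	for (unsigned int i = 0; i < n; i++)
		c->items[i].len = 64;
	printf("sizeof(struct chunks) = %zu\n", sizeof(struct chunks));
	free(c);
	return 0;
}
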
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 0c6ecbb9a066..c78bce19bd75 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -122,6 +122,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
+ .support_dual_stations = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -205,6 +206,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .support_dual_stations = false,
},
{
.name = "qca6390 hw2.0",
@@ -255,7 +257,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
@@ -290,6 +292,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
{
.name = "qcn9074 hw1.0",
@@ -372,6 +375,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .support_dual_stations = false,
},
{
.name = "wcn6855 hw2.0",
@@ -422,7 +426,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
@@ -457,6 +461,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
{
.name = "wcn6855 hw2.1",
@@ -505,7 +510,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = false,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 2 + 1,
.num_peers = 512,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
@@ -540,6 +545,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
{
.name = "wcn6750 hw1.0",
@@ -621,6 +627,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
+ .support_dual_stations = false,
},
{
.hw_rev = ATH11K_HW_IPQ5018_HW10,
@@ -702,6 +709,93 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .support_dual_stations = false,
+ },
+ {
+ .name = "qca2066 hw2.1",
+ .hw_rev = ATH11K_HW_QCA2066_HW21,
+ .fw = {
+ .dir = "QCA2066/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
},
};
@@ -1775,10 +1869,9 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
mutex_lock(&ab->core_lock);
ath11k_thermal_unregister(ab);
- ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
ath11k_spectral_deinit(ab);
- ath11k_hif_stop(ab);
+ ath11k_ce_cleanup_pipes(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);
mutex_unlock(&ab->core_lock);
@@ -2033,6 +2126,9 @@ static void ath11k_core_reset(struct work_struct *work)
time_left = wait_for_completion_timeout(&ab->recovery_start,
ATH11K_RECOVER_START_TIMEOUT_HZ);
+ ath11k_hif_irq_disable(ab);
+ ath11k_hif_ce_irq_disable(ab);
+
ath11k_hif_power_down(ab);
ath11k_hif_power_up(ab);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 02e160d831be..b3fb74a226fb 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -147,6 +147,7 @@ enum ath11k_hw_rev {
ATH11K_HW_WCN6855_HW21,
ATH11K_HW_WCN6750_HW10,
ATH11K_HW_IPQ5018_HW10,
+ ATH11K_HW_QCA2066_HW21,
};
enum ath11k_firmware_mode {
@@ -314,6 +315,43 @@ struct ath11k_rekey_data {
bool enable_offload;
};
+/**
+ * struct ath11k_chan_power_info - TPE containing power info per channel chunk
+ * @chan_cfreq: channel center freq (MHz)
+ * e.g.
+ * channel 37/20 MHz, it is 6135
+ * channel 37/40 MHz, it is 6125
+ * channel 37/80 MHz, it is 6145
+ * channel 37/160 MHz, it is 6185
+ * @tx_power: transmit power (dBm)
+ */
+struct ath11k_chan_power_info {
+ u16 chan_cfreq;
+ s8 tx_power;
+};
+
+/**
+ * struct ath11k_reg_tpc_power_info - regulatory TPC power info
+ * @is_psd_power: is PSD power or not
+ * @eirp_power: Maximum EIRP power (dBm), valid only if power is PSD
+ * @ap_power_type: type of power (SP/LPI/VLP)
+ * @num_pwr_levels: number of power levels
+ * @reg_max: Array of maximum TX power (dBm) per PSD value
+ * @ap_constraint_power: AP constraint power (dBm)
+ * @tpe: TPE values processed from TPE IE
+ * @chan_power_info: power info to send to firmware
+ */
+struct ath11k_reg_tpc_power_info {
+ bool is_psd_power;
+ u8 eirp_power;
+ enum wmi_reg_6ghz_ap_type ap_power_type;
+ u8 num_pwr_levels;
+ u8 reg_max[IEEE80211_MAX_NUM_PWR_LEVEL];
+ u8 ap_constraint_power;
+ s8 tpe[IEEE80211_MAX_NUM_PWR_LEVEL];
+ struct ath11k_chan_power_info chan_power_info[IEEE80211_MAX_NUM_PWR_LEVEL];
+};
+
struct ath11k_vif {
u32 vdev_id;
enum wmi_vdev_type vdev_type;
@@ -368,6 +406,8 @@ struct ath11k_vif {
struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
+
+ struct ath11k_reg_tpc_power_info reg_tpc_info;
};
struct ath11k_vif_iter {
@@ -735,6 +775,7 @@ struct ath11k {
/* protected by conf_mutex */
bool ps_state_enable;
bool ps_timekeeper_enable;
+ s8 max_allowed_tx_power;
};
struct ath11k_band_cap {
@@ -918,6 +959,7 @@ struct ath11k_base {
* This may or may not be used during the runtime
*/
struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+ struct cur_regulatory_info *reg_info_store;
/* Current DFS Regulatory */
enum ath11k_dfs_region dfs_region;
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 8975dc57ad77..1a62407e5a9f 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -104,11 +104,14 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
if (!ring->vaddr_unaligned)
return;
- if (ring->cached)
+ if (ring->cached) {
+ dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
+ DMA_FROM_DEVICE);
kfree(ring->vaddr_unaligned);
- else
+ } else {
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
+ }
ring->vaddr_unaligned = NULL;
}
@@ -249,7 +252,18 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
if (cached) {
ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
- ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
+ if (!ring->vaddr_unaligned)
+ return -ENOMEM;
+
+ ring->paddr_unaligned = dma_map_single(ab->dev,
+ ring->vaddr_unaligned,
+ ring->size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
+ kfree(ring->vaddr_unaligned);
+ ring->vaddr_unaligned = NULL;
+ return -ENOMEM;
+ }
}
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index c1072e66e3e8..272b1c35f98d 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -103,7 +103,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control)))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
@@ -1018,7 +1018,7 @@ int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index c060c4b5c0cc..f3d04568c221 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -626,15 +626,30 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
return NULL;
}
+static u32 *ath11k_hal_srng_dst_peek_with_dma(struct ath11k_base *ab,
+ struct hal_srng *srng, dma_addr_t *paddr)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
+ *paddr = srng->ring_base_paddr +
+ sizeof(*srng->ring_base_vaddr) * srng->u.dst_ring.tp;
+ return srng->ring_base_vaddr + srng->u.dst_ring.tp;
+ }
+
+ return NULL;
+}
+
static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
struct hal_srng *srng)
{
+ dma_addr_t desc_paddr;
u32 *desc;
/* prefetch only if desc is available */
- desc = ath11k_hal_srng_dst_peek(ab, srng);
+ desc = ath11k_hal_srng_dst_peek_with_dma(ab, srng, &desc_paddr);
if (likely(desc)) {
- dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
+ dma_sync_single_for_cpu(ab->dev, desc_paddr,
(srng->entry_size * sizeof(u32)),
DMA_FROM_DEVICE);
prefetch(desc);
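The new helper derives the descriptor's bus address from the ring's already-mapped base rather than calling virt_to_phys() on the CPU pointer, which is not guaranteed to be the device-visible address. Since ring_base_vaddr is a u32 pointer, the tail pointer counts in u32 units; the arithmetic, sketched in userspace:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ring_base_paddr = 0x10000000; /* assumed bus address */
	uint32_t tp = 5;                       /* tail pointer, in u32 units */
	uint64_t desc_paddr = ring_base_paddr + sizeof(uint32_t) * tp;

	printf("descriptor at 0x%llx\n", (unsigned long long)desc_paddr);
	return 0;
}
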
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 80447f488954..65e8f244ebb9 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_H
@@ -674,6 +674,7 @@ struct hal_srng_config {
* @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host
* @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host
* @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host
+ * @HAL_RX_BUF_RBM_SW4_BM: For Tx completion -- returned to host
*/
enum hal_rx_buf_return_buf_manager {
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index e758ee8e17c9..8f7dd43dc1bd 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
@@ -246,7 +246,7 @@ int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
case HAL_REO_CMD_UNBLOCK_CACHE:
case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
ath11k_warn(ab, "Unsupported reo command %d\n", type);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
break;
default:
ath11k_warn(ab, "Unknown reo command %d\n", type);
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 77d8f9237680..caa6dc12a790 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -58,7 +58,7 @@ static void ath11k_hw_wcn6855_tx_mesh_enable(struct ath11k_base *ab,
static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
struct target_resource_config *config)
{
- config->num_vdevs = 4;
+ config->num_vdevs = ab->hw_params.num_vdevs;
config->num_peers = 16;
config->num_tids = 32;
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 1b070747a5db..14ef4eb48f80 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -226,6 +226,7 @@ struct ath11k_hw_params {
u32 tx_ring_size;
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
+ bool support_dual_stations;
};
struct ath11k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index b13525bbbb80..a6a37d67a50a 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@@ -255,9 +255,6 @@ static const u32 ath11k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};
-static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-
enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy)
{
enum nl80211_he_ru_alloc ret;
@@ -1580,7 +1577,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
return;
if (vif->bss_conf.color_change_active &&
- ieee80211_beacon_cntdwn_is_complete(vif)) {
+ ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
arvif->bcca_zero_sent = true;
ieee80211_color_change_finish(vif);
return;
@@ -1589,7 +1586,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
arvif->bcca_zero_sent = false;
if (vif->bss_conf.color_change_active)
- ieee80211_beacon_update_cntdwn(vif);
+ ieee80211_beacon_update_cntdwn(vif, 0);
ath11k_mac_setup_bcn_tmpl(arvif);
}
@@ -2297,6 +2294,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ /* Initialize rx_mcs_160 to 9 which is an invalid value */
+ rx_mcs_160 = 9;
if (support_160) {
for (i = 7; i >= 0; i--) {
u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
@@ -2308,6 +2307,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
}
}
+ /* Initialize rx_mcs_80 to 9 which is an invalid value */
+ rx_mcs_80 = 9;
for (i = 7; i >= 0; i--) {
u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
@@ -3026,7 +3027,14 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
rcu_read_unlock();
+ if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
+ ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
+ arvif->vdev_id, bss_conf->bssid);
+ return;
+ }
+
peer_arg.is_assoc = true;
+
ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
@@ -3049,12 +3057,6 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
return;
}
- if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
- ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
- arvif->vdev_id, bss_conf->bssid);
- return;
- }
-
WARN_ON(arvif->is_up);
arvif->aid = vif->cfg.aid;
@@ -3397,6 +3399,18 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
return 0;
}
+static bool ath11k_mac_supports_station_tpc(struct ath11k *ar,
+ struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE &&
+ chandef->chan &&
+ chandef->chan->band == NL80211_BAND_6GHZ;
+}
+
static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -3596,7 +3610,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_TXPOWER) {
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev_id %i txpower %d\n",
arvif->vdev_id, info->txpower);
-
arvif->txpower = info->txpower;
ath11k_mac_txpower_recalc(ar);
}
@@ -4000,7 +4013,7 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
req->ssids[i].ssid_len);
}
} else {
- arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg->scan_f_passive = 1;
}
if (req->n_channels) {
@@ -4906,100 +4919,6 @@ static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
ar->num_stations--;
}
-static int ath11k_mac_station_add(struct ath11k *ar,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct ath11k_base *ab = ar->ab;
- struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
- struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
- struct peer_create_params peer_param;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- ret = ath11k_mac_inc_num_stations(arvif, sta);
- if (ret) {
- ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
- ar->max_num_stations);
- goto exit;
- }
-
- arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
- if (!arsta->rx_stats) {
- ret = -ENOMEM;
- goto dec_num_station;
- }
-
- peer_param.vdev_id = arvif->vdev_id;
- peer_param.peer_addr = sta->addr;
- peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
-
- ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
- if (ret) {
- ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- goto free_rx_stats;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
-
- if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
- arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
- if (!arsta->tx_stats) {
- ret = -ENOMEM;
- goto free_peer;
- }
- }
-
- if (ieee80211_vif_is_mesh(vif)) {
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
- ret = ath11k_wmi_set_peer_param(ar, sta->addr,
- arvif->vdev_id,
- WMI_PEER_USE_4ADDR, 1);
- if (ret) {
- ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
- sta->addr, ret);
- goto free_tx_stats;
- }
- }
-
- ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
- if (ret) {
- ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
- sta->addr, arvif->vdev_id, ret);
- goto free_tx_stats;
- }
-
- if (ab->hw_params.vdev_start_delay &&
- !arvif->is_started &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP) {
- ret = ath11k_start_vdev_delay(ar->hw, vif);
- if (ret) {
- ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
- goto free_tx_stats;
- }
- }
-
- ewma_avg_rssi_init(&arsta->avg_rssi);
- return 0;
-
-free_tx_stats:
- kfree(arsta->tx_stats);
- arsta->tx_stats = NULL;
-free_peer:
- ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
-free_rx_stats:
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
-dec_num_station:
- ath11k_mac_dec_num_stations(arvif, sta);
-exit:
- return ret;
-}
-
static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
struct ieee80211_sta *sta)
{
@@ -5028,140 +4947,6 @@ static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
return bw;
}
-static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
-{
- struct ath11k *ar = hw->priv;
- struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
- struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
- struct ath11k_peer *peer;
- int ret = 0;
-
- /* cancel must be done outside the mutex to avoid deadlock */
- if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST)) {
- cancel_work_sync(&arsta->update_wk);
- cancel_work_sync(&arsta->set_4addr_wk);
- }
-
- mutex_lock(&ar->conf_mutex);
-
- if (old_state == IEEE80211_STA_NOTEXIST &&
- new_state == IEEE80211_STA_NONE) {
- memset(arsta, 0, sizeof(*arsta));
- arsta->arvif = arvif;
- arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
- INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
- INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
-
- ret = ath11k_mac_station_add(ar, vif, sta);
- if (ret)
- ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- } else if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST)) {
- bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay &&
- vif->type == NL80211_IFTYPE_STATION;
-
- ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
-
- if (!skip_peer_delete) {
- ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
- if (ret)
- ath11k_warn(ar->ab,
- "Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- else
- ath11k_dbg(ar->ab,
- ATH11K_DBG_MAC,
- "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- }
-
- ath11k_mac_dec_num_stations(arvif, sta);
- mutex_lock(&ar->ab->tbl_mtx_lock);
- spin_lock_bh(&ar->ab->base_lock);
- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (skip_peer_delete && peer) {
- peer->sta = NULL;
- } else if (peer && peer->sta == sta) {
- ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
- vif->addr, arvif->vdev_id);
- ath11k_peer_rhash_delete(ar->ab, peer);
- peer->sta = NULL;
- list_del(&peer->list);
- kfree(peer);
- ar->num_peers--;
- }
- spin_unlock_bh(&ar->ab->base_lock);
- mutex_unlock(&ar->ab->tbl_mtx_lock);
-
- kfree(arsta->tx_stats);
- arsta->tx_stats = NULL;
-
- kfree(arsta->rx_stats);
- arsta->rx_stats = NULL;
- } else if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_MESH_POINT ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath11k_station_assoc(ar, vif, sta, false);
- if (ret)
- ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
- sta->addr);
-
- spin_lock_bh(&ar->data_lock);
- /* Set arsta bw and prev bw */
- arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
- arsta->bw_prev = arsta->bw;
- spin_unlock_bh(&ar->data_lock);
- } else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTHORIZED) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = true;
-
- spin_unlock_bh(&ar->ab->base_lock);
-
- if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
- ret = ath11k_wmi_set_peer_param(ar, sta->addr,
- arvif->vdev_id,
- WMI_PEER_AUTHORIZE,
- 1);
- if (ret)
- ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
- sta->addr, arvif->vdev_id, ret);
- }
- } else if (old_state == IEEE80211_STA_AUTHORIZED &&
- new_state == IEEE80211_STA_ASSOC) {
- spin_lock_bh(&ar->ab->base_lock);
-
- peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer)
- peer->is_authorized = false;
-
- spin_unlock_bh(&ar->ab->base_lock);
- } else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH &&
- (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_MESH_POINT ||
- vif->type == NL80211_IFTYPE_ADHOC)) {
- ret = ath11k_station_disassoc(ar, vif, sta);
- if (ret)
- ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
- sta->addr);
- }
-
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -6940,6 +6725,14 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
ret);
}
+ if (ath11k_wmi_supports_6ghz_cc_ext(ar)) {
+ struct cur_regulatory_info *reg_info;
+
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "interface added to change reg rules\n");
+ ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_LPI_AP);
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -7266,6 +7059,15 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
return ret;
}
+ /* TODO: For now we only set TPC power here. However when
+ * channel changes, say CSA, it should be updated again.
+ */
+ if (ath11k_mac_supports_station_tpc(ar, arvif, chandef)) {
+ ath11k_mac_fill_reg_tpc_info(ar, arvif->vif, &arvif->chanctx);
+ ath11k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id,
+ &arvif->reg_tpc_info);
+ }
+
if (!restart)
ar->num_started_vdevs++;
@@ -7542,8 +7344,8 @@ unlock:
mutex_unlock(&ar->conf_mutex);
}
-static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath11k_mac_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
@@ -7589,6 +7391,501 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
return 0;
}
+static int ath11k_mac_stop_vdev_early(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret;
+
+ if (WARN_ON(!arvif->is_started))
+ return -EBUSY;
+
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ arvif->is_started = false;
+
+ /* TODO: Setup ps and cts/rts protection */
+ return 0;
+}
+
+static u8 ath11k_mac_get_tpe_count(u8 txpwr_intrprt, u8 txpwr_cnt)
+{
+ switch (txpwr_intrprt) {
+ /* Refer "Table 9-276-Meaning of Maximum Transmit Power Count subfield
+ * if the Maximum Transmit Power Interpretation subfield is 0 or 2" of
+ * "IEEE Std 802.11ax 2021".
+ */
+ case IEEE80211_TPE_LOCAL_EIRP:
+ case IEEE80211_TPE_REG_CLIENT_EIRP:
+ txpwr_cnt = txpwr_cnt <= 3 ? txpwr_cnt : 3;
+ txpwr_cnt = txpwr_cnt + 1;
+ break;
+ /* Refer "Table 9-277-Meaning of Maximum Transmit Power Count subfield
+ * if Maximum Transmit Power Interpretation subfield is 1 or 3" of
+ * "IEEE Std 802.11ax 2021".
+ */
+ case IEEE80211_TPE_LOCAL_EIRP_PSD:
+ case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+ txpwr_cnt = txpwr_cnt <= 4 ? txpwr_cnt : 4;
+ txpwr_cnt = txpwr_cnt ? (BIT(txpwr_cnt - 1)) : 1;
+ break;
+ }
+
+ return txpwr_cnt;
+}
+
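A compact check of the two count mappings implemented above, per Tables 9-276 and 9-277 of IEEE Std 802.11ax-2021: EIRP interpretations carry count + 1 power values (capped at 4), while PSD interpretations carry 2^(count - 1) values, with count 0 meaning a single value for the whole bandwidth:

#include <stdio.h>

int main(void)
{
	for (unsigned int cnt = 0; cnt <= 7; cnt++) {
		unsigned int eirp = (cnt <= 3 ? cnt : 3) + 1;
		unsigned int c = cnt <= 4 ? cnt : 4;
		unsigned int psd = c ? 1u << (c - 1) : 1;

		printf("count=%u -> EIRP entries=%u, PSD entries=%u\n",
		       cnt, eirp, psd);
	}
	return 0;
}
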
+static u8 ath11k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def)
+{
+ if (chan_def->chan->flags & IEEE80211_CHAN_PSD) {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 4;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 8;
+ default:
+ return 1;
+ }
+ } else {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 3;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 4;
+ default:
+ return 1;
+ }
+ }
+}
+
+static u16 ath11k_mac_get_6ghz_start_frequency(struct cfg80211_chan_def *chan_def)
+{
+ u16 diff_seq;
+
+ /* Get the center frequency of the lowest channel number in the chan.
+ * For example,
+ * bandwidth=40 MHz, center frequency is 5965, lowest channel is 1
+ * with center frequency 5955, its diff is 5965 - 5955 = 10.
+ * bandwidth=80 MHz, center frequency is 5985, lowest channel is 1
+ * with center frequency 5955, its diff is 5985 - 5955 = 30.
+ * bandwidth=160 MHz, center frequency is 6025, lowest channel is 1
+ * with center frequency 5955, its diff is 6025 - 5955 = 70.
+ */
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_160:
+ diff_seq = 70;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ diff_seq = 30;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ diff_seq = 10;
+ break;
+ default:
+ diff_seq = 0;
+ }
+
+ return chan_def->center_freq1 - diff_seq;
+}
+
+static u16 ath11k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def,
+ u16 start_seq, u8 seq)
+{
+ u16 seg_seq;
+
+ /* Get the center frequency of the specific bandwidth.
+ * start_seq means the lowest channel number's center frequency.
+ * seq 0/1/2/3 means 20 MHz/40 MHz/80 MHz/160 MHz&80P80.
+ * For example,
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 5955 when bandwidth=20 MHz, its diff is 5955 - 5955 = 0.
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 5965 when bandwidth=40 MHz, its diff is 5965 - 5955 = 10.
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 5985 when bandwidth=80 MHz, its diff is 5985 - 5955 = 30.
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 6025 when bandwidth=160 MHz, its diff is 6025 - 5955 = 70.
+ */
+ if (chan_def->width == NL80211_CHAN_WIDTH_80P80 && seq == 3)
+ return chan_def->center_freq2;
+
+ seg_seq = 10 * (BIT(seq) - 1);
+ return seg_seq + start_seq;
+}
+
+static void ath11k_mac_get_psd_channel(struct ath11k *ar,
+ u16 step_freq,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ s8 *tx_power)
+{
+ /* Get the center frequency of each 20 MHz subchannel.
+ * For example, if the chan is 160 MHz with center frequency 6025,
+ * then it includes 8 channels: 1/5/9/13/17/21/25/29;
+ * channel number 1's center frequency is 5955, which is parameter start_freq.
+ * parameter i indexes those 8 channels, running 0~7:
+ * channels 1/5/9/13/17/21/25/29 map to i=0/1/2/3/4/5/6/7,
+ * with center frequencies 5955/5975/5995/6015/6035/6055/6075/6095;
+ * the gap between channels is 20 MHz, which parameter step_freq carries.
+ * once the center frequency of each channel is known, it is easy to find
+ * the struct ieee80211_channel of it and read the max_reg_power.
+ */
+ *center_freq = *start_freq + i * step_freq;
+ *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+static void ath11k_mac_get_eirp_power(struct ath11k *ar,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ struct cfg80211_chan_def *def,
+ s8 *tx_power)
+{
+ /* Get the center frequency for the 20 MHz/40 MHz/80 MHz/
+ * 160 MHz&80P80 bandwidth, then add 10 to that center frequency;
+ * the result is the center frequency of a channel number.
+ * For example, when the configured channel number is 1:
+ * center frequency is 5965 when bandwidth=40 MHz; plus 10 gives 5975,
+ * which is channel number 5.
+ * center frequency is 5985 when bandwidth=80 MHz; plus 10 gives 5995,
+ * which is channel number 9.
+ * center frequency is 6025 when bandwidth=160 MHz; plus 10 gives 6035,
+ * which is channel number 17.
+ * once the center frequency of each channel is known, it is easy to find
+ * the struct ieee80211_channel of it and read the max_reg_power.
+ */
+ *center_freq = ath11k_mac_get_seg_freq(def, *start_freq, i);
+
+ /* For 20 MHz the segment center frequency is already the channel's own */
+ if (i != 0)
+ *center_freq += 10;
+
+ *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath11k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info;
+ struct ieee80211_channel *chan, *temp_chan;
+ u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
+ bool is_psd_power = false, is_tpe_present = false;
+ s8 max_tx_power[IEEE80211_MAX_NUM_PWR_LEVEL],
+ psd_power, tx_power;
+ s8 eirp_power = 0;
+ u16 start_freq, center_freq;
+
+ chan = ctx->def.chan;
+ start_freq = ath11k_mac_get_6ghz_start_frequency(&ctx->def);
+ pwr_reduction = bss_conf->pwr_reduction;
+
+ if (arvif->reg_tpc_info.num_pwr_levels) {
+ is_tpe_present = true;
+ num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels;
+ } else {
+ num_pwr_levels = ath11k_mac_get_num_pwr_levels(&ctx->def);
+ }
+
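+ /* Four cases per power level below: whether the STA received a TPE
+ * IE, crossed with whether the local rules and the connecting AP
+ * express power as PSD or EIRP.
+ */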
+ for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) {
+ /* STA received a TPE IE */
+ if (is_tpe_present) {
+ /* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ /* Connecting AP uses PSD power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ psd_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ /* Connecting AP does not use PSD power */
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ /* convert psd power to EIRP power based
+ * on channel width
+ */
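+ /* EIRP = PSD + 10*log10(BW in MHz): 13 ~= 10*log10(20 MHz),
+ * and each higher power level doubles the bandwidth, adding
+ * 3 dB
+ */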
+ tx_power =
+ min_t(s8, tx_power,
+ psd_power + 13 + pwr_lvl_idx * 3);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ /* local power is not PSD power */
+ } else {
+ /* Connecting AP uses PSD power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ reg_tpc_info->tpe[pwr_lvl_idx];
+ /* Connecting AP does not use PSD power */
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ }
+ /* STA did not receive a TPE IE */
+ } else {
+ /* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] = psd_power;
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] = tx_power;
+ }
+ }
+
+ if (is_psd_power) {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ eirp_power = eirp_power - pwr_reduction;
+
+ /* If the firmware-reported max tx power is non-zero, take
+ * the min of the firmware-reported AP tx power and the max
+ * power derived from the parameters above.
+ */
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "eirp power : %d firmware reported power : %d\n",
+ eirp_power, ar->max_allowed_tx_power);
+ /* Firmware reports a lower max_allowed_tx_power in the vdev
+ * start response. On 6 GHz, firmware is not aware of the EIRP
+ * power unless the driver sets it through the WMI TPC command,
+ * so a radio that does not support idle power save can send the
+ * maximum calculated EIRP power directly to firmware through the
+ * TPC command, without the min comparison against the vdev start
+ * response's max_allowed_tx_power.
+ */
+ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps)
+ eirp_power = min_t(s8,
+ eirp_power,
+ ar->max_allowed_tx_power);
+ } else {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ max_tx_power[pwr_lvl_idx] =
+ max_tx_power[pwr_lvl_idx] - pwr_reduction;
+ /* If the firmware-reported max tx power is non-zero, take
+ * the min of the firmware-reported AP tx power and the max
+ * power derived from the parameters above.
+ */
+ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps)
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ max_tx_power[pwr_lvl_idx],
+ ar->max_allowed_tx_power);
+ }
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq;
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power =
+ max_tx_power[pwr_lvl_idx];
+ }
+
+ reg_tpc_info->num_pwr_levels = num_pwr_levels;
+ reg_tpc_info->is_psd_power = is_psd_power;
+ reg_tpc_info->eirp_power = eirp_power;
+ reg_tpc_info->ap_power_type =
+ ath11k_reg_ap_pwr_convert(vif->bss_conf.power_type);
+}
+
+static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ieee80211_tx_pwr_env *single_tpe;
+ enum wmi_reg_6ghz_client_type client_type;
+ struct cur_regulatory_info *reg_info;
+ int i;
+ u8 pwr_count, pwr_interpret, pwr_category;
+ u8 psd_index = 0, non_psd_index = 0, local_tpe_count = 0, reg_tpe_count = 0;
+ bool use_local_tpe, non_psd_set = false, psd_set = false;
+
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ client_type = reg_info->client_type;
+
+ for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
+ single_tpe = &bss_conf->tx_pwr_env[i];
+ pwr_category = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+
+ if (pwr_category == client_type) {
+ if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP ||
+ pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD)
+ local_tpe_count++;
+ else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP ||
+ pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD)
+ reg_tpe_count++;
+ }
+ }
+
+ if (!reg_tpe_count && !local_tpe_count) {
+ ath11k_warn(ab,
+ "no transmit power envelope match client power type %d\n",
+ client_type);
+ return;
+ } else if (!reg_tpe_count) {
+ use_local_tpe = true;
+ } else {
+ use_local_tpe = false;
+ }
+
+ for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
+ single_tpe = &bss_conf->tx_pwr_env[i];
+ pwr_category = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+
+ if (pwr_category != client_type)
+ continue;
+
+ /* get local transmit power envelope */
+ if (use_local_tpe) {
+ if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP) {
+ non_psd_index = i;
+ non_psd_set = true;
+ } else if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD) {
+ psd_index = i;
+ psd_set = true;
+ }
+ /* get regulatory transmit power envelope */
+ } else {
+ if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP) {
+ non_psd_index = i;
+ non_psd_set = true;
+ } else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD) {
+ psd_index = i;
+ psd_set = true;
+ }
+ }
+ }
+
+ if (non_psd_set && !psd_set) {
+ single_tpe = &bss_conf->tx_pwr_env[non_psd_index];
+ pwr_count = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ arvif->reg_tpc_info.is_psd_power = false;
+ arvif->reg_tpc_info.eirp_power = 0;
+
+ arvif->reg_tpc_info.num_pwr_levels =
+ ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
+
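+ /* TPE power fields are encoded in 0.5 dBm units, hence the
+ * division by 2 below
+ */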
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "non PSD power[%d] : %d\n",
+ i, single_tpe->tx_power[i]);
+ arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+ }
+ }
+
+ if (psd_set) {
+ single_tpe = &bss_conf->tx_pwr_env[psd_index];
+ pwr_count = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
+ IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ arvif->reg_tpc_info.is_psd_power = true;
+
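+ /* a count of 0 with PSD interpretation means a single PSD
+ * value applies across the whole channel width
+ */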
+ if (pwr_count == 0) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "TPE PSD power : %d\n", single_tpe->tx_power[0]);
+ arvif->reg_tpc_info.num_pwr_levels =
+ ath11k_mac_get_num_pwr_levels(&ctx->def);
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++)
+ arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[0] / 2;
+ } else {
+ arvif->reg_tpc_info.num_pwr_levels =
+ ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "TPE PSD power[%d] : %d\n",
+ i, single_tpe->tx_power[i]);
+ arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+ }
+ }
+ }
+}
+
static int
ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -7599,7 +7896,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret;
- struct peer_create_params param;
+ struct cur_regulatory_info *reg_info;
+ enum ieee80211_ap_reg_power power_type;
mutex_lock(&ar->conf_mutex);
@@ -7607,6 +7905,24 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
"chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
+ if (ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ power_type = vif->bss_conf.power_type;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type);
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath11k_reg_handle_chan_list(ab, reg_info, power_type);
+ arvif->chanctx = *ctx;
+ ath11k_mac_parse_tx_pwr_env(ar, vif, ctx);
+ }
+
/* for QCA6390 bss peer must be created before vdev_start */
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
@@ -7622,21 +7938,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params.vdev_start_delay &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
- param.vdev_id = arvif->vdev_id;
- param.peer_type = WMI_PEER_TYPE_DEFAULT;
- param.peer_addr = ar->mac_addr;
-
- ret = ath11k_peer_create(ar, arvif, NULL, &param);
- if (ret) {
- ath11k_warn(ab, "failed to create peer after vdev start delay: %d",
- ret);
- goto out;
- }
- }
-
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_mac_monitor_start(ar);
if (ret) {
@@ -7649,15 +7950,17 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- ret = ath11k_mac_vdev_start(arvif, ctx);
- if (ret) {
- ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
- arvif->vdev_id, vif->addr,
- ctx->def.chan->center_freq, ret);
- goto out;
- }
+ if (!arvif->is_started) {
+ ret = ath11k_mac_vdev_start(arvif, ctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto out;
+ }
- arvif->is_started = true;
+ arvif->is_started = true;
+ }
if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
@@ -7697,8 +8000,6 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
"chanctx unassign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
- WARN_ON(!arvif->is_started);
-
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
spin_lock_bh(&ab->base_lock);
@@ -7722,24 +8023,13 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
return;
}
- ret = ath11k_mac_vdev_stop(arvif);
- if (ret)
- ath11k_warn(ab, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
-
- arvif->is_started = false;
-
- if (ab->hw_params.vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_STA) {
- ret = ath11k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
+ if (arvif->is_started) {
+ ret = ath11k_mac_vdev_stop(arvif);
if (ret)
- ath11k_warn(ar->ab,
- "failed to delete peer %pM for vdev %d: %d\n",
- arvif->bssid, arvif->vdev_id, ret);
- else
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "removed peer %pM vdev %d after vdev stop\n",
- arvif->bssid, arvif->vdev_id);
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
}
if (ab->hw_params.vdev_start_delay &&
@@ -8962,8 +9252,8 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
arg->dwell_time_active = scan_time_msec;
arg->dwell_time_passive = scan_time_msec;
arg->max_scan_time = scan_time_msec;
- arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
- arg->scan_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg->scan_f_passive = 1;
+ arg->scan_f_filter_prb_req = 1;
arg->burst_duration = duration;
ret = ath11k_start_scan(ar, arg);
@@ -9097,6 +9387,252 @@ err_fallback:
return 0;
}
+static int ath11k_mac_station_add(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct peer_create_params peer_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
+
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = sta->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ goto free_rx_stats;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
+ if (!arsta->tx_stats) {
+ ret = -ENOMEM;
+ goto free_peer;
+ }
+ }
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+ if (ret) {
+ ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
+ sta->addr, ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto free_tx_stats;
+ }
+
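+ /* on hardware with vdev_start_delay, the vdev is started only
+ * once the first peer exists
+ */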
+ if (ab->hw_params.vdev_start_delay &&
+ !arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_mac_start_vdev_delay(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ewma_avg_rssi_init(&arsta->avg_rssi);
+ return 0;
+
+free_tx_stats:
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+free_peer:
+ ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+free_rx_stats:
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+dec_num_station:
+ ath11k_mac_dec_num_stations(arvif, sta);
+exit:
+ return ret;
+}
+
+static int ath11k_mac_station_remove(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ int ret;
+
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_mac_stop_vdev_early(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to do early vdev stop: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ ath11k_mac_dec_num_stations(arvif, sta);
+
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+
+ return ret;
+}
+
+static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k_peer *peer;
+ int ret = 0;
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ cancel_work_sync(&arsta->update_wk);
+ cancel_work_sync(&arsta->set_4addr_wk);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
+ INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
+
+ ret = ath11k_mac_station_add(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ret = ath11k_mac_station_remove(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer && peer->sta == sta) {
+ ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ ath11k_peer_rhash_delete(ar->ab, peer);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
+
+ spin_lock_bh(&ar->data_lock);
+ /* Set arsta bw and prev bw */
+ arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+ arsta->bw_prev = arsta->bw;
+ spin_unlock_bh(&ar->data_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
static const struct ieee80211_ops ath11k_ops = {
.tx = ath11k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@@ -9288,6 +9824,33 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
return 0;
}
+static void ath11k_mac_setup_mac_address_list(struct ath11k *ar)
+{
+ struct mac_address *addresses;
+ u16 n_addresses;
+ int i;
+
+ if (!ar->ab->hw_params.support_dual_stations)
+ return;
+
+ n_addresses = ar->ab->hw_params.num_vdevs;
+ addresses = kcalloc(n_addresses, sizeof(*addresses), GFP_KERNEL);
+ if (!addresses)
+ return;
+
+ memcpy(addresses[0].addr, ar->mac_addr, ETH_ALEN);
+ for (i = 1; i < n_addresses; i++) {
+ memcpy(addresses[i].addr, ar->mac_addr, ETH_ALEN);
+ /* set the Locally Administered Address bit */
+ addresses[i].addr[0] |= 0x2;
+
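+ /* vary the upper nibble of the first octet so each derived
+ * address is unique
+ */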
+ addresses[i].addr[0] += (i - 1) << 4;
+ }
+
+ ar->hw->wiphy->addresses = addresses;
+ ar->hw->wiphy->n_addresses = n_addresses;
+}
+
static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
@@ -9307,28 +9870,46 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
return -ENOMEM;
}
- limits[0].max = 1;
- limits[0].types |= BIT(NL80211_IFTYPE_STATION);
-
- limits[1].max = 16;
- limits[1].types |= BIT(NL80211_IFTYPE_AP);
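+ /* dual-station hardware supports two STA interfaces on two
+ * different channels
+ */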
+ if (ab->hw_params.support_dual_stations) {
+ limits[0].max = 2;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
- if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
- limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+ limits[1].max = 1;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
- combinations[0].limits = limits;
- combinations[0].n_limits = n_limits;
- combinations[0].max_interfaces = 16;
- combinations[0].num_different_channels = 1;
- combinations[0].beacon_int_infra_match = true;
- combinations[0].beacon_int_min_gcd = 100;
- combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_80P80) |
- BIT(NL80211_CHAN_WIDTH_160);
+ combinations[0].limits = limits;
+ combinations[0].n_limits = 2;
+ combinations[0].max_interfaces = ab->hw_params.num_vdevs;
+ combinations[0].num_different_channels = 2;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ } else {
+ limits[0].max = 1;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+
+ limits[1].max = 16;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations[0].limits = limits;
+ combinations[0].n_limits = 2;
+ combinations[0].max_interfaces = 16;
+ combinations[0].num_different_channels = 1;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160);
+ }
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
@@ -9393,6 +9974,8 @@ static void __ath11k_mac_unregister(struct ath11k *ar)
kfree(ar->hw->wiphy->iface_combinations[0].limits);
kfree(ar->hw->wiphy->iface_combinations);
+ kfree(ar->hw->wiphy->addresses);
+
SET_IEEE80211_DEV(ar->hw, NULL);
}
@@ -9435,6 +10018,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ath11k_pdev_caps_update(ar);
SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+ ath11k_mac_setup_mac_address_list(ar);
SET_IEEE80211_DEV(ar->hw, ab->dev);
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 0dfdeed5177b..f5800fbecff8 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_MAC_H
@@ -176,4 +176,7 @@ int ath11k_mac_wait_tx_complete(struct ath11k *ar);
int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
enum wmi_sta_keepalive_method method,
u32 interval);
+void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 6835c14b82cc..fb4ecf9a103e 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
@@ -20,35 +20,7 @@
#define MHI_TIMEOUT_DEFAULT_MS 20000
#define RDDM_DUMP_SIZE 0x420000
-static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
- {
- .num = 0,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 0,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 1,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 0,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
+static const struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
{
.num = 20,
.name = "IPCR",
@@ -102,46 +74,18 @@ static struct mhi_event_config ath11k_mhi_events_qca6390[] = {
},
};
-static struct mhi_controller_config ath11k_mhi_config_qca6390 = {
+static const struct mhi_controller_config ath11k_mhi_config_qca6390 = {
.max_channels = 128,
.timeout_ms = 2000,
.use_bounce_buf = false,
- .buf_len = 0,
+ .buf_len = 8192,
.num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390),
.ch_cfg = ath11k_mhi_channels_qca6390,
.num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390),
.event_cfg = ath11k_mhi_events_qca6390,
};
-static struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
- {
- .num = 0,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x14,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 1,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x14,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
+static const struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
{
.num = 20,
.name = "IPCR",
@@ -195,7 +139,7 @@ static struct mhi_event_config ath11k_mhi_events_qcn9074[] = {
},
};
-static struct mhi_controller_config ath11k_mhi_config_qcn9074 = {
+static const struct mhi_controller_config ath11k_mhi_config_qcn9074 = {
.max_channels = 30,
.timeout_ms = 10000,
.use_bounce_buf = false,
@@ -384,7 +328,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
struct mhi_controller *mhi_ctrl;
- struct mhi_controller_config *ath11k_mhi_config;
+ const struct mhi_controller_config *ath11k_mhi_config;
int ret;
mhi_ctrl = mhi_alloc_controller();
@@ -423,7 +367,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
goto free_controller;
} else {
mhi_ctrl->iova_start = 0;
- mhi_ctrl->iova_stop = 0xFFFFFFFF;
+ mhi_ctrl->iova_stop = ab_pci->dma_mask;
}
mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
@@ -443,6 +387,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
case ATH11K_HW_QCA6390_HW20:
case ATH11K_HW_WCN6855_HW20:
case ATH11K_HW_WCN6855_HW21:
+ case ATH11K_HW_QCA2066_HW21:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 09e65c5e55c4..be9d2c69cc41 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -18,7 +18,8 @@
#include "qmi.h"
#define ATH11K_PCI_BAR_NUM 0
-#define ATH11K_PCI_DMA_MASK 32
+#define ATH11K_PCI_DMA_MASK 36
+#define ATH11K_PCI_COHERENT_DMA_MASK 32
#define TCSR_SOC_HW_VERSION 0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
@@ -28,6 +29,8 @@
#define QCN9074_DEVICE_ID 0x1104
#define WCN6855_DEVICE_ID 0x1103
+#define TCSR_SOC_HW_SUB_VER 0x1910010
+
static const struct pci_device_id ath11k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
@@ -526,14 +529,24 @@ static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
goto disable_device;
}
- ret = dma_set_mask_and_coherent(&pdev->dev,
- DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
+ ret = dma_set_mask(&pdev->dev,
+ DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
if (ret) {
ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
ATH11K_PCI_DMA_MASK, ret);
goto release_region;
}
+ ab_pci->dma_mask = DMA_BIT_MASK(ATH11K_PCI_DMA_MASK);
+
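+ /* streaming DMA can use 36-bit addressing while coherent
+ * allocations are kept within 32 bits
+ */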
+ ret = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(ATH11K_PCI_COHERENT_DMA_MASK));
+ if (ret) {
+ ath11k_err(ab, "failed to set pci coherent dma mask to %d: %d\n",
+ ATH11K_PCI_COHERENT_DMA_MASK, ret);
+ goto release_region;
+ }
+
pci_set_master(pdev);
ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
@@ -731,8 +744,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
u32 soc_hw_version_major, soc_hw_version_minor, addr;
- const struct ath11k_pci_ops *pci_ops;
int ret;
+ u32 sub_version;
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
@@ -777,6 +790,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
switch (pci_dev->device) {
case QCA6390_DEVICE_ID:
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
@@ -790,13 +809,21 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_pci_free_region;
}
- pci_ops = &ath11k_pci_ops_qca6390;
break;
case QCN9074_DEVICE_ID:
- pci_ops = &ath11k_pci_ops_qcn9074;
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qcn9074);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
case WCN6855_DEVICE_ID:
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
@@ -809,7 +836,19 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
break;
case 0x10:
case 0x11:
- ab->hw_rev = ATH11K_HW_WCN6855_HW21;
+ sub_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_SUB_VER);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "sub_version 0x%x\n",
+ sub_version);
+ switch (sub_version) {
+ case 0x1019A0E1:
+ case 0x1019B0E1:
+ case 0x1019C0E1:
+ case 0x1019D0E1:
+ ab->hw_rev = ATH11K_HW_QCA2066_HW21;
+ break;
+ default:
+ ab->hw_rev = ATH11K_HW_WCN6855_HW21;
+ }
break;
default:
goto unsupported_wcn6855_soc;
@@ -823,7 +862,6 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
- pci_ops = &ath11k_pci_ops_qca6390;
break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
@@ -832,12 +870,6 @@ unsupported_wcn6855_soc:
goto err_pci_free_region;
}
- ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
- if (ret) {
- ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
- goto err_pci_free_region;
- }
-
ret = ath11k_pcic_init_msi_config(ab);
if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index e9a01f344ec6..6be73333d90b 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_PCI_H
#define _ATH11K_PCI_H
@@ -72,6 +72,7 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
u16 link_ctl;
+ u64 dma_mask;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index 15e2ceb22a44..add4db4c50bc 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -115,6 +115,17 @@ static const struct ath11k_msi_config ath11k_msi_config[] = {
},
.hw_rev = ATH11K_HW_WCN6750_HW10,
},
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA2066_HW21,
+ },
};
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 2c7cab62b9bb..5006f81f779b 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -3249,7 +3249,8 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
case ATH11K_QMI_EVENT_FW_INIT_DONE:
clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
- ath11k_hal_dump_srng_stats(ab);
+ if (ab->is_reset)
+ ath11k_hal_dump_srng_stats(ab);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index b4fd4d2107c7..737fcd450d4b 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -425,6 +425,11 @@ static void ath11k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
/* Use the flags of both the rules */
new_rule->flags = rule1->flags | rule2->flags;
+ if ((rule1->flags & NL80211_RRF_PSD) && (rule2->flags & NL80211_RRF_PSD))
+ new_rule->psd = min_t(s8, rule1->psd, rule2->psd);
+ else
+ new_rule->flags &= ~NL80211_RRF_PSD;
+
/* To be safe, let's use the max CAC timeout of both rules */
new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
rule2->dfs_cac_ms);
@@ -527,13 +532,14 @@ ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
static void
ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
- u32 reg_flags)
+ s8 psd, u32 reg_flags)
{
reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->psd = psd;
reg_rule->flags = reg_flags;
}
@@ -563,7 +569,7 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -584,7 +590,7 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
ath11k_reg_update_rule(regd->reg_rules + i, start_freq,
end_freq, bw, reg_rule->ant_gain,
- reg_rule->reg_power, flags);
+ reg_rule->reg_power, reg_rule->psd_eirp, flags);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
@@ -605,7 +611,7 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
@@ -618,25 +624,68 @@ ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
*rule_idx = i;
}
+enum wmi_reg_6ghz_ap_type
+ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type)
+{
+ switch (power_type) {
+ case IEEE80211_REG_LPI_AP:
+ return WMI_REG_INDOOR_AP;
+ case IEEE80211_REG_SP_AP:
+ return WMI_REG_STANDARD_POWER_AP;
+ case IEEE80211_REG_VLP_AP:
+ return WMI_REG_VERY_LOW_POWER_AP;
+ default:
+ return WMI_REG_MAX_AP_TYPE;
+ }
+}
+
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
- struct cur_regulatory_info *reg_info, bool intersect)
+ struct cur_regulatory_info *reg_info, bool intersect,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type)
{
struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
- struct cur_reg_rule *reg_rule;
+ struct cur_reg_rule *reg_rule, *reg_rule_6ghz;
u8 i = 0, j = 0, k = 0;
u8 num_rules;
u16 max_bw;
- u32 flags;
+ u32 flags, reg_6ghz_number, max_bw_6ghz;
char alpha2[3];
num_rules = reg_info->num_5ghz_reg_rules + reg_info->num_2ghz_reg_rules;
- /* FIXME: Currently taking reg rules for 6 GHz only from Indoor AP mode list.
- * This can be updated after complete 6 GHz regulatory support is added.
- */
- if (reg_info->is_ext_reg_event)
- num_rules += reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
+ if (reg_info->is_ext_reg_event) {
+ if (vdev_type == WMI_VDEV_TYPE_STA) {
+ enum wmi_reg_6ghz_ap_type ap_type;
+
+ ap_type = ath11k_reg_ap_pwr_convert(power_type);
+
+ if (ap_type == WMI_REG_MAX_AP_TYPE)
+ ap_type = WMI_REG_INDOOR_AP;
+
+ reg_6ghz_number = reg_info->num_6ghz_rules_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+
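+ /* fall back to the indoor AP rules if the requested power
+ * type has no client rules
+ */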
+ if (reg_6ghz_number == 0) {
+ ap_type = WMI_REG_INDOOR_AP;
+ reg_6ghz_number = reg_info->num_6ghz_rules_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ }
+
+ reg_rule_6ghz = reg_info->reg_rules_6ghz_client_ptr
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ max_bw_6ghz = reg_info->max_bw_6ghz_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ } else {
+ reg_6ghz_number = reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
+ reg_rule_6ghz =
+ reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP];
+ max_bw_6ghz = reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP];
+ }
+
+ num_rules += reg_6ghz_number;
+ }
if (!num_rules)
goto ret;
@@ -683,14 +732,13 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
* per other BW rule flags we pass from here
*/
flags = NL80211_RRF_AUTO_BW;
- } else if (reg_info->is_ext_reg_event &&
- reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] &&
- (k < reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP])) {
- reg_rule = reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP] +
- k++;
- max_bw = min_t(u16, reg_rule->max_bw,
- reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP]);
+ } else if (reg_info->is_ext_reg_event && reg_6ghz_number &&
+ k < reg_6ghz_number) {
+ reg_rule = reg_rule_6ghz + k++;
+ max_bw = min_t(u16, reg_rule->max_bw, max_bw_6ghz);
flags = NL80211_RRF_AUTO_BW;
+ if (reg_rule->psd_flag)
+ flags |= NL80211_RRF_PSD;
} else {
break;
}
@@ -702,7 +750,7 @@ ath11k_reg_build_regd(struct ath11k_base *ab,
reg_rule->start_freq,
reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
- flags);
+ reg_rule->psd_eirp, flags);
/* Update dfs cac timeout if the dfs domain is ETSI and the
* new rule covers weather radar band.
@@ -758,6 +806,159 @@ ret:
return new_regd;
}
+static bool ath11k_reg_is_world_alpha(char *alpha)
+{
+ if (alpha[0] == '0' && alpha[1] == '0')
+ return true;
+
+ if (alpha[0] == 'n' && alpha[1] == 'a')
+ return true;
+
+ return false;
+}
+
+static enum wmi_vdev_type ath11k_reg_get_ar_vdev_type(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+
+ /* Currently each struct ath11k maps to one struct ieee80211_hw/wiphy
+ * and one struct ieee80211_regdomain, so it can only store one group
+ * of reg rules. This means multi-interface concurrency in the same
+ * ath11k is not supported for the regdomain, so take the vdev type of
+ * the first entry for now. Once the regdomain supports concurrency,
+ * this should change.
+ */
+ arvif = list_first_entry_or_null(&ar->arvifs, struct ath11k_vif, list);
+ if (arvif)
+ return arvif->vdev_type;
+
+ return WMI_VDEV_TYPE_UNSPEC;
+}
+
+int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info,
+ enum ieee80211_ap_reg_power power_type)
+{
+ struct ieee80211_regdomain *regd;
+ bool intersect = false;
+ int pdev_idx;
+ struct ath11k *ar;
+ enum wmi_vdev_type vdev_type;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg handle chan list");
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+ /* If setting the requested country fails, the firmware
+ * retains the current regd. Print a failure message and
+ * return.
+ */
+ ath11k_warn(ab, "failed to set the requested country regulatory setting\n");
+ return -EINVAL;
+ }
+
+ pdev_idx = reg_info->phy_id;
+
+ /* Avoid default reg rule updates sent during FW recovery if
+ * a default regd is already available
+ */
+ spin_lock_bh(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
+ ab->default_regd[pdev_idx]) {
+ spin_unlock_bh(&ab->base_lock);
+ goto retfail;
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ if (pdev_idx >= ab->num_radios) {
+ /* Process the event for phy0 only if single_pdev_only
+ * is true. If pdev_idx is valid but not 0, discard the
+ * event; otherwise fall back. In either case,
+ * ath11k_reg_reset_info() must be called to avoid a
+ * memory leak.
+ */
+ ath11k_reg_reset_info(reg_info);
+
+ if (ab->hw_params.single_pdev_only &&
+ pdev_idx < ab->hw_params.num_rxmda_per_pdev)
+ return 0;
+ goto fallback;
+ }
+
+ /* Avoid multiple overwrites of the default regd during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
+ (char *)reg_info->alpha2, 2))
+ goto retfail;
+
+ /* Intersect new rules with the default regd if a new country setting
+ * was requested, i.e. a default regd was already set during
+ * initialization and the regd coming from this event has valid
+ * country info.
+ */
+ if (ab->default_regd[pdev_idx] &&
+ !ath11k_reg_is_world_alpha((char *)
+ ab->default_regd[pdev_idx]->alpha2) &&
+ !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
+ intersect = true;
+
+ ar = ab->pdevs[pdev_idx].ar;
+ vdev_type = ath11k_reg_get_ar_vdev_type(ar);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi handle chan list power type %d vdev type %d intersect %d\n",
+ power_type, vdev_type, intersect);
+
+ regd = ath11k_reg_build_regd(ab, reg_info, intersect, vdev_type, power_type);
+ if (!regd) {
+ ath11k_warn(ab, "failed to build regd from reg_info\n");
+ goto fallback;
+ }
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ath11k_reg_reset_info(&ab->reg_info_store[pdev_idx]);
+ ab->reg_info_store[pdev_idx] = *reg_info;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ if (ab->default_regd[pdev_idx]) {
+ /* The initial rules from FW after WMI init are used to build
+ * the default regd. From then on, any rule update for the
+ * pdev could be due to a user reg change.
+ * Free the previously built regd before assigning the newly
+ * generated regd to ar; kfree itself handles a NULL pointer.
+ */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ queue_work(ab->workqueue, &ar->regd_update_work);
+ } else {
+ /* This regd is applied during mac registration and is held
+ * constant throughout for regd intersection purposes
+ */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+fallback:
+ /* Fall back to the older regd by sending the previous country
+ * setting again when the firmware has succeeded but we failed to
+ * process the result here. The regdomain should be uniform across
+ * driver and firmware: since the FW has processed the command and
+ * sent a success status, we expect this function to succeed as
+ * well. If it doesn't, CTRY needs to be reverted at the fw and the
+ * old SCAN_CHAN_LIST cmd needs to be sent.
+ */
+ /* TODO: This is rare, but should still be handled */
+ WARN_ON(1);
+
+retfail:
+
+ return -EINVAL;
+}
+
void ath11k_regd_update_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k,
@@ -781,10 +982,36 @@ void ath11k_reg_init(struct ath11k *ar)
ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
}
+void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info)
+{
+ int i, j;
+
+ if (!reg_info)
+ return;
+
+ kfree(reg_info->reg_rules_2ghz_ptr);
+ kfree(reg_info->reg_rules_5ghz_ptr);
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
+
+ for (j = 0; j < WMI_REG_MAX_CLIENT_TYPE; j++)
+ kfree(reg_info->reg_rules_6ghz_client_ptr[i][j]);
+ }
+
+ memset(reg_info, 0, sizeof(*reg_info));
+}
+
void ath11k_reg_free(struct ath11k_base *ab)
{
int i;
+ for (i = 0; i < ab->num_radios; i++)
+ ath11k_reg_reset_info(&ab->reg_info_store[i]);
+
+ kfree(ab->reg_info_store);
+ ab->reg_info_store = NULL;
+
for (i = 0; i < ab->hw_params.max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index f28902f85e41..64edb794260a 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -30,11 +30,20 @@ enum ath11k_dfs_region {
/* ATH11K Regulatory API's */
void ath11k_reg_init(struct ath11k *ar);
+void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
void ath11k_reg_free(struct ath11k_base *ab);
void ath11k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
- struct cur_regulatory_info *reg_info, bool intersect);
+ struct cur_regulatory_info *reg_info, bool intersect,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type);
int ath11k_regd_update(struct ath11k *ar);
int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
+enum wmi_reg_6ghz_ap_type
+ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
+int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info,
+ enum ieee80211_ap_reg_power power_type);
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
index 43bb23265d34..302d66092b97 100644
--- a/drivers/net/wireless/ath/ath11k/testmode.c
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -198,7 +198,7 @@ static void ath11k_tm_wmi_event_segmented(struct ath11k_base *ab, u32 cmd_id,
u16 length;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index c29b11ab5bfa..41e7499f075f 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/device.h>
@@ -163,6 +163,9 @@ int ath11k_thermal_register(struct ath11k_base *ab)
struct ath11k_pdev *pdev;
int i, ret;
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 8a65fa04b48d..34ab9631ff36 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -238,8 +238,8 @@ static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
(void *)tb);
}
-const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
- size_t len, gfp_t gfp)
+const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
+ struct sk_buff *skb, gfp_t gfp)
{
const void **tb;
int ret;
@@ -248,7 +248,7 @@ const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
if (!tb)
return ERR_PTR(-ENOMEM);
- ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len);
+ ret = ath11k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
if (ret) {
kfree(tb);
return ERR_PTR(ret);
@@ -2098,7 +2098,7 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
- arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->scan_f_chan_stat_evnt = 1;
if (test_bit(WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE,
ar->ab->wmi_ab.svc_map))
@@ -2379,6 +2379,70 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
return ret;
}
+int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar,
+ u32 vdev_id,
+ struct ath11k_reg_tpc_power_info *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_tpc_power_cmd *cmd;
+ struct wmi_vdev_ch_power_info *ch;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ u8 *ptr;
+ int i, ret, len, array_len;
+
+ array_len = sizeof(*ch) * param->num_pwr_levels;
+ len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;
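+ /* layout: command TLV followed by an array TLV of per-channel
+ * power entries
+ */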
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_TPC_POWER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->psd_power = param->is_psd_power;
+ cmd->eirp_power = param->eirp_power;
+ cmd->power_type_6ghz = param->ap_power_type;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
+ vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);
+
+ ptr += sizeof(*cmd);
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, array_len);
+
+ ptr += TLV_HDR_SIZE;
+ ch = (struct wmi_vdev_ch_power_info *)ptr;
+
+ for (i = 0; i < param->num_pwr_levels; i++, ch++) {
+ ch->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_CH_POWER_INFO) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*ch) - TLV_HDR_SIZE);
+
+ ch->chan_cfreq = param->chan_power_info[i].chan_cfreq;
+ ch->tx_power = param->chan_power_info[i].tx_power;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tpc chan freq %d TX power %d\n",
+ ch->chan_cfreq, ch->tx_power);
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
struct scan_cancel_param *param)
{
@@ -3930,7 +3994,7 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
struct ath11k_vif *arvif;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -3956,8 +4020,7 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
- ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
- GFP_KERNEL);
+ ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
@@ -4749,6 +4812,14 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
soc->pdevs[0].pdev_id = 0;
}
+ if (!soc->reg_info_store) {
+ soc->reg_info_store = kcalloc(soc->num_radios,
+ sizeof(*soc->reg_info_store),
+ GFP_ATOMIC);
+ if (!soc->reg_info_store)
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -4786,6 +4857,7 @@ static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab)
{
kfree(ab->db_caps);
ab->db_caps = NULL;
+ ab->num_db_cap = 0;
}
static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab,
@@ -5003,7 +5075,7 @@ static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buf
const struct wmi_vdev_start_resp_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5028,6 +5100,7 @@ static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buf
vdev_rsp->mac_id = ev->mac_id;
vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
+ vdev_rsp->max_allowed_tx_power = ev->max_allowed_tx_power;
kfree(tb);
return 0;
@@ -5102,7 +5175,7 @@ static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5278,7 +5351,7 @@ static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory ext channel list\n");
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5634,7 +5707,7 @@ static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *
const struct wmi_peer_delete_resp_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5666,7 +5739,7 @@ static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
const struct wmi_vdev_delete_resp_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5686,15 +5759,15 @@ static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
return 0;
}
-static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
- u32 len, u32 *vdev_id,
- u32 *tx_status)
+static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id, u32 *tx_status)
{
const void **tb;
const struct wmi_bcn_tx_status_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5722,7 +5795,7 @@ static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_
const struct wmi_vdev_stopped_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5876,7 +5949,7 @@ static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
const struct wmi_mgmt_tx_compl_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6052,7 +6125,7 @@ static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
const struct wmi_scan_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6085,7 +6158,7 @@ static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buf
const struct wmi_peer_sta_kickout_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6112,7 +6185,7 @@ static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
const struct wmi_roam_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6153,14 +6226,14 @@ exit:
return idx;
}
-static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
- u32 len, struct wmi_chan_info_event *ch_info_ev)
+static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_chan_info_event *ch_info_ev)
{
const void **tb;
const struct wmi_chan_info_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6199,7 +6272,7 @@ ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
const struct wmi_pdev_bss_chan_info_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6239,7 +6312,7 @@ ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *sk
const struct wmi_vdev_install_key_compl_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6270,7 +6343,7 @@ static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff
const struct wmi_peer_assoc_conf_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6995,7 +7068,7 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s
const void **tb;
int ret, i;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -7060,32 +7133,15 @@ static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
wake_up(&wmi->tx_ce_desc_wq);
}
-static bool ath11k_reg_is_world_alpha(char *alpha)
-{
- if (alpha[0] == '0' && alpha[1] == '0')
- return true;
-
- if (alpha[0] == 'n' && alpha[1] == 'a')
- return true;
-
- return false;
-}
-
-static int ath11k_reg_chan_list_event(struct ath11k_base *ab,
- struct sk_buff *skb,
+static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb,
enum wmi_reg_chan_list_cmd_type id)
{
- struct cur_regulatory_info *reg_info = NULL;
- struct ieee80211_regdomain *regd = NULL;
- bool intersect = false;
- int ret = 0, pdev_idx, i, j;
- struct ath11k *ar;
+ struct cur_regulatory_info *reg_info;
+ int ret;
reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
- if (!reg_info) {
- ret = -ENOMEM;
- goto fallback;
- }
+ if (!reg_info)
+ return -ENOMEM;
if (id == WMI_REG_CHAN_LIST_CC_ID)
ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
@@ -7093,118 +7149,22 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab,
ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
if (ret) {
- ath11k_warn(ab, "failed to extract regulatory info from received event\n");
- goto fallback;
- }
-
- ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg chan list id %d", id);
-
- if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
- /* In case of failure to set the requested ctry,
- * fw retains the current regd. We print a failure info
- * and return from here.
- */
- ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
- goto mem_free;
- }
-
- pdev_idx = reg_info->phy_id;
-
- /* Avoid default reg rule updates sent during FW recovery if
- * it is already available
- */
- spin_lock(&ab->base_lock);
- if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
- ab->default_regd[pdev_idx]) {
- spin_unlock(&ab->base_lock);
+ ath11k_warn(ab, "failed to extract regulatory info\n");
goto mem_free;
}
- spin_unlock(&ab->base_lock);
- if (pdev_idx >= ab->num_radios) {
- /* Process the event for phy0 only if single_pdev_only
- * is true. If pdev_idx is valid but not 0, discard the
- * event. Otherwise, it goes to fallback.
- */
- if (ab->hw_params.single_pdev_only &&
- pdev_idx < ab->hw_params.num_rxmda_per_pdev)
- goto mem_free;
- else
- goto fallback;
- }
-
- /* Avoid multiple overwrites to default regd, during core
- * stop-start after mac registration.
- */
- if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
- !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
- (char *)reg_info->alpha2, 2))
+ ret = ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_UNSET_AP);
+ if (ret) {
+ ath11k_warn(ab, "failed to process regulatory info %d\n", ret);
goto mem_free;
-
- /* Intersect new rules with default regd if a new country setting was
- * requested, i.e a default regd was already set during initialization
- * and the regd coming from this event has a valid country info.
- */
- if (ab->default_regd[pdev_idx] &&
- !ath11k_reg_is_world_alpha((char *)
- ab->default_regd[pdev_idx]->alpha2) &&
- !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
- intersect = true;
-
- regd = ath11k_reg_build_regd(ab, reg_info, intersect);
- if (!regd) {
- ath11k_warn(ab, "failed to build regd from reg_info\n");
- goto fallback;
- }
-
- spin_lock(&ab->base_lock);
- if (ab->default_regd[pdev_idx]) {
- /* The initial rules from FW after WMI Init is to build
- * the default regd. From then on, any rules updated for
- * the pdev could be due to user reg changes.
- * Free previously built regd before assigning the newly
- * generated regd to ar. NULL pointer handling will be
- * taken care by kfree itself.
- */
- ar = ab->pdevs[pdev_idx].ar;
- kfree(ab->new_regd[pdev_idx]);
- ab->new_regd[pdev_idx] = regd;
- queue_work(ab->workqueue, &ar->regd_update_work);
- } else {
- /* This regd would be applied during mac registration and is
- * held constant throughout for regd intersection purpose
- */
- ab->default_regd[pdev_idx] = regd;
}
- ab->dfs_region = reg_info->dfs_region;
- spin_unlock(&ab->base_lock);
- goto mem_free;
+ kfree(reg_info);
+ return 0;
-fallback:
- /* Fallback to older reg (by sending previous country setting
- * again if fw has succeeded and we failed to process here.
- * The Regdomain should be uniform across driver and fw. Since the
- * FW has processed the command and sent a success status, we expect
- * this function to succeed as well. If it doesn't, CTRY needs to be
- * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
- */
- /* TODO: This is rare, but still should also be handled */
- WARN_ON(1);
mem_free:
- if (reg_info) {
- kfree(reg_info->reg_rules_2ghz_ptr);
- kfree(reg_info->reg_rules_5ghz_ptr);
- if (reg_info->is_ext_reg_event) {
- for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
- kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
-
- for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
- for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
- kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]);
- }
- kfree(reg_info);
- }
+ ath11k_reg_reset_info(reg_info);
+ kfree(reg_info);
return ret;
}
@@ -7362,7 +7322,7 @@ static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff
}
ar->last_wmi_vdev_start_status = 0;
-
+ ar->max_allowed_tx_power = vdev_start_resp.max_allowed_tx_power;
status = vdev_start_resp.status;
if (WARN_ON_ONCE(status)) {
@@ -7384,8 +7344,7 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
struct ath11k_vif *arvif;
u32 vdev_id, tx_status;
- if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
- &vdev_id, &tx_status) != 0) {
+ if (ath11k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
ath11k_warn(ab, "failed to extract bcn tx status");
return;
}
@@ -7416,7 +7375,7 @@ static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
enum ath11k_wmi_peer_ps_state peer_previous_ps_state;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -7884,7 +7843,7 @@ static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
/* HW channel counters frequency value in hertz */
u32 cc_freq_hz = ab->cc_freq_hz;
- if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ if (ath11k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
ath11k_warn(ab, "failed to extract chan info event");
return;
}
@@ -8216,7 +8175,7 @@ static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
const struct wmi_pdev_ctl_failsafe_chk_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8267,7 +8226,7 @@ ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
}
if (arvif->is_up && arvif->vif->bss_conf.csa_active)
- ieee80211_csa_finish(arvif->vif);
+ ieee80211_csa_finish(arvif->vif, 0);
}
rcu_read_unlock();
}
@@ -8281,7 +8240,7 @@ ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
const u32 *vdev_ids;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8315,7 +8274,7 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
struct ath11k *ar;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8369,7 +8328,7 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
const struct wmi_pdev_temperature_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -8409,7 +8368,7 @@ static void ath11k_fils_discovery_event(struct ath11k_base *ab,
const struct wmi_fils_discovery_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
@@ -8441,7 +8400,7 @@ static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab,
const struct wmi_probe_resp_tx_status_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
@@ -8567,7 +8526,7 @@ static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
const struct wmi_twt_add_dialog_event *ev;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab,
@@ -8604,7 +8563,7 @@ static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
u64 replay_ctr;
int ret;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -9793,3 +9752,9 @@ int ath11k_wmi_sta_keepalive(struct ath11k *ar,
return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
}
+
+bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar)
+{
+ return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
+ ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
+}
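
/* A minimal usage sketch (helper name hypothetical, not from this
 * series): a plausible caller of the helper above, choosing between
 * the legacy and extended regulatory channel-list variants handled by
 * ath11k_reg_chan_list_event().
 */
static enum wmi_reg_chan_list_cmd_type
ath11k_example_reg_chan_list_id(struct ath11k *ar)
{
	/* Prefer the 6 GHz aware extended list when the firmware
	 * advertises the CC-ext service and the radio supports 6 GHz.
	 */
	if (ath11k_wmi_supports_6ghz_cc_ext(ar))
		return WMI_REG_CHAN_LIST_CC_EXT_ID;

	return WMI_REG_CHAN_LIST_CC_ID;
}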
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index ff0a9a92beeb..bb419e3abb00 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -15,6 +15,7 @@ struct ath11k;
struct ath11k_fw_stats;
struct ath11k_fw_dbglog;
struct ath11k_vif;
+struct ath11k_reg_tpc_power_info;
#define PSOC_HOST_MAX_NUM_SS (8)
@@ -327,6 +328,22 @@ enum wmi_tlv_cmd_id {
WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_VDEV_SET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_TX_POWER_CMDID,
+ WMI_VDEV_LIMIT_OFFCHAN_CMDID,
+ WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID,
+ WMI_VDEV_CHAINMASK_CONFIG_CMDID,
+ WMI_VDEV_GET_BCN_RECEPTION_STATS_CMDID,
+ WMI_VDEV_GET_MWS_COEX_INFO_CMDID,
+ WMI_VDEV_DELETE_ALL_PEER_CMDID,
+ WMI_VDEV_BSS_MAX_IDLE_TIME_CMDID,
+ WMI_VDEV_AUDIO_SYNC_TRIGGER_CMDID,
+ WMI_VDEV_AUDIO_SYNC_QTIMER_CMDID,
+ WMI_VDEV_SET_PCL_CMDID,
+ WMI_VDEV_GET_BIG_DATA_CMDID,
+ WMI_VDEV_GET_BIG_DATA_P2_CMDID,
+ WMI_VDEV_SET_TPC_POWER_CMDID,
WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
WMI_PEER_DELETE_CMDID,
WMI_PEER_FLUSH_TIDS_CMDID,
@@ -1880,6 +1897,8 @@ enum wmi_tlv_tag {
WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+ WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5,
+ WMI_TAG_VDEV_CH_POWER_INFO,
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
WMI_TAG_MAX
@@ -2114,6 +2133,7 @@ enum wmi_tlv_service {
/* The second 128 bits */
WMI_MAX_EXT_SERVICE = 256,
WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL = 265,
+ WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT = 280,
WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281,
WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326,
WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN = 357,
@@ -3168,6 +3188,41 @@ struct wlan_ssid {
u8 ssid[WLAN_SSID_MAX_LEN];
};
+struct wmi_vdev_ch_power_info {
+ u32 tlv_header;
+
+ /* Channel center frequency (MHz) */
+ u32 chan_cfreq;
+
+ /* Unit: dBm; either the PSD/EIRP power for this frequency or
+ * the incremental power for a non-PSD bandwidth
+ */
+ u32 tx_power;
+} __packed;
+
+struct wmi_vdev_set_tpc_power_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+
+ /* Value: 0 or 1, whether this is PSD power */
+ u32 psd_power;
+
+ /* Maximum EIRP power (dBm units), valid only if power is PSD */
+ u32 eirp_power;
+
+ /* Type: WMI_6GHZ_REG_TYPE, used for halphy CTL lookup */
+ u32 power_type_6ghz;
+
+ /* This fixed_param TLV is followed by the TLVs below:
+ * num_pwr_levels elements of wmi_vdev_ch_power_info.
+ * For PSD power, each element carries the PSD/EIRP power of one
+ * 20 MHz chunk of the frequency range.
+ * For non-PSD power, the elements carry the power values for 20 MHz,
+ * 40 MHz, and so on up to the BSS bandwidth.
+ * Software derives num_pwr_levels from the number of elements
+ * present in the variable-length array.
+ */
+} __packed;
+
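
/* A rough sketch (helper name hypothetical, not from this series) of
 * how a sender would size the command buffer for the layout described
 * in the comment above: the fixed_param TLV followed by a TLV array of
 * num_pwr_levels wmi_vdev_ch_power_info elements, where TLV_HDR_SIZE
 * is the usual ath11k TLV header size.
 */
static inline size_t example_tpc_power_cmd_len(u8 num_pwr_levels)
{
	return sizeof(struct wmi_vdev_set_tpc_power_cmd) +
	       TLV_HDR_SIZE +	/* header of the ch_power_info array */
	       num_pwr_levels * sizeof(struct wmi_vdev_ch_power_info);
}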
#define WMI_IE_BITMAP_SIZE 8
/* prefix used by scan requestor ids on the host */
@@ -3308,24 +3363,19 @@ struct scan_req_params {
u32 vdev_id;
u32 pdev_id;
enum wmi_scan_priority scan_priority;
- union {
- struct {
- u32 scan_ev_started:1,
- scan_ev_completed:1,
- scan_ev_bss_chan:1,
- scan_ev_foreign_chan:1,
- scan_ev_dequeued:1,
- scan_ev_preempted:1,
- scan_ev_start_failed:1,
- scan_ev_restarted:1,
- scan_ev_foreign_chn_exit:1,
- scan_ev_invalid:1,
- scan_ev_gpio_timeout:1,
- scan_ev_suspended:1,
- scan_ev_resumed:1;
- };
- u32 scan_events;
- };
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
u32 scan_ctrl_flags_ext;
u32 dwell_time_active;
u32 dwell_time_active_2g;
@@ -3339,36 +3389,31 @@ struct scan_req_params {
u32 idle_time;
u32 max_scan_time;
u32 probe_delay;
- union {
- struct {
- u32 scan_f_passive:1,
- scan_f_bcast_probe:1,
- scan_f_cck_rates:1,
- scan_f_ofdm_rates:1,
- scan_f_chan_stat_evnt:1,
- scan_f_filter_prb_req:1,
- scan_f_bypass_dfs_chn:1,
- scan_f_continue_on_err:1,
- scan_f_offchan_mgmt_tx:1,
- scan_f_offchan_data_tx:1,
- scan_f_promisc_mode:1,
- scan_f_capture_phy_err:1,
- scan_f_strict_passive_pch:1,
- scan_f_half_rate:1,
- scan_f_quarter_rate:1,
- scan_f_force_active_dfs_chn:1,
- scan_f_add_tpc_ie_in_probe:1,
- scan_f_add_ds_ie_in_probe:1,
- scan_f_add_spoofed_mac_in_probe:1,
- scan_f_add_rand_seq_in_probe:1,
- scan_f_en_ie_whitelist_in_probe:1,
- scan_f_forced:1,
- scan_f_2ghz:1,
- scan_f_5ghz:1,
- scan_f_80mhz:1;
- };
- u32 scan_flags;
- };
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
u32 burst_duration;
u32 num_chan;
@@ -4119,6 +4164,7 @@ struct wmi_vdev_start_resp_event {
};
u32 cfgd_tx_streams;
u32 cfgd_rx_streams;
+ s32 max_allowed_tx_power;
} __packed;
/* VDEV start response status codes */
@@ -4951,6 +4997,7 @@ struct ath11k_targ_cap {
};
enum wmi_vdev_type {
+ WMI_VDEV_TYPE_UNSPEC = 0,
WMI_VDEV_TYPE_AP = 1,
WMI_VDEV_TYPE_STA = 2,
WMI_VDEV_TYPE_IBSS = 3,
@@ -6295,8 +6342,8 @@ enum wmi_sta_keepalive_method {
#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
-const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
- size_t len, gfp_t gfp);
+const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
+ struct sk_buff *skb, gfp_t gfp);
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
u32 cmd_id);
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
@@ -6479,5 +6526,9 @@ int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_va
int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar);
int ath11k_wmi_sta_keepalive(struct ath11k *ar,
const struct wmi_sta_keepalive_arg *arg);
+bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar);
+int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar,
+ u32 vdev_id,
+ struct ath11k_reg_tpc_power_info *param);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index 62c52e733b5e..71669f94ff75 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -19,7 +19,9 @@ ath12k-y += core.o \
hw.o \
mhi.o \
pci.o \
- dp_mon.o
+ dp_mon.o \
+ fw.o \
+ p2p.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 6c01b282fcd3..391b6fb2bd42 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -14,6 +14,7 @@
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"
+#include "fw.h"
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
@@ -104,27 +105,66 @@ int ath12k_core_resume(struct ath12k_base *ab)
return 0;
}
-static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
- size_t name_len)
+static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len, bool with_variant,
+ bool bus_type_mode)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
- if (ab->qmi.target.bdf_ext[0] != '\0')
+ if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s",
ab->qmi.target.bdf_ext);
- scnprintf(name, name_len,
- "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
- ath12k_bus_str(ab->hif.bus),
- ab->qmi.target.chip_id,
- ab->qmi.target.board_id, variant);
+ switch (ab->id.bdf_search) {
+ case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
+ if (bus_type_mode)
+ scnprintf(name, name_len,
+ "bus=%s",
+ ath12k_bus_str(ab->hif.bus));
+ else
+ scnprintf(name, name_len,
+ "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath12k_bus_str(ab->hif.bus),
+ ab->id.vendor, ab->id.device,
+ ab->id.subsystem_vendor,
+ ab->id.subsystem_device,
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id,
+ variant);
+ break;
+ default:
+ scnprintf(name, name_len,
+ "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath12k_bus_str(ab->hif.bus),
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id, variant);
+ break;
+ }
ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
return 0;
}
+static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath12k_core_create_board_name(ab, name, name_len, true, false);
+}
+
+static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath12k_core_create_board_name(ab, name, name_len, false, false);
+}
+
+static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath12k_core_create_board_name(ab, name, name_len, false, true);
+}
+
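/* Illustrative examples (all IDs hypothetical) of the names the
 * helpers above generate for a PCI device, in decreasing order of
 * specificity:
 *
 *   bus-and-board search:
 *     bus=pci,vendor=17cb,device=1107,subsystem-vendor=17cb,
 *     subsystem-device=0108,qmi-chip-id=2,qmi-board-id=255,variant=XYZ
 *   default search:
 *     bus=pci,qmi-chip-id=2,qmi-board-id=255,variant=XYZ
 *   fallback name: as above, but without ",variant=XYZ"
 *   bus-type name: bus=pci
 */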
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *file)
{
@@ -159,7 +199,9 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
struct ath12k_board_data *bd,
const void *buf, size_t buf_len,
const char *boardname,
- int bd_ie_type)
+ int ie_id,
+ int name_id,
+ int data_id)
{
const struct ath12k_fw_ie *hdr;
bool name_match_found;
@@ -169,7 +211,7 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
name_match_found = false;
- /* go through ATH12K_BD_IE_BOARD_ elements */
+ /* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
while (buf_len > sizeof(struct ath12k_fw_ie)) {
hdr = buf;
board_ie_id = le32_to_cpu(hdr->id);
@@ -180,48 +222,50 @@ static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
buf += sizeof(*hdr);
if (buf_len < ALIGN(board_ie_len, 4)) {
- ath12k_err(ab, "invalid ATH12K_BD_IE_BOARD length: %zu < %zu\n",
+ ath12k_err(ab, "invalid %s length: %zu < %zu\n",
+ ath12k_bd_ie_type_str(ie_id),
buf_len, ALIGN(board_ie_len, 4));
ret = -EINVAL;
goto out;
}
- switch (board_ie_id) {
- case ATH12K_BD_IE_BOARD_NAME:
+ if (board_ie_id == name_id) {
ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
board_ie_data, board_ie_len);
if (board_ie_len != strlen(boardname))
- break;
+ goto next;
ret = memcmp(board_ie_data, boardname, strlen(boardname));
if (ret)
- break;
+ goto next;
name_match_found = true;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
- "boot found match for name '%s'",
+ "boot found match %s for name '%s'",
+ ath12k_bd_ie_type_str(ie_id),
boardname);
- break;
- case ATH12K_BD_IE_BOARD_DATA:
+ } else if (board_ie_id == data_id) {
if (!name_match_found)
/* no match found */
- break;
+ goto next;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
- "boot found board data for '%s'", boardname);
+ "boot found %s for '%s'",
+ ath12k_bd_ie_type_str(ie_id),
+ boardname);
bd->data = board_ie_data;
bd->len = board_ie_len;
ret = 0;
goto out;
- default:
- ath12k_warn(ab, "unknown ATH12K_BD_IE_BOARD found: %d\n",
+ } else {
+ ath12k_warn(ab, "unknown %s id found: %d\n",
+ ath12k_bd_ie_type_str(ie_id),
board_ie_id);
- break;
}
-
+next:
/* jump over the padding */
board_ie_len = ALIGN(board_ie_len, 4);
@@ -238,7 +282,10 @@ out:
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
struct ath12k_board_data *bd,
- const char *boardname)
+ const char *boardname,
+ int ie_id_match,
+ int name_id,
+ int data_id)
{
size_t len, magic_len;
const u8 *data;
@@ -303,22 +350,23 @@ static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
goto err;
}
- switch (ie_id) {
- case ATH12K_BD_IE_BOARD:
+ if (ie_id == ie_id_match) {
ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
ie_len,
boardname,
- ATH12K_BD_IE_BOARD);
+ ie_id_match,
+ name_id,
+ data_id);
if (ret == -ENOENT)
/* no match found, continue */
- break;
+ goto next;
else if (ret)
/* there was an error, bail out */
goto err;
/* either found or error, so stop searching */
goto out;
}
-
+next:
/* jump over the padding */
ie_len = ALIGN(ie_len, 4);
@@ -328,8 +376,9 @@ static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
out:
if (!bd->data || !bd->len) {
- ath12k_err(ab,
- "failed to fetch board data for %s from %s\n",
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to fetch %s for %s from %s\n",
+ ath12k_bd_ie_type_str(ie_id_match),
boardname, filepath);
ret = -ENODATA;
goto err;
@@ -356,28 +405,56 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
return 0;
}
-#define BOARD_NAME_SIZE 100
+#define BOARD_NAME_SIZE 200
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
- char boardname[BOARD_NAME_SIZE];
+ char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
+ char *filename, filepath[100];
int bd_api;
int ret;
- ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ filename = ATH12K_BOARD_API2_FILE;
+
+ ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
if (ret) {
ath12k_err(ab, "failed to create board name: %d", ret);
return ret;
}
bd_api = 2;
- ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname);
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH12K_BD_IE_BOARD,
+ ATH12K_BD_IE_BOARD_NAME,
+ ATH12K_BD_IE_BOARD_DATA);
+ if (!ret)
+ goto success;
+
+ ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
+ sizeof(fallback_boardname));
+ if (ret) {
+ ath12k_err(ab, "failed to create fallback board name: %d", ret);
+ return ret;
+ }
+
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
+ ATH12K_BD_IE_BOARD,
+ ATH12K_BD_IE_BOARD_NAME,
+ ATH12K_BD_IE_BOARD_DATA);
if (!ret)
goto success;
bd_api = 1;
ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
if (ret) {
- ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+ ath12k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+ ath12k_err(ab, "failed to fetch board data for %s from %s\n",
+ boardname, filepath);
+ if (memcmp(boardname, fallback_boardname, strlen(boardname)))
+ ath12k_err(ab, "failed to fetch board data for %s from %s\n",
+ fallback_boardname, filepath);
+
+ ath12k_err(ab, "failed to fetch board.bin from %s\n",
ab->hw_params->fw.dir);
return ret;
}
@@ -387,6 +464,79 @@ success:
return 0;
}
+int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
+{
+ char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
+ int ret;
+
+ ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to create board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH12K_BD_IE_REGDB,
+ ATH12K_BD_IE_REGDB_NAME,
+ ATH12K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
+ ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
+ BOARD_NAME_SIZE);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to create default board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
+ ATH12K_BD_IE_REGDB,
+ ATH12K_BD_IE_REGDB_NAME,
+ ATH12K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
+ ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
+ if (ret)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
+ ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);
+
+exit:
+ if (!ret)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");
+
+ return ret;
+}
+
+u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
+{
+ if (ab->num_radios == 2)
+ return TARGET_NUM_STATIONS_DBS;
+ else if (ab->num_radios == 3)
+ return TARGET_NUM_PEERS_PDEV_DBS_SBS;
+ return TARGET_NUM_STATIONS_SINGLE;
+}
+
+u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
+{
+ if (ab->num_radios == 2)
+ return TARGET_NUM_PEERS_PDEV_DBS;
+ else if (ab->num_radios == 3)
+ return TARGET_NUM_PEERS_PDEV_DBS_SBS;
+ return TARGET_NUM_PEERS_PDEV_SINGLE;
+}
+
+u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
+{
+ if (ab->num_radios == 2)
+ return TARGET_NUM_TIDS(DBS);
+ else if (ab->num_radios == 3)
+ return TARGET_NUM_TIDS(DBS_SBS);
+ return TARGET_NUM_TIDS(SINGLE);
+}
+
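/* A plausible consumer (sketch only; the cfg field names are
 * assumptions for illustration) of the three helpers above, scaling
 * firmware resource limits by the detected radio count when building
 * the WMI resource configuration.
 */
static void example_apply_resource_limits(struct ath12k_base *ab,
					  struct ath12k_wmi_resource_config_arg *cfg)
{
	cfg->num_peers = ab->num_radios *
			 ath12k_core_get_max_peers_per_radio(ab);
	cfg->num_tids = ath12k_core_get_max_num_tids(ab);
}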
static void ath12k_core_stop(struct ath12k_base *ab)
{
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
@@ -592,14 +742,14 @@ static int ath12k_core_start(struct ath12k_base *ab,
ath12k_dp_cc_config(ab);
- ath12k_dp_pdev_pre_alloc(ab);
-
ret = ath12k_dp_rx_pdev_reo_setup(ab);
if (ret) {
ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
goto err_mac_destroy;
}
+ ath12k_dp_hal_rx_desc_init(ab);
+
ret = ath12k_wmi_cmd_init(ab);
if (ret) {
ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
@@ -759,20 +909,30 @@ static void ath12k_rfkill_work(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
struct ath12k *ar;
+ struct ath12k_hw *ah;
+ struct ieee80211_hw *hw;
bool rfkill_radio_on;
- int i;
+ int i, j;
spin_lock_bh(&ab->base_lock);
rfkill_radio_on = ab->rfkill_radio_on;
spin_unlock_bh(&ab->base_lock);
- for (i = 0; i < ab->num_radios; i++) {
- ar = ab->pdevs[i].ar;
- if (!ar)
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+ if (!ah)
continue;
- ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
- wiphy_rfkill_set_hw_state(ar->hw->wiphy, !rfkill_radio_on);
+ for (j = 0; j < ah->num_radio; j++) {
+ ar = &ah->radio[j];
+ if (!ar)
+ continue;
+
+ ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
+ }
+
+ hw = ah->hw;
+ wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
}
}
@@ -801,6 +961,7 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
struct ath12k *ar;
struct ath12k_pdev *pdev;
+ struct ath12k_hw *ah;
int i;
spin_lock_bh(&ab->base_lock);
@@ -810,16 +971,24 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
if (ab->is_reset)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ for (i = 0; i < ab->num_hw; i++) {
+ if (!ab->ah[i])
+ continue;
+
+ ah = ab->ah[i];
+ ieee80211_stop_queues(ah->hw);
+ }
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (!ar || ar->state == ATH12K_STATE_OFF)
continue;
- ieee80211_stop_queues(ar->hw);
ath12k_mac_drain_tx(ar);
complete(&ar->scan.started);
complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
complete(&ar->peer_assoc_done);
complete(&ar->peer_delete_done);
complete(&ar->install_key_done);
@@ -856,7 +1025,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
case ATH12K_STATE_ON:
ar->state = ATH12K_STATE_RESTARTING;
ath12k_core_halt(ar);
- ieee80211_restart_hw(ar->hw);
+ ieee80211_restart_hw(ath12k_ar_to_hw(ar));
break;
case ATH12K_STATE_OFF:
ath12k_warn(ab,
@@ -979,6 +1148,8 @@ int ath12k_core_pre_init(struct ath12k_base *ab)
return ret;
}
+ ath12k_fw_map(ab);
+
return 0;
}
@@ -1007,6 +1178,7 @@ void ath12k_core_deinit(struct ath12k_base *ab)
ath12k_hif_power_down(ab);
ath12k_mac_destroy(ab);
ath12k_core_soc_destroy(ab);
+ ath12k_fw_unmap(ab);
}
void ath12k_core_free(struct ath12k_base *ab)
@@ -1054,6 +1226,8 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
+ ab->qmi.num_radios = U8_MAX;
+ ab->slo_capable = true;
return ab;
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 8458dc292821..97e5a0ccd233 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CORE_H
@@ -13,6 +13,7 @@
#include <linux/bitfield.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
+#include <linux/firmware.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -24,6 +25,7 @@
#include "hal_rx.h"
#include "reg.h"
#include "dbring.h"
+#include "fw.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -55,6 +57,11 @@
#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+enum ath12k_bdf_search {
+ ATH12K_BDF_SEARCH_DEFAULT,
+ ATH12K_BDF_SEARCH_BUS_AND_BOARD,
+};
+
enum wme_ac {
WME_AC_BE,
WME_AC_BK,
@@ -259,6 +266,7 @@ struct ath12k_vif {
u8 tx_encap_type;
u8 vdev_stats_id;
u32 punct_bitmap;
+ bool ps;
};
struct ath12k_vif_iter {
@@ -420,7 +428,7 @@ struct ath12k_sta {
};
#define ATH12K_MIN_5G_FREQ 4150
-#define ATH12K_MIN_6G_FREQ 5945
+#define ATH12K_MIN_6G_FREQ 5925
#define ATH12K_MAX_6G_FREQ 7115
#define ATH12K_NUM_CHANS 100
#define ATH12K_MAX_5G_CHAN 173
@@ -468,7 +476,7 @@ struct ath12k_per_peer_tx_stats {
struct ath12k {
struct ath12k_base *ab;
struct ath12k_pdev *pdev;
- struct ieee80211_hw *hw;
+ struct ath12k_hw *ah;
struct ath12k_wmi_pdev *wmi;
struct ath12k_pdev_dp dp;
u8 mac_addr[ETH_ALEN];
@@ -532,6 +540,7 @@ struct ath12k {
/* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */
u8 pdev_idx;
u8 lmac_id;
+ u8 hw_link_id;
struct completion peer_assoc_done;
struct completion peer_delete_done;
@@ -591,6 +600,13 @@ struct ath12k {
int monitor_vdev_id;
};
+struct ath12k_hw {
+ struct ieee80211_hw *hw;
+
+ u8 num_radio;
+ struct ath12k radio[] __aligned(sizeof(void *));
+};
+
struct ath12k_band_cap {
u32 phy_id;
u32 max_bw_supported;
@@ -724,6 +740,16 @@ struct ath12k_base {
u8 fw_pdev_count;
struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
+
+ /* Holds information on wiphy (hw) registration.
+ *
+ * In the Multi/Single Link Operation case, all pdevs are registered
+ * as a single wiphy. In other (legacy/non-MLO) cases, each pdev is
+ * registered as a separate wiphy.
+ */
+ struct ath12k_hw *ah[MAX_RADIOS];
+ u8 num_hw;
+
struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map;
unsigned long long free_vdev_stats_id_map;
@@ -793,10 +819,44 @@ struct ath12k_base {
/* true means radio is on */
bool rfkill_radio_on;
+ struct {
+ enum ath12k_bdf_search bdf_search;
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+ } id;
+
+ struct {
+ u32 api_version;
+
+ const struct firmware *fw;
+ const u8 *amss_data;
+ size_t amss_len;
+ const u8 *amss_dualmac_data;
+ size_t amss_dualmac_len;
+ const u8 *m3_data;
+ size_t m3_len;
+
+ DECLARE_BITMAP(fw_features, ATH12K_FW_FEATURE_COUNT);
+ } fw;
+
+ const struct hal_rx_ops *hal_rx_ops;
+
+ /* slo_capable denotes whether single/multi-link operation
+ * is supported within the same chip (SoC).
+ */
+ bool slo_capable;
+
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
};
+struct ath12k_pdev_map {
+ struct ath12k_base *ab;
+ u8 pdev_idx;
+};
+
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
int ath12k_core_pre_init(struct ath12k_base *ab);
int ath12k_core_init(struct ath12k_base *ath12k);
@@ -810,6 +870,7 @@ int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
int ath12k_core_fetch_bdf(struct ath12k_base *ath12k,
struct ath12k_board_data *bd);
void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd);
+int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd);
int ath12k_core_check_dt(struct ath12k_base *ath12k);
int ath12k_core_check_smbios(struct ath12k_base *ab);
void ath12k_core_halt(struct ath12k *ar);
@@ -818,6 +879,9 @@ int ath12k_core_suspend(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
+u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab);
+u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
+u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
{
@@ -882,4 +946,18 @@ static inline const char *ath12k_bus_str(enum ath12k_bus bus)
return "unknown";
}
+static inline struct ath12k_hw *ath12k_hw_to_ah(struct ieee80211_hw *hw)
+{
+ return hw->priv;
+}
+
+static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah)
+{
+ return ah->radio;
+}
+
+static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
+{
+ return ar->ah->hw;
+}
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index a6f81f2f97ef..c8e1b244b69e 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -997,6 +997,29 @@ void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
}
}
+bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
+{
+ if (test_bit(WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS, ab->wmi_ab.svc_map) &&
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start &&
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end &&
+ ab->hw_params->hal_ops->get_hal_rx_compact_ops) {
+ return true;
+ }
+ return false;
+}
+
+void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
+{
+ if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
+ /* RX TLV compaction is supported, so switch hal_rx_ops
+ * to the compact variants.
+ */
+ ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
+ }
+ ab->hal.hal_desc_sz =
+ ab->hal_rx_ops->rx_desc_get_desc_size();
+}
+
static void ath12k_dp_service_mon_ring(struct timer_list *t)
{
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index 1df3cdd46140..eb2dd408e081 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_H
@@ -150,7 +150,7 @@ struct ath12k_pdev_dp {
#define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */
-#define DP_BA_WIN_SZ_MAX 256
+#define DP_BA_WIN_SZ_MAX 1024
#define DP_TCL_NUM_RING_MAX 4
@@ -170,6 +170,7 @@ struct ath12k_pdev_dp {
#define DP_REO_CMD_RING_SIZE 128
#define DP_REO_STATUS_RING_SIZE 2048
#define DP_RXDMA_BUF_RING_SIZE 4096
+#define DP_RX_MAC_BUF_RING_SIZE 2048
#define DP_RXDMA_REFILL_RING_SIZE 2048
#define DP_RXDMA_ERR_DST_RING_SIZE 1024
#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
@@ -765,6 +766,11 @@ enum htt_stats_internal_ppdu_frametype {
#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET GENMASK(31, 16)
#define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET BIT(23)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK GENMASK(18, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK GENMASK(16, 0)
+
enum htt_rx_filter_tlv_flags {
HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
@@ -1088,6 +1094,11 @@ struct htt_rx_ring_selection_cfg_cmd {
__le32 rx_mpdu_offset;
__le32 rx_msdu_offset;
__le32 rx_attn_offset;
+ __le32 info2;
+ __le32 reserved[2];
+ __le32 rx_mpdu_start_end_mask;
+ __le32 rx_msdu_end_word_mask;
+ __le32 info3;
} __packed;
struct htt_rx_ring_tlv_filter {
@@ -1104,6 +1115,9 @@ struct htt_rx_ring_tlv_filter {
u16 rx_msdu_end_offset;
u16 rx_msdu_start_offset;
u16 rx_attn_offset;
+ u16 rx_mpdu_start_wmask;
+ u16 rx_mpdu_end_wmask;
+ u32 rx_msdu_end_wmask;
};
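
/* A sketch (function name hypothetical; assumes the new command fields
 * are packed with the masks defined above) of filling the word-mask
 * fields of the ring selection config from htt_rx_ring_tlv_filter,
 * using le32_encode_bits() from <linux/bitfield.h>.
 */
static void example_set_rx_word_masks(struct htt_rx_ring_selection_cfg_cmd *cmd,
				      const struct htt_rx_ring_tlv_filter *tlv_filter)
{
	cmd->rx_mpdu_start_end_mask =
		le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
				 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK) |
		le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
				 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
	cmd->rx_msdu_end_word_mask =
		le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
				 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
}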
#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
@@ -1820,4 +1834,6 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
u32 cookie);
struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
u32 desc_id);
+bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab);
+void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index be4b39f5fa80..2d56913a75d0 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "dp_mon.h"
@@ -864,7 +864,7 @@ static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff
{
u32 rx_pkt_offset, l2_hdr_offset;
- rx_pkt_offset = ar->ab->hw_params->hal_desc_sz;
+ rx_pkt_offset = ar->ab->hal.hal_desc_sz;
l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab,
(struct hal_rx_desc *)msdu->data);
skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
@@ -917,7 +917,8 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
u8 qos_pkt = 0;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
- hdr_desc = ab->hw_params->hal_ops->rx_desc_get_msdu_payload(rx_desc);
+ hdr_desc =
+ ab->hal_rx_ops->rx_desc_get_msdu_payload(rx_desc);
/* Base size */
wh = (struct ieee80211_hdr_3addr *)hdr_desc;
@@ -1130,7 +1131,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
- ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+ ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 1ee83f765929..ca76c018dd0c 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -23,34 +23,34 @@
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))
+ if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
return HAL_ENCRYPT_TYPE_OPEN;
- return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);
+ return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
}
u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);
+ return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
}
static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);
+ return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
}
static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}
static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
}
static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
@@ -58,7 +58,7 @@ static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
return ieee80211_has_morefrags(hdr->frame_control);
}
@@ -67,156 +67,156 @@ static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
{
struct ieee80211_hdr *hdr;
- hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}
static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
}
static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
+ return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
}
static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
+ return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
}
static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
+ return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
}
static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);
+ return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
}
u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);
+ return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
}
static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
}
static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
}
static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
}
static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
}
static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
}
static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
+ return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
}
static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
+ return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
}
static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
}
static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
}
u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);
+ return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
}
static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);
+ return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
}
static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);
+ return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
}
static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
struct hal_rx_desc *fdesc,
struct hal_rx_desc *ldesc)
{
- ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
+ ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}
static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
struct hal_rx_desc *desc,
u16 len)
{
- ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
+ ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}
static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
- ab->hw_params->hal_ops->rx_desc_is_da_mcbc(desc));
+ ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
}
static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
+ return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
}
static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);
+ return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
}
static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
struct hal_rx_desc *desc,
struct ieee80211_hdr *hdr)
{
- ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr);
+ ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
}
static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
@@ -224,13 +224,19 @@ static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
u8 *crypto_hdr,
enum hal_encrypt_type enctype)
{
- ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
+ ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}
static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
struct hal_rx_desc *desc)
{
- return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
+ return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
+}
+
+static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}
static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
@@ -1761,7 +1767,7 @@ static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
int buf_first_hdr_len, buf_first_len;
struct hal_rx_desc *ldesc;
int space_extra, rem_len, buf_len;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
/* As the msdu is spread across multiple rx buffers,
* find the offset to the start of msdu for computing
@@ -2458,7 +2464,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
!(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
rx_status->flag |= RX_FLAG_8023;
- ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+ ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
}
static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
@@ -2473,7 +2479,7 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
u8 l3_pad_bytes;
u16 msdu_len;
int ret;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
if (!last_buf) {
@@ -2804,7 +2810,7 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer
u8 mic[IEEE80211_CCMP_MIC_LEN];
int head_len, tail_len, ret;
size_t data_len;
- u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
u8 *key, *data;
u8 key_idx;
@@ -2844,7 +2850,7 @@ mic_fail:
ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
- ieee80211_rx(ar->hw, msdu);
+ ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
return -EINVAL;
}
@@ -2854,7 +2860,7 @@ static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
struct ieee80211_hdr *hdr;
size_t hdr_len;
size_t crypto_len;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
if (!flags)
return;
@@ -2892,7 +2898,7 @@ static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
bool is_decrypted = false;
int msdu_len = 0;
int extra_space;
- u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
first_frag = skb_peek(&rx_tid->rx_frags);
last_frag = skb_peek_tail(&rx_tid->rx_frags);
@@ -2968,7 +2974,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
struct ath12k_rx_desc_info *desc_info;
u8 dst_ind;
- hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ hal_rx_desc_sz = ab->hal.hal_desc_sz;
link_desc_banks = dp->link_desc_banks;
reo_dest_ring = rx_tid->dst_ring_desc;
@@ -3122,7 +3128,7 @@ static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
@@ -3305,7 +3311,7 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
struct ath12k_skb_rxcb *rxcb;
struct hal_rx_desc *rx_desc;
u16 msdu_len;
- u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
struct ath12k_rx_desc_info *desc_info;
u64 desc_va;
@@ -3486,7 +3492,7 @@ static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
int n_buffs;
n_buffs = DIV_ROUND_UP(msdu_len,
- (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));
+ (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz));
skb_queue_walk_safe(msdu_list, skb, tmp) {
rxcb = ATH12K_SKB_RXCB(skb);
@@ -3510,7 +3516,7 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
@@ -3607,7 +3613,7 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
u8 l3pad_bytes;
struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
- u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
@@ -3695,16 +3701,15 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct hal_rx_wbm_rel_info err_info;
struct hal_srng *srng;
struct sk_buff *msdu;
- struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct sk_buff_head msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
- int mac_id;
+ u8 mac_id;
int num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
- int ret, i;
+ int ret, pdev_id;
- for (i = 0; i < ab->num_radios; i++)
- __skb_queue_head_init(&msdu_list[i]);
+ __skb_queue_head_init(&msdu_list);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
rx_ring = &dp->rx_refill_buf_ring;
@@ -3737,11 +3742,6 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
}
}
- /* FIXME: Extract mac id correctly. Since descs are not tied
- * to mac, we can extract from vdev id in ring desc.
- */
- mac_id = 0;
-
if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
@@ -3771,7 +3771,8 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
rxcb->err_rel_src = err_info.err_rel_src;
rxcb->err_code = err_info.err_code;
rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
- __skb_queue_tail(&msdu_list[mac_id], msdu);
+
+ __skb_queue_tail(&msdu_list, msdu);
rxcb->is_first_msdu = err_info.first_msdu;
rxcb->is_last_msdu = err_info.last_msdu;
@@ -3788,21 +3789,22 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
rcu_read_lock();
- for (i = 0; i < ab->num_radios; i++) {
- if (!rcu_dereference(ab->pdevs_active[i])) {
- __skb_queue_purge(&msdu_list[i]);
+ while ((msdu = __skb_dequeue(&msdu_list))) {
+ mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
+ (struct hal_rx_desc *)msdu->data);
+ pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
+ ar = ab->pdevs[pdev_id].ar;
+
+ if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) {
+ dev_kfree_skb_any(msdu);
continue;
}
- ar = ab->pdevs[i].ar;
-
if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
- __skb_queue_purge(&msdu_list[i]);
+ dev_kfree_skb_any(msdu);
continue;
}
-
- while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
- ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
+ ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
}
rcu_read_unlock();
done:
@@ -3922,7 +3924,7 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
int ret;
- u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
@@ -3935,14 +3937,20 @@ int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
tlv_filter.rx_packet_offset = hal_rx_desc_sz;
tlv_filter.rx_mpdu_start_offset =
- ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
+ ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
tlv_filter.rx_msdu_end_offset =
- ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
+ ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
+
+ if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
+ tlv_filter.rx_mpdu_start_wmask =
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
+ tlv_filter.rx_msdu_end_wmask =
+ ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
+ tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
+ }
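The word masks above let RXDMA write out only selected 32-bit words of each TLV, which is what makes the qcn9274_compact descriptor smaller than the full one. A minimal sketch of that relationship, where the helper name is hypothetical and one mask bit corresponds to one 32-bit word:

	/* hedged sketch, not driver code: the compacted TLV size is just
	 * the population count of the word mask times the word size.
	 */
	static unsigned int compact_tlv_bytes(u32 wmask)
	{
		return hweight32(wmask) * sizeof(u32);
	}

Under this assumption, the size of struct hal_rx_desc_qcn9274_compact follows directly from QCN9274_MPDU_START_WMASK and QCN9274_MSDU_END_WMASK.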
- /* TODO: Selectively subscribe to required qwords within msdu_end
- * and mpdu_start and setup the mask in below msg
- * and modify the rx_desc struct
- */
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
HAL_RXDMA_BUF,
DP_RXDMA_REFILL_RING_SIZE,
@@ -3957,7 +3965,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
int ret;
- u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
int i;
ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
@@ -3973,9 +3981,9 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
tlv_filter.rx_mpdu_start_offset =
- ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
+ ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
tlv_filter.rx_msdu_end_offset =
- ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
+ ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
/* TODO: Selectively subscribe to required qwords within msdu_end
* and mpdu_start and setup the mask in below msg
@@ -4086,7 +4094,7 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ret = ath12k_dp_srng_setup(ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
- i, 1024);
+ i, DP_RX_MAC_BUF_RING_SIZE);
if (ret) {
ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
i);
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 62f9cdbb811c..572b87153647 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -151,7 +151,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
@@ -401,7 +401,7 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
}
}
- ieee80211_tx_status_skb(ar->hw, msdu);
+ ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
}
static void
@@ -498,7 +498,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
* Might end up reporting it out-of-band from HTT stats.
*/
- ieee80211_tx_status_skb(ar->hw, msdu);
+ ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
exit:
rcu_read_unlock();
@@ -837,7 +837,7 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
return 0;
@@ -964,6 +964,26 @@ int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
}
+ if (tlv_filter->rx_mpdu_start_wmask > 0 &&
+ tlv_filter->rx_msdu_end_wmask > 0) {
+ cmd->info2 |=
+ le32_encode_bits(true,
+ HTT_RX_RING_SELECTION_CFG_WORD_MASK_COMPACT_SET);
+ cmd->rx_mpdu_start_end_mask =
+ le32_encode_bits(tlv_filter->rx_mpdu_start_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK);
+ /* mpdu_end is not used by any hardware so far;
+ * assign it through the HAL ops in the future if
+ * any chip starts using it.
+ */
+ cmd->rx_mpdu_start_end_mask |=
+ le32_encode_bits(tlv_filter->rx_mpdu_end_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);
+ cmd->rx_msdu_end_word_mask =
+ le32_encode_bits(tlv_filter->rx_msdu_end_wmask,
+ HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_MASK);
+ }
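For reference, le32_encode_bits(v, m) evaluates to cpu_to_le32(FIELD_PREP(m, v)), i.e. v shifted into the bit positions named by mask m; that is why the mpdu_start and the (currently unused) mpdu_end masks can share the single rx_mpdu_start_end_mask word. A sketch with an arbitrary example value:

	/* illustration only: pack an example 10-bit start mask and an
	 * empty end mask into one little-endian configuration word.
	 */
	__le32 word = le32_encode_bits(0x3ff, HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_MASK) |
		      le32_encode_bits(0, HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_MASK);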
+
ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
diff --git a/drivers/net/wireless/ath/ath12k/fw.c b/drivers/net/wireless/ath/ath12k/fw.c
new file mode 100644
index 000000000000..5be4b2d4a19d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/fw.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+
+#include "debug.h"
+
+static int ath12k_fw_request_firmware_api_n(struct ath12k_base *ab,
+ const char *name)
+{
+ size_t magic_len, len, ie_len;
+ int ie_id, i, index, bit, ret;
+ struct ath12k_fw_ie *hdr;
+ const u8 *data;
+ __le32 *timestamp;
+
+ ab->fw.fw = ath12k_core_firmware_request(ab, name);
+ if (IS_ERR(ab->fw.fw)) {
+ ret = PTR_ERR(ab->fw.fw);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to load %s: %d\n", name, ret);
+ ab->fw.fw = NULL;
+ return ret;
+ }
+
+ data = ab->fw.fw->data;
+ len = ab->fw.fw->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH12K_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ath12k_err(ab, "firmware image too small to contain magic: %zu\n",
+ len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH12K_FIRMWARE_MAGIC, magic_len) != 0) {
+ ath12k_err(ab, "Invalid firmware magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* jump over the padding */
+ magic_len = ALIGN(magic_len, 4);
+
+ /* make sure there's space for padding */
+ if (magic_len > len) {
+ ath12k_err(ab, "No space for padding after magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop elements */
+ while (len > sizeof(struct ath12k_fw_ie)) {
+ hdr = (struct ath12k_fw_ie *)data;
+
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ath12k_err(ab, "Invalid length for FW IE %d (%zu < %zu)\n",
+ ie_id, len, ie_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH12K_FW_IE_TIMESTAMP:
+ if (ie_len != sizeof(u32))
+ break;
+
+ timestamp = (__le32 *)data;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "found fw timestamp %d\n",
+ le32_to_cpup(timestamp));
+ break;
+ case ATH12K_FW_IE_FEATURES:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found firmware features ie (%zd B)\n",
+ ie_len);
+
+ for (i = 0; i < ATH12K_FW_FEATURE_COUNT; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index == ie_len)
+ break;
+
+ if (data[index] & (1 << bit))
+ __set_bit(i, ab->fw.fw_features);
+ }
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "features", "",
+ ab->fw.fw_features,
+ sizeof(ab->fw.fw_features));
+ break;
+ case ATH12K_FW_IE_AMSS_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found fw image ie (%zd B)\n",
+ ie_len);
+
+ ab->fw.amss_data = data;
+ ab->fw.amss_len = ie_len;
+ break;
+ case ATH12K_FW_IE_M3_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found m3 image ie (%zd B)\n",
+ ie_len);
+
+ ab->fw.m3_data = data;
+ ab->fw.m3_len = ie_len;
+ break;
+ case ATH12K_FW_IE_AMSS_DUALMAC_IMAGE:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "found dualmac fw image ie (%zd B)\n",
+ ie_len);
+ ab->fw.amss_dualmac_data = data;
+ ab->fw.amss_dualmac_len = ie_len;
+ break;
+ default:
+ ath12k_warn(ab, "Unknown FW IE: %u\n", ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ /* make sure there's space for padding */
+ if (ie_len > len)
+ break;
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+ return 0;
+
+err:
+ release_firmware(ab->fw.fw);
+ ab->fw.fw = NULL;
+ return ret;
+}
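The parser above presumably walks the same firmware-N.bin TLV container used by ath10k/ath11k; struct ath12k_fw_ie is declared outside this diff, but the assumed layout is:

	/* assumed container layout (hedged):
	 *
	 *   "QCOM-ATH12K-FW\0"        magic, padded to a 4-byte boundary
	 *   struct ath12k_fw_ie {
	 *           __le32 id;        enum ath12k_fw_ie_type
	 *           __le32 len;       payload length, unpadded
	 *           u8 data[];        payload, padded to 4 bytes
	 *   };                        repeated until end of file
	 */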
+
+void ath12k_fw_map(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_fw_request_firmware_api_n(ab, ATH12K_FW_API2_FILE);
+ if (ret == 0)
+ ab->fw.api_version = 2;
+ else
+ ab->fw.api_version = 1;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "using fw api %d\n",
+ ab->fw.api_version);
+}
+
+void ath12k_fw_unmap(struct ath12k_base *ab)
+{
+ release_firmware(ab->fw.fw);
+ memset(&ab->fw, 0, sizeof(ab->fw));
+}
diff --git a/drivers/net/wireless/ath/ath12k/fw.h b/drivers/net/wireless/ath/ath12k/fw.h
new file mode 100644
index 000000000000..3ff041f15fa0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/fw.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_FW_H
+#define ATH12K_FW_H
+
+#define ATH12K_FW_API2_FILE "firmware-2.bin"
+#define ATH12K_FIRMWARE_MAGIC "QCOM-ATH12K-FW"
+
+enum ath12k_fw_ie_type {
+ ATH12K_FW_IE_TIMESTAMP = 0,
+ ATH12K_FW_IE_FEATURES = 1,
+ ATH12K_FW_IE_AMSS_IMAGE = 2,
+ ATH12K_FW_IE_M3_IMAGE = 3,
+ ATH12K_FW_IE_AMSS_DUALMAC_IMAGE = 4,
+};
+
+enum ath12k_fw_features {
+ /* The firmware supports setting the QRTR id via register
+ * PCIE_LOCAL_REG_QRTR_NODE_ID
+ */
+ ATH12K_FW_FEATURE_MULTI_QRTR_ID = 0,
+
+ /* keep last */
+ ATH12K_FW_FEATURE_COUNT,
+};
+
+void ath12k_fw_map(struct ath12k_base *ab);
+void ath12k_fw_unmap(struct ath12k_base *ab);
+
+#endif /* ATH12K_FW_H */
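Assuming ab->fw.fw_features is a bitmap sized by ATH12K_FW_FEATURE_COUNT (its declaration is outside this diff), a consumer would probe a feature bit parsed from ATH12K_FW_IE_FEATURES roughly as below; configure_qrtr_node_id() is a hypothetical placeholder:

	/* hedged usage sketch, not driver code */
	if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features))
		configure_qrtr_node_id(ab); /* via PCIE_LOCAL_REG_QRTR_NODE_ID */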
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index a489369d8068..78310da8cfe8 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@@ -449,8 +449,8 @@ static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
- return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
- RX_MSDU_END_INFO5_DA_IS_MCBC;
+ return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
}
static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
@@ -626,6 +626,21 @@ static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
return 0;
}
+static u16 ath12k_hal_qcn9274_rx_mpdu_start_wmask_get(void)
+{
+ return QCN9274_MPDU_START_WMASK;
+}
+
+static u32 ath12k_hal_qcn9274_rx_msdu_end_wmask_get(void)
+{
+ return QCN9274_MSDU_END_WMASK;
+}
+
+static const struct hal_rx_ops *ath12k_hal_qcn9274_get_hal_rx_compact_ops(void)
+{
+ return &hal_rx_qcn9274_compact_ops;
+}
+
static bool ath12k_hw_qcn9274_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
{
return !!le32_get_bits(desc->u.qcn9274.msdu_end.info14,
@@ -680,7 +695,17 @@ static u32 ath12k_hw_qcn9274_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
return errmap;
}
-const struct hal_ops hal_qcn9274_ops = {
+static u32 ath12k_hw_qcn9274_get_rx_desc_size(void)
+{
+ return sizeof(struct hal_rx_desc_qcn9274);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
+ return 0;
+}
+
+const struct hal_rx_ops hal_rx_qcn9274_ops = {
.rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes,
@@ -712,13 +737,367 @@ const struct hal_ops hal_qcn9274_ops = {
.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
- .create_srng_config = ath12k_hal_srng_create_config_qcn9274,
- .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
.dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err,
+ .rx_desc_get_desc_size = ath12k_hw_qcn9274_get_rx_desc_size,
+ .rx_desc_get_msdu_src_link_id = ath12k_hw_qcn9274_rx_desc_get_msdu_src_link,
+};
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_LAST_MSDU);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_INFO5_L3_HDR_PADDING);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info2,
+ RX_MPDU_START_INFO2_ENC_TYPE);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info11,
+ RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
+}
+
+static bool
+ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
+}
+
+static u16
+ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
+}
+
+static u16 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info10,
+ RX_MSDU_END_INFO10_MSDU_LENGTH);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_SGI);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_RATE_MCS);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_RECV_BW);
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.phy_meta_data);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_INFO12_PKT_TYPE);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12,
+ RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5,
+ RX_MSDU_END_QCN9274_INFO5_TID);
+}
+
+static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.sw_peer_id);
+}
+
+static void ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ fdesc->u.qcn9274_compact.msdu_end = ldesc->u.qcn9274_compact.msdu_end;
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.phy_ppdu_id);
+}
+
+static void
+ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info10);
+
+ info = u32_replace_bits(info, len, RX_MSDU_END_INFO10_MSDU_LENGTH);
+ desc->u.qcn9274_compact.msdu_end.info10 = __cpu_to_le32(info);
+}
+
+static u8 *ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9274_compact.msdu_payload[0];
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274_compact, mpdu_start);
+}
+
+static u32 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274_compact, msdu_end);
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
+}
+
+static u8 *ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9274_compact.mpdu_start.addr2;
+}
+
+static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
+}
+
+static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ hdr->frame_control = desc->u.qcn9274_compact.mpdu_start.frame_ctrl;
+ hdr->duration_id = desc->u.qcn9274_compact.mpdu_start.duration;
+ ether_addr_copy(hdr->addr1, desc->u.qcn9274_compact.mpdu_start.addr1);
+ ether_addr_copy(hdr->addr2, desc->u.qcn9274_compact.mpdu_start.addr2);
+ ether_addr_copy(hdr->addr3, desc->u.qcn9274_compact.mpdu_start.addr3);
+ if (__le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
+ ether_addr_copy(hdr->addr4, desc->u.qcn9274_compact.mpdu_start.addr4);
+ }
+ hdr->seq_ctrl = desc->u.qcn9274_compact.mpdu_start.seq_ctrl;
+}
+
+static void
+ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ unsigned int key_id;
+
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[1] = 0;
+ crypto_hdr[2] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ break;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[1] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[2] = 0;
+ break;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ return;
+ }
+ key_id = le32_get_bits(desc->u.qcn9274_compact.mpdu_start.info5,
+ RX_MPDU_START_INFO5_KEY_ID);
+ crypto_hdr[3] = 0x20 | (key_id << 6);
+ crypto_hdr[4] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[5] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274_compact.mpdu_start.pn[0]);
+ crypto_hdr[6] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274_compact.mpdu_start.pn[1]);
+ crypto_hdr[7] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]);
+}
+
+static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.frame_ctrl);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
+ RX_MSDU_END_INFO14_MSDU_DONE);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
+ RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info13,
+ RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
+{
+ return (le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
+ RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274_compact.msdu_end.info13);
+ u32 errmap = 0;
+
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+static u32 ath12k_hw_qcn9274_compact_get_rx_desc_size(void)
+{
+ return sizeof(struct hal_rx_desc_qcn9274_compact);
+}
+
+static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
+ return le64_get_bits(desc->u.qcn9274_compact.msdu_end.msdu_end_tag,
+ RX_MSDU_END_64_TLV_SRC_LINK_ID);
+}
+
+const struct hal_rx_ops hal_rx_qcn9274_compact_ops = {
+ .rx_desc_get_first_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath12k_hw_qcn9274_compact_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_compact_rx_desc_get_l3_pad_bytes,
+ .rx_desc_encrypt_valid = ath12k_hw_qcn9274_compact_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath12k_hw_qcn9274_compact_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath12k_hw_qcn9274_compact_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_compact_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_end_tlv = ath12k_hw_qcn9274_compact_rx_desc_copy_end_tlv,
+ .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath12k_hw_qcn9274_compact_rx_desc_set_msdu_len,
+ .rx_desc_get_msdu_payload = ath12k_hw_qcn9274_compact_rx_desc_get_msdu_payload,
+ .rx_desc_get_mpdu_start_offset =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_start_offset,
+ .rx_desc_get_msdu_end_offset =
+ ath12k_hw_qcn9274_compact_rx_desc_get_msdu_end_offset,
+ .rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_compact_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_compact_rx_desc_mpdu_start_addr2,
+ .rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc,
+ .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr,
+ .rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr,
+ .rx_desc_get_mpdu_frame_ctl =
+ ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl,
+ .dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done,
+ .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail,
+ .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail,
+ .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_compact_dp_rx_h_is_decrypted,
+ .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_compact_dp_rx_h_mpdu_err,
+ .rx_desc_get_desc_size = ath12k_hw_qcn9274_compact_get_rx_desc_size,
+ .rx_desc_get_msdu_src_link_id =
+ ath12k_hw_qcn9274_compact_rx_desc_get_msdu_src_link,
+};
+
+const struct hal_ops hal_qcn9274_ops = {
+ .create_srng_config = ath12k_hal_srng_create_config_qcn9274,
+ .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
+ .rxdma_ring_wmask_rx_mpdu_start = ath12k_hal_qcn9274_rx_mpdu_start_wmask_get,
+ .rxdma_ring_wmask_rx_msdu_end = ath12k_hal_qcn9274_rx_msdu_end_wmask_get,
+ .get_hal_rx_compact_ops = ath12k_hal_qcn9274_get_hal_rx_compact_ops,
};
static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
@@ -1134,7 +1513,17 @@ static u32 ath12k_hw_wcn7850_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
return errmap;
}
-const struct hal_ops hal_wcn7850_ops = {
+static u32 ath12k_hw_wcn7850_get_rx_desc_size(void)
+{
+ return sizeof(struct hal_rx_desc_wcn7850);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_src_link(struct hal_rx_desc *desc)
+{
+ return 0;
+}
+
+const struct hal_rx_ops hal_rx_wcn7850_ops = {
.rx_desc_get_first_msdu = ath12k_hw_wcn7850_rx_desc_get_first_msdu,
.rx_desc_get_last_msdu = ath12k_hw_wcn7850_rx_desc_get_last_msdu,
.rx_desc_get_l3_pad_bytes = ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes,
@@ -1167,13 +1556,21 @@ const struct hal_ops hal_wcn7850_ops = {
.rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
.rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
.rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
- .create_srng_config = ath12k_hal_srng_create_config_wcn7850,
- .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
.dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
.dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
.dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
.dp_rx_h_is_decrypted = ath12k_hw_wcn7850_dp_rx_h_is_decrypted,
.dp_rx_h_mpdu_err = ath12k_hw_wcn7850_dp_rx_h_mpdu_err,
+ .rx_desc_get_desc_size = ath12k_hw_wcn7850_get_rx_desc_size,
+ .rx_desc_get_msdu_src_link_id = ath12k_hw_wcn7850_rx_desc_get_msdu_src_link,
+};
+
+const struct hal_ops hal_wcn7850_ops = {
+ .create_srng_config = ath12k_hal_srng_create_config_wcn7850,
+ .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
+ .rxdma_ring_wmask_rx_mpdu_start = NULL,
+ .rxdma_ring_wmask_rx_msdu_end = NULL,
+ .get_hal_rx_compact_ops = NULL,
};
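The binding of ab->hal_rx_ops is outside this hunk, but the ops split suggests it happens once at init, which would also explain moving hal_desc_sz from hw_params into struct ath12k_hal: the descriptor size now depends on which hal_rx_ops variant gets chosen at runtime. A hedged sketch of that selection:

	/* sketch, assuming a NULL get_hal_rx_compact_ops (as on wcn7850)
	 * falls back to the chip's full-size descriptor ops.
	 */
	if (ab->hw_params->hal_ops->get_hal_rx_compact_ops &&
	    ath12k_dp_wmask_compaction_rx_tlv_supported(ab))
		ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
	else
		ab->hal_rx_ops = &hal_rx_qcn9274_ops; /* or hal_rx_wcn7850_ops */

	ab->hal.hal_desc_sz = ab->hal_rx_ops->rx_desc_get_desc_size();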
static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index fc47e7e6b498..107927d64bbb 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_H
@@ -1023,6 +1023,8 @@ struct ath12k_hal {
/* shadow register configuration */
u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
int num_shadow_reg_configured;
+
+ u32 hal_desc_sz;
};
/* Maps WBM ring number and Return Buffer Manager Id per TCL ring */
@@ -1031,7 +1033,7 @@ struct ath12k_hal_tcl_to_wbm_rbm_map {
u8 rbm_id;
};
-struct hal_ops {
+struct hal_rx_ops {
bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
@@ -1070,18 +1072,30 @@ struct hal_ops {
void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
u8 *crypto_hdr,
enum hal_encrypt_type enctype);
- int (*create_srng_config)(struct ath12k_base *ab);
bool (*dp_rx_h_msdu_done)(struct hal_rx_desc *desc);
bool (*dp_rx_h_l4_cksum_fail)(struct hal_rx_desc *desc);
bool (*dp_rx_h_ip_cksum_fail)(struct hal_rx_desc *desc);
bool (*dp_rx_h_is_decrypted)(struct hal_rx_desc *desc);
u32 (*dp_rx_h_mpdu_err)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_desc_size)(void);
+ u8 (*rx_desc_get_msdu_src_link_id)(struct hal_rx_desc *desc);
+};
+
+struct hal_ops {
+ int (*create_srng_config)(struct ath12k_base *ab);
+ u16 (*rxdma_ring_wmask_rx_mpdu_start)(void);
+ u32 (*rxdma_ring_wmask_rx_msdu_end)(void);
+ const struct hal_rx_ops *(*get_hal_rx_compact_ops)(void);
const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
};
extern const struct hal_ops hal_qcn9274_ops;
extern const struct hal_ops hal_wcn7850_ops;
+extern const struct hal_rx_ops hal_rx_qcn9274_ops;
+extern const struct hal_rx_ops hal_rx_qcn9274_compact_ops;
+extern const struct hal_rx_ops hal_rx_wcn7850_ops;
+
u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
int tid, u32 ba_window_size,
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 6c17adc6d60b..63340256d3f6 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -2500,13 +2500,13 @@ struct hal_rx_reo_queue {
#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30)
#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24)
-#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(9, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(11, 10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(24, 13)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(27)
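Every INFO2 field moves up by two bits because the BA window size field widens from 8 to 10 bits, which is exactly what a 1024-frame block-ack window (served by the hal_rx_reo_queue_1k extension added below) needs. A compile-time sanity check, as a sketch:

	/* sketch: GENMASK(9, 0) admits window sizes 0..1023 */
	static_assert(FIELD_MAX(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE) == 1023);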
struct hal_reo_update_rx_queue {
struct hal_reo_cmd_hdr cmd;
@@ -2517,6 +2517,12 @@ struct hal_reo_update_rx_queue {
__le32 pn[4];
} __packed;
+struct hal_rx_reo_queue_1k {
+ struct hal_desc_header desc_hdr;
+ __le32 rx_bitmap_1023_288[23];
+ __le32 reserved[8];
+} __packed;
+
#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0)
#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1)
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index 4f25eb9f7745..f7c1aaa3b5d4 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
@@ -247,7 +247,7 @@ int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
case HAL_REO_CMD_UNBLOCK_CACHE:
case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
ath12k_warn(ab, "Unsupported reo command %d\n", type);
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
break;
default:
ath12k_warn(ab, "Unknown reo command %d\n", type);
@@ -688,23 +688,28 @@ void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
{
- u32 num_ext_desc;
+ u32 num_ext_desc, num_1k_desc = 0;
if (ba_window_size <= 1) {
if (tid != HAL_DESC_REO_NON_QOS_TID)
num_ext_desc = 1;
else
num_ext_desc = 0;
+
} else if (ba_window_size <= 105) {
num_ext_desc = 1;
} else if (ba_window_size <= 210) {
num_ext_desc = 2;
- } else {
+ } else if (ba_window_size <= 256) {
num_ext_desc = 3;
+ } else {
+ num_ext_desc = 10;
+ num_1k_desc = 1;
}
return sizeof(struct hal_rx_reo_queue) +
- (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+ (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext)) +
+ (num_1k_desc * sizeof(struct hal_rx_reo_queue_1k));
}
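Worked example for the new branch: ba_window_size = 1024 selects num_ext_desc = 10 and num_1k_desc = 1, so the queue descriptor grows as sketched below; which bitmap words the 1k block covers follows from its rx_bitmap_1023_288 field.

	/* sketch: REO queue descriptor size for a 1024-frame BA session */
	u32 sz = ath12k_hal_reo_qdesc_size(1024, /* tid */ 0);
	/* == sizeof(struct hal_rx_reo_queue)
	 *  + 10 * sizeof(struct hal_rx_reo_queue_ext)
	 *  +  1 * sizeof(struct hal_rx_reo_queue_1k)  (bitmap words 288..1023)
	 */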
void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index de60d988d860..0b17dfd47856 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
@@ -897,7 +897,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = false,
- .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
.num_tcl_banks = 48,
.max_tx_ring = 4,
@@ -914,6 +913,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_on_level = 0,
.rddm_size = 0,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
+
+ .supports_sta_ps = false,
},
{
.name = "wcn7850 hw2.0",
@@ -950,7 +956,10 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.vdev_start_delay = true,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.idle_ps = true,
@@ -960,7 +969,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = true,
- .hal_desc_sz = sizeof(struct hal_rx_desc_wcn7850),
.num_tcl_banks = 7,
.max_tx_ring = 3,
@@ -978,6 +986,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_on_level = 1,
.rddm_size = 0x780000,
+
+ .def_num_link = 2,
+ .max_mlo_peer = 32,
+
+ .otp_board_id_register = 0,
+
+ .supports_sta_ps = true,
},
{
.name = "qcn9274 hw2.0",
@@ -987,7 +1002,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.board_size = 256 * 1024,
.cal_offset = 128 * 1024,
},
- .max_radios = 1,
+ .max_radios = 2,
.single_pdev_only = false,
.qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
.internal_sleep_clock = false,
@@ -1023,7 +1038,6 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.reoq_lut_support = false,
.supports_shadow_regs = false,
- .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
.num_tcl_banks = 48,
.max_tx_ring = 4,
@@ -1040,6 +1054,13 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.rfkill_on_level = 0,
.rddm_size = 0,
+
+ .def_num_link = 0,
+ .max_mlo_peer = 256,
+
+ .otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
+
+ .supports_sta_ps = false,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index d2622bfef942..87965980b938 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HW_H
@@ -17,19 +17,30 @@
/* Num VDEVS per radio */
#define TARGET_NUM_VDEVS (16 + 1)
-#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV_SINGLE (TARGET_NUM_STATIONS_SINGLE + \
+ TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV_DBS (TARGET_NUM_STATIONS_DBS + \
+ TARGET_NUM_VDEVS)
+#define TARGET_NUM_PEERS_PDEV_DBS_SBS (TARGET_NUM_STATIONS_DBS_SBS + \
+ TARGET_NUM_VDEVS)
/* Num of peers for Single Radio mode */
-#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV_SINGLE)
/* Num of peers for DBS */
-#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV_DBS)
/* Num of peers for DBS_SBS */
-#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
+#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV_DBS_SBS)
-/* Max num of stations (per radio) */
-#define TARGET_NUM_STATIONS 512
+/* Max num of stations for Single Radio mode */
+#define TARGET_NUM_STATIONS_SINGLE 512
+
+/* Max num of stations for DBS */
+#define TARGET_NUM_STATIONS_DBS 128
+
+/* Max num of stations for DBS_SBS */
+#define TARGET_NUM_STATIONS_DBS_SBS 128
#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
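With TARGET_NUM_VDEVS = 16 + 1 = 17, the reworked macros evaluate as below: DBS and DBS_SBS now trade per-pdev station count (128 instead of 512) for multiple concurrent radios.

	/* worked arithmetic from the definitions above:
	 * TARGET_NUM_PEERS(SINGLE)  =      512 + 17  = 529
	 * TARGET_NUM_PEERS(DBS)     = 2 * (128 + 17) = 290
	 * TARGET_NUM_PEERS(DBS_SBS) = 3 * (128 + 17) = 435
	 */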
#define TARGET_NUM_PEER_KEYS 2
@@ -66,6 +77,8 @@
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 1
#define TARGET_RX_BATCHMODE 1
+#define TARGET_RX_PEER_METADATA_VER_V1A 2
+#define TARGET_RX_PEER_METADATA_VER_V1B 3
#define ATH12K_HW_MAX_QUEUES 4
#define ATH12K_QUEUE_LEN 4096
@@ -174,7 +187,6 @@ struct ath12k_hw_params {
bool reoq_lut_support:1;
bool supports_shadow_regs:1;
- u32 hal_desc_sz;
u32 num_tcl_banks;
u32 max_tx_ring;
@@ -192,6 +204,13 @@ struct ath12k_hw_params {
u32 rfkill_on_level;
u32 rddm_size;
+
+ u8 def_num_link;
+ u16 max_mlo_peer;
+
+ u32 otp_board_id_register;
+
+ bool supports_sta_ps;
};
struct ath12k_hw_ops {
@@ -242,10 +261,16 @@ enum ath12k_bd_ie_board_type {
ATH12K_BD_IE_BOARD_DATA = 1,
};
+enum ath12k_bd_ie_regdb_type {
+ ATH12K_BD_IE_REGDB_NAME = 0,
+ ATH12K_BD_IE_REGDB_DATA = 1,
+};
+
enum ath12k_bd_ie_type {
/* contains sub IEs of enum ath12k_bd_ie_board_type */
ATH12K_BD_IE_BOARD = 0,
- ATH12K_BD_IE_BOARD_EXT = 1,
+ /* contains sub IEs of enum ath12k_bd_ie_regdb_type */
+ ATH12K_BD_IE_REGDB = 1,
};
struct ath12k_hw_regs {
@@ -315,6 +340,18 @@ struct ath12k_hw_regs {
u32 hal_reo_status_ring_base;
};
+static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type)
+{
+ switch (type) {
+ case ATH12K_BD_IE_BOARD:
+ return "board data";
+ case ATH12K_BD_IE_REGDB:
+ return "regdb data";
+ }
+
+ return "unknown";
+}
+
int ath12k_hw_init(struct ath12k_base *ab);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 88cec54c6c2e..52a5fb8b03e9 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@@ -241,8 +241,8 @@ static const u32 ath12k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};
-static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+static int ath12k_start_vdev_delay(struct ath12k *ar,
+ struct ath12k_vif *arvif);
static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
{
@@ -542,7 +542,7 @@ struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
arvif_iter.vdev_id = vdev_id;
flags = IEEE80211_IFACE_ITER_RESUME_ALL;
- ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
flags,
ath12k_get_arvif_iter,
&arvif_iter);
@@ -563,7 +563,8 @@ struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar) {
+ if (pdev && pdev->ar &&
+ (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id);
if (arvif)
return arvif;
@@ -1040,7 +1041,7 @@ static int ath12k_mac_monitor_start(struct ath12k *ar)
if (ar->monitor_started)
return 0;
- ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
ath12k_mac_get_any_chandef_iter,
&chandef);
if (!chandef)
@@ -1083,9 +1084,49 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar)
return ret;
}
-static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath12k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ ar->num_started_vdevs--;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
+
+ if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
+ arvif->vdev_id);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int ath12k_mac_config(struct ath12k *ar, u32 changed)
{
- struct ath12k *ar = hw->priv;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
@@ -1122,11 +1163,84 @@ err_mon_del:
return ret;
}
+static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ int ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ ret = ath12k_mac_config(ar, changed);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to update config pdev idx %d: %d\n",
+ ar->pdev_idx, ret);
+
+ return ret;
+}
+
+static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_mgmt *mgmt;
+ const u8 *p2p_ie;
+ int ret;
+
+ mgmt = (void *)bcn->data;
+ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ mgmt->u.beacon.variable,
+ bcn->len - (mgmt->u.beacon.variable -
+ bcn->data));
+ if (!p2p_ie) {
+ ath12k_warn(ar->ab, "no P2P ie found in beacon\n");
+ return -ENOENT;
+ }
+
+ ret = ath12k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
+ u8 oui_type, size_t ie_offset)
+{
+ const u8 *next, *end;
+ size_t len;
+ u8 *ie;
+
+ if (WARN_ON(skb->len < ie_offset))
+ return -EINVAL;
+
+ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+ skb->data + ie_offset,
+ skb->len - ie_offset);
+ if (!ie)
+ return -ENOENT;
+
+ len = ie[1] + 2;
+ end = skb->data + skb->len;
+ next = ie + len;
+
+ if (WARN_ON(next > end))
+ return -EINVAL;
+
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+
+ return 0;
+}
+
static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
{
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
struct sk_buff *bcn;
@@ -1154,14 +1268,37 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
ies, (skb_tail_pointer(bcn) - ies)))
arvif->wpaie_present = true;
- ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+ if (arvif->vif->type == NL80211_IFTYPE_AP && arvif->vif->p2p) {
+ ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup P2P GO bcn ie: %d\n",
+ ret);
+ goto free_bcn_skb;
+ }
- kfree_skb(bcn);
+ /* P2P IE is inserted by firmware automatically (as
+ * configured above) so remove it from the base beacon
+ * template to avoid duplicate P2P IEs in beacon frames.
+ */
+ ret = ath12k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA,
+ WLAN_OUI_TYPE_WFA_P2P,
+ offsetof(struct ieee80211_mgmt,
+ u.beacon.variable));
+ if (ret) {
+ ath12k_warn(ab, "failed to remove P2P vendor ie: %d\n",
+ ret);
+ goto free_bcn_skb;
+ }
+ }
+
+ ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
if (ret)
ath12k_warn(ab, "failed to submit beacon template command: %d\n",
ret);
+free_bcn_skb:
+ kfree_skb(bcn);
return ret;
}
@@ -1214,6 +1351,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
struct ath12k_wmi_peer_assoc_arg *arg)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
u32 aid;
lockdep_assert_held(&ar->conf_mutex);
@@ -1228,7 +1366,7 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
arg->peer_associd = aid;
arg->auth_flag = true;
/* TODO: STA WAR in ath10k for listen interval required? */
- arg->peer_listen_intval = ar->hw->conf.listen_interval;
+ arg->peer_listen_intval = hw->conf.listen_interval;
arg->peer_nss = 1;
arg->peer_caps = vif->bss_conf.assoc_capability;
}
@@ -1242,6 +1380,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
struct cfg80211_chan_def def;
struct cfg80211_bss *bss;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
@@ -1250,7 +1389,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
return;
- bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ bss = cfg80211_get_bss(hw->wiphy, def.chan, info->bssid, NULL, 0,
IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (arvif->rsnie_present || arvif->wpaie_present) {
@@ -1270,7 +1409,7 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
ies->data,
ies->len);
rcu_read_unlock();
- cfg80211_put_bss(ar->hw->wiphy, bss);
+ cfg80211_put_bss(hw->wiphy, bss);
}
/* FIXME: base on RSN IE/WPA IE is a correct idea? */
@@ -1304,6 +1443,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
enum nl80211_band band;
u32 ratemask;
u8 rate;
@@ -1315,7 +1455,7 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
return;
band = def.chan->band;
- sband = ar->hw->wiphy->bands[band];
+ sband = hw->wiphy->bands[band];
ratemask = sta->deflink.supp_rates[band];
ratemask &= arvif->bitrate_mask.control[band].legacy;
rates = sband->bitrates;
@@ -2266,12 +2406,11 @@ static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
ath12k_smps_map[smps]);
}
-static void ath12k_bss_assoc(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
+static void ath12k_bss_assoc(struct ath12k *ar,
+ struct ath12k_vif *arvif,
struct ieee80211_bss_conf *bss_conf)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = arvif->vif;
struct ath12k_wmi_peer_assoc_arg peer_arg;
struct ieee80211_sta *ap_sta;
struct ath12k_peer *peer;
@@ -2361,11 +2500,9 @@ static void ath12k_bss_assoc(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
}
-static void ath12k_bss_disassoc(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void ath12k_bss_disassoc(struct ath12k *ar,
+ struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -2413,6 +2550,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
struct cfg80211_chan_def *def)
{
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
const struct ieee80211_supported_band *sband;
u8 basic_rate_idx;
int hw_rate_code;
@@ -2422,7 +2560,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
lockdep_assert_held(&ar->conf_mutex);
- sband = ar->hw->wiphy->bands[def->chan->band];
+ sband = hw->wiphy->bands[def->chan->band];
basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
@@ -2449,6 +2587,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath12k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct sk_buff *tmpl;
int ret;
u32 interval;
@@ -2457,7 +2596,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
if (info->fils_discovery.max_interval) {
interval = info->fils_discovery.max_interval;
- tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
+ tmpl = ieee80211_get_fils_discovery_tmpl(hw, arvif->vif);
if (tmpl)
ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
tmpl);
@@ -2465,7 +2604,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
unsol_bcast_probe_resp_enabled = 1;
interval = info->unsol_bcast_probe_resp_interval;
- tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
+ tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw,
arvif->vif);
if (tmpl)
ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
@@ -2491,13 +2630,60 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
return ret;
}
-static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u64 changed)
+static void ath12k_mac_vif_setup_ps(struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
+ enum wmi_sta_powersave_param param;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int timeout;
+ bool enable_ps;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ enable_ps = arvif->ps;
+ if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ timeout = conf->dynamic_ps_timeout;
+ if (timeout == 0) {
+ /* firmware doesn't like 0 */
+ timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
+ }
+
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ timeout);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
+}
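When dynamic_ps_timeout is zero, the code above substitutes one beacon interval, since the firmware rejects a zero inactivity time. For the common 100 TU beacon interval:

	/* worked example:
	 * ieee80211_tu_to_usec(100) = 100 * 1024 = 102400 us
	 * timeout = 102400 / 1000   = 102 ms of inactivity before doze
	 */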
+
+static void ath12k_mac_bss_info_changed(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_vif_cfg *vif_cfg = &vif->cfg;
struct cfg80211_chan_def def;
u32 param_id, param_value;
enum nl80211_band band;
@@ -2510,7 +2696,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
u8 rateidx;
u32 rate;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
@@ -2666,9 +2852,9 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc)
- ath12k_bss_assoc(hw, vif, info);
+ ath12k_bss_assoc(ar, arvif, info);
else
- ath12k_bss_disassoc(hw, vif);
+ ath12k_bss_disassoc(ar, arvif);
}
if (changed & BSS_CHANGED_TXPOWER) {
@@ -2768,14 +2954,35 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
ath12k_mac_fils_discovery(arvif, info);
- if (changed & BSS_CHANGED_EHT_PUNCTURING)
- arvif->punct_bitmap = info->eht_puncturing;
+ if (changed & BSS_CHANGED_PS &&
+ ar->ab->hw_params->supports_sta_ps) {
+ arvif->ps = vif_cfg->ps;
+ ath12k_mac_vif_setup_ps(arvif);
+ }
+}
+
+static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_mac_bss_info_changed(ar, arvif, info, changed);
mutex_unlock(&ar->conf_mutex);
}
void __ath12k_mac_scan_finish(struct ath12k *ar)
{
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
@@ -2784,7 +2991,7 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
if (ar->scan.is_roc && ar->scan.roc_notify)
- ieee80211_remain_on_channel_expired(ar->hw);
+ ieee80211_remain_on_channel_expired(hw);
fallthrough;
case ATH12K_SCAN_STARTING:
if (!ar->scan.is_roc) {
@@ -2795,7 +3002,7 @@ void __ath12k_mac_scan_finish(struct ath12k *ar)
ATH12K_SCAN_STARTING)),
};
- ieee80211_scan_completed(ar->hw, &info);
+ ieee80211_scan_completed(hw, &info);
}
ar->scan.state = ATH12K_SCAN_IDLE;
@@ -2940,13 +3147,16 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
struct ath12k_wmi_scan_req_arg arg = {};
int ret;
int i;
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -2988,7 +3198,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
for (i = 0; i < arg.num_ssids; i++)
arg.ssid[i] = req->ssids[i];
} else {
- arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg.scan_f_passive = 1;
}
if (req->n_channels) {
@@ -3014,7 +3224,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
}
/* Add a margin to account for event/command processing */
- ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
msecs_to_jiffies(arg.max_scan_time +
ATH12K_MAC_SCAN_TIMEOUT_MSECS));
@@ -3025,13 +3235,17 @@ exit:
kfree(arg.extraie.ptr);
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
ath12k_scan_abort(ar);
@@ -3159,8 +3373,9 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
struct ath12k_sta *arsta;
@@ -3175,6 +3390,9 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
return 1;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
return 1;
@@ -3696,7 +3914,7 @@ static int ath12k_mac_station_add(struct ath12k *ar,
if (ab->hw_params->vdev_start_delay &&
!arvif->is_started &&
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
- ret = ath12k_start_vdev_delay(ar->hw, vif);
+ ret = ath12k_start_vdev_delay(ar, arvif);
if (ret) {
ath12k_warn(ab, "failed to delay vdev start: %d\n", ret);
goto free_peer;
@@ -3750,7 +3968,8 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k_peer *peer;
@@ -3761,6 +3980,8 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NOTEXIST))
cancel_work_sync(&arsta->update_wk);
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
@@ -3775,6 +3996,13 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ ath12k_bss_disassoc(ar, arvif);
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
@@ -3856,6 +4084,7 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
}
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -3863,7 +4092,8 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
s16 txpwr;
@@ -3879,6 +4109,8 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL)
return -EINVAL;
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
@@ -3899,12 +4131,15 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u32 changed)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
u32 bw, smps;
+ ar = ath12k_ah_to_ar(ah);
+
spin_lock_bh(&ar->ab->base_lock);
peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
@@ -3964,10 +4199,10 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &arsta->update_wk);
}
-static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ieee80211_vif *vif,
+static int ath12k_conf_tx_uapsd(struct ath12k_vif *arvif,
u16 ac, bool enable)
{
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
u32 value;
int ret;
@@ -4021,17 +4256,16 @@ exit:
return ret;
}
-static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- unsigned int link_id, u16 ac,
- const struct ieee80211_tx_queue_params *params)
+static int ath12k_mac_conf_tx(struct ath12k_vif *arvif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct wmi_wmm_params_arg *p = NULL;
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
int ret;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
switch (ac) {
case IEEE80211_AC_VO:
@@ -4061,17 +4295,36 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id,
&arvif->wmm_params);
if (ret) {
- ath12k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
+ ath12k_warn(ab, "pdev idx %d failed to set wmm params: %d\n",
+ ar->pdev_idx, ret);
goto exit;
}
- ret = ath12k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
-
+ ret = ath12k_conf_tx_uapsd(arvif, ac, params->uapsd);
if (ret)
- ath12k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
+ ath12k_warn(ab, "pdev idx %d failed to set sta uapsd: %d\n",
+ ar->pdev_idx, ret);
exit:
+ return ret;
+}
+
+static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ int ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_conf_tx(arvif, link_id, ac, params);
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -4782,7 +5035,7 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
{
int num_mgmt;
- ieee80211_free_txskb(ar->hw, skb);
+ ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -4914,8 +5167,8 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
arvif = ath12k_vif_to_arvif(skb_cb->vif);
- if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
- arvif->is_started) {
+
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
@@ -4959,20 +5212,41 @@ static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
skb_queue_tail(q, skb);
atomic_inc(&ar->num_pending_mgmt_tx);
- ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+ ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work);
return 0;
}
+static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb,
+ bool is_prb_rsp)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ if (likely(!is_prb_rsp))
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arvif->u.ap.noa_data &&
+ !pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
+ GFP_ATOMIC))
+ skb_put_data(skb, arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
+
+ spin_unlock_bh(&ar->data_lock);
+}
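
pskb_expand_head() only guarantees tailroom; the NoA attribute is still appended with skb_put_data() afterwards. A userspace analogue of that grow-then-append pattern, using plain buffers and hypothetical names rather than the skb API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow-then-append, mirroring pskb_expand_head() + skb_put_data():
 * make sure tailroom exists, then copy the attribute at the end.
 */
static unsigned char *append_ie(unsigned char *buf, size_t *len,
				const unsigned char *ie, size_t ie_len)
{
	unsigned char *grown = realloc(buf, *len + ie_len);

	if (!grown)
		return buf;	/* on failure the frame is left unmodified */

	memcpy(grown + *len, ie, ie_len);
	*len += ie_len;
	return grown;
}

int main(void)
{
	size_t len = 3;
	unsigned char *frame = malloc(len);
	const unsigned char noa[] = { 0xdd, 0x02, 0x00, 0x00 };	/* dummy IE */

	if (!frame)
		return 1;
	memcpy(frame, "\x50\x00\x00", len);	/* probe response header stub */
	frame = append_ie(frame, &len, noa, sizeof(noa));
	printf("frame is now %zu bytes\n", len);
	free(frame);
	return 0;
}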
+
static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
- struct ath12k *ar = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
u32 info_flags = info->flags;
@@ -4987,10 +5261,11 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
}
+ is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+
if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
- is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
if (ret) {
ath12k_warn(ar->ab, "failed to queue management frame %d\n",
@@ -5000,6 +5275,10 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
return;
}
+ /* This case applies only to P2P_GO */
+ if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
+ ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
+
ret = ath12k_dp_tx(ar, arvif, skb);
if (ret) {
ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
@@ -5018,7 +5297,7 @@ void ath12k_mac_drain_tx(struct ath12k *ar)
static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/* TODO: Need to support new monitor mode */
}
@@ -5044,14 +5323,12 @@ static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
ATH12K_RECONFIGURE_TIMEOUT_HZ);
}
-static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+static int ath12k_mac_start(struct ath12k *ar)
{
- struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev = ar->pdev;
int ret;
- ath12k_mac_drain_tx(ar);
mutex_lock(&ar->conf_mutex);
switch (ar->state) {
@@ -5074,14 +5351,14 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
1, pdev->pdev_id);
if (ret) {
- ath12k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret);
+ ath12k_err(ab, "failed to enable PMF QOS: (%d\n", ret);
goto err;
}
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
pdev->pdev_id);
if (ret) {
- ath12k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+ ath12k_err(ab, "failed to enable dynamic bw: %d\n", ret);
goto err;
}
@@ -5111,7 +5388,7 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
1, pdev->pdev_id);
if (ret) {
- ath12k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
+ ath12k_err(ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
goto err;
}
@@ -5130,14 +5407,14 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
* such as rssi, rx_duration.
*/
ret = ath12k_mac_config_mon_status_default(ar, true);
- if (ret && (ret != -ENOTSUPP)) {
+ if (ret && (ret != -EOPNOTSUPP)) {
ath12k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
ret);
goto err;
}
- if (ret == -ENOTSUPP)
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ if (ret == -EOPNOTSUPP)
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
"monitor status config is not yet supported");
/* Configure the hash seed for hash based reo dest ring selection */
@@ -5159,7 +5436,6 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw)
&ab->pdevs[ar->pdev_idx]);
return 0;
-
err:
ar->state = ATH12K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
@@ -5167,6 +5443,25 @@ err:
return ret;
}
+static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ ath12k_mac_drain_tx(ar);
+
+ ret = ath12k_mac_start(ar);
+ if (ret) {
+ ath12k_err(ab, "fail to start mac operations in pdev idx %d ret %d\n",
+ ar->pdev_idx, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
int ath12k_mac_rfkill_config(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
@@ -5224,17 +5519,14 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable)
return 0;
}
-static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+static void ath12k_mac_stop(struct ath12k *ar)
{
- struct ath12k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
int ret;
- ath12k_mac_drain_tx(ar);
-
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_config_mon_status_default(ar, false);
- if (ret && (ret != -ENOTSUPP))
+ if (ret && (ret != -EOPNOTSUPP))
ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
ret);
@@ -5260,6 +5552,16 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
atomic_set(&ar->num_pending_mgmt_tx, 0);
}
+static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+
+ ath12k_mac_drain_tx(ar);
+
+ ath12k_mac_stop(ar);
+}
+
static u8
ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
{
@@ -5269,7 +5571,7 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
do {
if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) {
vdev_stats_id++;
- if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) {
+ if (vdev_stats_id >= ATH12K_MAX_VDEV_STATS_ID) {
vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
break;
}
@@ -5376,12 +5678,11 @@ static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
return ret;
}
-static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
u32 param_id, param_value;
int ret;
@@ -5423,11 +5724,20 @@ static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
}
}
+static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ ath12k_mac_update_vif_offload(arvif);
+}
+
static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
struct ath12k_wmi_peer_create_arg peer_param;
@@ -5439,6 +5749,9 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
if (vif->type == NL80211_IFTYPE_AP &&
@@ -5483,17 +5796,29 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
+
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+
break;
case NL80211_IFTYPE_MESH_POINT:
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
fallthrough;
case NL80211_IFTYPE_AP:
arvif->vdev_type = WMI_VDEV_TYPE_AP;
+
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
ar->monitor_vdev_id = bit;
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+ break;
default:
WARN_ON(1);
break;
@@ -5526,7 +5851,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
list_add(&arvif->list, &ar->arvifs);
spin_unlock_bh(&ar->data_lock);
- ath12k_mac_op_update_vif_offload(hw, vif);
+ ath12k_mac_update_vif_offload(arvif);
nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
@@ -5685,12 +6010,16 @@ static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif
static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_base *ab;
unsigned long time_left;
int ret;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
@@ -5766,19 +6095,15 @@ err_vdev_del:
FIF_PROBE_REQ | \
FIF_FCSFAIL)
-static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
+static void ath12k_mac_configure_filter(struct ath12k *ar,
+ unsigned int total_flags)
{
- struct ath12k *ar = hw->priv;
bool reset_flag;
int ret;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
- *total_flags &= SUPPORTED_FILTERS;
- ar->filter_flags = *total_flags;
+ ar->filter_flags = total_flags;
/* For monitor mode */
reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
@@ -5793,16 +6118,36 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
ath12k_warn(ar->ab,
"fail to set monitor filter: %d\n", ret);
}
+
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"total_flags:0x%x, reset_flag:%d\n",
- *total_flags, reset_flag);
+ total_flags, reset_flag);
+}
+
+static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+
+ *total_flags &= SUPPORTED_FILTERS;
+ ath12k_mac_configure_filter(ar, *total_flags);
mutex_unlock(&ar->conf_mutex);
}
static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
@@ -5816,9 +6161,12 @@ static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *
static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
int ret;
+ ar = ath12k_ah_to_ar(ah);
+
mutex_lock(&ar->conf_mutex);
ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
mutex_unlock(&ar->conf_mutex);
@@ -5826,14 +6174,13 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx
return ret;
}
-static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_ampdu_params *params)
+static int ath12k_mac_ampdu_action(struct ath12k_vif *arvif,
+ struct ieee80211_ampdu_params *params)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k *ar = arvif->ar;
int ret = -EINVAL;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ar->conf_mutex);
switch (params->action) {
case IEEE80211_AMPDU_RX_START:
@@ -5854,16 +6201,40 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
break;
}
+ return ret;
+}
+
+static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ int ret = -EINVAL;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_ampdu_action(arvif, params);
mutex_unlock(&ar->conf_mutex);
+ if (ret)
+ ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n",
+ ar->pdev_idx, params->action, ret);
+
return ret;
}
static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx add freq %u width %d ptr %pK\n",
@@ -5886,8 +6257,12 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx remove freq %u width %d ptr %pK\n",
@@ -5995,6 +6370,11 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
+ /* Fill the MBSSID flags to indicate the AP is non-MBSSID by default.
+ * The corresponding flags will be updated once MBSSID support is added.
+ */
+ arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
+
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
arg.ssid_len = arvif->u.ap.ssid_len;
@@ -6071,46 +6451,6 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
return 0;
}
-static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
-{
- struct ath12k *ar = arvif->ar;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- reinit_completion(&ar->vdev_setup_done);
-
- ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
- if (ret) {
- ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
- arvif->vdev_id, ret);
- goto err;
- }
-
- ret = ath12k_mac_vdev_setup_sync(ar);
- if (ret) {
- ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
- arvif->vdev_id, ret);
- goto err;
- }
-
- WARN_ON(ar->num_started_vdevs == 0);
-
- ar->num_started_vdevs--;
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
- arvif->vif->addr, arvif->vdev_id);
-
- if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
- clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
- arvif->vdev_id);
- }
-
- return 0;
-err:
- return ret;
-}
-
static int ath12k_mac_vdev_start(struct ath12k_vif *arvif,
struct ieee80211_chanctx_conf *ctx)
{
@@ -6215,6 +6555,8 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
if (WARN_ON(!arvif->is_started))
continue;
+ arvif->punct_bitmap = vifs[i].new_ctx->def.punctured;
+
/* Firmware expect vdev_restart only if vdev is up.
* If vdev is down then it expect vdev_stop->vdev_start.
*/
@@ -6266,7 +6608,7 @@ ath12k_mac_update_active_vif_chan(struct ath12k *ar,
struct ieee80211_chanctx_conf *ctx)
{
struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx };
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
lockdep_assert_held(&ar->conf_mutex);
@@ -6295,8 +6637,12 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
+
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
mutex_lock(&ar->conf_mutex);
@@ -6311,7 +6657,8 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
goto unlock;
if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
- changed & IEEE80211_CHANCTX_CHANGE_RADAR)
+ changed & IEEE80211_CHANCTX_CHANGE_RADAR ||
+ changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING)
ath12k_mac_update_active_vif_chan(ar, ctx);
/* TODO: Recalc radar detection */
@@ -6320,12 +6667,11 @@ unlock:
mutex_unlock(&ar->conf_mutex);
}
-static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath12k_start_vdev_delay(struct ath12k *ar,
+ struct ath12k_vif *arvif)
{
- struct ath12k *ar = hw->priv;
struct ath12k_base *ab = ar->ab;
- struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_vif *vif = arvif->vif;
int ret;
if (WARN_ON(arvif->is_started))
@@ -6359,19 +6705,23 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
struct ath12k_wmi_peer_create_arg param;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
"mac chanctx assign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
- arvif->punct_bitmap = link_conf->eht_puncturing;
+ arvif->punct_bitmap = ctx->def.punctured;
/* for some targets bss peer must be created before vdev_start */
if (ab->hw_params->vdev_start_delay &&
@@ -6438,11 +6788,15 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
@@ -6466,11 +6820,13 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = false;
}
- ret = ath12k_mac_vdev_stop(arvif);
- if (ret)
- ath12k_warn(ab, "failed to stop vdev %i: %d\n",
- arvif->vdev_id, ret);
-
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA) {
+ ath12k_bss_disassoc(ar, arvif);
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
arvif->is_started = false;
if (ab->hw_params->vdev_start_delay &&
@@ -6490,7 +6846,10 @@ ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
mutex_lock(&ar->conf_mutex);
@@ -6532,10 +6891,15 @@ ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
*/
static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- struct ath12k *ar = hw->priv;
- int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
- return ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+ return ret;
}
static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
@@ -6553,15 +6917,10 @@ static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
-static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop)
+static void ath12k_mac_flush(struct ath12k *ar)
{
- struct ath12k *ar = hw->priv;
long time_left;
- if (drop)
- return;
-
time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
(atomic_read(&ar->dp.num_tx_pending) == 0),
ATH12K_FLUSH_TIMEOUT);
@@ -6576,6 +6935,18 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v
time_left);
}
+static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+
+ if (drop)
+ return;
+
+ ath12k_mac_flush(ar);
+}
+
static int
ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar,
enum nl80211_band band,
@@ -6778,7 +7149,7 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data,
arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
spin_unlock_bh(&ar->data_lock);
- ieee80211_queue_work(ar->hw, &arsta->update_wk);
+ ieee80211_queue_work(ath12k_ar_to_hw(ar), &arsta->update_wk);
}
static void ath12k_mac_disable_peer_fixed_rate(void *data,
@@ -6826,8 +7197,10 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
sgi = mask->control[band].gi;
- if (sgi == NL80211_TXRATE_FORCE_LGI)
- return -EINVAL;
+ if (sgi == NL80211_TXRATE_FORCE_LGI) {
+ ret = -EINVAL;
+ goto out;
+ }
/* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
* requires passing at least one of used basic rates along with them.
@@ -6843,7 +7216,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
if (ret) {
ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
arvif->vdev_id, ret);
- return ret;
+ goto out;
}
ieee80211_iterate_stations_atomic(hw,
ath12k_mac_disable_peer_fixed_rate,
@@ -6888,7 +7261,8 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
*/
ath12k_warn(ar->ab,
"Setting more than one MCS Value in bitrate mask not supported\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ieee80211_iterate_stations_atomic(hw,
@@ -6915,6 +7289,7 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
+out:
return ret;
}
@@ -6922,14 +7297,18 @@ static void
ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type)
{
- struct ath12k *ar = hw->priv;
- struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+ struct ath12k_base *ab;
struct ath12k_vif *arvif;
int recovery_count;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
+ ar = ath12k_ah_to_ar(ah);
+ ab = ar->ab;
+
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH12K_STATE_RESTARTED) {
@@ -7013,7 +7392,8 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
struct ieee80211_supported_band *sband;
struct survey_info *ar_survey;
int ret = 0;
@@ -7021,6 +7401,8 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (idx >= ATH12K_NUM_CHANS)
return -ENOENT;
+ ar = ath12k_ah_to_ar(ah);
+
ar_survey = &ar->survey[idx];
mutex_lock(&ar->conf_mutex);
@@ -7052,6 +7434,7 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
exit:
mutex_unlock(&ar->conf_mutex);
+
return ret;
}
@@ -7089,6 +7472,125 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
+static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.roc_notify = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath12k_scan_abort(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
+ return 0;
+}
+
+static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_wmi_scan_req_arg arg;
+ struct ath12k *ar;
+ u32 scan_time_msec;
+ int ret;
+
+ ar = ath12k_ah_to_ar(ah);
+
+ mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ reinit_completion(&ar->scan.on_channel);
+ ar->scan.state = ATH12K_SCAN_STARTING;
+ ar->scan.is_roc = true;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.roc_freq = chan->center_freq;
+ ar->scan.roc_notify = true;
+ ret = 0;
+ break;
+ case ATH12K_SCAN_STARTING:
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ scan_time_msec = hw->wiphy->max_remain_on_channel_duration * 2;
+
+ memset(&arg, 0, sizeof(arg));
+ ath12k_wmi_start_scan_init(ar, &arg);
+ arg.num_chan = 1;
+ arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
+ GFP_KERNEL);
+ if (!arg.chan_list) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH12K_SCAN_ID;
+ arg.chan_list[0] = chan->center_freq;
+ arg.dwell_time_active = scan_time_msec;
+ arg.dwell_time_passive = scan_time_msec;
+ arg.max_scan_time = scan_time_msec;
+ arg.scan_f_passive = 1;
+ arg.burst_duration = duration;
+
+ ret = ath12k_start_scan(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto free_chan_list;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ar->ab, "failed to switch to channel for roc scan\n");
+ ret = ath12k_scan_stop(ar);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+ ret = -ETIMEDOUT;
+ goto free_chan_list;
+ }
+
+ ieee80211_queue_delayed_work(hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
+ ret = 0;
+
+free_chan_list:
+ kfree(arg.chan_list);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
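
Remain-on-channel is implemented as a single-channel passive scan whose dwell time is twice the advertised maximum ROC duration, and a new request is only admitted from the idle scan state. A small sketch of that admission gate (state names mirror the driver's; the helper is illustrative):

#include <errno.h>
#include <stdio.h>

/* Admission gate for a new remain-on-channel request: only the idle
 * scan state accepts it; anything in flight returns -EBUSY, exactly as
 * in the switch above. Enum values are illustrative.
 */
enum scan_state { SCAN_IDLE, SCAN_STARTING, SCAN_RUNNING, SCAN_ABORTING };

static int roc_admit(enum scan_state state)
{
	return state == SCAN_IDLE ? 0 : -EBUSY;
}

int main(void)
{
	printf("idle: %d, running: %d\n",
	       roc_admit(SCAN_IDLE), roc_admit(SCAN_RUNNING));
	return 0;
}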
+
static const struct ieee80211_ops ath12k_ops = {
.tx = ath12k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@@ -7123,6 +7625,8 @@ static const struct ieee80211_ops ath12k_ops = {
.get_survey = ath12k_mac_op_get_survey,
.flush = ath12k_mac_op_flush,
.sta_statistics = ath12k_mac_op_sta_statistics,
+ .remain_on_channel = ath12k_mac_op_remain_on_channel,
+ .cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
};
static void ath12k_mac_update_ch_list(struct ath12k *ar,
@@ -7158,9 +7662,9 @@ static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
}
static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
- u32 supported_bands)
+ u32 supported_bands,
+ struct ieee80211_supported_band *bands[])
{
- struct ieee80211_hw *hw = ar->hw;
struct ieee80211_supported_band *band;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
void *channels;
@@ -7186,7 +7690,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_g_rates_size;
band->bitrates = ath12k_g_rates;
- hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+ bands[NL80211_BAND_2GHZ] = band;
if (ar->ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
@@ -7198,7 +7702,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
}
if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
- if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) {
+ if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) {
channels = kmemdup(ath12k_6ghz_channels,
sizeof(ath12k_6ghz_channels), GFP_KERNEL);
if (!channels) {
@@ -7213,7 +7717,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
- hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+ bands[NL80211_BAND_6GHZ] = band;
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
@@ -7235,7 +7739,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
band->channels = channels;
band->n_bitrates = ath12k_a_rates_size;
band->bitrates = ath12k_a_rates;
- hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+ bands[NL80211_BAND_5GHZ] = band;
if (ar->ab->hw_params->single_pdev_only) {
phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
@@ -7251,28 +7755,59 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
return 0;
}
-static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
+static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah)
{
- struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ar->hw;
- struct wiphy *wiphy = hw->wiphy;
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ u16 interface_modes = U16_MAX;
+
+ interface_modes &= ar->ab->hw_params->interface_modes;
+
+ return interface_modes == U16_MAX ? 0 : interface_modes;
+}
+
+static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah,
+ enum nl80211_iftype type)
+{
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ u16 interface_modes, mode;
+ bool is_enable = true;
+
+ mode = BIT(type);
+
+ interface_modes = ar->ab->hw_params->interface_modes;
+ if (!(interface_modes & mode))
+ is_enable = false;
+
+ return is_enable;
+}
+
+static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
+{
+ struct wiphy *wiphy = ah->hw->wiphy;
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
int n_limits, max_interfaces;
- bool ap, mesh;
+ bool ap, mesh, p2p;
- ap = ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_AP);
+ ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
+ p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT);
+ ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
if (!combinations)
return -ENOMEM;
- if (ap || mesh) {
+ if ((ap || mesh) && !p2p) {
n_limits = 2;
max_interfaces = 16;
+ } else if (p2p) {
+ n_limits = 3;
+ if (ap || mesh)
+ max_interfaces = 16;
+ else
+ max_interfaces = 3;
} else {
n_limits = 1;
max_interfaces = 1;
@@ -7287,14 +7822,22 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
limits[0].max = 1;
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
- if (ap) {
+ if (ap || mesh || p2p)
limits[1].max = max_interfaces;
+
+ if (ap)
limits[1].types |= BIT(NL80211_IFTYPE_AP);
- }
if (mesh)
limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+ if (p2p) {
+ limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+ limits[2].max = 1;
+ limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE);
+ }
+
combinations[0].limits = limits;
combinations[0].n_limits = n_limits;
combinations[0].max_interfaces = max_interfaces;
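
For a target that supports AP, mesh and P2P, the limits built above allow one station, up to sixteen AP/mesh/P2P-GO/P2P-client interfaces, and a single P2P device. An illustrative dump of that combination (values mirror the code above, not queried from hardware):

#include <stdio.h>

struct limit {
	int max;
	const char *types;
};

int main(void)
{
	/* Combination when ap, mesh and p2p are all enabled: n_limits = 3,
	 * max_interfaces = 16, as built by the function above.
	 */
	struct limit limits[] = {
		{ 1,  "STATION" },
		{ 16, "AP | MESH_POINT | P2P_CLIENT | P2P_GO" },
		{ 1,  "P2P_DEVICE" },
	};
	int i;

	for (i = 0; i < 3; i++)
		printf("limit[%d]: max %2d types %s\n",
		       i, limits[i].max, limits[i].types);
	return 0;
}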
@@ -7349,21 +7892,27 @@ static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
},
};
-static void __ath12k_mac_unregister(struct ath12k *ar)
+static void ath12k_mac_cleanup_unregister(struct ath12k *ar)
{
- struct ieee80211_hw *hw = ar->hw;
- struct wiphy *wiphy = hw->wiphy;
-
- cancel_work_sync(&ar->regd_update_work);
-
- ieee80211_unregister_hw(hw);
-
idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar);
idr_destroy(&ar->txmgmt_idr);
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+}
+
+static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
+{
+ struct ieee80211_hw *hw = ah->hw;
+ struct wiphy *wiphy = hw->wiphy;
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+
+ cancel_work_sync(&ar->regd_update_work);
+
+ ieee80211_unregister_hw(hw);
+
+ ath12k_mac_cleanup_unregister(ar);
kfree(wiphy->iface_combinations[0].limits);
kfree(wiphy->iface_combinations);
@@ -7371,28 +7920,42 @@ static void __ath12k_mac_unregister(struct ath12k *ar)
SET_IEEE80211_DEV(hw, NULL);
}
-void ath12k_mac_unregister(struct ath12k_base *ab)
+static int ath12k_mac_setup_register(struct ath12k *ar,
+ u32 *ht_cap,
+ struct ieee80211_supported_band *bands[])
{
- struct ath12k *ar;
- struct ath12k_pdev *pdev;
- int i;
+ struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ int ret;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar)
- continue;
+ init_waitqueue_head(&ar->txmgmt_empty_waitq);
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
- __ath12k_mac_unregister(ar);
- }
+ ath12k_pdev_caps_update(ar);
+
+ ret = ath12k_mac_setup_channels_rates(ar,
+ cap->supported_bands,
+ bands);
+ if (ret)
+ return ret;
+
+ ath12k_mac_setup_ht_vht_cap(ar, cap, ht_cap);
+ ath12k_mac_setup_sband_iftype_data(ar, cap);
+
+ ar->max_num_stations = ath12k_core_get_max_station_per_radio(ar->ab);
+ ar->max_num_peers = ath12k_core_get_max_peers_per_radio(ar->ab);
+
+ return 0;
}
-static int __ath12k_mac_register(struct ath12k *ar)
+static int ath12k_mac_hw_register(struct ath12k_hw *ah)
{
- struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ah->hw;
struct wiphy *wiphy = hw->wiphy;
- struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev;
+ struct ath12k_pdev_cap *cap;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
@@ -7407,30 +7970,34 @@ static int __ath12k_mac_register(struct ath12k *ar)
int ret;
u32 ht_cap = 0;
- ath12k_pdev_caps_update(ar);
+ pdev = ar->pdev;
- SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
-
- SET_IEEE80211_DEV(hw, ab->dev);
+ if (ab->pdevs_macaddr_valid)
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ else
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
- ret = ath12k_mac_setup_channels_rates(ar,
- cap->supported_bands);
+ ret = ath12k_mac_setup_register(ar, &ht_cap, hw->wiphy->bands);
if (ret)
- goto err;
+ goto out;
- ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
- ath12k_mac_setup_sband_iftype_data(ar, cap);
+ wiphy->max_ap_assoc_sta = ar->max_num_stations;
- ret = ath12k_mac_setup_iface_combinations(ar);
- if (ret) {
- ath12k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
- goto err_free_channels;
- }
+ cap = &pdev->cap;
wiphy->available_antennas_rx = cap->rx_chain_mask;
wiphy->available_antennas_tx = cap->tx_chain_mask;
- wiphy->interface_modes = ab->hw_params->interface_modes;
+ SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
+ SET_IEEE80211_DEV(hw, ab->dev);
+
+ ret = ath12k_mac_setup_iface_combinations(ah);
+ if (ret) {
+ ath12k_err(ab, "failed to setup interface combinations: %d\n", ret);
+ goto err_cleanup_unregister;
+ }
+
+ wiphy->interface_modes = ath12k_mac_get_ifmodes(ah);
if (wiphy->bands[NL80211_BAND_2GHZ] &&
wiphy->bands[NL80211_BAND_5GHZ] &&
@@ -7483,15 +8050,10 @@ static int __ath12k_mac_register(struct ath12k *ar)
wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
- ar->max_num_stations = TARGET_NUM_STATIONS;
- ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
-
- wiphy->max_ap_assoc_sta = ar->max_num_stations;
-
hw->queues = ATH12K_HW_MAX_QUEUES;
wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT;
hw->vif_data_size = sizeof(struct ath12k_vif);
hw->sta_data_size = sizeof(struct ath12k_sta);
@@ -7524,7 +8086,7 @@ static int __ath12k_mac_register(struct ath12k *ar)
ret = ieee80211_register_hw(hw);
if (ret) {
- ath12k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
+ ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
goto err_free_if_combs;
}
@@ -7552,142 +8114,213 @@ err_free_if_combs:
kfree(wiphy->iface_combinations[0].limits);
kfree(wiphy->iface_combinations);
-err_free_channels:
- kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
- kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
- kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+err_cleanup_unregister:
+ ath12k_mac_cleanup_unregister(ar);
-err:
+out:
SET_IEEE80211_DEV(hw, NULL);
+
return ret;
}
+static void ath12k_mac_setup(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev = ar->pdev;
+ u8 pdev_idx = ar->pdev_idx;
+
+ ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, pdev_idx);
+
+ ar->wmi = &ab->wmi_ab.wmi[pdev_idx];
+ /* FIXME: wmi[0] is already initialized during attach,
+ * Should we do this again?
+ */
+ ath12k_wmi_pdev_attach(ab, pdev_idx);
+
+ ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
+ ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
+ ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
+ ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
+
+ spin_lock_init(&ar->data_lock);
+ INIT_LIST_HEAD(&ar->arvifs);
+ INIT_LIST_HEAD(&ar->ppdu_stats_info);
+ mutex_init(&ar->conf_mutex);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->vdev_delete_done);
+ init_completion(&ar->peer_assoc_done);
+ init_completion(&ar->peer_delete_done);
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->bss_survey_done);
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
+
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
+ INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+ clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+}
+
int ath12k_mac_register(struct ath12k_base *ab)
{
- struct ath12k *ar;
- struct ath12k_pdev *pdev;
+ struct ath12k_hw *ah;
int i;
int ret;
if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (ab->pdevs_macaddr_valid) {
- ether_addr_copy(ar->mac_addr, pdev->mac_addr);
- } else {
- ether_addr_copy(ar->mac_addr, ab->mac_addr);
- ar->mac_addr[4] += i;
- }
-
- ret = __ath12k_mac_register(ar);
- if (ret)
- goto err_cleanup;
-
- init_waitqueue_head(&ar->txmgmt_empty_waitq);
- idr_init(&ar->txmgmt_idr);
- spin_lock_init(&ar->txmgmt_idr_lock);
- }
-
/* Initialize channel counters frequency value in hertz */
ab->cc_freq_hz = 320000;
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+
+ ret = ath12k_mac_hw_register(ah);
+ if (ret)
+ goto err;
+ }
+
return 0;
-err_cleanup:
+err:
for (i = i - 1; i >= 0; i--) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- __ath12k_mac_unregister(ar);
+ ah = ab->ah[i];
+ if (!ah)
+ continue;
+
+ ath12k_mac_hw_unregister(ah);
}
return ret;
}
-int ath12k_mac_allocate(struct ath12k_base *ab)
+void ath12k_mac_unregister(struct ath12k_base *ab)
+{
+ struct ath12k_hw *ah;
+ int i;
+
+ for (i = ab->num_hw - 1; i >= 0; i--) {
+ ah = ab->ah[i];
+ if (!ah)
+ continue;
+
+ ath12k_mac_hw_unregister(ah);
+ }
+}
+
+static void ath12k_mac_hw_destroy(struct ath12k_hw *ah)
+{
+ ieee80211_free_hw(ah->hw);
+}
+
+static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
+ struct ath12k_pdev_map *pdev_map,
+ u8 num_pdev_map)
{
struct ieee80211_hw *hw;
struct ath12k *ar;
struct ath12k_pdev *pdev;
- int ret;
+ struct ath12k_hw *ah;
int i;
+ u8 pdev_idx;
- if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
- return 0;
+ hw = ieee80211_alloc_hw(struct_size(ah, radio, num_pdev_map),
+ &ath12k_ops);
+ if (!hw)
+ return NULL;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- hw = ieee80211_alloc_hw(sizeof(struct ath12k), &ath12k_ops);
- if (!hw) {
- ath12k_warn(ab, "failed to allocate mac80211 hw device\n");
- ret = -ENOMEM;
- goto err_free_mac;
- }
+ ah = ath12k_hw_to_ah(hw);
+ ah->hw = hw;
+ ah->num_radio = num_pdev_map;
+
+ for (i = 0; i < num_pdev_map; i++) {
+ ab = pdev_map[i].ab;
+ pdev_idx = pdev_map[i].pdev_idx;
+ pdev = &ab->pdevs[pdev_idx];
- ar = hw->priv;
- ar->hw = hw;
+ ar = ath12k_ah_to_ar(ah);
+ ar->ah = ah;
ar->ab = ab;
+ ar->hw_link_id = i;
ar->pdev = pdev;
- ar->pdev_idx = i;
- ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, i);
-
- ar->wmi = &ab->wmi_ab.wmi[i];
- /* FIXME: wmi[0] is already initialized during attach,
- * Should we do this again?
- */
- ath12k_wmi_pdev_attach(ab, i);
-
- ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
- ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
- ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
- ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
-
+ ar->pdev_idx = pdev_idx;
pdev->ar = ar;
- spin_lock_init(&ar->data_lock);
- INIT_LIST_HEAD(&ar->arvifs);
- INIT_LIST_HEAD(&ar->ppdu_stats_info);
- mutex_init(&ar->conf_mutex);
- init_completion(&ar->vdev_setup_done);
- init_completion(&ar->vdev_delete_done);
- init_completion(&ar->peer_assoc_done);
- init_completion(&ar->peer_delete_done);
- init_completion(&ar->install_key_done);
- init_completion(&ar->bss_survey_done);
- init_completion(&ar->scan.started);
- init_completion(&ar->scan.completed);
-
- INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
- INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
-
- INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
- skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- }
- return 0;
-
-err_free_mac:
- ath12k_mac_destroy(ab);
+ ath12k_mac_setup(ar);
+ }
- return ret;
+ return ah;
}
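
struct_size(ah, radio, num_pdev_map) sizes the ath12k_hw header plus its trailing flexible array of radios in one overflow-checked expression. A plain userspace analogue of that allocation pattern (types are stand-ins; the kernel macro additionally saturates on overflow):

#include <stdio.h>
#include <stdlib.h>

/* Header plus trailing flexible array, allocated as one block; the
 * kernel's struct_size() computes the same sum with overflow checking.
 */
struct radio {
	int pdev_idx;
};

struct hw {
	int num_radio;
	struct radio radio[];	/* flexible array member */
};

int main(void)
{
	int n = 2;	/* num_pdev_map */
	struct hw *ah = malloc(sizeof(*ah) + n * sizeof(ah->radio[0]));

	if (!ah)
		return 1;
	ah->num_radio = n;
	printf("allocated %zu bytes for %d radios\n",
	       sizeof(*ah) + n * sizeof(ah->radio[0]), ah->num_radio);
	free(ah);
	return 0;
}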
void ath12k_mac_destroy(struct ath12k_base *ab)
{
- struct ath12k *ar;
struct ath12k_pdev *pdev;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar)
+ if (!pdev->ar)
continue;
- ieee80211_free_hw(ar->hw);
pdev->ar = NULL;
}
+
+ for (i = 0; i < ab->num_hw; i++) {
+ if (!ab->ah[i])
+ continue;
+
+ ath12k_mac_hw_destroy(ab->ah[i]);
+ ab->ah[i] = NULL;
+ }
+}
+
+int ath12k_mac_allocate(struct ath12k_base *ab)
+{
+ struct ath12k_hw *ah;
+ struct ath12k_pdev_map pdev_map[MAX_RADIOS];
+ int ret, i, j;
+ u8 radio_per_hw;
+
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ ab->num_hw = ab->num_radios;
+ radio_per_hw = 1;
+
+ for (i = 0; i < ab->num_hw; i++) {
+ for (j = 0; j < radio_per_hw; j++) {
+ pdev_map[j].ab = ab;
+ pdev_map[j].pdev_idx = (i * radio_per_hw) + j;
+ }
+
+ ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw);
+ if (!ah) {
+ ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n",
+ i);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ab->ah[i] = ah;
+ }
+
+ ath12k_dp_pdev_pre_alloc(ab);
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ if (!ab->ah[i])
+ continue;
+
+ ath12k_mac_hw_destroy(ab->ah[i]);
+ ab->ah[i] = NULL;
+ }
+
+ return ret;
}
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 7c63bb628adc..3f5e1be0dff9 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_MAC_H
@@ -12,6 +12,8 @@
struct ath12k;
struct ath12k_base;
+struct ath12k_hw;
+struct ath12k_pdev_map;
struct ath12k_generic_iter {
struct ath12k *ar;
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index d5441ddb374b..adb8c3ec1950 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/firmware.h>
#include "core.h"
#include "debug.h"
@@ -13,6 +14,8 @@
#include "pci.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000
+#define OTP_INVALID_BOARD_ID 0xFFFF
+#define OTP_VALID_DUALMAC_BOARD_ID_MASK 0x1000
static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
{
@@ -358,23 +361,60 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
{
struct ath12k_base *ab = ab_pci->ab;
struct mhi_controller *mhi_ctrl;
+ unsigned int board_id;
int ret;
+ bool dualmac = false;
mhi_ctrl = mhi_alloc_controller();
if (!mhi_ctrl)
return -ENOMEM;
- ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
- ab_pci->amss_path,
- sizeof(ab_pci->amss_path));
-
ab_pci->mhi_ctrl = mhi_ctrl;
mhi_ctrl->cntrl_dev = ab->dev;
- mhi_ctrl->fw_image = ab_pci->amss_path;
mhi_ctrl->regs = ab->mem;
mhi_ctrl->reg_len = ab->mem_len;
mhi_ctrl->rddm_size = ab->hw_params->rddm_size;
+ if (ab->hw_params->otp_board_id_register) {
+ board_id =
+ ath12k_pci_read32(ab, ab->hw_params->otp_board_id_register);
+ board_id = u32_get_bits(board_id, OTP_BOARD_ID_MASK);
+
+ if (!board_id || (board_id == OTP_INVALID_BOARD_ID)) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "failed to read board id\n");
+ } else if (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK) {
+ dualmac = true;
+ ab->slo_capable = false;
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "dualmac fw selected for board id: %x\n", board_id);
+ }
+ }
+
+ if (dualmac) {
+ if (ab->fw.amss_dualmac_data && ab->fw.amss_dualmac_len > 0) {
+ /* use MHI firmware file from firmware-N.bin */
+ mhi_ctrl->fw_data = ab->fw.amss_dualmac_data;
+ mhi_ctrl->fw_sz = ab->fw.amss_dualmac_len;
+ } else {
+ ath12k_warn(ab, "dualmac firmware IE not present in firmware-N.bin\n");
+ ret = -ENOENT;
+ goto free_controller;
+ }
+ } else {
+ if (ab->fw.amss_data && ab->fw.amss_len > 0) {
+ /* use MHI firmware file from firmware-N.bin */
+ mhi_ctrl->fw_data = ab->fw.amss_data;
+ mhi_ctrl->fw_sz = ab->fw.amss_len;
+ } else {
+ /* use the old separate mhi.bin MHI firmware file */
+ ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
+ ab_pci->amss_path,
+ sizeof(ab_pci->amss_path));
+ mhi_ctrl->fw_image = ab_pci->amss_path;
+ }
+ }
+
ret = ath12k_mhi_get_msi(ab_pci);
if (ret) {
ath12k_err(ab, "failed to get msi for mhi\n");
diff --git a/drivers/net/wireless/ath/ath12k/p2p.c b/drivers/net/wireless/ath/ath12k/p2p.c
new file mode 100644
index 000000000000..d334df720032
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/p2p.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <net/mac80211.h>
+#include "core.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath12k_p2p_noa_ie_fill(u8 *data, size_t len,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 ctwindow = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU);
+ bool oppps = le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS);
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ u8 noa_descriptors = le32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+ int i;
+
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = le32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_INDEX);
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count =
+ __le32_to_cpu(noa->descriptors[i].type_count);
+ noa_attr->desc[i].duration = noa->descriptors[i].duration;
+ noa_attr->desc[i].interval = noa->descriptors[i].interval;
+ noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t ath12k_p2p_noa_ie_len_compute(const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ size_t len = 0;
+
+ if (!(le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM)) &&
+ !(le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS)))
+ return 0;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM) *
+ sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
+
+static void ath12k_p2p_noa_ie_assign(struct ath12k_vif *arvif, void *ie,
+ size_t len)
+{
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ kfree(arvif->u.ap.noa_data);
+
+ arvif->u.ap.noa_data = ie;
+ arvif->u.ap.noa_len = len;
+}
+
+static void __ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ath12k *ar = arvif->ar;
+ void *ie;
+ size_t len;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath12k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+ len = ath12k_p2p_noa_ie_len_compute(noa);
+ if (!len)
+ return;
+
+ ie = kmalloc(len, GFP_ATOMIC);
+ if (!ie)
+ return;
+
+ ath12k_p2p_noa_ie_fill(ie, len, noa);
+ ath12k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ath12k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ __ath12k_p2p_noa_update(arvif, noa);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_p2p_noa_arg *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath12k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
+ const struct ath12k_wmi_p2p_noa_info *noa)
+{
+ struct ath12k_p2p_noa_arg arg = {
+ .vdev_id = vdev_id,
+ .noa = noa,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_p2p_noa_update_vdev_iter,
+ &arg);
+}
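[editor's sketch] As a size check for ath12k_p2p_noa_ie_len_compute() above: a packed ieee80211_p2p_noa_desc is 13 bytes (a u8 count plus three __le32 fields), so the IE is an 11-byte fixed header plus 13 bytes per descriptor. A standalone sketch of the same arithmetic:

#include <stddef.h>
#include <stdio.h>

/* mirrors ath12k_p2p_noa_ie_len_compute(); 13 == sizeof a packed
 * ieee80211_p2p_noa_desc (u8 count + 3 x __le32) */
static size_t p2p_noa_ie_len(unsigned int noa_descriptors)
{
	size_t len = 0;

	len += 1 + 1 + 4;            /* EID + len + OUI (incl. OUI type) */
	len += 1 + 2;                /* NoA attr id + 2-byte attr len */
	len += 1 + 1;                /* index + oppps_ctwindow */
	len += noa_descriptors * 13; /* descriptors */

	return len;
}

int main(void)
{
	printf("%zu\n", p2p_noa_ie_len(2)); /* prints 37 */
	return 0;
}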
diff --git a/drivers/net/wireless/ath/ath12k/p2p.h b/drivers/net/wireless/ath/ath12k/p2p.h
new file mode 100644
index 000000000000..5768139a7844
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/p2p.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_P2P_H
+#define ATH12K_P2P_H
+
+#include "wmi.h"
+
+struct ath12k_wmi_p2p_noa_info;
+
+struct ath12k_p2p_noa_arg {
+ u32 vdev_id;
+ const struct ath12k_wmi_p2p_noa_info *noa;
+};
+
+void ath12k_p2p_noa_update(struct ath12k_vif *arvif,
+ const struct ath12k_wmi_p2p_noa_info *noa);
+void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
+ const struct ath12k_wmi_p2p_noa_info *noa);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index f0d2e2d8719c..14954bc05144 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -39,6 +39,10 @@
#define QCN9274_DEVICE_ID 0x1109
#define WCN7850_DEVICE_ID 0x1107
+#define PCIE_LOCAL_REG_QRTR_NODE_ID 0x1E03164
+#define DOMAIN_NUMBER_MASK GENMASK(7, 4)
+#define BUS_NUMBER_MASK GENMASK(3, 0)
+
static const struct pci_device_id ath12k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
@@ -201,18 +205,17 @@ static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
/* If offset lies within CE register range, use 2nd window */
else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
window_start = 2 * WINDOW_START;
- /* If offset lies within PCI_BAR_WINDOW0_BASE and within PCI_SOC_PCI_REG_BASE
- * use 0th window
- */
- else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) &&
- !((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK))
- window_start = 0;
else
window_start = WINDOW_START;
return window_start;
}
+static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset)
+{
+ return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END);
+}
+
static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
{
u32 val, delay;
@@ -682,12 +685,22 @@ static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
{
struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ struct pci_bus *bus = ab_pci->pdev->bus;
+
cfg->tgt_ce = ab->hw_params->target_ce_config;
cfg->tgt_ce_len = ab->hw_params->target_ce_count;
cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
+
+ if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) {
+ ab_pci->qmi_instance =
+ u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) |
+ u32_encode_bits(bus->number, BUS_NUMBER_MASK);
+ ab->qmi.service_ins_id += ab_pci->qmi_instance;
+ }
}
static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
@@ -901,6 +914,26 @@ static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
}
+static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ u32 reg;
+
+ /* On platforms with two or more identical MHI devices, the QMI services
+ * run with identical qrtr-node-ids. Because of these identical IDs,
+ * qrtr-lookup cannot register more than one QMI service per node ID.
+ *
+ * Generate a unique instance ID from the PCIe domain number and bus
+ * number, and write it to the given register so that it is available
+ * to firmware when the QMI service is spawned.
+ */
+ reg = PCIE_LOCAL_REG_QRTR_NODE_ID & WINDOW_RANGE_MASK;
+ ath12k_pci_write32(ab, reg, ab_pci->qmi_instance);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n",
+ reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg));
+}
+
static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
@@ -1138,15 +1171,17 @@ u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath12k_pci_select_window(ab_pci, offset);
- val = ioread32(ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
+
+ if (ath12k_pci_is_offset_within_mhi_region(offset)) {
+ offset = offset - PCI_MHIREGLEN_REG;
+ val = ioread32(ab->mem +
+ (offset & WINDOW_RANGE_MASK));
+ } else {
+ val = ioread32(ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
spin_unlock_bh(&ab_pci->window_lock);
} else {
- if ((!window_start) &&
- (offset >= PCI_MHIREGLEN_REG &&
- offset <= PCI_MHI_REGION_END))
- offset = offset - PCI_MHIREGLEN_REG;
-
val = ioread32(ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
@@ -1183,15 +1218,17 @@ void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath12k_pci_select_window(ab_pci, offset);
- iowrite32(value, ab->mem + window_start +
- (offset & WINDOW_RANGE_MASK));
+
+ if (ath12k_pci_is_offset_within_mhi_region(offset)) {
+ offset = offset - PCI_MHIREGLEN_REG;
+ iowrite32(value, ab->mem +
+ (offset & WINDOW_RANGE_MASK));
+ } else {
+ iowrite32(value, ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
spin_unlock_bh(&ab_pci->window_lock);
} else {
- if ((!window_start) &&
- (offset >= PCI_MHIREGLEN_REG &&
- offset <= PCI_MHI_REGION_END))
- offset = offset - PCI_MHIREGLEN_REG;
-
iowrite32(value, ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
@@ -1219,6 +1256,9 @@ int ath12k_pci_power_up(struct ath12k_base *ab)
ath12k_pci_msi_enable(ab_pci);
+ if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features))
+ ath12k_pci_update_qrtr_node_id(ab);
+
ret = ath12k_mhi_start(ab_pci);
if (ret) {
ath12k_err(ab, "failed to start mhi: %d\n", ret);
@@ -1310,11 +1350,21 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
goto err_free_core;
}
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ ab->id.vendor = pdev->vendor;
+ ab->id.device = pdev->device;
+ ab->id.subsystem_vendor = pdev->subsystem_vendor;
+ ab->id.subsystem_device = pdev->subsystem_device;
+
switch (pci_dev->device) {
case QCN9274_DEVICE_ID:
ab_pci->msi_config = &ath12k_msi_config[0];
ab->static_window_map = true;
ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
+ ab->hal_rx_ops = &hal_rx_qcn9274_ops;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
@@ -1333,9 +1383,11 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
}
break;
case WCN7850_DEVICE_ID:
+ ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD;
ab_pci->msi_config = &ath12k_msi_config[0];
ab->static_window_map = false;
ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
+ ab->hal_rx_ops = &hal_rx_wcn7850_ops;
ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
&soc_hw_version_minor);
switch (soc_hw_version_major) {
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index b2edf32ada20..ca93693ba4e9 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PCI_H
#define ATH12K_PCI_H
@@ -53,6 +53,9 @@
#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
+#define QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB 0x1E20338
+#define OTP_BOARD_ID_MASK GENMASK(15, 0)
+
#define PCI_BAR_WINDOW0_BASE 0x1E00000
#define PCI_BAR_WINDOW0_END 0x1E7FFFC
#define PCI_SOC_RANGE_MASK 0x3FFF
@@ -111,6 +114,7 @@ struct ath12k_pci {
u16 link_ctl;
unsigned long irq_flags;
const struct ath12k_pci_ops *pci_ops;
+ u32 qmi_instance;
};
static inline struct ath12k_pci *ath12k_pci_priv(struct ath12k_base *ab)
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 77a132f6bbd1..92845ffff44a 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@@ -17,7 +17,7 @@
#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
#define ATH12K_QMI_MAX_CHUNK_SIZE 2097152
-static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
+static const struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -61,7 +61,7 @@ static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -511,7 +511,7 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -528,7 +528,68 @@ static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_phy_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ num_phy_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ num_phy),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ board_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -753,7 +814,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -789,7 +850,7 @@ static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -821,7 +882,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -863,7 +924,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -890,7 +951,7 @@ static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -930,7 +991,7 @@ static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
@@ -957,7 +1018,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -975,7 +1036,7 @@ static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
@@ -983,7 +1044,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1009,7 +1070,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1026,7 +1087,7 @@ static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1042,7 +1103,7 @@ static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -1068,7 +1129,7 @@ static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1094,7 +1155,7 @@ static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1348,7 +1409,7 @@ static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
@@ -1483,7 +1544,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1501,7 +1562,7 @@ static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
@@ -1525,7 +1586,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1542,7 +1603,7 @@ static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1595,7 +1656,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1630,7 +1691,7 @@ static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
@@ -1654,7 +1715,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1671,7 +1732,7 @@ static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -1706,7 +1767,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1724,7 +1785,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
@@ -1862,7 +1923,7 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -1879,22 +1940,78 @@ static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
-static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
+static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
{
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
},
};
-static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *req)
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enable_fwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enable_fwlog),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
+ struct qmi_wlanfw_host_cap_req_msg_v01 *req)
{
+ struct wlfw_host_mlo_chip_info_s_v01 *info;
+ u8 hw_link_id = 0;
+ int i;
+
+ if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "skip QMI MLO cap due to invalid num_radio %d\n",
+ ab->qmi.num_radios);
+ return;
+ }
+
req->mlo_capable_valid = 1;
req->mlo_capable = 1;
req->mlo_chip_id_valid = 1;
@@ -1905,28 +2022,31 @@ static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *re
/* Max peer number generally won't change for the same device
* but needs to be synced with host driver.
*/
- req->max_mlo_peer = 32;
+ req->max_mlo_peer = ab->hw_params->max_mlo_peer;
req->mlo_num_chips_valid = 1;
req->mlo_num_chips = 1;
+
+ info = &req->mlo_chip_info[0];
+ info->chip_id = 0;
+ info->num_local_links = ab->qmi.num_radios;
+
+ for (i = 0; i < info->num_local_links; i++) {
+ info->hw_link_id[i] = hw_link_id;
+ info->valid_mlo_link_id[i] = 1;
+
+ hw_link_id++;
+ }
+
req->mlo_chip_info_valid = 1;
- req->mlo_chip_info[0].chip_id = 0;
- req->mlo_chip_info[0].num_local_links = 2;
- req->mlo_chip_info[0].hw_link_id[0] = 0;
- req->mlo_chip_info[0].hw_link_id[1] = 1;
- req->mlo_chip_info[0].valid_mlo_link_id[0] = 1;
- req->mlo_chip_info[0].valid_mlo_link_id[1] = 1;
}
static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
{
- struct qmi_wlanfw_host_cap_req_msg_v01 req;
- struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_host_cap_req_msg_v01 req = {};
+ struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
req.num_clients_valid = 1;
req.num_clients = 1;
req.mem_cfg_mode = ab->qmi.target_mem_mode;
@@ -1963,10 +2083,10 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
*/
req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
-
- ath12k_host_cap_parse_mlo(&req);
}
+ ath12k_host_cap_parse_mlo(ab, &req);
+
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -1977,6 +2097,7 @@ static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "Failed to send host capability request,err = %d\n", ret);
goto out;
}
@@ -1996,6 +2117,62 @@ out:
return ret;
}
+static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_phy_cap_req_msg_v01 req = {};
+ struct qmi_wlanfw_phy_cap_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
+ int ret;
+
+ if (!ab->slo_capable)
+ goto out;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_PHY_CAP_REQ_V01,
+ QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_phy_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "failed to send phy capability request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!resp.num_phy_valid) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ ab->qmi.num_radios = resp.num_phy;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
+ resp.num_phy_valid, resp.num_phy,
+ resp.board_id_valid, resp.board_id);
+
+ return;
+
+out:
+ /* If the PHY capability is not advertised then rely on the default number of links */
+ ab->qmi.num_radios = ab->hw_params->def_num_link;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "no valid response from PHY capability, choose default num_phy %d\n",
+ ab->qmi.num_radios);
+}
+
static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_ind_register_req_msg_v01 *req;
@@ -2040,6 +2217,7 @@ static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_ind_register_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "Failed to send indication register request, err = %d\n",
ret);
goto out;
@@ -2068,8 +2246,8 @@ resp_out:
static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
{
struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
- struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0, i;
bool delayed;
@@ -2077,8 +2255,6 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
if (!req)
return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
-
/* Some targets by default request a block of big contiguous
* DMA memory, it's hard to allocate from kernel. So host returns
* failure to firmware and firmware then request multiple blocks of
@@ -2088,7 +2264,6 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
delayed = true;
ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
- memset(req, 0, sizeof(*req));
} else {
delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
@@ -2114,6 +2289,7 @@ static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n",
ret);
goto out;
@@ -2208,17 +2384,14 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
{
- struct qmi_wlanfw_cap_req_msg_v01 req;
- struct qmi_wlanfw_cap_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_cap_req_msg_v01 req = {};
+ struct qmi_wlanfw_cap_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
unsigned int board_id = ATH12K_BOARD_ID_DEFAULT;
int ret = 0;
int r;
int i;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -2229,6 +2402,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_cap_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n",
ret);
goto out;
@@ -2310,8 +2484,8 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
const u8 *data, u32 len, u8 type)
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
- struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
const u8 *temp = data;
int ret;
u32 remaining = len;
@@ -2319,7 +2493,6 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
while (remaining) {
req->valid = 1;
@@ -2423,8 +2596,7 @@ static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
break;
case ATH12K_QMI_BDF_TYPE_REGDB:
- ret = ath12k_core_fetch_board_data_api_1(ab, &bd,
- ATH12K_REGDB_FILE_NAME);
+ ret = ath12k_core_fetch_regdb(ab, &bd);
if (ret) {
ath12k_warn(ab, "qmi failed to load regdb bin:\n");
goto out;
@@ -2497,37 +2669,56 @@ out:
static int ath12k_qmi_m3_load(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
- const struct firmware *fw;
+ const struct firmware *fw = NULL;
+ const void *m3_data;
char path[100];
+ size_t m3_len;
int ret;
- if (m3_mem->vaddr || m3_mem->size)
+ if (m3_mem->vaddr)
+ /* m3 firmware buffer is already available in the DMA buffer */
return 0;
- fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
- if (IS_ERR(fw)) {
- ret = PTR_ERR(fw);
- ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
- path, sizeof(path));
- ath12k_err(ab, "failed to load %s: %d\n", path, ret);
- return ret;
+ if (ab->fw.m3_data && ab->fw.m3_len > 0) {
+ /* firmware-N.bin had a m3 firmware file so use that */
+ m3_data = ab->fw.m3_data;
+ m3_len = ab->fw.m3_len;
+ } else {
+ /* No m3 file in firmware-N.bin so try to request old
+ * separate m3.bin.
+ */
+ fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
+ if (IS_ERR(fw)) {
+ ret = PTR_ERR(fw);
+ ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
+ path, sizeof(path));
+ ath12k_err(ab, "failed to load %s: %d\n", path, ret);
+ return ret;
+ }
+
+ m3_data = fw->data;
+ m3_len = fw->size;
}
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
- fw->size, &m3_mem->paddr,
+ m3_len, &m3_mem->paddr,
GFP_KERNEL);
if (!m3_mem->vaddr) {
ath12k_err(ab, "failed to allocate memory for M3 with size %zu\n",
fw->size);
- release_firmware(fw);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
- memcpy(m3_mem->vaddr, fw->data, fw->size);
- m3_mem->size = fw->size;
+ memcpy(m3_mem->vaddr, m3_data, m3_len);
+ m3_mem->size = m3_len;
+
+ ret = 0;
+
+out:
release_firmware(fw);
- return 0;
+ return ret;
}
static void ath12k_qmi_m3_free(struct ath12k_base *ab)
@@ -2546,14 +2737,11 @@ static void ath12k_qmi_m3_free(struct ath12k_base *ab)
static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
- struct qmi_wlanfw_m3_info_req_msg_v01 req;
- struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_m3_info_req_msg_v01 req = {};
+ struct qmi_wlanfw_m3_info_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
ret = ath12k_qmi_m3_load(ab);
if (ret) {
ath12k_err(ab, "failed to load m3 firmware: %d", ret);
@@ -2573,6 +2761,7 @@ static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
ret);
goto out;
@@ -2597,14 +2786,11 @@ out:
static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
u32 mode)
{
- struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
- struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
- struct qmi_txn txn = {};
+ struct qmi_wlanfw_wlan_mode_req_msg_v01 req = {};
+ struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
int ret = 0;
- memset(&req, 0, sizeof(req));
- memset(&resp, 0, sizeof(resp));
-
req.mode = mode;
req.hw_debug_valid = 1;
req.hw_debug = 0;
@@ -2619,6 +2805,7 @@ static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
mode, ret);
goto out;
@@ -2649,10 +2836,10 @@ out:
static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
{
struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
- struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
+ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp = {};
struct ce_pipe_config *ce_cfg;
struct service_to_pipe *svc_cfg;
- struct qmi_txn txn = {};
+ struct qmi_txn txn;
int ret = 0, pipe_num;
ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
@@ -2662,8 +2849,6 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
if (!req)
return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
-
req->host_version_valid = 1;
strscpy(req->host_version, ATH12K_HOST_VERSION_STRING,
sizeof(req->host_version));
@@ -2710,6 +2895,7 @@ static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
if (ret < 0) {
+ qmi_txn_cancel(&txn);
ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
ret);
goto out;
@@ -2733,6 +2919,49 @@ out:
return ret;
}
+static int ath12k_qmi_wlanfw_wlan_ini_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {};
+ struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
+ struct qmi_txn txn;
+ int ret;
+
+ req.enable_fwlog_valid = true;
+ req.enable_fwlog = 1;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ ATH12K_QMI_WLANFW_WLAN_INI_REQ_V01,
+ QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath12k_warn(ab, "failed to send QMI wlan ini request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to receive QMI wlan ini request: %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "QMI wlan ini response failure: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
{
int ret;
@@ -2749,6 +2978,12 @@ int ath12k_qmi_firmware_start(struct ath12k_base *ab,
{
int ret;
+ ret = ath12k_qmi_wlanfw_wlan_ini_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send wlan fw ini: %d\n", ret);
+ return ret;
+ }
+
ret = ath12k_qmi_wlanfw_wlan_cfg_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret);
@@ -2792,6 +3027,8 @@ static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
struct ath12k_base *ab = qmi->ab;
int ret;
+ ath12k_qmi_phy_cap_send(ab);
+
ret = ath12k_qmi_fw_ind_register_send(ab);
if (ret < 0) {
ath12k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret);
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index e25bbaa125e8..6ee33c9851c6 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_QMI_H
@@ -15,7 +15,6 @@
#define ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE 64
#define ATH12K_QMI_CALDB_ADDRESS 0x4BA00000
#define ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
-#define ATH12K_QMI_WLFW_NODE_ID_BASE 0x07
#define ATH12K_QMI_WLFW_SERVICE_ID_V01 0x45
#define ATH12K_QMI_WLFW_SERVICE_VERS_V01 0x01
#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
@@ -141,6 +140,7 @@ struct ath12k_qmi {
u32 target_mem_mode;
bool target_mem_delayed;
u8 cal_done;
+ u8 num_radios;
struct target_info target;
struct m3_mem_region m3_mem;
unsigned int service_ins_id;
@@ -251,6 +251,22 @@ struct qmi_wlanfw_host_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+#define QMI_WLANFW_PHY_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_PHY_CAP_REQ_V01 0x0057
+#define QMI_WLANFW_PHY_CAP_RESP_MSG_V01_MAX_LEN 18
+#define QMI_WLANFW_PHY_CAP_RESP_V01 0x0057
+
+struct qmi_wlanfw_phy_cap_req_msg_v01 {
+};
+
+struct qmi_wlanfw_phy_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 num_phy_valid;
+ u8 num_phy;
+ u8 board_id_valid;
+ u32 board_id;
+};
+
#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020
#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18
@@ -559,6 +575,21 @@ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
+#define ATH12K_QMI_WLANFW_WLAN_INI_REQ_V01 0x002F
+#define ATH12K_QMI_WLANFW_WLAN_INI_RESP_V01 0x002F
+#define QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_INI_RESP_MSG_V01_MAX_LEN 7
+
+struct qmi_wlanfw_wlan_ini_req_msg_v01 {
+ /* Must be set to true if enable_fwlog is being passed */
+ u8 enable_fwlog_valid;
+ u8 enable_fwlog;
+};
+
+struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
int ath12k_qmi_firmware_start(struct ath12k_base *ab,
u32 mode);
void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index f924bc13ccff..f308e9a6ed55 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
#include "core.h"
@@ -48,7 +48,8 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath12k_wmi_init_country_arg arg;
- struct ath12k *ar = hw->priv;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah);
int ret;
ath12k_dbg(ar->ab, ATH12K_DBG_REG,
@@ -95,7 +96,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
struct ieee80211_supported_band **bands;
struct ath12k_wmi_scan_chan_list_arg *arg;
struct ieee80211_channel *channel;
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ath12k_wmi_channel_arg *ch;
enum nl80211_band band;
int num_channels = 0;
@@ -103,7 +104,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!bands[band])
+ if (!(ar->mac.sbands[band].channels && bands[band]))
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
@@ -129,7 +130,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
ch = arg->channel;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!bands[band])
+ if (!(ar->mac.sbands[band].channels && bands[band]))
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
@@ -199,7 +200,7 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
int ath12k_regd_update(struct ath12k *ar, bool init)
{
- struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h
index 55f20c446ca9..a0db6702a189 100644
--- a/drivers/net/wireless/ath/ath12k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath12k/rx_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_RX_DESC_H
#define ATH12K_RX_DESC_H
@@ -147,6 +147,61 @@ struct rx_mpdu_start_qcn9274 {
__le32 res1;
} __packed;
+#define QCN9274_MPDU_START_SELECT_MPDU_START_TAG BIT(0)
+#define QCN9274_MPDU_START_SELECT_INFO0_REO_QUEUE_DESC_LO BIT(1)
+#define QCN9274_MPDU_START_SELECT_INFO1_PN_31_0 BIT(2)
+#define QCN9274_MPDU_START_SELECT_PN_95_32 BIT(3)
+#define QCN9274_MPDU_START_SELECT_PN_127_96_INFO2 BIT(4)
+#define QCN9274_MPDU_START_SELECT_PEER_MDATA_INFO3_PHY_PPDU_ID BIT(5)
+#define QCN9274_MPDU_START_SELECT_AST_IDX_SW_PEER_ID_INFO4 BIT(6)
+#define QCN9274_MPDU_START_SELECT_INFO5_INFO6 BIT(7)
+#define QCN9274_MPDU_START_SELECT_FRAME_CTRL_DURATION_ADDR1_31_0 BIT(8)
+#define QCN9274_MPDU_START_SELECT_ADDR2_47_0_ADDR1_47_32 BIT(9)
+#define QCN9274_MPDU_START_SELECT_ADDR3_47_0_SEQ_CTRL BIT(10)
+#define QCN9274_MPDU_START_SELECT_ADDR4_47_0_QOS_CTRL BIT(11)
+#define QCN9274_MPDU_START_SELECT_HT_CTRL_INFO7 BIT(12)
+#define QCN9274_MPDU_START_SELECT_ML_ADDR1_47_0_ML_ADDR2_15_0 BIT(13)
+#define QCN9274_MPDU_START_SELECT_ML_ADDR2_47_16_INFO8 BIT(14)
+#define QCN9274_MPDU_START_SELECT_RES_0_RES_1 BIT(15)
+
+#define QCN9274_MPDU_START_WMASK (QCN9274_MPDU_START_SELECT_INFO1_PN_31_0 | \
+ QCN9274_MPDU_START_SELECT_PN_95_32 | \
+ QCN9274_MPDU_START_SELECT_PN_127_96_INFO2 | \
+ QCN9274_MPDU_START_SELECT_PEER_MDATA_INFO3_PHY_PPDU_ID | \
+ QCN9274_MPDU_START_SELECT_AST_IDX_SW_PEER_ID_INFO4 | \
+ QCN9274_MPDU_START_SELECT_INFO5_INFO6 | \
+ QCN9274_MPDU_START_SELECT_FRAME_CTRL_DURATION_ADDR1_31_0 | \
+ QCN9274_MPDU_START_SELECT_ADDR2_47_0_ADDR1_47_32 | \
+ QCN9274_MPDU_START_SELECT_ADDR3_47_0_SEQ_CTRL | \
+ QCN9274_MPDU_START_SELECT_ADDR4_47_0_QOS_CTRL)
+
+/* The rx_mpdu_start_qcn9274_compact structure below is tied to the mask
+ * value QCN9274_MPDU_START_WMASK; if the mask value changes, the structure
+ * must change as well.
+ */
+
+struct rx_mpdu_start_qcn9274_compact {
+ __le32 info1;
+ __le32 pn[4];
+ __le32 info2;
+ __le32 peer_meta_data;
+ __le16 info3;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+} __packed;
+
/* rx_mpdu_start
*
* reo_destination_indication
@@ -608,6 +663,8 @@ enum rx_msdu_start_reception_type {
RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
};
+#define RX_MSDU_END_64_TLV_SRC_LINK_ID GENMASK(24, 22)
+
#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
@@ -786,6 +843,52 @@ struct rx_msdu_end_qcn9274 {
__le32 info14;
} __packed;
+#define QCN9274_MSDU_END_SELECT_MSDU_END_TAG BIT(0)
+#define QCN9274_MSDU_END_SELECT_INFO0_PHY_PPDUID_IP_HDR_CSUM_INFO1 BIT(1)
+#define QCN9274_MSDU_END_SELECT_INFO2_CUMULATIVE_CSUM_RULE_IND_0 BIT(2)
+#define QCN9274_MSDU_END_SELECT_IPV6_OP_CRC_INFO3_TYPE13 BIT(3)
+#define QCN9274_MSDU_END_SELECT_RULE_IND_1_TCP_SEQ_NUM BIT(4)
+#define QCN9274_MSDU_END_SELECT_TCP_ACK_NUM_INFO4_WINDOW_SIZE BIT(5)
+#define QCN9274_MSDU_END_SELECT_SA_SW_PER_ID_INFO5_SA_DA_ID BIT(6)
+#define QCN9274_MSDU_END_SELECT_INFO6_FSE_METADATA BIT(7)
+#define QCN9274_MSDU_END_SELECT_CCE_MDATA_TCP_UDP_CSUM_INFO7_IP_LEN BIT(8)
+#define QCN9274_MSDU_END_SELECT_INFO8_INFO9 BIT(9)
+#define QCN9274_MSDU_END_SELECT_INFO10_INFO11 BIT(10)
+#define QCN9274_MSDU_END_SELECT_VLAN_CTAG_STAG_CI_PEER_MDATA BIT(11)
+#define QCN9274_MSDU_END_SELECT_INFO12_AND_FLOW_ID_TOEPLITZ BIT(12)
+#define QCN9274_MSDU_END_SELECT_PPDU_START_TS_63_32_PHY_MDATA BIT(13)
+#define QCN9274_MSDU_END_SELECT_PPDU_START_TS_31_0_TOEPLITZ_HASH_2_4 BIT(14)
+#define QCN9274_MSDU_END_SELECT_RES0_SA_47_0 BIT(15)
+#define QCN9274_MSDU_END_SELECT_INFO13_INFO14 BIT(16)
+
+#define QCN9274_MSDU_END_WMASK (QCN9274_MSDU_END_SELECT_MSDU_END_TAG | \
+ QCN9274_MSDU_END_SELECT_SA_SW_PER_ID_INFO5_SA_DA_ID | \
+ QCN9274_MSDU_END_SELECT_INFO10_INFO11 | \
+ QCN9274_MSDU_END_SELECT_INFO12_AND_FLOW_ID_TOEPLITZ | \
+ QCN9274_MSDU_END_SELECT_PPDU_START_TS_63_32_PHY_MDATA | \
+ QCN9274_MSDU_END_SELECT_INFO13_INFO14)
+
+/* The rx_msdu_end_qcn9274_compact structure below is tied to the mask value
+ * QCN9274_MSDU_END_WMASK; if the mask value changes, the structure must
+ * change as well.
+ */
+
+struct rx_msdu_end_qcn9274_compact {
+ __le64 msdu_end_tag;
+ __le16 sa_sw_peer_id;
+ __le16 info5;
+ __le16 sa_idx;
+ __le16 da_idx_or_sw_peer_id;
+ __le32 info10;
+ __le32 info11;
+ __le32 info12;
+ __le32 flow_id_toeplitz;
+ __le32 ppdu_start_timestamp_63_32;
+ __le32 phy_meta_data;
+ __le32 info13;
+ __le32 info14;
+} __packed;
+
/* These macro definitions are only used for WCN7850 */
#define RX_MSDU_END_WCN7850_INFO2_KEY_ID GENMASK(7, 0)
@@ -1450,16 +1553,18 @@ struct rx_msdu_end_wcn7850 {
*
*/
-/* TODO: Move to compact TLV approach
- * By default these tlv's are not aligned to 128b boundary
- * Need to remove unused qwords and make them compact/aligned
- */
struct hal_rx_desc_qcn9274 {
struct rx_msdu_end_qcn9274 msdu_end;
struct rx_mpdu_start_qcn9274 mpdu_start;
u8 msdu_payload[];
} __packed;
+struct hal_rx_desc_qcn9274_compact {
+ struct rx_msdu_end_qcn9274_compact msdu_end;
+ struct rx_mpdu_start_qcn9274_compact mpdu_start;
+ u8 msdu_payload[];
+} __packed;
+
#define RX_BE_PADDING0_BYTES 8
#define RX_BE_PADDING1_BYTES 8
@@ -1484,6 +1589,7 @@ struct hal_rx_desc_wcn7850 {
struct hal_rx_desc {
union {
struct hal_rx_desc_qcn9274 qcn9274;
+ struct hal_rx_desc_qcn9274_compact qcn9274_compact;
struct hal_rx_desc_wcn7850 wcn7850;
} u;
} __packed;
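[editor's sketch] The *_WMASK values above act as word-select masks: each bit subscribes a group of descriptor words, and the _compact structures keep only the subscribed groups in their original order. A standalone size check of the compact rx_mpdu_start layout (assumes packed fields, a GCC/Clang packed attribute, and C11 static_assert):

#include <assert.h>
#include <stdint.h>

/* local mirror of rx_mpdu_start_qcn9274_compact, for a size check only */
struct mpdu_start_compact {
	uint32_t info1;
	uint32_t pn[4];
	uint32_t info2;
	uint32_t peer_meta_data;
	uint16_t info3, phy_ppdu_id;
	uint16_t ast_index, sw_peer_id;
	uint32_t info4, info5, info6;
	uint16_t frame_ctrl, duration;
	uint8_t addr1[6], addr2[6], addr3[6];
	uint16_t seq_ctrl;
	uint8_t addr4[6];
	uint16_t qos_ctrl;
} __attribute__((packed));

static_assert(sizeof(struct mpdu_start_compact) == 80,
	      "compact mpdu_start should pack to 80 bytes");

int main(void) { return 0; }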
diff --git a/drivers/net/wireless/ath/ath12k/trace.h b/drivers/net/wireless/ath/ath12k/trace.h
index f72096684b74..240737e1542d 100644
--- a/drivers/net/wireless/ath/ath12k/trace.h
+++ b/drivers/net/wireless/ath/ath12k/trace.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
@@ -140,6 +140,33 @@ TRACE_EVENT(ath12k_htt_rxdesc,
)
);
+TRACE_EVENT(ath12k_wmi_diag,
+ TP_PROTO(struct ath12k_base *ab, const void *data, size_t len),
+
+ TP_ARGS(ab, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s tlv diag len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
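[editor's sketch] TRACE_EVENT(ath12k_wmi_diag, ...) above generates a trace_ath12k_wmi_diag() call site; a WMI diag event handler would feed it the raw TLV buffer as below. The surrounding function is hypothetical, shown only to illustrate the generated API:

static void ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
{
	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
}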
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 11cc3005c0f9..9d69a1769926 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -19,6 +19,7 @@
#include "mac.h"
#include "hw.h"
#include "peer.h"
+#include "p2p.h"
struct ath12k_wmi_svc_ready_parse {
bool wmi_svc_bitmap_done;
@@ -162,6 +163,14 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+ [WMI_TAG_TWT_ENABLE_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_enable_event) },
+ [WMI_TAG_TWT_DISABLE_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_disable_event) },
+ [WMI_TAG_P2P_NOA_INFO] = {
+ .min_len = sizeof(struct ath12k_wmi_p2p_noa_info) },
+ [WMI_TAG_P2P_NOA_EVENT] = {
+ .min_len = sizeof(struct wmi_p2p_noa_event) },
};
static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
@@ -179,18 +188,9 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config)
{
config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
-
- if (ab->num_radios == 2) {
- config->num_peers = TARGET_NUM_PEERS(DBS);
- config->num_tids = TARGET_NUM_TIDS(DBS);
- } else if (ab->num_radios == 3) {
- config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
- config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
- } else {
- /* Control should not reach here */
- config->num_peers = TARGET_NUM_PEERS(SINGLE);
- config->num_tids = TARGET_NUM_TIDS(SINGLE);
- }
+ config->num_peers = ab->num_radios *
+ ath12k_core_get_max_peers_per_radio(ab);
+ config->num_tids = ath12k_core_get_max_num_tids(ab);
config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
config->num_peer_keys = TARGET_NUM_PEER_KEYS;
@@ -228,6 +228,9 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
config->peer_map_unmap_version = 0x32;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
+ config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B;
}
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -359,8 +362,8 @@ static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
}
static const void **
-ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
- size_t len, gfp_t gfp)
+ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab,
+ struct sk_buff *skb, gfp_t gfp)
{
const void **tb;
int ret;
@@ -369,7 +372,7 @@ ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
if (!tb)
return ERR_PTR(-ENOMEM);
- ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len);
+ ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
if (ret) {
kfree(tb);
return ERR_PTR(ret);
@@ -493,13 +496,13 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
mac_caps = wmi_mac_phy_caps + phy_idx;
- pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+ pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
fw_pdev = &ab->fw_pdev[ab->fw_pdev_count];
fw_pdev->supported_bands = le32_to_cpu(mac_caps->supported_bands);
- fw_pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+ fw_pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
fw_pdev->phy_id = le32_to_cpu(mac_caps->phy_id);
ab->fw_pdev_count++;
@@ -727,6 +730,20 @@ static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *sk
return 0;
}
+static u32 ath12k_wmi_mgmt_get_freq(struct ath12k *ar,
+ struct ieee80211_tx_info *info)
+{
+ struct ath12k_base *ab = ar->ab;
+ u32 freq = 0;
+
+ if (ab->hw_params->single_pdev_only &&
+ ar->scan.is_roc &&
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ return freq;
+}
+
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_ab, u32 len)
{
struct sk_buff *skb;
@@ -752,6 +769,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_mgmt_send_cmd *cmd;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
struct wmi_tlv *frame_tlv;
struct sk_buff *skb;
u32 buf_len;
@@ -770,7 +788,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
sizeof(*cmd));
cmd->vdev_id = cpu_to_le32(vdev_id);
cmd->desc_id = cpu_to_le32(buf_id);
- cmd->chanfreq = 0;
+ cmd->chanfreq = cpu_to_le32(ath12k_wmi_mgmt_get_freq(ar, info));
cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
cmd->frame_len = cpu_to_le32(frame->len);
@@ -826,6 +844,9 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+ if (args->if_stats_id != ATH12K_INVAL_VDEV_STATS_ID)
+ cmd->vdev_stats_id_valid = cpu_to_le32(BIT(0));
+
ptr = skb->data + sizeof(*cmd);
len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
@@ -1024,6 +1045,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
cmd->regdomain = cpu_to_le32(arg->regdomain);
cmd->he_ops = cpu_to_le32(arg->he_ops);
cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
+ cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
if (!restart) {
if (arg->ssid) {
@@ -1051,7 +1073,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
/* Note: This is a nested TLV containing:
- * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ * [wmi_tlv][ath12k_wmi_p2p_noa_descriptor][wmi_tlv]..
*/
ptr += sizeof(*tlv);
@@ -1710,6 +1732,48 @@ int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
return ret;
}
+int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
+ const u8 *p2p_ie)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
+ size_t p2p_ie_len, aligned_len;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+
+ p2p_ie_len = p2p_ie[1] + 2;
+ aligned_len = roundup(p2p_ie_len, sizeof(u32));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_P2P_GO_SET_BEACON_IE,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->ie_buf_len = cpu_to_le32(p2p_ie_len);
+
+ ptr += sizeof(*cmd);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
+ aligned_len);
+ memcpy(tlv->value, p2p_ie, p2p_ie_len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn)
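[editor's sketch] In ath12k_wmi_p2p_go_bcn_ie() above, p2p_ie[1] is the element's payload length, so the whole element spans p2p_ie[1] + 2 bytes and the TLV payload is rounded up to a 4-byte boundary. A standalone sketch of that length math, using a hypothetical P2P vendor IE:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical P2P vendor IE: EID 0xdd, 9 payload bytes (WFA OUI,
	 * OUI type, then dummy attribute bytes) */
	uint8_t p2p_ie[] = { 0xdd, 0x09, 0x50, 0x6f, 0x9a, 0x09,
			     0x00, 0x00, 0x00, 0x00, 0x00 };
	size_t p2p_ie_len = p2p_ie[1] + 2;                  /* 11 */
	size_t aligned_len = (p2p_ie_len + 3) & ~(size_t)3; /* roundup -> 12 */

	printf("ie %zu bytes, padded tlv payload %zu bytes\n",
	       p2p_ie_len, aligned_len);
	return 0;
}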
@@ -2130,7 +2194,7 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar,
WMI_SCAN_EVENT_BSS_CHANNEL |
WMI_SCAN_EVENT_FOREIGN_CHAN |
WMI_SCAN_EVENT_DEQUEUED;
- arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->scan_f_chan_stat_evnt = 1;
arg->num_bssid = 1;
/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
@@ -3265,6 +3329,9 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
+ wmi_cfg->flags2 = le32_encode_bits(tg_cfg->dp_peer_meta_data_ver,
+ WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
+
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
}
@@ -4214,7 +4281,7 @@ ath12k_wmi_tlv_mac_phy_caps_ext_parse(struct ath12k_base *ab,
for (i = 0; i < ab->fw_pdev_count; i++) {
struct ath12k_fw_pdev *fw_pdev = &ab->fw_pdev[i];
- if (fw_pdev->pdev_id == le32_to_cpu(caps->pdev_id) &&
+ if (fw_pdev->pdev_id == ath12k_wmi_caps_ext_get_pdev_id(caps) &&
fw_pdev->phy_id == le32_to_cpu(caps->phy_id)) {
bands = fw_pdev->supported_bands;
break;
@@ -4271,7 +4338,8 @@ static int ath12k_wmi_tlv_mac_phy_caps_ext(struct ath12k_base *ab, u16 tag,
return 0;
} else {
for (i = 0; i < ab->num_radios; i++) {
- if (ab->pdevs[i].pdev_id == le32_to_cpu(caps->pdev_id))
+ if (ab->pdevs[i].pdev_id ==
+ ath12k_wmi_caps_ext_get_pdev_id(caps))
break;
}
@@ -4374,7 +4442,7 @@ static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buf
const struct wmi_vdev_start_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4452,7 +4520,7 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4738,7 +4806,7 @@ static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *
const struct wmi_peer_delete_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4770,7 +4838,7 @@ static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
const struct wmi_vdev_delete_resp_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4790,15 +4858,15 @@ static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
return 0;
}
-static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf,
- u32 len, u32 *vdev_id,
- u32 *tx_status)
+static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id, u32 *tx_status)
{
const void **tb;
const struct wmi_bcn_tx_status_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4826,7 +4894,7 @@ static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_
const struct wmi_vdev_stopped_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -4948,7 +5016,7 @@ static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status_irqsafe(ar->hw, msdu);
+ ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -4970,7 +5038,7 @@ static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
const struct wmi_mgmt_tx_compl_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5006,6 +5074,10 @@ static void ath12k_wmi_event_scan_started(struct ath12k *ar)
break;
case ATH12K_SCAN_STARTING:
ar->scan.state = ATH12K_SCAN_RUNNING;
+
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ath12k_ar_to_hw(ar));
+
complete(&ar->scan.started);
break;
}
@@ -5076,6 +5148,8 @@ static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
{
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+
lockdep_assert_held(&ar->data_lock);
switch (ar->scan.state) {
@@ -5087,7 +5161,11 @@ static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
break;
case ATH12K_SCAN_RUNNING:
case ATH12K_SCAN_ABORTING:
- ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ ar->scan_channel = ieee80211_get_channel(hw->wiphy, freq);
+
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
+
break;
}
}
@@ -5141,7 +5219,7 @@ static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_scan_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5174,7 +5252,7 @@ static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buf
const struct wmi_peer_sta_kickout_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5201,7 +5279,7 @@ static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_roam_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5226,13 +5304,14 @@ static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
static int freq_to_idx(struct ath12k *ar, int freq)
{
struct ieee80211_supported_band *sband;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
int band, ch, idx = 0;
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
if (!ar->mac.sbands[band].channels)
continue;
- sband = ar->hw->wiphy->bands[band];
+ sband = hw->wiphy->bands[band];
if (!sband)
continue;
@@ -5245,14 +5324,14 @@ exit:
return idx;
}
-static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf,
- u32 len, struct wmi_chan_info_event *ch_info_ev)
+static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_chan_info_event *ch_info_ev)
{
const void **tb;
const struct wmi_chan_info_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5291,7 +5370,7 @@ ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
const struct wmi_pdev_bss_chan_info_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5331,7 +5410,7 @@ ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *sk
const struct wmi_vdev_install_key_compl_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5362,7 +5441,7 @@ static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff
const struct wmi_peer_assoc_conf_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5384,13 +5463,13 @@ static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff
}
static int
-ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf,
- u32 len, const struct wmi_pdev_temperature_event *ev)
+ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ const struct wmi_pdev_temperature_event *ev)
{
const void **tb;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -5725,8 +5804,7 @@ static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *s
{
u32 vdev_id, tx_status;
- if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
- &vdev_id, &tx_status) != 0) {
+ if (ath12k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
ath12k_warn(ab, "failed to extract bcn tx status");
return;
}
@@ -5864,7 +5942,7 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
status->freq, status->band, status->signal,
status->rate_idx);
- ieee80211_rx_ni(ar->hw, skb);
+ ieee80211_rx_ni(ath12k_ar_to_hw(ar), skb);
exit:
rcu_read_unlock();
@@ -6037,7 +6115,7 @@ static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff
goto exit;
}
- sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
arg.mac_addr, NULL);
if (!sta) {
ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
@@ -6110,7 +6188,7 @@ static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
/* HW channel counters frequency value in hertz */
u32 cc_freq_hz = ab->cc_freq_hz;
- if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ if (ath12k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
ath12k_warn(ab, "failed to extract chan info event");
return;
}
@@ -6395,7 +6473,7 @@ static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
const struct wmi_pdev_ctl_failsafe_chk_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6446,7 +6524,7 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
}
if (arvif->is_up && arvif->vif->bss_conf.csa_active)
- ieee80211_csa_finish(arvif->vif);
+ ieee80211_csa_finish(arvif->vif, 0);
}
rcu_read_unlock();
}
@@ -6460,7 +6538,7 @@ ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
const u32 *vdev_ids;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6494,7 +6572,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
struct ath12k *ar;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6531,7 +6609,7 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
if (ar->dfs_block_radar_events)
ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
else
- ieee80211_radar_detected(ar->hw);
+ ieee80211_radar_detected(ath12k_ar_to_hw(ar));
exit:
rcu_read_unlock();
@@ -6546,7 +6624,7 @@ ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
struct ath12k *ar;
struct wmi_pdev_temperature_event ev = {0};
- if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
+ if (ath12k_pull_pdev_temp_ev(ab, skb, &ev) != 0) {
ath12k_warn(ab, "failed to extract pdev temperature event");
return;
}
@@ -6573,7 +6651,7 @@ static void ath12k_fils_discovery_event(struct ath12k_base *ab,
const struct wmi_fils_discovery_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab,
@@ -6603,7 +6681,7 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
const struct wmi_probe_resp_tx_status_event *ev;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab,
@@ -6628,6 +6706,56 @@ static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
kfree(tb);
}
+static int ath12k_wmi_p2p_noa_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_p2p_noa_event *ev;
+ const struct ath12k_wmi_p2p_noa_info *noa;
+ struct ath12k *ar;
+ int ret, vdev_id;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse P2P NoA TLV: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_P2P_NOA_EVENT];
+ noa = tb[WMI_TAG_P2P_NOA_INFO];
+
+ if (!ev || !noa) {
+ ret = -EPROTO;
+ goto out;
+ }
+
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %u\n",
+ vdev_id, le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM));
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
+ vdev_id);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ath12k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+
+ ret = 0;
+
+unlock:
+ rcu_read_unlock();
+out:
+ kfree(tb);
+ return ret;
+}
+
static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
struct sk_buff *skb)
{
@@ -6635,7 +6763,7 @@ static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
const void **tb;
int ret;
- tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
@@ -6662,6 +6790,70 @@ static void ath12k_rfkill_state_change_event(struct ath12k_base *ab,
kfree(tb);
}
+static void
+ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ trace_ath12k_wmi_diag(ab, skb->data, skb->len);
+}
+
+static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_enable_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse wmi twt enable status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_ENABLE_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch twt enable wmi event\n");
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt enable event pdev id %u status %u\n",
+ le32_to_cpu(ev->pdev_id),
+ le32_to_cpu(ev->status));
+
+exit:
+ kfree(tb);
+}
+
+static void ath12k_wmi_twt_disable_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_disable_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse wmi twt disable status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_TWT_DISABLE_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch twt disable wmi event\n");
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "wmi twt disable event pdev id %d status %u\n",
+ le32_to_cpu(ev->pdev_id),
+ le32_to_cpu(ev->status));
+
+exit:
+ kfree(tb);
+}
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -6757,11 +6949,18 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_RFKILL_STATE_CHANGE_EVENTID:
ath12k_rfkill_state_change_event(ab, skb);
break;
+ case WMI_TWT_ENABLE_EVENTID:
+ ath12k_wmi_twt_enable_event(ab, skb);
+ break;
+ case WMI_TWT_DISABLE_EVENTID:
+ ath12k_wmi_twt_disable_event(ab, skb);
+ break;
+ case WMI_P2P_NOA_EVENTID:
+ ath12k_wmi_p2p_noa_event(ab, skb);
+ break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
- case WMI_TWT_ENABLE_EVENTID:
- case WMI_TWT_DISABLE_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
ath12k_dbg(ab, ATH12K_DBG_WMI,
"ignoring unsupported event 0x%x\n", id);
@@ -6772,6 +6971,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_VDEV_DELETE_RESP_EVENTID:
ath12k_vdev_delete_resp_event(ab, skb);
break;
+ case WMI_DIAG_EVENTID:
+ ath12k_wmi_diag_event(ab, skb);
+ break;
/* TODO: Add remaining events */
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 06e5b9b4049b..103462feb935 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_WMI_H
@@ -168,10 +168,6 @@ struct wmi_tlv {
#define WLAN_SCAN_MAX_HINT_BSSID 10
#define MAX_RNR_BSS 5
-#define WLAN_SCAN_PARAMS_MAX_SSID 16
-#define WLAN_SCAN_PARAMS_MAX_BSSID 4
-#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
-
#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
#define WMI_BA_MODE_BUFFER_SIZE_256 3
@@ -2163,6 +2159,10 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_11BE = 289,
+ WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS = 361,
+
+ WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT = 365,
+
WMI_MAX_EXT2_SERVICE,
};
@@ -2350,6 +2350,7 @@ struct ath12k_wmi_resource_config_arg {
u32 twt_ap_pdev_count;
u32 twt_ap_sta_count;
bool is_reg_cc_ext_event_supported;
+ u8 dp_peer_meta_data_ver;
};
struct ath12k_wmi_init_cmd_arg {
@@ -2402,6 +2403,7 @@ struct wmi_init_cmd {
} __packed;
#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
+#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
struct ath12k_wmi_resource_config_params {
__le32 tlv_header;
@@ -2542,9 +2544,17 @@ struct ath12k_wmi_hw_mode_cap_params {
#define WMI_MAX_HECAP_PHY_SIZE (3)
+/* pdev_id is present in lower 16 bits of pdev_and_hw_link_ids in
+ * ath12k_wmi_mac_phy_caps_params & ath12k_wmi_caps_ext_params.
+ *
+ * hw_link_id is present in higher 16 bits of pdev_and_hw_link_ids.
+ */
+#define WMI_CAPS_PARAMS_PDEV_ID GENMASK(15, 0)
+#define WMI_CAPS_PARAMS_HW_LINK_ID GENMASK(31, 16)
+
struct ath12k_wmi_mac_phy_caps_params {
__le32 hw_mode_id;
- __le32 pdev_id;
+ __le32 pdev_and_hw_link_ids;
__le32 phy_id;
__le32 supported_flags;
__le32 supported_bands;
@@ -2636,13 +2646,7 @@ struct wmi_service_ready_ext2_event {
struct ath12k_wmi_caps_ext_params {
__le32 hw_mode_id;
- union {
- struct {
- __le16 pdev_id;
- __le16 hw_link_id;
- } __packed ath12k_wmi_pdev_to_link_map;
- __le32 pdev_id;
- };
+ __le32 pdev_and_hw_link_ids;
__le32 phy_id;
__le32 wireless_modes_ext;
__le32 eht_cap_mac_info_2ghz[WMI_MAX_EHTCAP_MAC_SIZE];
@@ -2716,6 +2720,9 @@ struct wmi_vdev_create_cmd {
struct ath12k_wmi_mac_addr_params vdev_macaddr;
__le32 num_cfg_txrx_streams;
__le32 pdev_id;
+ __le32 mbssid_flags;
+ __le32 mbssid_tx_vdev_id;
+ __le32 vdev_stats_id_valid;
__le32 vdev_stats_id;
} __packed;
@@ -2764,6 +2771,10 @@ struct ath12k_wmi_ssid_params {
#define ATH12K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
+enum wmi_vdev_mbssid_flags {
+ WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP = BIT(0),
+};
+
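The new mbssid_flags field in wmi_vdev_create_cmd and wmi_vdev_start_request_cmd carries this enum; a hedged usage sketch for a plain, non-MBSSID AP (caller state such as arvif is hypothetical):

	struct wmi_vdev_start_req_arg arg = {};

	arg.vdev_id = arvif->vdev_id;	/* arvif: hypothetical caller state */
	arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
	ret = ath12k_wmi_vdev_start(ar, &arg, false);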
struct wmi_vdev_start_request_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -2782,7 +2793,7 @@ struct wmi_vdev_start_request_cmd {
__le32 cac_duration_ms;
__le32 regdomain;
__le32 min_data_rate;
- __le32 mbssid_flags;
+ __le32 mbssid_flags; /* uses enum wmi_vdev_mbssid_flags */
__le32 mbssid_tx_vdev_id;
__le32 eht_ops;
__le32 punct_bitmap;
@@ -3146,7 +3157,7 @@ struct ath12k_wmi_element_info_arg {
#define WLAN_SCAN_PARAMS_MAX_SSID 16
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
-#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 512
/* Values lower than this may be refused by some firmware revisions with a scan
* completion with a timedout reason.
@@ -3270,24 +3281,19 @@ struct ath12k_wmi_scan_req_arg {
u32 vdev_id;
u32 pdev_id;
enum wmi_scan_priority scan_priority;
- union {
- struct {
- u32 scan_ev_started:1,
- scan_ev_completed:1,
- scan_ev_bss_chan:1,
- scan_ev_foreign_chan:1,
- scan_ev_dequeued:1,
- scan_ev_preempted:1,
- scan_ev_start_failed:1,
- scan_ev_restarted:1,
- scan_ev_foreign_chn_exit:1,
- scan_ev_invalid:1,
- scan_ev_gpio_timeout:1,
- scan_ev_suspended:1,
- scan_ev_resumed:1;
- };
- u32 scan_events;
- };
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
u32 dwell_time_active;
u32 dwell_time_active_2g;
u32 dwell_time_passive;
@@ -3300,36 +3306,31 @@ struct ath12k_wmi_scan_req_arg {
u32 idle_time;
u32 max_scan_time;
u32 probe_delay;
- union {
- struct {
- u32 scan_f_passive:1,
- scan_f_bcast_probe:1,
- scan_f_cck_rates:1,
- scan_f_ofdm_rates:1,
- scan_f_chan_stat_evnt:1,
- scan_f_filter_prb_req:1,
- scan_f_bypass_dfs_chn:1,
- scan_f_continue_on_err:1,
- scan_f_offchan_mgmt_tx:1,
- scan_f_offchan_data_tx:1,
- scan_f_promisc_mode:1,
- scan_f_capture_phy_err:1,
- scan_f_strict_passive_pch:1,
- scan_f_half_rate:1,
- scan_f_quarter_rate:1,
- scan_f_force_active_dfs_chn:1,
- scan_f_add_tpc_ie_in_probe:1,
- scan_f_add_ds_ie_in_probe:1,
- scan_f_add_spoofed_mac_in_probe:1,
- scan_f_add_rand_seq_in_probe:1,
- scan_f_en_ie_whitelist_in_probe:1,
- scan_f_forced:1,
- scan_f_2ghz:1,
- scan_f_5ghz:1,
- scan_f_80mhz:1;
- };
- u32 scan_flags;
- };
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
u32 burst_duration;
u32 num_chan;
@@ -3489,6 +3490,37 @@ struct wmi_get_pdev_temperature_cmd {
__le32 pdev_id;
} __packed;
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+
+struct wmi_p2p_noa_event {
+ __le32 vdev_id;
+} __packed;
+
+struct ath12k_wmi_p2p_noa_descriptor {
+ __le32 type_count; /* 255: continuous schedule, 0: reserved */
+	__le32 duration; /* Absent period duration in microseconds */
+	__le32 interval; /* Absent period interval in microseconds */
+	__le32 start_time; /* 32-bit TSF time at which it starts */
+} __packed;
+
+#define WMI_P2P_NOA_INFO_CHANGED_FLAG BIT(0)
+#define WMI_P2P_NOA_INFO_INDEX GENMASK(15, 8)
+#define WMI_P2P_NOA_INFO_OPP_PS BIT(16)
+#define WMI_P2P_NOA_INFO_CTWIN_TU GENMASK(23, 17)
+#define WMI_P2P_NOA_INFO_DESC_NUM GENMASK(31, 24)
+
+struct ath12k_wmi_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ * Bits 15-8 - Index (identifies the instance of NOA sub element)
+ * Bit 16 - Opp PS state of the AP
+ * Bits 23-17 - Ctwindow in TUs
+ * Bits 31-24 - Number of NOA descriptors
+ */
+ __le32 noa_attr;
+ struct ath12k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
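The bit layout documented in the comment maps one-to-one onto the WMI_P2P_NOA_INFO_* masks, so fields unpack directly with le32_get_bits() from <linux/bitfield.h>. A minimal decode sketch (helper names are illustrative, not part of the patch):

static inline u8 example_noa_desc_num(const struct ath12k_wmi_p2p_noa_info *noa)
{
	return le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM);
}

static inline bool example_noa_changed(const struct ath12k_wmi_p2p_noa_info *noa)
{
	return le32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CHANGED_FLAG);
}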
#define WMI_BEACON_TX_BUFFER_SIZE 512
struct wmi_bcn_tmpl_cmd {
@@ -3503,6 +3535,12 @@ struct wmi_bcn_tmpl_cmd {
__le32 esp_ie_offset;
} __packed;
+struct wmi_p2p_go_set_beacon_ie_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 ie_buf_len;
+} __packed;
+
struct wmi_vdev_install_key_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -4797,6 +4835,16 @@ struct wmi_rfkill_state_change_event {
__le32 radio_state;
} __packed;
+struct wmi_twt_enable_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
+struct wmi_twt_disable_event {
+ __le32 pdev_id;
+ __le32 status;
+} __packed;
+
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -4806,6 +4854,8 @@ int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len);
int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame);
+int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
+ const u8 *p2p_ie);
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn);
@@ -4917,4 +4967,30 @@ int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
enum wmi_host_hw_mode_config_type mode);
+static inline u32
+ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids, WMI_CAPS_PARAMS_PDEV_ID);
+}
+
+static inline u32
+ath12k_wmi_caps_ext_get_hw_link_id(const struct ath12k_wmi_caps_ext_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids, WMI_CAPS_PARAMS_HW_LINK_ID);
+}
+
+static inline u32
+ath12k_wmi_mac_phy_get_pdev_id(const struct ath12k_wmi_mac_phy_caps_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids,
+ WMI_CAPS_PARAMS_PDEV_ID);
+}
+
+static inline u32
+ath12k_wmi_mac_phy_get_hw_link_id(const struct ath12k_wmi_mac_phy_caps_params *param)
+{
+ return le32_get_bits(param->pdev_and_hw_link_ids,
+ WMI_CAPS_PARAMS_HW_LINK_ID);
+}
+
#endif
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index c630343ca4f9..eea4bda77608 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -779,6 +779,10 @@ static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
const struct ieee80211_ops ath5k_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = ath5k_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath5k_start,
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index e37db4af33de..61b2e3f15f0e 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1119,7 +1119,7 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
wiphy_lock(vif->ar->wiphy);
- cfg80211_ch_switch_notify(vif->ndev, &chandef, 0, 0);
+ cfg80211_ch_switch_notify(vif->ndev, &chandef, 0);
wiphy_unlock(vif->ar->wiphy);
}
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 9bfaadfa6c00..1a6697b6e3b4 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -144,7 +144,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
return ret;
}
-static int ath_ahb_remove(struct platform_device *pdev)
+static void ath_ahb_remove(struct platform_device *pdev)
{
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
@@ -155,13 +155,11 @@ static int ath_ahb_remove(struct platform_device *pdev)
free_irq(sc->irq, sc);
ieee80211_free_hw(sc->hw);
}
-
- return 0;
}
static struct platform_driver ath_ahb_driver = {
.probe = ath_ahb_probe,
- .remove = ath_ahb_remove,
+ .remove_new = ath_ahb_remove,
.driver = {
.name = "ath9k",
},
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index 988222cea9df..acc84e6711b0 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -643,7 +643,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
} else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
+ antcomb->rssi_lna2) {
/* set to A-B */
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 57e2b4c89125..ad72a30b67c3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -851,8 +851,6 @@
#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN 0x0000000e
#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN_S 1
-#define AR_PHY_POWER_TX_RATE1 0x9934
-#define AR_PHY_POWER_TX_RATE2 0x9938
#define AR_PHY_POWER_TX_RATE_MAX 0x993c
#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
#define PHY_AGC_CLR 0x10000000
@@ -1041,13 +1039,6 @@
#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
-/*
- * AGC 3 Register Map
- */
-#define AR_AGC3_BASE 0xce00
-
-#define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180)
-
/* GLB Registers */
#define AR_GLB_BASE 0x20000
#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index ee72faac2f1d..b399a7926ef5 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -365,10 +365,10 @@ bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
if (!vif || !vif->bss_conf.csa_active)
return false;
- if (!ieee80211_beacon_cntdwn_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0))
return false;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
return true;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 237f4ec2cffd..6c33e898b300 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -306,7 +306,6 @@ struct ath9k_htc_tx {
DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
struct timer_list cleanup_timer;
spinlock_t tx_lock;
- bool initialized;
};
struct ath9k_htc_tx_ctl {
@@ -515,6 +514,7 @@ struct ath9k_htc_priv {
unsigned long ps_usecount;
bool ps_enabled;
bool ps_idle;
+ bool initialized;
#ifdef CONFIG_MAC80211_LEDS
enum led_brightness brightness;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 533471e69400..547634f82183 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -514,10 +514,10 @@ bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv)
if (!vif || !vif->bss_conf.csa_active)
return false;
- if (!ieee80211_beacon_cntdwn_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0))
return false;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
priv->csa_vif = NULL;
return true;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 0aa5bdeb44a1..3633f9eb2c55 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -966,6 +966,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
htc_handle->drv_priv = priv;
+ /* Allow ath9k_wmi_event_tasklet() to operate. */
+ smp_wmb();
+ priv->initialized = true;
+
return 0;
err_init:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a9b5212051a..b389e19381c4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1868,6 +1868,10 @@ static void ath9k_htc_channel_switch_beacon(struct ieee80211_hw *hw,
}
struct ieee80211_ops ath9k_htc_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = ath9k_htc_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath9k_htc_start,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index efcaeccb055a..ce9c04e418b8 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -815,10 +815,6 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv)
skb_queue_head_init(&priv->tx.data_vo_queue);
skb_queue_head_init(&priv->tx.tx_failed);
- /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */
- smp_wmb();
- priv->tx.initialized = true;
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c48ff0ffbfef..a2943aaecb20 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2786,6 +2786,10 @@ static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
struct ieee80211_ops ath9k_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = ath9k_tx,
.start = ath9k_start,
.stop = ath9k_stop,
diff --git a/drivers/net/wireless/ath/ath9k/reg_aic.h b/drivers/net/wireless/ath/ath9k/reg_aic.h
index 955147ab48a2..f50994910eae 100644
--- a/drivers/net/wireless/ath/ath9k/reg_aic.h
+++ b/drivers/net/wireless/ath/ath9k/reg_aic.h
@@ -17,10 +17,6 @@
#ifndef REG_AIC_H
#define REG_AIC_H
-#define AR_SM_BASE 0xa200
-#define AR_SM1_BASE 0xb200
-#define AR_AGC_BASE 0x9e00
-
#define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0)
#define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4)
#define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8)
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 1476b42b52a9..805ad31edba2 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -155,6 +155,12 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
}
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ /* Check if ath9k_htc_probe_device() completed. */
+ if (!data_race(priv->initialized)) {
+ kfree_skb(skb);
+ continue;
+ }
+
hdr = (struct wmi_cmd_hdr *) skb->data;
cmd_id = be16_to_cpu(hdr->command_id);
wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
@@ -169,10 +175,6 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
&wmi->drv_priv->fatal_work);
break;
case WMI_TXSTATUS_EVENTID:
- /* Check if ath9k_tx_init() completed. */
- if (!data_race(priv->tx.initialized))
- break;
-
spin_lock_bh(&priv->tx.tx_lock);
if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
spin_unlock_bh(&priv->tx.tx_lock);
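Taken together with the htc_drv_init.c hunk above, the change moves the ready flag from priv->tx to the whole device: the probe path fully initializes the private data, orders those writes with smp_wmb(), then publishes the flag, and the event tasklet now drops any event that arrives before publication. A hedged sketch of the pattern as this patch applies it (type and field names hypothetical):

struct example_priv {
	int state;		/* written before 'initialized' is published */
	bool initialized;
};

static void example_probe(struct example_priv *p)
{
	p->state = 1;
	smp_wmb();		/* order setup writes before the flag store */
	p->initialized = true;
}

static bool example_event(struct example_priv *p)
{
	if (!data_race(p->initialized))
		return false;	/* not published yet: drop the event */
	return p->state == 1;	/* safe to use the private data now */
}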
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f15684379b03..d519b676a109 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -369,12 +369,11 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
struct list_head bf_head;
struct ath_tx_status ts;
struct ath_frame_info *fi;
- int ret;
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
- while ((ret = ath_tid_dequeue(tid, &skb)) == 0) {
+ while (ath_tid_dequeue(tid, &skb) == 0) {
fi = get_frame_info(skb);
bf = fi->bf;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 524327d24964..7e7797bf44b7 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1712,6 +1712,10 @@ static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops carl9170_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = carl9170_op_start,
.stop = carl9170_op_stop,
.tx = carl9170_op_tx,
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6bb9aa2bfe65..e902ca80eba7 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -189,7 +189,7 @@ static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
- struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct _carl9170_tx_superframe *super;
unsigned int chunks;
int cookie = -1;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 41119fb177e3..bfbd3c7a70b3 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -1347,6 +1347,10 @@ static void wcn36xx_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif
}
static const struct ieee80211_ops wcn36xx_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = wcn36xx_start,
.stop = wcn36xx_stop,
.add_interface = wcn36xx_add_interface,
@@ -1685,6 +1689,7 @@ static struct platform_driver wcn36xx_driver = {
module_platform_driver(wcn36xx_driver);
+MODULE_DESCRIPTION("Qualcomm Atheros WCN3660/3680 wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
MODULE_FIRMWARE(WLAN_NV_FILE);
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 447b51cff8f9..0b55a272bfd6 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2178,6 +2178,10 @@ static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
static const struct ieee80211_ops at76_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = at76_mac80211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.add_interface = at76_add_interface,
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index 67b4bac048e5..c0d8fc0b22fb 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -1082,6 +1082,22 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
return dev->__using_pio_transfers;
}
+static inline void b43_wake_queue(struct b43_wldev *dev, int queue_prio)
+{
+ if (dev->qos_enabled)
+ ieee80211_wake_queue(dev->wl->hw, queue_prio);
+ else
+ ieee80211_wake_queue(dev->wl->hw, 0);
+}
+
+static inline void b43_stop_queue(struct b43_wldev *dev, int queue_prio)
+{
+ if (dev->qos_enabled)
+ ieee80211_stop_queue(dev->wl->hw, queue_prio);
+ else
+ ieee80211_stop_queue(dev->wl->hw, 0);
+}
+
/* Message printing */
__printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...);
__printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...);
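These wrappers exist because, with QoS disabled, the device registers only a single mac80211 queue (see the main.c hunk below forcing wl->hw->queues to 1), so any skb queue mapping above 0 would name a queue that does not exist. The duplicated branch could equally be folded into one mapping expression; an illustrative refactor, not in the patch:

static inline void b43_wake_queue_alt(struct b43_wldev *dev, int queue_prio)
{
	/* with QoS off only queue 0 is registered, so clamp to it */
	ieee80211_wake_queue(dev->wl->hw, dev->qos_enabled ? queue_prio : 0);
}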
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 760d1a28edc6..6ac7dcebfff9 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1399,7 +1399,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
should_inject_overflow(ring)) {
/* This TX ring is full. */
unsigned int skb_mapping = skb_get_queue_mapping(skb);
- ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+ b43_stop_queue(dev, skb_mapping);
dev->wl->tx_queue_stopped[skb_mapping] = true;
ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
@@ -1570,7 +1570,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
} else {
/* If the driver queue is running wake the corresponding
* mac80211 queue. */
- ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
+ b43_wake_queue(dev, ring->queue_prio);
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
}
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 92ca0b2ca286..badb2f494035 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -2587,7 +2587,8 @@ static void b43_request_firmware(struct work_struct *work)
start_ieee80211:
wl->hw->queues = B43_QOS_QUEUE_NUM;
- if (!modparam_qos || dev->fw.opensource)
+ if (!modparam_qos || dev->fw.opensource ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM4331)
wl->hw->queues = 1;
err = ieee80211_register_hw(wl->hw);
@@ -3603,7 +3604,7 @@ static void b43_tx_work(struct work_struct *work)
err = b43_dma_tx(dev, skb);
if (err == -ENOSPC) {
wl->tx_queue_stopped[queue_num] = true;
- ieee80211_stop_queue(wl->hw, queue_num);
+ b43_stop_queue(dev, queue_num);
skb_queue_head(&wl->tx_queue[queue_num], skb);
break;
}
@@ -3627,6 +3628,7 @@ static void b43_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
+ u16 skb_queue_mapping;
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
@@ -3635,12 +3637,12 @@ static void b43_op_tx(struct ieee80211_hw *hw,
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
- skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
- if (!wl->tx_queue_stopped[skb->queue_mapping]) {
+ skb_queue_mapping = skb_get_queue_mapping(skb);
+ skb_queue_tail(&wl->tx_queue[skb_queue_mapping], skb);
+ if (!wl->tx_queue_stopped[skb_queue_mapping])
ieee80211_queue_work(wl->hw, &wl->tx_work);
- } else {
- ieee80211_stop_queue(wl->hw, skb->queue_mapping);
- }
+ else
+ b43_stop_queue(wl->current_dev, skb_queue_mapping);
}
static void b43_qos_params_upload(struct b43_wldev *dev,
@@ -5170,6 +5172,10 @@ static int b43_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static const struct ieee80211_ops b43_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = b43_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.conf_tx = b43_op_conf_tx,
diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c
index d050971d150a..26a226126bc4 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ht.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ht.c
@@ -322,8 +322,8 @@ static void b43_phy_ht_bphy_reset(struct b43_wldev *dev, bool reset)
B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX);
else
b43_phy_mask(dev, B43_PHY_B_BBCFG,
- (u16)~(B43_PHY_B_BBCFG_RSTCCA |
- B43_PHY_B_BBCFG_RSTRX));
+ 0xffff & ~(B43_PHY_B_BBCFG_RSTCCA |
+ B43_PHY_B_BBCFG_RSTRX));
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp);
}
@@ -551,7 +551,7 @@ static void b43_phy_ht_tx_power_ctl(struct b43_wldev *dev, bool enable)
phy_ht->tx_pwr_idx[i] =
b43_phy_read(dev, status_regs[i]);
}
- b43_phy_mask(dev, B43_PHY_HT_TXPCTL_CMD_C1, ~en_bits);
+ b43_phy_mask(dev, B43_PHY_HT_TXPCTL_CMD_C1, 0xffff & ~en_bits);
} else {
b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits);
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 2c0c019a815d..4bb005b93f2c 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -6246,7 +6246,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
/* Take BPHY out of the reset */
b43_phy_mask(dev, B43_PHY_B_BBCFG,
- (u16)~(B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX));
+ ~(B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX) & 0xffff);
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
}
@@ -6377,7 +6377,7 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
} else if (channel_type == NL80211_CHAN_HT40MINUS) {
b43_phy_mask(dev, B43_NPHY_RXCTL, ~B43_NPHY_RXCTL_BSELU20);
if (phy->rev >= 7)
- b43_phy_mask(dev, 0x310, (u16)~0x8000);
+ b43_phy_mask(dev, 0x310, 0x7fff);
}
if (phy->rev >= 19) {
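The (u16)~x to 0xffff & ~x conversions in these b43 hunks are value-preserving; ~ promotes to int with all upper bits set, and the new spelling simply makes the truncation to 16 bits explicit instead of relying on a cast. A sketch showing both forms agree:

	u16 a = (u16)~0x8000;		/* old: cast truncates 0xffff7fff */
	u16 b = 0xffff & ~0x8000;	/* new: explicit mask, same 0x7fff */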
diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
index 0cf70fdb60a6..e41f2f5b4c26 100644
--- a/drivers/net/wireless/broadcom/b43/pio.c
+++ b/drivers/net/wireless/broadcom/b43/pio.c
@@ -525,7 +525,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (total_len > (q->buffer_size - q->buffer_used)) {
/* Not enough memory on the queue. */
err = -EBUSY;
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
q->stopped = true;
goto out;
}
@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
(q->free_packet_slots == 0)) {
/* The queue is full. */
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
q->stopped = true;
}
@@ -587,7 +587,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
list_add(&pack->list, &q->packets_list);
if (q->stopped) {
- ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
+ b43_wake_queue(dev, q->queue_prio);
q->stopped = false;
}
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 760136638a95..18eb610f600a 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3531,6 +3531,10 @@ static int b43legacy_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static const struct ieee80211_ops b43legacy_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = b43legacy_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.conf_tx = b43legacy_op_conf_tx,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
index ac3a36fa3640..f471c962104a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
@@ -7,21 +7,33 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <feature.h>
#include "vops.h"
-static int brcmf_bca_attach(struct brcmf_pub *drvr)
+#define BRCMF_BCA_E_LAST 212
+
+static void brcmf_bca_feat_attach(struct brcmf_if *ifp)
{
- pr_err("%s: executing\n", __func__);
- return 0;
+	/* SAE support not confirmed, so disable it for now */
+ ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_SAE);
}
-static void brcmf_bca_detach(struct brcmf_pub *drvr)
+static int brcmf_bca_alloc_fweh_info(struct brcmf_pub *drvr)
{
- pr_err("%s: executing\n", __func__);
+ struct brcmf_fweh_info *fweh;
+
+ fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_BCA_E_LAST),
+ GFP_KERNEL);
+ if (!fweh)
+ return -ENOMEM;
+
+ fweh->num_event_codes = BRCMF_BCA_E_LAST;
+ drvr->fweh = fweh;
+ return 0;
}
const struct brcmf_fwvid_ops brcmf_bca_ops = {
- .attach = brcmf_bca_attach,
- .detach = brcmf_bca_detach,
+ .feat_attach = brcmf_bca_feat_attach,
+ .alloc_fweh_info = brcmf_bca_alloc_fweh_info,
};
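struct_size() sizes the flexible evt_handler[] array in a single overflow-checked expression. A roughly equivalent open-coded form (sketch only, minus the overflow saturation struct_size() provides):

	fweh = kzalloc(sizeof(*fweh) +
		       BRCMF_BCA_E_LAST * sizeof(fweh->evt_handler[0]),
		       GFP_KERNEL);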
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
index d55f3271d619..4f0c1e1a8e60 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c
@@ -20,6 +20,7 @@ static void __exit brcmf_bca_exit(void)
brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_BCA, THIS_MODULE);
}
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom AP chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(BRCMFMAC);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 133c5ea6429c..b99aa66dc5a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -32,6 +32,7 @@
#include "vendor.h"
#include "bus.h"
#include "common.h"
+#include "fwvid.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
@@ -1179,8 +1180,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
scan_request = cfg->scan_request;
cfg->scan_request = NULL;
- if (timer_pending(&cfg->escan_timeout))
- del_timer_sync(&cfg->escan_timeout);
+ timer_delete_sync(&cfg->escan_timeout);
if (fw_abort) {
/* Do a scan abort to stop the driver's scan engine */
@@ -1687,52 +1687,39 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
return reason;
}
-static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
+int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags)
{
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_wsec_pmk_le pmk;
int err;
+ if (key_len > sizeof(pmk.key)) {
+ bphy_err(drvr, "key must be less than %zu bytes\n",
+ sizeof(pmk.key));
+ return -EINVAL;
+ }
+
memset(&pmk, 0, sizeof(pmk));
- /* pass pmk directly */
- pmk.key_len = cpu_to_le16(pmk_len);
- pmk.flags = cpu_to_le16(0);
- memcpy(pmk.key, pmk_data, pmk_len);
+ /* pass key material directly */
+ pmk.key_len = cpu_to_le16(key_len);
+ pmk.flags = cpu_to_le16(flags);
+ memcpy(pmk.key, key, key_len);
- /* store psk in firmware */
+ /* store key material in firmware */
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
&pmk, sizeof(pmk));
if (err < 0)
bphy_err(drvr, "failed to change PSK in firmware (len=%u)\n",
- pmk_len);
+ key_len);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_set_wsec);
-static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
- u16 pwd_len)
+static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
{
- struct brcmf_pub *drvr = ifp->drvr;
- struct brcmf_wsec_sae_pwd_le sae_pwd;
- int err;
-
- if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
- bphy_err(drvr, "sae_password must be less than %d\n",
- BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
- return -EINVAL;
- }
-
- sae_pwd.key_len = cpu_to_le16(pwd_len);
- memcpy(sae_pwd.key, pwd_data, pwd_len);
-
- err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
- sizeof(sae_pwd));
- if (err < 0)
- bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
- pwd_len);
-
- return err;
+ return brcmf_set_wsec(ifp, pmk_data, pmk_len, 0);
}
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason,
@@ -2503,8 +2490,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
bphy_err(drvr, "failed to clean up user-space RSNE\n");
goto done;
}
- err = brcmf_set_sae_password(ifp, sme->crypto.sae_pwd,
- sme->crypto.sae_pwd_len);
+ err = brcmf_fwvid_set_sae_password(ifp, &sme->crypto);
if (!err && sme->crypto.psk)
err = brcmf_set_pmk(ifp, sme->crypto.psk,
BRCMF_WSEC_MAX_PSK_LEN);
@@ -3081,7 +3067,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp,
struct brcmf_scb_val_le scbval;
struct brcmf_pktcnt_le pktcnt;
s32 err;
- u32 rate;
+ u32 rate = 0;
u32 rssi;
/* Get the current tx rate */
@@ -3779,8 +3765,10 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
if (req->channels[i] == chan)
break;
}
- if (i == req->n_channels)
- req->channels[req->n_channels++] = chan;
+ if (i == req->n_channels) {
+ req->n_channels++;
+ req->channels[i] = chan;
+ }
for (i = 0; i < req->n_ssids; i++) {
if (req->ssids[i].ssid_len == ssid_len &&
@@ -4320,6 +4308,9 @@ brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa,
int ret;
pmk_op = kzalloc(sizeof(*pmk_op), GFP_KERNEL);
+ if (!pmk_op)
+ return -ENOMEM;
+
pmk_op->version = cpu_to_le16(BRCMF_PMKSA_VER_3);
if (!pmksa) {
@@ -5113,6 +5104,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
bool mbss;
int is_11d;
bool supports_11d;
+ bool closednet;
brcmf_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n",
settings->chandef.chan->hw_value,
@@ -5252,8 +5244,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
if (crypto->sae_pwd) {
brcmf_dbg(INFO, "using SAE offload\n");
profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE);
- err = brcmf_set_sae_password(ifp, crypto->sae_pwd,
- crypto->sae_pwd_len);
+ err = brcmf_fwvid_set_sae_password(ifp, crypto);
if (err < 0)
goto exit;
}
@@ -5283,12 +5274,12 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
goto exit;
}
- err = brcmf_fil_iovar_int_set(ifp, "closednet",
- settings->hidden_ssid);
+ closednet =
+ (settings->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
+ err = brcmf_fil_iovar_int_set(ifp, "closednet", closednet);
if (err) {
bphy_err(drvr, "%s closednet error (%d)\n",
- settings->hidden_ssid ?
- "enabled" : "disabled",
+ (closednet ? "enabled" : "disabled"),
err);
goto exit;
}
@@ -5360,10 +5351,12 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev,
msleep(400);
if (profile->use_fwauth != BIT(BRCMF_PROFILE_FWAUTH_NONE)) {
+ struct cfg80211_crypto_settings crypto = {};
+
if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK))
brcmf_set_pmk(ifp, NULL, 0);
if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE))
- brcmf_set_sae_password(ifp, NULL, 0);
+ brcmf_fwvid_set_sae_password(ifp, &crypto);
profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
}
@@ -7269,7 +7262,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg)
u32 nmode = 0;
u32 vhtmode = 0;
u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT };
- u32 rxchain;
+ u32 rxchain = 0;
u32 nchain;
int err;
s32 i;
@@ -8435,6 +8428,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
brcmf_btcoex_detach(cfg);
wiphy_unregister(cfg->wiphy);
wl_deinit_priv(cfg);
+ cancel_work_sync(&cfg->escan_timeout_work);
brcmf_free_wiphy(cfg->wiphy);
kfree(cfg);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 0e1fa3f0dea2..dc3a6a537507 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -468,4 +468,6 @@ void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
void brcmf_cfg80211_free_netdev(struct net_device *ndev);
+int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags);
+
#endif /* BRCMFMAC_CFG80211_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index b6d458e022fa..b24faae35873 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -266,7 +266,7 @@ static int brcmf_c_process_cal_blob(struct brcmf_if *ifp)
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
struct brcmf_pub *drvr = ifp->drvr;
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ struct brcmf_fweh_info *fweh = drvr->fweh;
u8 buf[BRCMF_DCMD_SMLEN];
struct brcmf_bus *bus;
struct brcmf_rev_info_le revinfo;
@@ -413,15 +413,21 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
brcmf_c_set_joinpref_default(ifp);
/* Setup event_msgs, enable E_IF */
- err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
- BRCMF_EVENTING_MASK_LEN);
+ err = brcmf_fil_iovar_data_get(ifp, "event_msgs", fweh->event_mask,
+ fweh->event_mask_len);
if (err) {
bphy_err(drvr, "Get event_msgs error (%d)\n", err);
goto done;
}
- setbit(eventmask, BRCMF_E_IF);
- err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
- BRCMF_EVENTING_MASK_LEN);
+ /*
+	 * BRCMF_E_IF can safely be used to set the appropriate bit in
+	 * the event_mask: the firmware event code is guaranteed to match
+	 * the value of BRCMF_E_IF because it is legacy cruft that all
+	 * vendors carry.
+ */
+ setbit(fweh->event_mask, BRCMF_E_IF);
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask,
+ fweh->event_mask_len);
if (err) {
bphy_err(drvr, "Set event_msgs error (%d)\n", err);
goto done;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index f599d5f896e8..bf91b1e1368f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -691,7 +691,7 @@ static int brcmf_net_mon_open(struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- u32 monitor;
+ u32 monitor = 0;
int err;
brcmf_dbg(TRACE, "Enter\n");
@@ -1348,13 +1348,17 @@ int brcmf_attach(struct device *dev)
goto fail;
}
+ /* attach firmware event handler */
+ ret = brcmf_fweh_attach(drvr);
+ if (ret != 0) {
+ bphy_err(drvr, "brcmf_fweh_attach failed\n");
+ goto fail;
+ }
+
/* Attach to events important for core code */
brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
brcmf_psm_watchdog_notify);
- /* attach firmware event handler */
- brcmf_fweh_attach(drvr);
-
ret = brcmf_bus_started(drvr, drvr->ops);
if (ret != 0) {
bphy_err(drvr, "dongle is not responding: err=%d\n", ret);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index e4f911dd414b..ea76b8d33401 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -122,7 +122,7 @@ struct brcmf_pub {
struct mutex proto_block;
unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
- struct brcmf_fweh_info fweh;
+ struct brcmf_fweh_info *fweh;
struct brcmf_ampdu_rx_reorder
*reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
index b75652ba9359..9a4837881486 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
@@ -7,21 +7,53 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <fwil.h>
#include "vops.h"
-static int brcmf_cyw_attach(struct brcmf_pub *drvr)
+#define BRCMF_CYW_E_LAST 197
+
+static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
{
- pr_err("%s: executing\n", __func__);
- return 0;
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_wsec_sae_pwd_le sae_pwd;
+ u16 pwd_len = crypto->sae_pwd_len;
+ int err;
+
+ if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
+ bphy_err(drvr, "sae_password must be less than %d\n",
+ BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
+ return -EINVAL;
+ }
+
+ sae_pwd.key_len = cpu_to_le16(pwd_len);
+ memcpy(sae_pwd.key, crypto->sae_pwd, pwd_len);
+
+ err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
+ sizeof(sae_pwd));
+ if (err < 0)
+ bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
+ pwd_len);
+
+ return err;
}
-static void brcmf_cyw_detach(struct brcmf_pub *drvr)
+static int brcmf_cyw_alloc_fweh_info(struct brcmf_pub *drvr)
{
- pr_err("%s: executing\n", __func__);
+ struct brcmf_fweh_info *fweh;
+
+ fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_CYW_E_LAST),
+ GFP_KERNEL);
+ if (!fweh)
+ return -ENOMEM;
+
+ fweh->num_event_codes = BRCMF_CYW_E_LAST;
+ drvr->fweh = fweh;
+ return 0;
}
const struct brcmf_fwvid_ops brcmf_cyw_ops = {
- .attach = brcmf_cyw_attach,
- .detach = brcmf_cyw_detach,
+ .set_sae_password = brcmf_cyw_set_sae_pwd,
+ .alloc_fweh_info = brcmf_cyw_alloc_fweh_info,
};
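
The allocation in brcmf_cyw_alloc_fweh_info() sizes the trailing evt_handler[] flexible array with struct_size(), matching the __counted_by(num_event_codes) annotation added to struct brcmf_fweh_info. A minimal sketch of the same pattern, using a hypothetical struct (demo_table and its fields are not part of this series):

/* Hypothetical illustration of the struct_size()/__counted_by() pattern. */
struct demo_table {
	uint n_entries;
	int entries[] __counted_by(n_entries);
};

static struct demo_table *demo_alloc(uint count)
{
	struct demo_table *t;

	/* struct_size() computes sizeof(*t) + count * sizeof(t->entries[0])
	 * with overflow checking; kzalloc() zeroes the whole allocation.
	 */
	t = kzalloc(struct_size(t, entries, count), GFP_KERNEL);
	if (!t)
		return NULL;

	/* __counted_by() bounds checking uses n_entries, so set it before
	 * indexing into entries[].
	 */
	t->n_entries = count;
	return t;
}
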
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
index f82fbbe3ecef..90d06cda03a2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c
@@ -20,6 +20,7 @@ static void __exit brcmf_cyw_exit(void)
brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_CYW, THIS_MODULE);
}
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Cypress/Infineon chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(BRCMFMAC);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 86ff174936a9..c3a602197662 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -83,6 +83,15 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&acepc_t8_data,
},
{
+ /* ACEPC W5 Pro Cherry Trail Z8350 HDMI stick, same wifi as the T8 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+ },
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
/* Chuwi Hi8 Pro with D2D3_Hi8Pro.233 BIOS */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 6d10c9efbe93..f23310a77a5d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -13,6 +13,7 @@
#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
+#include "fwvid.h"
#include "feature.h"
#include "common.h"
@@ -183,7 +184,7 @@ static void brcmf_feat_wlc_version_overrides(struct brcmf_pub *drv)
static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
enum brcmf_feat_id id, char *name)
{
- u32 data;
+ u32 data = 0;
int err;
/* we need to know firmware error */
@@ -339,6 +340,11 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_SCAN_V2, "scan_ver");
+ brcmf_feat_wlc_version_overrides(drvr);
+ brcmf_feat_firmware_overrides(drvr);
+
+ brcmf_fwvid_feat_attach(ifp);
+
if (drvr->settings->feature_disable) {
brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
ifp->drvr->feat_flags,
@@ -346,9 +352,6 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
ifp->drvr->feat_flags &= ~drvr->settings->feature_disable;
}
- brcmf_feat_wlc_version_overrides(drvr);
- brcmf_feat_firmware_overrides(drvr);
-
/* set chip related quirks */
switch (drvr->bus_if->chip) {
case BRCM_CC_43236_CHIP_ID:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 68960ae98987..f0b6a7607f16 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -14,7 +14,8 @@
#include "fweh.h"
#include "fwil.h"
#include "proto.h"
-
+#include "bus.h"
+#include "fwvid.h"
/**
* struct brcmf_fweh_queue_item - event item on event queue.
*
@@ -28,7 +29,7 @@
*/
struct brcmf_fweh_queue_item {
struct list_head q;
- enum brcmf_fweh_event_code code;
+ u32 code;
u8 ifidx;
u8 ifaddr[ETH_ALEN];
struct brcmf_event_msg_be emsg;
@@ -94,7 +95,7 @@ static void brcmf_fweh_queue_event(struct brcmf_fweh_info *fweh,
static int brcmf_fweh_call_event_handler(struct brcmf_pub *drvr,
struct brcmf_if *ifp,
- enum brcmf_fweh_event_code code,
+ u32 fwcode,
struct brcmf_event_msg *emsg,
void *data)
{
@@ -102,13 +103,13 @@ static int brcmf_fweh_call_event_handler(struct brcmf_pub *drvr,
int err = -EINVAL;
if (ifp) {
- fweh = &ifp->drvr->fweh;
+ fweh = ifp->drvr->fweh;
/* handle the event if valid interface and handler */
- if (fweh->evt_handler[code])
- err = fweh->evt_handler[code](ifp, emsg, data);
+ if (fweh->evt_handler[fwcode])
+ err = fweh->evt_handler[fwcode](ifp, emsg, data);
else
- bphy_err(drvr, "unhandled event %d ignored\n", code);
+ bphy_err(drvr, "unhandled fwevt %d ignored\n", fwcode);
} else {
bphy_err(drvr, "no interface object\n");
}
@@ -142,7 +143,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
is_p2pdev = ((ifevent->flags & BRCMF_E_IF_FLAG_NOIF) &&
(ifevent->role == BRCMF_E_IF_ROLE_P2P_CLIENT ||
((ifevent->role == BRCMF_E_IF_ROLE_STA) &&
- (drvr->fweh.p2pdev_setup_ongoing))));
+ (drvr->fweh->p2pdev_setup_ongoing))));
if (!is_p2pdev && (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
brcmf_dbg(EVENT, "event can be ignored\n");
return;
@@ -163,7 +164,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
return;
if (!is_p2pdev)
brcmf_proto_add_if(drvr, ifp);
- if (!drvr->fweh.evt_handler[BRCMF_E_IF])
+ if (!drvr->fweh->evt_handler[BRCMF_E_IF])
if (brcmf_net_attach(ifp, false) < 0)
return;
}
@@ -183,6 +184,45 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
}
}
+static void brcmf_fweh_map_event_code(struct brcmf_fweh_info *fweh,
+ enum brcmf_fweh_event_code code,
+ u32 *fw_code)
+{
+ int i;
+
+ if (WARN_ON(!fw_code))
+ return;
+
+ *fw_code = code;
+ if (fweh->event_map) {
+ for (i = 0; i < fweh->event_map->n_items; i++) {
+ if (fweh->event_map->items[i].code == code) {
+ *fw_code = fweh->event_map->items[i].fwevt_code;
+ break;
+ }
+ }
+ }
+}
+
+static void brcmf_fweh_map_fwevt_code(struct brcmf_fweh_info *fweh, u32 fw_code,
+ enum brcmf_fweh_event_code *code)
+{
+ int i;
+
+ if (WARN_ON(!code))
+ return;
+
+ *code = fw_code;
+ if (fweh->event_map) {
+ for (i = 0; i < fweh->event_map->n_items; i++) {
+ if (fweh->event_map->items[i].fwevt_code == fw_code) {
+ *code = fweh->event_map->items[i].code;
+ break;
+ }
+ }
+ }
+}
+
/**
* brcmf_fweh_dequeue_event() - get event from the queue.
*
@@ -221,15 +261,19 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
struct brcmf_event_msg emsg;
fweh = container_of(work, struct brcmf_fweh_info, event_work);
- drvr = container_of(fweh, struct brcmf_pub, fweh);
+ drvr = fweh->drvr;
while ((event = brcmf_fweh_dequeue_event(fweh))) {
- brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n",
- brcmf_fweh_event_name(event->code), event->code,
+ enum brcmf_fweh_event_code code;
+
+ brcmf_fweh_map_fwevt_code(fweh, event->code, &code);
+ brcmf_dbg(EVENT, "event %s (%u:%u) ifidx %u bsscfg %u addr %pM\n",
+ brcmf_fweh_event_name(code), code, event->code,
event->emsg.ifidx, event->emsg.bsscfgidx,
event->emsg.addr);
if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) {
- bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx);
+ bphy_err(drvr, "invalid bsscfg index: %u\n",
+ event->emsg.bsscfgidx);
goto event_free;
}
@@ -237,7 +281,7 @@ static void brcmf_fweh_event_worker(struct work_struct *work)
emsg_be = &event->emsg;
emsg.version = be16_to_cpu(emsg_be->version);
emsg.flags = be16_to_cpu(emsg_be->flags);
- emsg.event_code = event->code;
+ emsg.event_code = code;
emsg.status = be32_to_cpu(emsg_be->status);
emsg.reason = be32_to_cpu(emsg_be->reason);
emsg.auth_type = be32_to_cpu(emsg_be->auth_type);
@@ -283,7 +327,7 @@ event_free:
*/
void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing)
{
- ifp->drvr->fweh.p2pdev_setup_ongoing = ongoing;
+ ifp->drvr->fweh->p2pdev_setup_ongoing = ongoing;
}
/**
@@ -291,12 +335,27 @@ void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing)
*
* @drvr: driver information object.
*/
-void brcmf_fweh_attach(struct brcmf_pub *drvr)
+int brcmf_fweh_attach(struct brcmf_pub *drvr)
{
- struct brcmf_fweh_info *fweh = &drvr->fweh;
+ struct brcmf_fweh_info *fweh;
+ int err;
+
+ err = brcmf_fwvid_alloc_fweh_info(drvr);
+ if (err < 0)
+ return err;
+
+ fweh = drvr->fweh;
+ fweh->drvr = drvr;
+
+ fweh->event_mask_len = DIV_ROUND_UP(fweh->num_event_codes, 8);
+ fweh->event_mask = kzalloc(fweh->event_mask_len, GFP_KERNEL);
+ if (!fweh->event_mask)
+ return -ENOMEM;
+
INIT_WORK(&fweh->event_work, brcmf_fweh_event_worker);
spin_lock_init(&fweh->evt_q_lock);
INIT_LIST_HEAD(&fweh->event_q);
+ return 0;
}
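
The mask sizing above is plain bit-packing: one bit per firmware event code, rounded up to whole bytes, replacing the old fixed BRCMF_EVENTING_MASK_LEN. A quick worked check against the vendor counts defined in this series:

/* DIV_ROUND_UP(n, 8) bytes hold one bit per event code:
 *   CYW: BRCMF_CYW_E_LAST = 197 -> DIV_ROUND_UP(197, 8) = 25 bytes
 *   WCC: BRCMF_WCC_E_LAST = 213 -> DIV_ROUND_UP(213, 8) = 27 bytes
 * setbit(fweh->event_mask, code) then enables event 'code' for the
 * "event_msgs" iovar.
 */
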
/**
@@ -306,14 +365,19 @@ void brcmf_fweh_attach(struct brcmf_pub *drvr)
*/
void brcmf_fweh_detach(struct brcmf_pub *drvr)
{
- struct brcmf_fweh_info *fweh = &drvr->fweh;
+ struct brcmf_fweh_info *fweh = drvr->fweh;
+
+ if (!fweh)
+ return;
/* cancel the worker if initialized */
if (fweh->event_work.func) {
cancel_work_sync(&fweh->event_work);
WARN_ON(!list_empty(&fweh->event_q));
- memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
}
+ drvr->fweh = NULL;
+ kfree(fweh->event_mask);
+ kfree(fweh);
}
/**
@@ -326,11 +390,17 @@ void brcmf_fweh_detach(struct brcmf_pub *drvr)
int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
brcmf_fweh_handler_t handler)
{
- if (drvr->fweh.evt_handler[code]) {
+ struct brcmf_fweh_info *fweh = drvr->fweh;
+ u32 evt_handler_idx;
+
+ brcmf_fweh_map_event_code(fweh, code, &evt_handler_idx);
+
+ if (fweh->evt_handler[evt_handler_idx]) {
bphy_err(drvr, "event code %d already registered\n", code);
return -ENOSPC;
}
- drvr->fweh.evt_handler[code] = handler;
+
+ fweh->evt_handler[evt_handler_idx] = handler;
brcmf_dbg(TRACE, "event handler registered for %s\n",
brcmf_fweh_event_name(code));
return 0;
@@ -345,9 +415,12 @@ int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
void brcmf_fweh_unregister(struct brcmf_pub *drvr,
enum brcmf_fweh_event_code code)
{
+ u32 evt_handler_idx;
+
brcmf_dbg(TRACE, "event handler cleared for %s\n",
brcmf_fweh_event_name(code));
- drvr->fweh.evt_handler[code] = NULL;
+ brcmf_fweh_map_event_code(drvr->fweh, code, &evt_handler_idx);
+ drvr->fweh->evt_handler[evt_handler_idx] = NULL;
}
/**
@@ -357,27 +430,28 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr,
*/
int brcmf_fweh_activate_events(struct brcmf_if *ifp)
{
- struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_fweh_info *fweh = ifp->drvr->fweh;
+ enum brcmf_fweh_event_code code;
int i, err;
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
- memset(eventmask, 0, sizeof(eventmask));
- for (i = 0; i < BRCMF_E_LAST; i++) {
- if (ifp->drvr->fweh.evt_handler[i]) {
+ memset(fweh->event_mask, 0, fweh->event_mask_len);
+ for (i = 0; i < fweh->num_event_codes; i++) {
+ if (fweh->evt_handler[i]) {
+ brcmf_fweh_map_fwevt_code(fweh, i, &code);
brcmf_dbg(EVENT, "enable event %s\n",
- brcmf_fweh_event_name(i));
- setbit(eventmask, i);
+ brcmf_fweh_event_name(code));
+ setbit(fweh->event_mask, i);
}
}
/* want to handle IF event as well */
brcmf_dbg(EVENT, "enable event IF\n");
- setbit(eventmask, BRCMF_E_IF);
+ setbit(fweh->event_mask, BRCMF_E_IF);
- err = brcmf_fil_iovar_data_set(ifp, "event_msgs",
- eventmask, BRCMF_EVENTING_MASK_LEN);
+ err = brcmf_fil_iovar_data_set(ifp, "event_msgs", fweh->event_mask,
+ fweh->event_mask_len);
if (err)
- bphy_err(drvr, "Set event_msgs error (%d)\n", err);
+ bphy_err(fweh->drvr, "Set event_msgs error (%d)\n", err);
return err;
}
@@ -397,21 +471,21 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
struct brcmf_event *event_packet,
u32 packet_len, gfp_t gfp)
{
- enum brcmf_fweh_event_code code;
- struct brcmf_fweh_info *fweh = &drvr->fweh;
+ u32 fwevt_idx;
+ struct brcmf_fweh_info *fweh = drvr->fweh;
struct brcmf_fweh_queue_item *event;
void *data;
u32 datalen;
/* get event info */
- code = get_unaligned_be32(&event_packet->msg.event_type);
+ fwevt_idx = get_unaligned_be32(&event_packet->msg.event_type);
datalen = get_unaligned_be32(&event_packet->msg.datalen);
data = &event_packet[1];
- if (code >= BRCMF_E_LAST)
+ if (fwevt_idx >= fweh->num_event_codes)
return;
- if (code != BRCMF_E_IF && !fweh->evt_handler[code])
+ if (fwevt_idx != BRCMF_E_IF && !fweh->evt_handler[fwevt_idx])
return;
if (datalen > BRCMF_DCMD_MAXLEN ||
@@ -422,8 +496,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
if (!event)
return;
+ event->code = fwevt_idx;
event->datalen = datalen;
- event->code = code;
event->ifidx = event_packet->msg.ifidx;
/* use memcpy to get aligned event message */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index 48414e8b9389..9ca1b2aadcb5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -17,6 +17,10 @@ struct brcmf_pub;
struct brcmf_if;
struct brcmf_cfg80211_info;
+#define BRCMF_ABSTRACT_EVENT_BIT BIT(31)
+#define BRCMF_ABSTRACT_ENUM_DEF(_id, _val) \
+ BRCMF_ENUM_DEF(_id, (BRCMF_ABSTRACT_EVENT_BIT | (_val)))
+
/* list of firmware events */
#define BRCMF_FWEH_EVENT_ENUM_DEFLIST \
BRCMF_ENUM_DEF(SET_SSID, 0) \
@@ -98,16 +102,9 @@ struct brcmf_cfg80211_info;
/* firmware event codes sent by the dongle */
enum brcmf_fweh_event_code {
BRCMF_FWEH_EVENT_ENUM_DEFLIST
- /* this determines event mask length which must match
- * minimum length check in device firmware so it is
- * hard-coded here.
- */
- BRCMF_E_LAST = 139
};
#undef BRCMF_ENUM_DEF
-#define BRCMF_EVENTING_MASK_LEN DIV_ROUND_UP(BRCMF_E_LAST, 8)
-
/* flags field values in struct brcmf_event_msg */
#define BRCMF_EVENT_MSG_LINK 0x01
#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
@@ -288,27 +285,66 @@ typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
void *data);
/**
+ * struct brcmf_fweh_event_map_item - fweh event and firmware event pair.
+ *
+ * @code: fweh event code as used by higher layers.
+ * @fwevt_code: firmware event code as used by firmware.
+ *
+ * This mapping is needed when a functionally identical event has
+ * different numerical definitions between vendors. In that case the
+ * higher-layer event code must not collide with any firmware event
+ * code.
+ */
+struct brcmf_fweh_event_map_item {
+ enum brcmf_fweh_event_code code;
+ u32 fwevt_code;
+};
+
+/**
+ * struct brcmf_fweh_event_map - mapping between firmware event and fweh event.
+ *
+ * @n_items: number of mapping items.
+ * @items: array of fweh event and firmware event pairs.
+ */
+struct brcmf_fweh_event_map {
+ u32 n_items;
+ const struct brcmf_fweh_event_map_item items[] __counted_by(n_items);
+};
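
A vendor module that needs remapping would presumably install a static table like the sketch below; an abstract code carries BRCMF_ABSTRACT_EVENT_BIT so it cannot collide with a raw firmware code. The values here are made up for illustration:

/* Hypothetical vendor map; event code values are invented. */
static struct brcmf_fweh_event_map demo_event_map = {
	.n_items = 1,
	.items = {
		/* abstract code (bit 31 set) mapped to firmware event 201 */
		{ .code = BRCMF_ABSTRACT_EVENT_BIT | 1, .fwevt_code = 201 },
	},
};

The alloc_fweh_info() callback would then set drvr->fweh->event_map = &demo_event_map; before returning.
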
+
+/**
* struct brcmf_fweh_info - firmware event handling information.
*
* @p2pdev_setup_ongoing: P2P device creation in progress.
* @event_work: event worker.
* @evt_q_lock: lock for event queue protection.
* @event_q: event queue.
- * @evt_handler: registered event handlers.
+ * @event_mask_len: length of @event_mask used to enable firmware events.
+ * @event_mask: byte array used in 'event_msgs' iovar command.
+ * @event_map: mapping between fweh event codes and firmware event codes,
+ *	optionally provided by the vendor-specific module for events that
+ *	need remapping.
+ * @num_event_codes: number of event codes supported by the firmware,
+ *	which performs a minimum length check against @event_mask. This
+ *	value is provided by the vendor-specific module and determines
+ *	@event_mask_len and thus the allocation size of @event_mask.
+ * @evt_handler: event handler registry indexed by firmware event code.
*/
struct brcmf_fweh_info {
+ struct brcmf_pub *drvr;
bool p2pdev_setup_ongoing;
struct work_struct event_work;
spinlock_t evt_q_lock;
struct list_head event_q;
- int (*evt_handler[BRCMF_E_LAST])(struct brcmf_if *ifp,
- const struct brcmf_event_msg *evtmsg,
- void *data);
+ uint event_mask_len;
+ u8 *event_mask;
+ struct brcmf_fweh_event_map *event_map;
+ uint num_event_codes;
+ brcmf_fweh_handler_t evt_handler[] __counted_by(num_event_codes);
};
const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code);
-void brcmf_fweh_attach(struct brcmf_pub *drvr);
+int brcmf_fweh_attach(struct brcmf_pub *drvr);
void brcmf_fweh_detach(struct brcmf_pub *drvr);
int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
int (*handler)(struct brcmf_if *ifp,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index 72fe8bce6eaf..6385a7db7f7d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -142,6 +142,7 @@ brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_set);
s32
brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
@@ -160,36 +161,7 @@ brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
return err;
}
-
-
-s32
-brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
-{
- s32 err;
- __le32 data_le = cpu_to_le32(data);
-
- mutex_lock(&ifp->drvr->proto_block);
- brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
- err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
- mutex_unlock(&ifp->drvr->proto_block);
-
- return err;
-}
-
-s32
-brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
-{
- s32 err;
- __le32 data_le = cpu_to_le32(*data);
-
- mutex_lock(&ifp->drvr->proto_block);
- err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
- mutex_unlock(&ifp->drvr->proto_block);
- *data = le32_to_cpu(data_le);
- brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
-
- return err;
-}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_cmd_data_get);
static u32
brcmf_create_iovar(const char *name, const char *data, u32 datalen,
@@ -239,6 +211,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *dat
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_set);
s32
brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
@@ -270,26 +243,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
mutex_unlock(&drvr->proto_block);
return err;
}
-
-s32
-brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
-{
- __le32 data_le = cpu_to_le32(data);
-
- return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
-}
-
-s32
-brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
-{
- __le32 data_le = cpu_to_le32(*data);
- s32 err;
-
- err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
- if (err == 0)
- *data = le32_to_cpu(data_le);
- return err;
-}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_get);
static u32
brcmf_create_bsscfg(s32 bsscfgidx, const char *name, char *data, u32 datalen,
@@ -364,6 +318,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_set);
s32
brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
@@ -394,28 +349,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
mutex_unlock(&drvr->proto_block);
return err;
}
-
-s32
-brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
-{
- __le32 data_le = cpu_to_le32(data);
-
- return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
- sizeof(data_le));
-}
-
-s32
-brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
-{
- __le32 data_le = cpu_to_le32(*data);
- s32 err;
-
- err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
- sizeof(data_le));
- if (err == 0)
- *data = le32_to_cpu(data_le);
- return err;
-}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_bsscfg_data_get);
static u32 brcmf_create_xtlv(const char *name, u16 id, char *data, u32 len,
char *buf, u32 buflen)
@@ -465,6 +399,7 @@ s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_set);
s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len)
@@ -494,39 +429,4 @@ s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
mutex_unlock(&drvr->proto_block);
return err;
}
-
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data)
-{
- __le32 data_le = cpu_to_le32(data);
-
- return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le,
- sizeof(data_le));
-}
-
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data)
-{
- __le32 data_le = cpu_to_le32(*data);
- s32 err;
-
- err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
- if (err == 0)
- *data = le32_to_cpu(data_le);
- return err;
-}
-
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data)
-{
- return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
-}
-
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data)
-{
- __le16 data_le = cpu_to_le16(*data);
- s32 err;
-
- err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
- if (err == 0)
- *data = le16_to_cpu(data_le);
- return err;
-}
-
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_xtlv_data_get);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index bc693157c4b1..a315a7fac6a0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -81,29 +81,122 @@
s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
-s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
-s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
+static inline
+s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
+{
+ s32 err;
+ __le32 data_le = cpu_to_le32(data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
- u32 len);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data);
+ err = brcmf_fil_cmd_data_set(ifp, cmd, &data_le, sizeof(data_le));
+
+ return err;
+}
+static inline
+s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
+{
+ s32 err;
+ __le32 data_le = cpu_to_le32(*data);
+
+ err = brcmf_fil_cmd_data_get(ifp, cmd, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ brcmf_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data);
+
+ return err;
+}
+
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name,
+ const void *data, u32 len);
s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data);
-s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
-
-s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, void *data,
- u32 len);
-s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, void *data,
- u32 len);
-s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data);
-s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
+static inline
+s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
+}
+static inline
+s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+
+
+s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
+ void *data, u32 len);
+s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
+ void *data, u32 len);
+static inline
+s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
+ sizeof(data_le));
+}
+static inline
+s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
+ sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+
s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data);
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data);
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data);
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data);
+static inline
+s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id,
+ u32 data)
+{
+ __le32 data_le = cpu_to_le32(data);
+
+ return brcmf_fil_xtlv_data_set(ifp, name, id, &data_le,
+ sizeof(data_le));
+}
+static inline
+s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id,
+ u32 *data)
+{
+ __le32 data_le = cpu_to_le32(*data);
+ s32 err;
+
+ err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le32_to_cpu(data_le);
+ return err;
+}
+static inline
+s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id,
+ u8 *data)
+{
+ return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
+}
+static inline
+s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id,
+ u16 *data)
+{
+ __le16 data_le = cpu_to_le16(*data);
+ s32 err;
+
+ err = brcmf_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le));
+ if (err == 0)
+ *data = le16_to_cpu(data_le);
+ return err;
+}
#endif /* _fwil_h_ */
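
Moving the int accessors into the header as static inline wrappers means only the four data accessors need BRCMF_EXPORT_SYMBOL_GPL() for vendor modules. A short illustrative call site (the "wsec" iovar and WEP_ENABLED flag are real, the surrounding logic is a sketch):

/* Read-modify-write a u32 iovar through the inline helpers. */
u32 wsec = 0;
int err;

err = brcmf_fil_iovar_int_get(ifp, "wsec", &wsec);
if (err == 0) {
	wsec |= WEP_ENABLED;	/* flag from fwil_types.h */
	err = brcmf_fil_iovar_int_set(ifp, "wsec", wsec);
}
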
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 9d248ba1c0b2..e74a23e11830 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -584,7 +584,7 @@ struct brcmf_wsec_key_le {
struct brcmf_wsec_pmk_le {
__le16 key_len;
__le16 flags;
- u8 key[2 * BRCMF_WSEC_MAX_PSK_LEN + 1];
+ u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN];
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
index 86eafdb40541..41eafcda77f7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
@@ -90,7 +90,7 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *vmod,
return -ERANGE;
if (WARN_ON(!vmod) || WARN_ON(!vops) ||
- WARN_ON(!vops->attach) || WARN_ON(!vops->detach))
+ WARN_ON(!vops->alloc_fweh_info))
return -EINVAL;
if (WARN_ON(fwvid_list[fwvid].vmod))
@@ -150,7 +150,7 @@ static inline int brcmf_fwvid_request_module(enum brcmf_fwvendor fwvid)
}
#endif
-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr)
+int brcmf_fwvid_attach(struct brcmf_pub *drvr)
{
enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid;
int ret;
@@ -175,7 +175,7 @@ int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr)
return ret;
}
-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr)
+void brcmf_fwvid_detach(struct brcmf_pub *drvr)
{
enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid;
@@ -187,9 +187,10 @@ void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr)
mutex_lock(&fwvid_list_lock);
- drvr->vops = NULL;
- list_del(&drvr->bus_if->list);
-
+ if (drvr->vops) {
+ drvr->vops = NULL;
+ list_del(&drvr->bus_if->list);
+ }
mutex_unlock(&fwvid_list_lock);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
index 43df58bb70ad..e6ac9fc341bc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
@@ -6,12 +6,15 @@
#define FWVID_H_
#include "firmware.h"
+#include "cfg80211.h"
struct brcmf_pub;
+struct brcmf_if;
struct brcmf_fwvid_ops {
- int (*attach)(struct brcmf_pub *drvr);
- void (*detach)(struct brcmf_pub *drvr);
+ void (*feat_attach)(struct brcmf_if *ifp);
+ int (*set_sae_password)(struct brcmf_if *ifp, struct cfg80211_crypto_settings *crypto);
+ int (*alloc_fweh_info)(struct brcmf_pub *drvr);
};
/* exported functions */
@@ -20,28 +23,37 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *mod,
int brcmf_fwvid_unregister_vendor(enum brcmf_fwvendor fwvid, struct module *mod);
/* core driver functions */
-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr);
-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr);
+int brcmf_fwvid_attach(struct brcmf_pub *drvr);
+void brcmf_fwvid_detach(struct brcmf_pub *drvr);
const char *brcmf_fwvid_vendor_name(struct brcmf_pub *drvr);
-static inline int brcmf_fwvid_attach(struct brcmf_pub *drvr)
+static inline void brcmf_fwvid_feat_attach(struct brcmf_if *ifp)
{
- int ret;
+ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
- ret = brcmf_fwvid_attach_ops(drvr);
- if (ret)
- return ret;
+ if (!vops->feat_attach)
+ return;
- return drvr->vops->attach(drvr);
+ vops->feat_attach(ifp);
}
-static inline void brcmf_fwvid_detach(struct brcmf_pub *drvr)
+static inline int brcmf_fwvid_set_sae_password(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
+{
+ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
+
+ if (!vops || !vops->set_sae_password)
+ return -EOPNOTSUPP;
+
+ return vops->set_sae_password(ifp, crypto);
+}
+
+static inline int brcmf_fwvid_alloc_fweh_info(struct brcmf_pub *drvr)
{
if (!drvr->vops)
- return;
+ return -EIO;
- drvr->vops->detach(drvr);
- brcmf_fwvid_detach_ops(drvr);
+ return drvr->vops->alloc_fweh_info(drvr);
}
#endif /* FWVID_H_ */
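
brcmf_fwvid_set_sae_password() returns -EOPNOTSUPP when the vendor module does not implement the callback, so callers need no vendor checks of their own. A sketch of the call pattern, mirroring the cfg80211.c hunks earlier in this diff:

/* Illustrative caller; error handling matches the cfg80211.c usage. */
err = brcmf_fwvid_set_sae_password(ifp, &settings->crypto);
if (err < 0)
	bphy_err(drvr, "failed to set SAE password (%d)\n", err);
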
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
index 5573a47766ad..05d7c2a4fba5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
@@ -7,21 +7,34 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <cfg80211.h>
#include "vops.h"
-static int brcmf_wcc_attach(struct brcmf_pub *drvr)
+#define BRCMF_WCC_E_LAST 213
+
+static int brcmf_wcc_set_sae_pwd(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
{
- pr_debug("%s: executing\n", __func__);
- return 0;
+ return brcmf_set_wsec(ifp, crypto->sae_pwd, crypto->sae_pwd_len,
+ BRCMF_WSEC_PASSPHRASE);
}
-static void brcmf_wcc_detach(struct brcmf_pub *drvr)
+static int brcmf_wcc_alloc_fweh_info(struct brcmf_pub *drvr)
{
- pr_debug("%s: executing\n", __func__);
+ struct brcmf_fweh_info *fweh;
+
+ fweh = kzalloc(struct_size(fweh, evt_handler, BRCMF_WCC_E_LAST),
+ GFP_KERNEL);
+ if (!fweh)
+ return -ENOMEM;
+
+ fweh->num_event_codes = BRCMF_WCC_E_LAST;
+ drvr->fweh = fweh;
+ return 0;
}
const struct brcmf_fwvid_ops brcmf_wcc_ops = {
- .attach = brcmf_wcc_attach,
- .detach = brcmf_wcc_detach,
+ .set_sae_password = brcmf_wcc_set_sae_pwd,
+ .alloc_fweh_info = brcmf_wcc_alloc_fweh_info,
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
index 02918d434556..b66135e3cff4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c
@@ -20,6 +20,7 @@ static void __exit brcmf_wcc_exit(void)
brcmf_fwvid_unregister_vendor(BRCMF_FWVENDOR_WCC, THIS_MODULE);
}
+MODULE_DESCRIPTION("Broadcom FullMAC WLAN driver plugin for Broadcom mobility chipsets");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_IMPORT_NS(BRCMFMAC);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
index 89c8829528c2..9540a05247c2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <net/mac80211.h>
#include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 543e93ec49d2..92860dc0a92e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -959,6 +959,10 @@ static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops brcms_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = brcms_ops_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = brcms_ops_start,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index ccc621b8ed9f..a27d6f0b8819 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -383,8 +383,9 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
return sh;
}
-static void wlc_phy_timercb_phycal(struct brcms_phy *pi)
+static void wlc_phy_timercb_phycal(void *ptr)
{
+ struct brcms_phy *pi = ptr;
uint delay = 5;
if (PHY_PERICAL_MPHASE_PENDING(pi)) {
@@ -551,8 +552,7 @@ wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
if (!pi->phycal_timer)
goto err;
- if (!wlc_phy_attach_nphy(pi))
- goto err;
+ wlc_phy_attach_nphy(pi);
} else if (ISLCNPHY(pi)) {
if (!wlc_phy_attach_lcnphy(pi))
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
index 8668fa5558a2..70a9ec050717 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
@@ -941,7 +941,7 @@ void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag);
void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
-bool wlc_phy_attach_nphy(struct brcms_phy *pi);
+void wlc_phy_attach_nphy(struct brcms_phy *pi);
bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index 7717eb85a1db..aae2cf95fe95 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -3299,7 +3299,7 @@ wlc_lcnphy_run_samples(struct brcms_phy *pi,
if (iqcalmode) {
- and_phy_reg(pi, 0x453, (u16) ~(0x1 << 15));
+ and_phy_reg(pi, 0x453, 0xffff & ~(0x1 << 15));
or_phy_reg(pi, 0x453, (0x1 << 15));
} else {
write_phy_reg(pi, 0x63f, 1);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index 8580a2754789..d69879e1bd87 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -14546,7 +14546,7 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
wlc_phy_txpwr_apply_nphy(pi);
}
-static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
+static void wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
{
struct ssb_sprom *sprom = &pi->d11core->bus->sprom;
@@ -14595,11 +14595,9 @@ static bool wlc_phy_txpwr_srom_read_nphy(struct brcms_phy *pi)
pi->phycal_tempdelta = 0;
wlc_phy_txpwr_srom_read_ppr_nphy(pi);
-
- return true;
}
-bool wlc_phy_attach_nphy(struct brcms_phy *pi)
+void wlc_phy_attach_nphy(struct brcms_phy *pi)
{
uint i;
@@ -14645,10 +14643,7 @@ bool wlc_phy_attach_nphy(struct brcms_phy *pi)
pi->pi_fptr.chanset = wlc_phy_chanspec_set_nphy;
pi->pi_fptr.txpwrrecalc = wlc_phy_txpower_recalc_target_nphy;
- if (!wlc_phy_txpwr_srom_read_nphy(pi))
- return false;
-
- return true;
+ wlc_phy_txpwr_srom_read_nphy(pi);
}
static s32 get_rf_pwr_offset(struct brcms_phy *pi, s16 pga_gn, s16 pad_gn)
@@ -17587,7 +17582,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
or_phy_reg(pi, 0x122, (0x1 << 0));
if (NREV_GE(pi->pubpi.phy_rev, 3))
- and_phy_reg(pi, 0x1e7, (u16) (~(0x1 << 15)));
+ and_phy_reg(pi, 0x1e7, 0x7fff);
else
or_phy_reg(pi, 0x1e7, (0x1 << 15));
@@ -18086,7 +18081,7 @@ wlc_phy_rfctrlintc_override_nphy(struct brcms_phy *pi, u8 field, u16 value,
(0x1 << 10));
and_phy_reg(pi, 0x2ff, (u16)
- ~(0x3 << 14));
+ 0xffff & ~(0x3 << 14));
or_phy_reg(pi, 0x2ff, (0x1 << 13));
or_phy_reg(pi, 0x2ff, (0x1 << 0));
} else {
@@ -21053,7 +21048,7 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
(val | MAC_PHY_FORCE_CLK));
and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
- (u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX)));
+ 0xffff & ~(BBCFG_RESETCCA | BBCFG_RESETRX));
bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
}
@@ -21287,7 +21282,8 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
bcma_set16(pi->d11core, D11REGOFFS(psm_gpio_oe), mask);
- bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out), ~mask);
+ bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out),
+ 0xffff & ~mask);
if (lut_init) {
write_phy_reg(pi, 0xf8, 0x02d8);
@@ -23197,7 +23193,7 @@ void wlc_phy_stopplayback_nphy(struct brcms_phy *pi)
or_phy_reg(pi, 0xc3, NPHY_sampleCmd_STOP);
else if (playback_status & 0x2)
and_phy_reg(pi, 0xc2,
- (u16) ~NPHY_iqloCalCmdGctl_IQLO_CAL_EN);
+ 0xffff & ~NPHY_iqloCalCmdGctl_IQLO_CAL_EN);
and_phy_reg(pi, 0xc3, (u16) ~(0x1 << 2));
@@ -28202,8 +28198,9 @@ void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type)
if (NREV_GE(pi->pubpi.phy_rev, 3))
and_phy_reg(pi, 0x1e7,
- (u16) (~((0x1 << 15) |
- (0x1 << 14) | (0x1 << 13))));
+ 0xffff & ~((0x1 << 15) |
+ (0x1 << 14) |
+ (0x1 << 13)));
else
and_phy_reg(pi, 0x1e7,
(u16) (~((0x1 << 14) | (0x1 << 13))));
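
These mask rewrites all address the same C pitfall: ~ promotes its operand to int, so ~(0x1 << 15) is 0xffff7fff and the old (u16) cast silently truncated it, presumably tripping an implicit-truncation warning. Masking with 0xffff keeps the constant explicitly in 16-bit range. A standalone illustration:

/* Both yield 0x7fff for a 16-bit register mask, but the second makes
 * the truncation explicit rather than casting away the promoted-int
 * upper bits.
 */
u16 old_style = (u16)~(0x1 << 15);	/* truncates 0xffff7fff */
u16 new_style = 0xffff & ~(0x1 << 15);	/* already 0x7fff */
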
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
index a0de5db0cd64..b72381791536 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
@@ -57,12 +57,11 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim)
}
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn)(struct brcms_phy *pi),
+ void (*fn)(void *pi),
void *arg, const char *name)
{
return (struct wlapi_timer *)
- brcms_init_timer(physhim->wl, (void (*)(void *))fn,
- arg, name);
+ brcms_init_timer(physhim->wl, fn, arg, name);
}
void wlapi_free_timer(struct wlapi_timer *t)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
index dd8774717ade..27d0934e600e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
@@ -131,7 +131,7 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim);
/* PHY to WL utility functions */
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn)(struct brcms_phy *pi),
+ void (*fn)(void *pi),
void *arg, const char *name);
void wlapi_free_timer(struct wlapi_timer *t);
void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
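
The prototype change removes an indirect call through a mismatched function-pointer type: brcms_init_timer() stores a void (*)(void *), and casting a void (*)(struct brcms_phy *) to that type is undefined behavior which kCFI (CONFIG_CFI_CLANG) traps at run time. The fix is to take void * and cast inside the callback, as wlc_phy_timercb_phycal now does. A minimal sketch with hypothetical names (demo_state, demo_handle_tick):

/* kCFI-safe callback: signature matches the registered pointer type. */
static void demo_timercb(void *ptr)
{
	struct demo_state *st = ptr;	/* cast inside, not at the call site */

	demo_handle_tick(st);
}

	/* registration matches void (*)(void *) exactly, no cast needed */
	t = wlapi_init_timer(physhim, demo_timercb, st, "demo");
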
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 9eaf5ec133f9..075b705a8d7b 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3432,6 +3432,10 @@ static const struct attribute_group il3945_attribute_group = {
};
static struct ieee80211_ops il3945_mac_ops __ro_after_init = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = il3945_mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = il3945_mac_start,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 70e420df1643..4beb7be6d51d 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -6301,6 +6301,10 @@ il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
}
static const struct ieee80211_ops il4965_mac_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = il4965_mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = il4965_mac_start,
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 17570d62c896..9d33a66a49b5 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -3438,9 +3438,7 @@ il_init_geos(struct il_priv *il)
if (!channels)
return -ENOMEM;
- rates =
- kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
- GFP_KERNEL);
+ rates = kcalloc(RATE_COUNT_LEGACY, sizeof(*rates), GFP_KERNEL);
if (!rates) {
kfree(channels);
return -ENOMEM;
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 20971304fdef..4b04865fc2c9 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -46,6 +46,15 @@ config IWLWIFI
if IWLWIFI
+config IWLWIFI_KUNIT_TESTS
+ tristate
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+	  Enable this option for iwlwifi KUnit tests.
+
+ If unsure, say N.
+
config IWLWIFI_LEDS
bool
depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index b983982aee45..8bb94a4c12cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -18,6 +18,7 @@ iwlwifi-objs += queue/tx.o
iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
+iwlwifi-objs += fw/regulatory.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_EFI) += fw/uefi.o
@@ -33,4 +34,6 @@ obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
obj-$(CONFIG_IWLMEI) += mei/
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
+
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index 134635c70ce8..25952d0bea99 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_AX210_UCODE_API_MAX 86
+#define IWL_AX210_UCODE_API_MAX 89
/* Lowest firmware API version supported */
#define IWL_AX210_UCODE_API_MIN 59
@@ -299,3 +299,9 @@ MODULE_FIRMWARE(IWL_MA_B_HR_B_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_GF_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_GF4_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_B_MR_A_FW_MODULE_FIRMWARE(IWL_AX210_UCODE_API_MAX));
+
+MODULE_FIRMWARE("iwlwifi-so-a0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-so-a0-gf4-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ty-a0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ma-b0-gf-a0.pnvm");
+MODULE_FIRMWARE("iwlwifi-ma-b0-gf4-a0.pnvm");
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index 82da957adcf6..072b0a5827d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 86
+#define IWL_BZ_UCODE_API_MAX 90
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 80
@@ -129,10 +129,6 @@ static const struct iwl_base_params iwl_bz_base_params = {
IWL_DEVICE_BZ_COMMON, \
.ht_params = &iwl_22000_ht_params
-#define IWL_DEVICE_GL_A \
- IWL_DEVICE_BZ_COMMON, \
- .ht_params = &iwl_gl_a_ht_params
-
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
* A-MPDU, with additional overhead to account for processing time.
@@ -153,6 +149,7 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
};
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
+const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
const struct iwl_cfg iwl_cfg_bz = {
.fw_name_mac = "bz",
@@ -179,3 +176,5 @@ MODULE_FIRMWARE(IWL_BZ_A_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_FM4_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_B_FM_B_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_GL_C_FM_C_MODULE_FIRMWARE(IWL_BZ_UCODE_API_MAX));
+
+MODULE_FIRMWARE("iwlwifi-gl-c0-fm-c0.pnvm");
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 80eb9b499538..9b79279fd76c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 86
+#define IWL_SC_UCODE_API_MAX 90
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 82
@@ -33,6 +33,10 @@
#define IWL_SC_A_GF_A_FW_PRE "iwlwifi-sc-a0-gf-a0"
#define IWL_SC_A_GF4_A_FW_PRE "iwlwifi-sc-a0-gf4-a0"
#define IWL_SC_A_WH_A_FW_PRE "iwlwifi-sc-a0-wh-a0"
+#define IWL_SC2_A_FM_C_FW_PRE "iwlwifi-sc2-a0-fm-c0"
+#define IWL_SC2_A_WH_A_FW_PRE "iwlwifi-sc2-a0-wh-a0"
+#define IWL_SC2F_A_FM_C_FW_PRE "iwlwifi-sc2f-a0-fm-c0"
+#define IWL_SC2F_A_WH_A_FW_PRE "iwlwifi-sc2f-a0-wh-a0"
#define IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_FM_B_FW_PRE "-" __stringify(api) ".ucode"
@@ -48,6 +52,14 @@
IWL_SC_A_GF4_A_FW_PRE "-" __stringify(api) ".ucode"
#define IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(api) \
IWL_SC_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2F_A_FM_C_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(api) \
+ IWL_SC2F_A_WH_A_FW_PRE "-" __stringify(api) ".ucode"
static const struct iwl_base_params iwl_sc_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -124,6 +136,9 @@ static const struct iwl_base_params iwl_sc_base_params = {
#define IWL_DEVICE_SC \
IWL_DEVICE_BZ_COMMON, \
+ .uhb_supported = true, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
+ .num_rbds = IWL_NUM_RBDS_SC_EHT, \
.ht_params = &iwl_22000_ht_params
/*
@@ -149,10 +164,21 @@ const char iwl_sc_name[] = "Intel(R) TBD Sc device";
const struct iwl_cfg iwl_cfg_sc = {
.fw_name_mac = "sc",
- .uhb_supported = true,
IWL_DEVICE_SC,
- .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,
- .num_rbds = IWL_NUM_RBDS_SC_EHT,
+};
+
+const char iwl_sc2_name[] = "Intel(R) TBD Sc2 device";
+
+const struct iwl_cfg iwl_cfg_sc2 = {
+ .fw_name_mac = "sc2",
+ IWL_DEVICE_SC,
+};
+
+const char iwl_sc2f_name[] = "Intel(R) TBD Sc2f device";
+
+const struct iwl_cfg iwl_cfg_sc2f = {
+ .fw_name_mac = "sc2f",
+ IWL_DEVICE_SC,
};
MODULE_FIRMWARE(IWL_SC_A_FM_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
@@ -162,3 +188,7 @@ MODULE_FIRMWARE(IWL_SC_A_HR_B_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_GF_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_GF4_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SC_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2F_A_FM_C_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SC2F_A_WH_A_FW_MODULE_FIRMWARE(IWL_SC_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 5f3d5b15f727..52b008ce53bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -1570,6 +1570,10 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
}
const struct ieee80211_ops iwlagn_hw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = iwlagn_mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = iwlagn_mac_start,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index b96f30d11644..4caf2e25a297 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -4,7 +4,6 @@
* Copyright (C) 2019-2023 Intel Corporation
*/
#include <linux/uuid.h>
-#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "acpi.h"
@@ -13,68 +12,21 @@
const guid_t iwl_guid = GUID_INIT(0xF21202BF, 0x8F78, 0x4DC6,
0xA5, 0xB3, 0x1F, 0x73,
0x8E, 0x28, 0x5A, 0xDE);
-IWL_EXPORT_SYMBOL(iwl_guid);
-const guid_t iwl_rfi_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29,
- 0x81, 0x4F, 0x75, 0xE4,
- 0xDD, 0x26, 0xB5, 0xFD);
-IWL_EXPORT_SYMBOL(iwl_rfi_guid);
-
-static const struct dmi_system_id dmi_ppag_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
- },
- },
- { .ident = "GOOGLE-ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
- },
- },
- { .ident = "RAZER",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
- },
- },
- {}
+static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = {
+ [DSM_FUNC_QUERY] = sizeof(u32),
+ [DSM_FUNC_DISABLE_SRD] = sizeof(u8),
+ [DSM_FUNC_ENABLE_INDONESIA_5G2] = sizeof(u8),
+ [DSM_FUNC_ENABLE_6E] = sizeof(u32),
+ [DSM_FUNC_REGULATORY_CONFIG] = sizeof(u32),
+ /* Not supported in driver */
+ [5] = (size_t)0,
+ [DSM_FUNC_11AX_ENABLEMENT] = sizeof(u32),
+ [DSM_FUNC_ENABLE_UNII4_CHAN] = sizeof(u32),
+ [DSM_FUNC_ACTIVATE_CHANNEL] = sizeof(u32),
+ [DSM_FUNC_FORCE_DISABLE_CHANNELS] = sizeof(u32),
+ [DSM_FUNC_ENERGY_DETECTION_THRESHOLD] = sizeof(u32),
+ [DSM_FUNC_RFI_CONFIG] = sizeof(u32),
};
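
The table centralizes the expected return size of every DSM function, letting iwl_acpi_get_dsm() replace the per-width iwl_acpi_get_dsm_u8()/_u32() helpers removed below. An illustrative caller (DSM_FUNC_ENABLE_6E is a real enum value; the surrounding logic is a sketch):

/* Read one DSM function value through the unified accessor. */
u32 value = 0;
int ret = iwl_acpi_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);

if (!ret)
	IWL_DEBUG_RADIO(fwrt, "DSM 6E enablement: 0x%x\n", value);
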
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
@@ -200,46 +152,41 @@ out:
}
/*
- * Evaluate a DSM with no arguments and a u8 return value,
+ * This function receives a DSM function number, determines its expected
+ * size according to the Intel BIOS spec, and returns the value in a
+ * 32-bit field. If the expected size is smaller than 32 bits, the value
+ * is zero-extended.
*/
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value)
+int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
{
+ size_t expected_size;
+ u64 tmp;
int ret;
- u64 val;
- ret = iwl_acpi_get_dsm_integer(dev, rev, func,
- guid, &val, sizeof(u8));
+ BUILD_BUG_ON(ARRAY_SIZE(acpi_dsm_size) != DSM_FUNC_NUM_FUNCS);
- if (ret < 0)
- return ret;
+ if (WARN_ON(func >= ARRAY_SIZE(acpi_dsm_size)))
+ return -EINVAL;
- /* cast val (u64) to be u8 */
- *value = (u8)val;
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
+ expected_size = acpi_dsm_size[func];
-/*
- * Evaluate a DSM with no arguments and a u32 return value,
- */
-int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value)
-{
- int ret;
- u64 val;
-
- ret = iwl_acpi_get_dsm_integer(dev, rev, func,
- guid, &val, sizeof(u32));
+ /* Currently all ACPI DSMs are either 8-bit or 32-bit */
+ if (expected_size != sizeof(u8) && expected_size != sizeof(u32))
+ return -EOPNOTSUPP;
- if (ret < 0)
+ ret = iwl_acpi_get_dsm_integer(fwrt->dev, ACPI_DSM_REV, func,
+ &iwl_guid, &tmp, expected_size);
+ if (ret)
return ret;
- /* cast val (u64) to be u32 */
- *value = (u32)val;
+ if ((expected_size == sizeof(u8) && tmp != (u8)tmp) ||
+ (expected_size == sizeof(u32) && tmp != (u32)tmp))
+ IWL_DEBUG_RADIO(fwrt,
+ "DSM value overflows the expected size, truncating\n");
+ *value = (u32)tmp;
+
return 0;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
static union acpi_object *
iwl_acpi_get_wifi_pkg_range(struct device *dev,
@@ -307,9 +254,8 @@ iwl_acpi_get_wifi_pkg(struct device *dev,
tbl_rev);
}
-
-int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver)
+int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev, i, block_list_size, enabled;
@@ -331,22 +277,9 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
ACPI_TYPE_INTEGER) {
u32 tas_selection =
(u32)wifi_pkg->package.elements[1].integer.value;
- u16 override_iec =
- (tas_selection & ACPI_WTAS_OVERRIDE_IEC_MSK) >> ACPI_WTAS_OVERRIDE_IEC_POS;
- u16 enabled_iec = (tas_selection & ACPI_WTAS_ENABLE_IEC_MSK) >>
- ACPI_WTAS_ENABLE_IEC_POS;
- u8 usa_tas_uhb = (tas_selection & ACPI_WTAS_USA_UHB_MSK) >> ACPI_WTAS_USA_UHB_POS;
-
- enabled = tas_selection & ACPI_WTAS_ENABLED_MSK;
- if (fw_ver <= 3) {
- cmd->v3.override_tas_iec = cpu_to_le16(override_iec);
- cmd->v3.enable_tas_iec = cpu_to_le16(enabled_iec);
- } else {
- cmd->v4.usa_tas_uhb_allowed = usa_tas_uhb;
- cmd->v4.override_tas_iec = (u8)override_iec;
- cmd->v4.enable_tas_iec = (u8)enabled_iec;
- }
+ enabled = iwl_parse_tas_selection(fwrt, tas_data,
+ tas_selection);
} else if (tbl_rev == 0 &&
wifi_pkg->package.elements[1].type == ACPI_TYPE_INTEGER) {
@@ -365,22 +298,16 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n", tbl_rev);
if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
wifi_pkg->package.elements[2].integer.value >
- APCI_WTAS_BLACK_LIST_MAX) {
+ IWL_WTAS_BLACK_LIST_MAX) {
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
wifi_pkg->package.elements[2].integer.value);
ret = -EINVAL;
goto out_free;
}
block_list_size = wifi_pkg->package.elements[2].integer.value;
- cmd->v4.block_list_size = cpu_to_le32(block_list_size);
+ tas_data->block_list_size = cpu_to_le32(block_list_size);
IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", block_list_size);
- if (block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
- IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n",
- block_list_size);
- ret = -EINVAL;
- goto out_free;
- }
for (i = 0; i < block_list_size; i++) {
u32 country;
@@ -394,7 +321,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
}
country = wifi_pkg->package.elements[3 + i].integer.value;
- cmd->v4.block_list_array[i] = cpu_to_le32(country);
+ tas_data->block_list_array[i] = cpu_to_le32(country);
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
}
@@ -403,19 +330,19 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_tas);
-int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
{
union acpi_object *wifi_pkg, *data;
u32 mcc_val;
int ret, tbl_rev;
- data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDD_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WRDD_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
@@ -439,46 +366,42 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc);
-u64 iwl_acpi_get_pwr_limit(struct device *dev)
+int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit)
{
union acpi_object *data, *wifi_pkg;
- u64 dflt_pwr_limit;
- int tbl_rev;
+ int tbl_rev, ret = -EINVAL;
- data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
- if (IS_ERR(data)) {
- dflt_pwr_limit = 0;
+ *dflt_pwr_limit = 0;
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_SPLC_METHOD);
+ if (IS_ERR(data))
goto out;
- }
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev);
if (IS_ERR(wifi_pkg) || tbl_rev != 0 ||
- wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
- dflt_pwr_limit = 0;
+ wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER)
goto out_free;
- }
- dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+ *dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+ ret = 0;
out_free:
kfree(data);
out:
- return dflt_pwr_limit;
+ return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
-int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
{
union acpi_object *wifi_pkg, *data;
int ret, tbl_rev;
- data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD);
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_ECKV_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_ECKV_WIFI_DATA_SIZE,
&tbl_rev);
if (IS_ERR(wifi_pkg)) {
ret = PTR_ERR(wifi_pkg);
@@ -499,11 +422,11 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
-static int iwl_sar_set_profile(union acpi_object *table,
- struct iwl_sar_profile *profile,
- bool enabled, u8 num_chains, u8 num_sub_bands)
+static int iwl_acpi_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled, u8 num_chains,
+ u8 num_sub_bands)
{
int i, j, idx = 0;
@@ -511,8 +434,8 @@ static int iwl_sar_set_profile(union acpi_object *table,
* The table from ACPI is flat, but we store it in a
* structured array.
*/
- for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) {
- for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) {
+ for (i = 0; i < BIOS_SAR_MAX_CHAINS_PER_PROFILE; i++) {
+ for (j = 0; j < BIOS_SAR_MAX_SUB_BANDS_NUM; j++) {
/* if we don't have the values, use the default */
if (i >= num_chains || j >= num_sub_bands) {
profile->chains[i].subbands[j] = 0;
@@ -535,73 +458,7 @@ static int iwl_sar_set_profile(union acpi_object *table,
return 0;
}
-static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_subbands,
- int prof_a, int prof_b)
-{
- int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b };
- int i, j;
-
- for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) {
- struct iwl_sar_profile *prof;
-
- /* don't allow SAR to be disabled (profile 0 means disable) */
- if (profs[i] == 0)
- return -EPERM;
-
- /* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
- if (profs[i] > ACPI_SAR_PROFILE_NUM)
- return -EINVAL;
-
- /* profiles go from 1 to 4, so decrement to access the array */
- prof = &fwrt->sar_profiles[profs[i] - 1];
-
- /* if the profile is disabled, do nothing */
- if (!prof->enabled) {
- IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
- profs[i]);
- /*
- * if one of the profiles is disabled, we
- * ignore all of them and return 1 to
- * differentiate disabled from other failures.
- */
- return 1;
- }
-
- IWL_DEBUG_INFO(fwrt,
- "SAR EWRD: chain %d profile index %d\n",
- i, profs[i]);
- IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
- for (j = 0; j < n_subbands; j++) {
- per_chain[i * n_subbands + j] =
- cpu_to_le16(prof->chains[i].subbands[j]);
- IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
- j, prof->chains[i].subbands[j]);
- }
- }
-
- return 0;
-}
-
-int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b)
-{
- int i, ret = 0;
-
- for (i = 0; i < n_tables; i++) {
- ret = iwl_sar_fill_table(fwrt,
- &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0],
- n_subbands, prof_a, prof_b);
- if (ret)
- break;
- }
-
- return ret;
-}
-IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
-
-int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *table, *data;
int ret, tbl_rev;
@@ -618,7 +475,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 2) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -634,7 +491,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 1) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -650,7 +507,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 0) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -680,16 +537,15 @@ read_table:
/* The profile from WRDS is officially profile 1, but goes
* into sar_profiles[0] (because we don't have a profile 0).
*/
- ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0],
- flags & IWL_SAR_ENABLE_MSK,
- num_chains, num_sub_bands);
+ ret = iwl_acpi_sar_set_profile(table, &fwrt->sar_profiles[0],
+ flags & IWL_SAR_ENABLE_MSK,
+ num_chains, num_sub_bands);
out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_wrds_table);
-int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
bool enabled;
@@ -707,7 +563,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 2) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -723,7 +579,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 1) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -739,7 +595,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
&tbl_rev);
if (!IS_ERR(wifi_pkg)) {
if (tbl_rev != 0) {
- ret = PTR_ERR(wifi_pkg);
+ ret = -EINVAL;
goto out_free;
}
@@ -767,7 +623,7 @@ read_table:
* from index 1, so the maximum value allowed here is
* ACPI_SAR_PROFILES_NUM - 1.
*/
- if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ if (n_profiles >= BIOS_SAR_MAX_PROFILE_NUM) {
ret = -EINVAL;
goto out_free;
}
@@ -776,13 +632,15 @@ read_table:
pos = 3;
for (i = 0; i < n_profiles; i++) {
+ union acpi_object *table = &wifi_pkg->package.elements[pos];
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
* have profile 0). So in the array we start from 1.
*/
- ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
- &fwrt->sar_profiles[i + 1], enabled,
- num_chains, num_sub_bands);
+ ret = iwl_acpi_sar_set_profile(table,
+ &fwrt->sar_profiles[i + 1],
+ enabled, num_chains,
+ num_sub_bands);
if (ret < 0)
break;
@@ -794,9 +652,8 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table);
-int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
int i, j, k, ret, tbl_rev;
@@ -811,7 +668,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
.revisions = BIT(3),
.bands = ACPI_GEO_NUM_BANDS_REV2,
.profiles = ACPI_NUM_GEO_PROFILES_REV3,
- .min_profiles = 3,
+ .min_profiles = BIOS_GEO_MIN_PROFILE_NUM,
},
{
.revisions = BIT(2),
@@ -897,7 +754,7 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
read_table:
fwrt->geo_rev = tbl_rev;
for (i = 0; i < num_profiles; i++) {
- for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) {
+ for (j = 0; j < BIOS_GEO_MAX_NUM_BANDS; j++) {
union acpi_object *entry;
/*
@@ -921,7 +778,7 @@ read_table:
entry->integer.value;
}
- for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) {
+ for (k = 0; k < BIOS_GEO_NUM_CHAINS; k++) {
/* same here as above */
if (j >= num_bands) {
fwrt->geo_profiles[i].bands[j].chains[k] =
@@ -949,151 +806,26 @@ out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table);
-
-bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
-{
- /*
- * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
- * earlier firmware versions. Unfortunately, we don't have a
- * TLV API flag to rely on, so rely on the major version which
- * is in the first byte of ucode_ver. This was implemented
- * initially on version 38 and then backported to 17. It was
- * also backported to 29, but only for 7265D devices. The
- * intention was to have it in 36 as well, but not all 8000
- * family got this feature enabled. The 8000 family is the
- * only one using version 36, so skip this version entirely.
- */
- return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
- fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
-}
-IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
-
-int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset *table,
- u32 n_bands, u32 n_profiles)
-{
- int i, j;
-
- if (!fwrt->geo_enabled)
- return -ENODATA;
-
- if (!iwl_sar_geo_support(fwrt))
- return -EOPNOTSUPP;
-
- for (i = 0; i < n_profiles; i++) {
- for (j = 0; j < n_bands; j++) {
- struct iwl_per_chain_offset *chain =
- &table[i * n_bands + j];
-
- chain->max_tx_power =
- cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
- chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0];
- chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1];
- IWL_DEBUG_RADIO(fwrt,
- "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
- i, j,
- fwrt->geo_profiles[i].bands[j].chains[0],
- fwrt->geo_profiles[i].bands[j].chains[1],
- fwrt->geo_profiles[i].bands[j].max);
- }
- }
-
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
-
-__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
-{
- int ret;
- u8 value;
- u32 val;
- __le32 config_bitmap = 0;
-
- /*
- * Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'.
- * Setting config_bitmap Indonesia bit is valid only for HR/JF.
- */
- switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) {
- case IWL_CFG_RF_TYPE_HR1:
- case IWL_CFG_RF_TYPE_HR2:
- case IWL_CFG_RF_TYPE_JF1:
- case IWL_CFG_RF_TYPE_JF2:
- ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
- DSM_FUNC_ENABLE_INDONESIA_5G2,
- &iwl_guid, &value);
-
- if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
- break;
- default:
- break;
- }
-
- /*
- ** Evaluate func 'DSM_FUNC_DISABLE_SRD'
- */
- ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
- DSM_FUNC_DISABLE_SRD,
- &iwl_guid, &value);
- if (!ret) {
- if (value == DSM_VALUE_SRD_PASSIVE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
- else if (value == DSM_VALUE_SRD_DISABLE)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
- }
-
- if (fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
- /*
- ** Evaluate func 'DSM_FUNC_REGULATORY_CONFIG'
- */
- ret = iwl_acpi_get_dsm_u32(fwrt->dev, 0,
- DSM_FUNC_REGULATORY_CONFIG,
- &iwl_guid, &val);
- /*
- * China 2022 enable if the BIOS object does not exist or
- * if it is enabled in BIOS.
- */
- if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
- config_bitmap |=
- cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
- }
-
- return config_bitmap;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_lari_config_bitmap);
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data, *flags;
int i, j, ret, tbl_rev, num_sub_bands = 0;
int idx = 2;
- u8 cmd_ver;
-
- fwrt->ppag_flags = 0;
- fwrt->ppag_table_valid = false;
data = iwl_acpi_get_object(fwrt->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- /* try to read ppag table rev 2 or 1 (both have the same data size) */
+ /* try to read ppag table rev 3, 2 or 1 (all have the same data size) */
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev == 1 || tbl_rev == 2) {
+ if (tbl_rev >= 1 && tbl_rev <= 3) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
IWL_DEBUG_RADIO(fwrt,
- "Reading PPAG table v2 (tbl_rev=%d)\n",
+ "Reading PPAG table (tbl_rev=%d)\n",
tbl_rev);
goto read_table;
} else {
@@ -1116,6 +848,9 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
goto read_table;
}
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+
read_table:
fwrt->ppag_ver = tbl_rev;
flags = &wifi_pkg->package.elements[1];
@@ -1125,19 +860,8 @@ read_table:
goto out_free;
}
- fwrt->ppag_flags = flags->integer.value & ACPI_PPAG_MASK;
- cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
- WIDE_ID(PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD),
- IWL_FW_CMD_VER_UNKNOWN);
- if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN) {
- ret = -EINVAL;
- goto out_free;
- }
- if (!fwrt->ppag_flags && cmd_ver <= 3) {
- ret = 0;
- goto out_free;
- }
+ fwrt->ppag_flags = iwl_bios_get_ppag_flags(flags->integer.value,
+ fwrt->ppag_ver);
/*
* read, verify gain values and save them into the PPAG table.
@@ -1155,132 +879,15 @@ read_table:
}
fwrt->ppag_chains[i].subbands[j] = ent->integer.value;
- /* from ver 4 the fw deals with out of range values */
- if (cmd_ver >= 4)
- continue;
- if ((j == 0 &&
- (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_LB ||
- fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_LB)) ||
- (j != 0 &&
- (fwrt->ppag_chains[i].subbands[j] > ACPI_PPAG_MAX_HB ||
- fwrt->ppag_chains[i].subbands[j] < ACPI_PPAG_MIN_HB))) {
- ret = -EINVAL;
- goto out_free;
- }
}
}
- fwrt->ppag_table_valid = true;
ret = 0;
out_free:
kfree(data);
return ret;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_ppag_table);
-
-int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
- int *cmd_size)
-{
- u8 cmd_ver;
- int i, j, num_sub_bands;
- s8 *gain;
-
- /* many firmware images for JF lie about this */
- if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
- CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
- return -EOPNOTSUPP;
-
- if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
- IWL_DEBUG_RADIO(fwrt,
- "PPAG capability not supported by FW, command not sent.\n");
- return -EINVAL;
- }
-
- cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
- WIDE_ID(PHY_OPS_GROUP,
- PER_PLATFORM_ANT_GAIN_CMD),
- IWL_FW_CMD_VER_UNKNOWN);
- if (!fwrt->ppag_table_valid || (cmd_ver <= 3 && !fwrt->ppag_flags)) {
- IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
- return -EINVAL;
- }
-
- /* The 'flags' field is the same in v1 and in v2 so we can just
- * use v1 to access it.
- */
- cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
-
- IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
- if (cmd_ver == 1) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V1;
- gain = cmd->v1.gain[0];
- *cmd_size = sizeof(cmd->v1);
- if (fwrt->ppag_ver == 1 || fwrt->ppag_ver == 2) {
- /* in this case FW supports revision 0 */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table rev is %d, send truncated table\n",
- fwrt->ppag_ver);
- }
- } else if (cmd_ver >= 2 && cmd_ver <= 4) {
- num_sub_bands = IWL_NUM_SUB_BANDS_V2;
- gain = cmd->v2.gain[0];
- *cmd_size = sizeof(cmd->v2);
- if (fwrt->ppag_ver == 0) {
- /* in this case FW supports revisions 1 or 2 */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table rev is 0, send padded table\n");
- }
- } else {
- IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
- return -EINVAL;
- }
-
- /* ppag mode */
- IWL_DEBUG_RADIO(fwrt,
- "PPAG MODE bits were read from bios: %d\n",
- cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK));
- if ((cmd_ver == 1 && !fw_has_capa(&fwrt->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
- (cmd_ver == 2 && fwrt->ppag_ver == 2)) {
- cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
- IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
- } else {
- IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
- }
-
- IWL_DEBUG_RADIO(fwrt,
- "PPAG MODE bits going to be sent: %d\n",
- cmd->v1.flags & cpu_to_le32(ACPI_PPAG_MASK));
-
- for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
- for (j = 0; j < num_sub_bands; j++) {
- gain[i * num_sub_bands + j] =
- fwrt->ppag_chains[i].subbands[j];
- IWL_DEBUG_RADIO(fwrt,
- "PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, gain[i * num_sub_bands + j]);
- }
- }
-
- return 0;
-}
-IWL_EXPORT_SYMBOL(iwl_read_ppag_table);
-
-bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
-{
-
- if (!dmi_check_system(dmi_ppag_approved_list)) {
- IWL_DEBUG_RADIO(fwrt,
- "System vendor '%s' is not in the approved list, disabling PPAG.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- fwrt->ppag_flags = 0;
- return false;
- }
-
- return true;
-}
-IWL_EXPORT_SYMBOL(iwl_acpi_is_ppag_approved);
void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
struct iwl_phy_specific_cfg *filters)
@@ -1293,7 +900,6 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
if (IS_ERR(data))
return;
- /* try to read wtas table revision 1 or revision 0*/
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WPFC_WIFI_DATA_SIZE,
&tbl_rev);
@@ -1303,13 +909,14 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
if (tbl_rev != 0)
goto out_free;
- BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != ACPI_WPFC_WIFI_DATA_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) !=
+ ACPI_WPFC_WIFI_DATA_SIZE - 1);
for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) {
- if (wifi_pkg->package.elements[i].type != ACPI_TYPE_INTEGER)
- return;
+ if (wifi_pkg->package.elements[i + 1].type != ACPI_TYPE_INTEGER)
+ goto out_free;
tmp.filter_cfg_chains[i] =
- cpu_to_le32(wifi_pkg->package.elements[i].integer.value);
+ cpu_to_le32(wifi_pkg->package.elements[i + 1].integer.value);
}
IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n");
@@ -1318,3 +925,38 @@ out_free:
kfree(data);
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_phy_filters);
+
+void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt)
+{
+ union acpi_object *wifi_pkg, *data;
+ int tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_GLAI_METHOD);
+ if (IS_ERR(data))
+ return;
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_GLAI_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg))
+ goto out_free;
+
+ if (tbl_rev != 0) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid GLAI revision: %d\n", tbl_rev);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ wifi_pkg->package.elements[1].integer.value > ACPI_GLAI_MAX_STATUS)
+ goto out_free;
+
+ fwrt->uefi_tables_lock_status =
+ wifi_pkg->package.elements[1].integer.value;
+
+ IWL_DEBUG_RADIO(fwrt,
+ "Loaded UEFI WIFI GUID lock status: %d from ACPI\n",
+ fwrt->uefi_tables_lock_status);
+out_free:
+ kfree(data);
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_guid_lock_status);
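
The central refactor in this file collapses the per-width iwl_acpi_get_dsm_u8()/iwl_acpi_get_dsm_u32() helpers into one getter driven by the acpi_dsm_size[] table. A minimal userspace C sketch of the mechanism, assuming a fake read_dsm_integer() in place of iwl_acpi_get_dsm_integer() and a trimmed-down function list:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum dsm_func { FUNC_QUERY, FUNC_DISABLE_SRD, FUNC_NUM };

/* expected per-function result width, indexed by function number */
static const size_t dsm_size[FUNC_NUM] = {
	[FUNC_QUERY] = sizeof(uint32_t),
	[FUNC_DISABLE_SRD] = sizeof(uint8_t),
};

/* stand-in for iwl_acpi_get_dsm_integer(): returns a raw 64-bit value */
static int read_dsm_integer(enum dsm_func func, uint64_t *out)
{
	*out = (func == FUNC_DISABLE_SRD) ? 0x1FF : 2; /* fake BIOS data */
	return 0;
}

static int get_dsm(enum dsm_func func, uint32_t *value)
{
	uint64_t tmp;
	size_t expected;
	int ret;

	if (func >= FUNC_NUM)
		return -EINVAL;

	expected = dsm_size[func];
	if (expected != sizeof(uint8_t) && expected != sizeof(uint32_t))
		return -EOPNOTSUPP;

	ret = read_dsm_integer(func, &tmp);
	if (ret)
		return ret;

	/* warn-and-truncate, mirroring the overflow check in the patch */
	if ((expected == sizeof(uint8_t) && tmp != (uint8_t)tmp) ||
	    (expected == sizeof(uint32_t) && tmp != (uint32_t)tmp))
		fprintf(stderr, "DSM value overflows expected size\n");

	*value = (uint32_t)tmp;
	return 0;
}

int main(void)
{
	uint32_t v;

	/* 0x1FF does not fit in u8: warns, then stores the u32 cast (511) */
	if (!get_dsm(FUNC_DISABLE_SRD, &v))
		printf("DISABLE_SRD = %u\n", v);
	return 0;
}
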
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index e9277f6f3582..1d32b82f73db 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -7,6 +7,7 @@
#define __iwl_fw_acpi__
#include <linux/acpi.h>
+#include "fw/regulatory.h"
#include "fw/api/commands.h"
#include "fw/api/power.h"
#include "fw/api/phy.h"
@@ -25,6 +26,7 @@
#define ACPI_PPAG_METHOD "PPAG"
#define ACPI_WTAS_METHOD "WTAS"
#define ACPI_WPFC_METHOD "WPFC"
+#define ACPI_GLAI_METHOD "GLAI"
#define ACPI_WIFI_DOMAIN (0x07)
@@ -56,187 +58,90 @@
#define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \
ACPI_SAR_NUM_CHAINS_REV2 * \
ACPI_SAR_NUM_SUB_BANDS_REV2 + 3)
-#define ACPI_WPFC_WIFI_DATA_SIZE 4 /* 4 filter config words */
+#define ACPI_WPFC_WIFI_DATA_SIZE 5 /* domain and 4 filter config words */
/* revision 0 and 1 are identical, except for the semantics in the FW */
#define ACPI_GEO_NUM_BANDS_REV0 2
#define ACPI_GEO_NUM_BANDS_REV2 3
-#define ACPI_GEO_NUM_CHAINS 2
#define ACPI_WRDD_WIFI_DATA_SIZE 2
#define ACPI_SPLC_WIFI_DATA_SIZE 2
#define ACPI_ECKV_WIFI_DATA_SIZE 2
-
+/*
+ * One element for domain type,
+ * and one for the status
+ */
+#define ACPI_GLAI_WIFI_DATA_SIZE 2
+#define ACPI_GLAI_MAX_STATUS 2
/*
 * TAS size: 1 element for type,
* 1 element for enabled field,
* 1 element for block list size,
* 16 elements for block list array
*/
-#define APCI_WTAS_BLACK_LIST_MAX 16
-#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
-#define ACPI_WTAS_ENABLED_MSK 0x1
-#define ACPI_WTAS_OVERRIDE_IEC_MSK 0x2
-#define ACPI_WTAS_ENABLE_IEC_MSK 0x4
-#define ACPI_WTAS_OVERRIDE_IEC_POS 0x1
-#define ACPI_WTAS_ENABLE_IEC_POS 0x2
-#define ACPI_WTAS_USA_UHB_MSK BIT(16)
-#define ACPI_WTAS_USA_UHB_POS 16
-
+#define ACPI_WTAS_WIFI_DATA_SIZE (3 + IWL_WTAS_BLACK_LIST_MAX)
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V1) + 2)
#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
IWL_NUM_SUB_BANDS_V2) + 2)
-/* PPAG gain value bounds in 1/8 dBm */
-#define ACPI_PPAG_MIN_LB -16
-#define ACPI_PPAG_MAX_LB 24
-#define ACPI_PPAG_MIN_HB -16
-#define ACPI_PPAG_MAX_HB 40
-#define ACPI_PPAG_MASK 3
-#define IWL_PPAG_ETSI_MASK BIT(0)
-
#define IWL_SAR_ENABLE_MSK BIT(0)
#define IWL_REDUCE_POWER_FLAGS_POS 1
-/*
- * The profile for revision 2 is a superset of revision 1, which is in
- * turn a superset of revision 0. So we can store all revisions
- * inside revision 2, which is what we represent here.
- */
-struct iwl_sar_profile_chain {
- u8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
-};
-
-struct iwl_sar_profile {
- bool enabled;
- struct iwl_sar_profile_chain chains[ACPI_SAR_NUM_CHAINS_REV2];
-};
-
-/* Same thing as with SAR, all revisions fit in revision 2 */
-struct iwl_geo_profile_band {
- u8 max;
- u8 chains[ACPI_GEO_NUM_CHAINS];
-};
-
-struct iwl_geo_profile {
- struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2];
-};
-
-/* Same thing as with SAR, all revisions fit in revision 2 */
-struct iwl_ppag_chain {
- s8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
-};
-
-enum iwl_dsm_funcs_rev_0 {
- DSM_FUNC_QUERY = 0,
- DSM_FUNC_DISABLE_SRD = 1,
- DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
- DSM_FUNC_ENABLE_6E = 3,
- DSM_FUNC_REGULATORY_CONFIG = 4,
- DSM_FUNC_11AX_ENABLEMENT = 6,
- DSM_FUNC_ENABLE_UNII4_CHAN = 7,
- DSM_FUNC_ACTIVATE_CHANNEL = 8,
- DSM_FUNC_FORCE_DISABLE_CHANNELS = 9,
- DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10,
-};
-
-enum iwl_dsm_values_srd {
- DSM_VALUE_SRD_ACTIVE,
- DSM_VALUE_SRD_PASSIVE,
- DSM_VALUE_SRD_DISABLE,
- DSM_VALUE_SRD_MAX
-};
-
-enum iwl_dsm_values_indonesia {
- DSM_VALUE_INDONESIA_DISABLE,
- DSM_VALUE_INDONESIA_ENABLE,
- DSM_VALUE_INDONESIA_RESERVED,
- DSM_VALUE_INDONESIA_MAX
-};
-
-/* DSM RFI uses a different GUID, so need separate definitions */
-
-#define DSM_RFI_FUNC_ENABLE 3
-
-enum iwl_dsm_values_rfi {
- DSM_VALUE_RFI_ENABLE,
- DSM_VALUE_RFI_DISABLE,
- DSM_VALUE_RFI_MAX
-};
-
-enum iwl_dsm_masks_reg {
- DSM_MASK_CHINA_22_REG = BIT(2)
-};
+/* The indicator of whether the UEFI WIFI GUID tables are locked is read from ACPI */
+#define UEFI_WIFI_GUID_UNLOCKED 0
+
+#define ACPI_DSM_REV 0
#ifdef CONFIG_ACPI
struct iwl_fw_runtime;
extern const guid_t iwl_guid;
-extern const guid_t iwl_rfi_guid;
-
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value);
-
-int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value);
/**
* iwl_acpi_get_mcc - read MCC from ACPI, if available
*
- * @dev: the struct device
+ * @fwrt: the fw runtime struct
* @mcc: output buffer (3 bytes) that will get the MCC
*
* This function tries to read the current MCC from ACPI if available.
*/
-int iwl_acpi_get_mcc(struct device *dev, char *mcc);
+int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
-u64 iwl_acpi_get_pwr_limit(struct device *dev);
+int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt, u64 *dflt_pwr_limit);
/*
* iwl_acpi_get_eckv - read external clock validation from ACPI, if available
*
- * @dev: the struct device
+ * @fwrt: the fw runtime struct
* @extl_clk: output var (2 bytes) that will get the clk indication.
*
* This function tries to read the external clock indication
* from ACPI if available.
*/
-int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk);
-
-int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b);
+int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk);
-int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt);
-int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt);
-int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt);
-bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
-
-int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset *table,
- u32 n_bands, u32 n_profiles);
-
-int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver);
-
-__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt);
-int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *cmd,
- int *cmd_size);
-
-bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt);
-
void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
struct iwl_phy_specific_cfg *filters);
+void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt);
+
+int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value);
+
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
@@ -245,92 +150,61 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
return ERR_PTR(-ENOENT);
}
-static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
- const guid_t *guid, u8 *value)
-{
- return -ENOENT;
-}
-
-static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
- const guid_t *guid, u32 *value)
-{
- return -ENOENT;
-}
-
-static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+static inline int iwl_acpi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
{
return -ENOENT;
}
-static inline u64 iwl_acpi_get_pwr_limit(struct device *dev)
+static inline int iwl_acpi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
{
+ *dflt_pwr_limit = 0;
return 0;
}
-static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+static inline int iwl_acpi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
{
return -ENOENT;
}
-static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 *per_chain, u32 n_tables, u32 n_subbands,
- int prof_a, int prof_b)
+static inline int iwl_acpi_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
+static inline int iwl_acpi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
-{
- return -ENOENT;
-}
-
-static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
+static inline int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
return 1;
}
-static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
-{
- return false;
-}
-
-static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
- union iwl_tas_config_cmd *cmd, int fw_ver)
+static inline int iwl_acpi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data)
{
return -ENOENT;
}
-static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
-{
- return 0;
-}
-
static inline int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt)
{
return -ENOENT;
}
-static inline int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt,
- union iwl_ppag_table_cmd *cmd, int *cmd_size)
-{
- return -ENOENT;
-}
+/* macro since the second argument doesn't always exist */
+#define iwl_acpi_get_phy_filters(fwrt, filters) do { } while (0)
-static inline bool iwl_acpi_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+static inline void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt)
{
- return false;
}
-static inline void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
- struct iwl_phy_specific_cfg *filters)
+static inline int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
{
+ return -ENOENT;
}
-
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
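
The #else branch above follows the usual kernel pattern for optional subsystems: with CONFIG_ACPI set, the real declarations are exposed; without it, static inline stubs keep every caller compiling and report -ENOENT, as if the ACPI object were simply absent. A minimal sketch of the same pattern with a hypothetical config symbol:

#include <errno.h>
#include <stdio.h>

/* #define CONFIG_MYFEATURE 1 */	/* toggle to model CONFIG_ACPI=y */

#ifdef CONFIG_MYFEATURE
int myfeature_get_value(unsigned int *value);	/* real code elsewhere */
#else
/* stub: callers compile either way and just see "no such object" */
static inline int myfeature_get_value(unsigned int *value)
{
	(void)value;
	return -ENOENT;
}
#endif

int main(void)
{
	unsigned int v = 0;

	printf("ret=%d\n", myfeature_get_value(&v));	/* -2 when stubbed */
	return 0;
}
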
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index 3e81e9369224..bc27e15488f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
+ * Copyright (C) 2023 Intel Corporation
* Copyright (C) 2013-2014, 2018-2019 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
@@ -170,7 +171,11 @@ enum iwl_bt_ci_compliance {
* @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
* @ttc_status: is TTC enabled - one bit per PHY
* @rrc_status: is RRC enabled - one bit per PHY
- * @reserved: reserved
+ * The following fields are only for version 5, and are reserved in version 4:
+ * @wifi_loss_low_rssi: The predicted lost WiFi rate (% of air time that BT is
+ * utilizing) when the RSSI is low (<= -65 dBm)
+ * @wifi_loss_mid_high_rssi: The predicted lost WiFi rate (% of air time that
+ * BT is utilizing) when the RSSI is mid/high (>= -65 dBm)
*/
struct iwl_bt_coex_profile_notif {
__le32 mbox_msg[4];
@@ -182,7 +187,10 @@ struct iwl_bt_coex_profile_notif {
__le32 bt_activity_grading;
u8 ttc_status;
u8 rrc_status;
- __le16 reserved;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
+ u8 wifi_loss_low_rssi;
+ u8 wifi_loss_mid_high_rssi;
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4
+ * BT_COEX_PROFILE_NTFY_API_S_VER_5
+ */
#endif /* __iwl_fw_api_coex_h__ */
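
Version 5 of this notification reuses the two bytes of the v4 reserved field for the new wifi_loss values, so the wire size is unchanged and one struct serves both versions. A small C sketch of how a receiver might gate on the notification version (hypothetical names, not the driver's parsing code):

#include <stdint.h>
#include <stdio.h>

/* the last two bytes are reserved in v4 and meaningful from v5 on */
struct coex_notif {
	uint8_t ttc_status;
	uint8_t rrc_status;
	uint8_t wifi_loss_low_rssi;		/* reserved in v4 */
	uint8_t wifi_loss_mid_high_rssi;	/* reserved in v4 */
} __attribute__((packed));

static void handle_notif(const struct coex_notif *n, int notif_ver)
{
	/* only trust the new bytes when the firmware says it fills them */
	if (notif_ver >= 5)
		printf("predicted wifi loss: low=%u%% mid/high=%u%%\n",
		       n->wifi_loss_low_rssi, n->wifi_loss_mid_high_rssi);
}

int main(void)
{
	struct coex_notif n = {
		.wifi_loss_low_rssi = 10,
		.wifi_loss_mid_high_rssi = 3,
	};

	handle_notif(&n, 5);
	return 0;
}
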
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index ea99d41040d2..d2a74beed3a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -324,7 +324,7 @@ struct iwl_wowlan_patterns_cmd {
u8 n_patterns;
/**
- * @n_patterns: sta_id
+ * @sta_id: sta_id
*/
u8 sta_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 751b596ea1a5..0f7903c5a4df 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -101,7 +101,7 @@ enum iwl_data_path_subcmd_ids {
RX_NO_DATA_NOTIF = 0xF5,
/**
- * @THERMAL_DUAL_CHAIN_DISABLE_REQ: firmware request for SMPS mode,
+ * @THERMAL_DUAL_CHAIN_REQUEST: firmware request for SMPS mode,
* &struct iwl_thermal_dual_chain_request
*/
THERMAL_DUAL_CHAIN_REQUEST = 0xF6,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 394747deb269..47c914de2992 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@@ -319,7 +319,7 @@ struct iwl_fw_ini_conf_set_tlv {
* @IWL_FW_INI_CONFIG_SET_TYPE_CSR: for CSR configuration
* @IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: for DBGC_DRAM_ADDR configuration
* @IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: for PERIPH SCRATCH HWM configuration
- * @IWL_FW_INI_ALLOCATION_NUM: max number of configuration supported
+ * @IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM: max number of configuration supported
*/
enum iwl_fw_ini_config_set_type {
@@ -360,6 +360,7 @@ enum iwl_fw_ini_allocation_id {
* @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location
* @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
* @IWL_FW_INI_LOCATION_NPK_PATH: NPK location
+ * @IWL_FW_INI_LOCATION_NUM: number of valid locations
*/
enum iwl_fw_ini_buffer_location {
IWL_FW_INI_LOCATION_INVALID,
@@ -439,6 +440,7 @@ enum iwl_fw_ini_region_device_memory_subtype {
* Hard coded time points in which the driver can send hcmd or perform dump
* collection
*
+ * @IWL_FW_INI_TIME_POINT_INVALID: invalid timepoint
* @IWL_FW_INI_TIME_POINT_EARLY: pre loading the FW
* @IWL_FW_INI_TIME_POINT_AFTER_ALIVE: first cmd from host after alive notif
* @IWL_FW_INI_TIME_POINT_POST_INIT: last cmd in series of init sequence
@@ -553,7 +555,7 @@ enum iwl_fw_ini_dump_policy {
* enum iwl_fw_ini_dump_type - Determines dump type based on size defined by FW.
*
* @IWL_FW_INI_DUMP_BRIEF : only dump the most important regions
- * @IWL_FW_INI_DEBUG_MEDIUM: dump more regions than "brief", but not all regions
+ * @IWL_FW_INI_DUMP_MEDIUM: dump more regions than "brief", but not all regions
* @IWL_FW_INI_DUMP_VERBOSE : dump all regions
*/
enum iwl_fw_ini_dump_type {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 798731ecbefd..b31ae6889bd0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -394,7 +394,7 @@ struct iwl_buf_alloc_cmd {
*
* @first_word: magic word value
* @second_word: magic word value
- * @framfrags: DRAM fragmentaion detail
+ * @dram_frags: DRAM fragmentation detail
*/
struct iwl_dram_info {
__le32 first_word;
@@ -537,7 +537,7 @@ enum iwl_fw_dbg_config_cmd_type {
}; /* LDBG_CFG_CMD_TYPE_API_E_VER_1 */
/* this token disables debug asserts in the firmware */
-#define IWL_FW_DBG_CONFIG_TOKEN 0x00011301
+#define IWL_FW_DBG_CONFIG_TOKEN 0x00010001
/**
* struct iwl_fw_dbg_config_cmd - configure FW debug
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index b044990c7b87..25530a29317e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -630,6 +630,7 @@ enum iwl_location_frame_format {
* @IWL_LOCATION_BW_20MHZ: 20MHz
* @IWL_LOCATION_BW_40MHZ: 40MHz
* @IWL_LOCATION_BW_80MHZ: 80MHz
+ * @IWL_LOCATION_BW_160MHZ: 160MHz
*/
enum iwl_location_bw {
IWL_LOCATION_BW_20MHZ,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index f15e6d64c298..c6d1f5644638 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -242,9 +242,9 @@ struct iwl_mac_low_latency_cmd {
* @esr_transition_timeout: the timeout required by the AP for the
* eSR transition.
* Available only from version 2 of the command.
- * This values comes from the EMLSR transition delay in the EML
+ * This value comes from the EMLSR transition delay in the EML
* Capabilities subfield.
- * @medium_sync_delay: the value as it appeasr in P802.11be_D2.2 Figure 9-1002j.
+ * @medium_sync_delay: the value as it appears in P802.11be_D2.2 Figure 9-1002j.
* @assoc_id: unique ID assigned by the AP during association
* @reserved1: alignment
* @data_policy: see &enum iwl_mac_data_policy
@@ -317,7 +317,6 @@ enum iwl_mac_config_filter_flags {
* If the NIC is not ACK_ENABLED it may use the EOF-bit in first non-0
* len delim to determine if AGG or single.
* @client: client mac data
- * @go_ibss: mac data for go or ibss
* @p2p_dev: mac data for p2p device
*/
struct iwl_mac_config_cmd {
@@ -374,7 +373,7 @@ struct iwl_mac_config_cmd {
* iwl_link_ctx_cfg_cmd::bss_color_disable
* @LINK_CONTEXT_MODIFY_EHT_PARAMS: covers iwl_link_ctx_cfg_cmd::puncture_mask.
* This flag can be set only if the MAC that this link relates to has
- * eht_support set to true.
+ * eht_support set to true. No longer used since _VER_3 of this command.
* @LINK_CONTEXT_MODIFY_ALL: set all above flags
*/
enum iwl_link_ctx_modify_flags {
@@ -447,6 +446,7 @@ enum iwl_link_ctx_flags {
* @listen_lmac: indicates whether the link should be allocated on the Listen
* Lmac or on the Main Lmac. Cannot be changed on an active Link.
* Relevant only for eSR.
+ * @reserved1: in version 2, listen_lmac became reserved
* @cck_rates: basic rates available for CCK
* @ofdm_rates: basic rates available for OFDM
* @cck_short_preamble: 1 for enabling short preamble, 0 otherwise
@@ -462,7 +462,7 @@ enum iwl_link_ctx_flags {
* @bi: beacon interval in TU, applicable only when associated
* @dtim_interval: DTIM interval in TU.
* Relevant only for GO, otherwise this is offloaded.
- * @puncture_mask: puncture mask for EHT
+ * @puncture_mask: puncture mask for EHT (removed in VER_3)
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @flags: a combination from &enum iwl_link_ctx_flags
* @flags_mask: what of %flags have changed. Also &enum iwl_link_ctx_flags
@@ -472,10 +472,10 @@ enum iwl_link_ctx_flags {
* @bssid_index: index of the associated VAP
* @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
* @spec_link_id: link_id as the AP knows it
- * @reserved: alignment
+ * @reserved2: alignment
* @ibss_bssid_addr: bssid for ibss
* @reserved_for_ibss_bssid_addr: reserved
- * @reserved1: reserved for future use
+ * @reserved3: reserved for future use
*/
struct iwl_link_config_cmd {
__le32 action;
@@ -486,7 +486,10 @@ struct iwl_link_config_cmd {
__le16 reserved_for_local_link_addr;
__le32 modify_mask;
__le32 active;
- __le32 listen_lmac;
+ union {
+ __le32 listen_lmac;
+ __le32 reserved1;
+ };
__le32 cck_rates;
__le32 ofdm_rates;
__le32 cck_short_preamble;
@@ -502,7 +505,7 @@ struct iwl_link_config_cmd {
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
__le32 bi;
__le32 dtim_interval;
- __le16 puncture_mask;
+ __le16 puncture_mask; /* removed in _VER_3 */
__le16 frame_time_rts_th;
__le32 flags;
__le32 flags_mask;
@@ -512,11 +515,11 @@ struct iwl_link_config_cmd {
u8 bssid_index;
u8 bss_color;
u8 spec_link_id;
- u8 reserved;
+ u8 reserved2;
u8 ibss_bssid_addr[6];
__le16 reserved_for_ibss_bssid_addr;
- __le32 reserved1[8];
-} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1 */
+ __le32 reserved3[8];
+} __packed; /* LINK_CONTEXT_CONFIG_CMD_API_S_VER_1, _VER_2, _VER_3 */
/* Currently FW supports link ids in the range 0-3 and can have
* at most two active links for each vif.
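
The listen_lmac/reserved1 union above is a common trick for wire structs that serve several command versions: the layout never changes, only the meaning of one slot does, and the sender zeroes slots the target version treats as reserved. A compact sketch of the idea (hypothetical struct, not the full command):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct link_cmd {
	uint32_t action;
	union {
		uint32_t listen_lmac;	/* meaningful in _VER_1 only */
		uint32_t reserved1;	/* _VER_2 and later */
	};
	uint16_t puncture_mask;		/* ignored by firmware from _VER_3 */
	uint16_t frame_time_rts_th;
} __attribute__((packed));

static void fill_link_cmd(struct link_cmd *cmd, int cmd_ver,
			  uint32_t listen_lmac)
{
	memset(cmd, 0, sizeof(*cmd));
	if (cmd_ver == 1)
		cmd->listen_lmac = listen_lmac;
	/* for v2+ the slot stays zero, which is what "reserved" requires */
}

int main(void)
{
	struct link_cmd cmd;

	fill_link_cmd(&cmd, 2, 1);
	printf("reserved1=%u size=%zu\n", cmd.reserved1, sizeof(cmd));
	return 0;
}
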
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 55882190251c..545826973a80 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_mac_h__
@@ -431,8 +431,8 @@ enum iwl_he_pkt_ext_constellations {
};
#define MAX_HE_SUPP_NSS 2
-#define MAX_CHANNEL_BW_INDX_API_D_VER_2 4
-#define MAX_CHANNEL_BW_INDX_API_D_VER_3 5
+#define MAX_CHANNEL_BW_INDX_API_D_VER_1 4
+#define MAX_CHANNEL_BW_INDX_API_D_VER_2 5
/**
* struct iwl_he_pkt_ext_v1 - QAM thresholds
@@ -455,7 +455,7 @@ enum iwl_he_pkt_ext_constellations {
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext_v1 {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_1][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_1 */
/**
@@ -480,7 +480,7 @@ struct iwl_he_pkt_ext_v1 {
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext_v2 {
- u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_3][2];
+ u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_CHANNEL_BW_INDX_API_D_VER_2][2];
} __packed; /* PKT_EXT_DOT11AX_API_S_VER_2 */
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 7ec959244ffc..58034dfa7e70 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_nvm_reg_h__
#define __iwl_fw_api_nvm_reg_h__
+#include "fw/regulatory.h"
/**
* enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands
*/
@@ -438,36 +439,30 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
-#define IWL_TAS_BLOCK_LIST_MAX 16
/**
- * struct iwl_tas_config_cmd_v2 - configures the TAS
+ * struct iwl_tas_config_cmd_common - configures the TAS.
+ * This is also the v2 structure.
* @block_list_size: size of relevant field in block_list_array
* @block_list_array: list of countries where TAS must be disabled
*/
-struct iwl_tas_config_cmd_v2 {
+struct iwl_tas_config_cmd_common {
__le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
+ __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
} __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */
/**
* struct iwl_tas_config_cmd_v3 - configures the TAS
- * @block_list_size: size of relevant field in block_list_array
- * @block_list_array: list of countries where TAS must be disabled
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
*/
struct iwl_tas_config_cmd_v3 {
- __le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
__le16 override_tas_iec;
__le16 enable_tas_iec;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_3 */
/**
- * struct iwl_tas_config_cmd_v3 - configures the TAS
- * @block_list_size: size of relevant field in block_list_array
- * @block_list_array: list of countries where TAS must be disabled
+ * struct iwl_tas_config_cmd_v4 - configures the TAS
* @override_tas_iec: indicates whether to override default value of IEC regulatory
* @enable_tas_iec: in case override_tas_iec is set -
* indicates whether IEC regulatory is enabled or disabled
@@ -475,19 +470,20 @@ struct iwl_tas_config_cmd_v3 {
* @reserved: reserved
*/
struct iwl_tas_config_cmd_v4 {
- __le32 block_list_size;
- __le32 block_list_array[IWL_TAS_BLOCK_LIST_MAX];
u8 override_tas_iec;
u8 enable_tas_iec;
u8 usa_tas_uhb_allowed;
u8 reserved;
} __packed; /* TAS_CONFIG_CMD_API_S_VER_4 */
-union iwl_tas_config_cmd {
- struct iwl_tas_config_cmd_v2 v2;
- struct iwl_tas_config_cmd_v3 v3;
- struct iwl_tas_config_cmd_v4 v4;
+struct iwl_tas_config_cmd {
+ struct iwl_tas_config_cmd_common common;
+ union {
+ struct iwl_tas_config_cmd_v3 v3;
+ struct iwl_tas_config_cmd_v4 v4;
+ };
};
+
/**
* enum iwl_lari_config_masks - bit masks for the various LARI config operations
* @LARI_CONFIG_DISABLE_11AC_UKRAINE_MSK: disable 11ac in ukraine
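
The TAS command restructuring above factors the block list, which is identical across versions, into a common prefix, leaving only the version-specific trailer in a union. A minimal sketch of building such a command (hypothetical names and values):

#include <stdint.h>
#include <stdio.h>

#define BLOCK_LIST_MAX 16

/* fields shared by every version live in a common prefix */
struct tas_common {
	uint32_t block_list_size;
	uint32_t block_list_array[BLOCK_LIST_MAX];
};

struct tas_v3 {
	uint16_t override_tas_iec;
	uint16_t enable_tas_iec;
};

struct tas_v4 {
	uint8_t override_tas_iec;
	uint8_t enable_tas_iec;
	uint8_t usa_tas_uhb_allowed;
	uint8_t reserved;
};

struct tas_cmd {
	struct tas_common common;
	union {				/* version-specific trailer */
		struct tas_v3 v3;
		struct tas_v4 v4;
	};
};

int main(void)
{
	struct tas_cmd cmd = { 0 };
	int fw_ver = 4;

	/* the common part is filled the same way for every version */
	cmd.common.block_list_size = 1;
	cmd.common.block_list_array[0] = 123;	/* hypothetical country code */

	/* only the trailer depends on the firmware's command version */
	if (fw_ver <= 3)
		cmd.v3.enable_tas_iec = 1;
	else
		cmd.v4.enable_tas_iec = 1;

	printf("sending %u blocked countries\n", cmd.common.block_list_size);
	return 0;
}
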
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index 306ed88de463..08a2c416ce60 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -142,6 +142,8 @@ struct iwl_phy_context_cmd_v1 {
* @lmac_id: the lmac id the phy context belongs to
* @ci: channel info
* @rxchain_info: ???
+ * @sbb_bandwidth: 0 - disabled, 1 - 40MHz ... 4 - 320MHz
+ * @sbb_ctrl_channel_loc: location of the control channel
* @dsp_cfg_flags: set to 0
* @reserved: reserved to align to 64 bit
*/
@@ -152,9 +154,20 @@ struct iwl_phy_context_cmd {
/* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */
struct iwl_fw_channel_info ci;
__le32 lmac_id;
- __le32 rxchain_info; /* reserved in _VER_4 */
+ union {
+ __le32 rxchain_info; /* reserved in _VER_4 */
+ struct { /* used for _VER_5/_VER_6 */
+ u8 sbb_bandwidth;
+ u8 sbb_ctrl_channel_loc;
+ __le16 puncture_mask; /* added in VER_6 */
+ };
+ };
__le32 dsp_cfg_flags;
__le32 reserved;
-} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3,
+ * PHY_CONTEXT_CMD_API_VER_4,
+ * PHY_CONTEXT_CMD_API_VER_5,
+ * PHY_CONTEXT_CMD_API_VER_6
+ */
#endif /* __iwl_fw_api_phy_ctxt_h__ */
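
The anonymous union added to iwl_phy_context_cmd reinterprets the four bytes of rxchain_info as two u8 fields plus a 16-bit puncture mask for the newer command versions. A small sketch showing how both views alias the same storage (hypothetical values; byte order follows the host here, whereas the real command uses little-endian types):

#include <stdint.h>
#include <stdio.h>

struct phy_cmd {
	union {
		uint32_t rxchain_info;		/* used up to _VER_4 */
		struct {			/* _VER_5/_VER_6 view */
			uint8_t sbb_bandwidth;
			uint8_t sbb_ctrl_channel_loc;
			uint16_t puncture_mask;	/* added in _VER_6 */
		} __attribute__((packed));
	};
	uint32_t dsp_cfg_flags;
} __attribute__((packed));

int main(void)
{
	struct phy_cmd cmd = { 0 };

	cmd.sbb_bandwidth = 2;	/* likely 80MHz in the 1=40MHz..4=320MHz scheme */
	cmd.puncture_mask = 0x3;

	/* both views alias the same four bytes */
	printf("raw word = 0x%08x, size = %zu\n",
	       (unsigned int)cmd.rxchain_info, sizeof(cmd));
	return 0;
}
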
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 040d83fa5424..0bf38243f88a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -506,13 +506,40 @@ struct iwl_geo_tx_power_profiles_resp {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_RSP */
/**
+ * enum iwl_ppag_flags - PPAG enable masks
+ * @IWL_PPAG_ETSI_MASK: enable PPAG in ETSI
+ * @IWL_PPAG_CHINA_MASK: enable PPAG in China
+ * @IWL_PPAG_ETSI_LPI_UHB_MASK: enable LPI in ETSI for UHB
+ * @IWL_PPAG_ETSI_VLP_UHB_MASK: enable VLP in ETSI for UHB
+ * @IWL_PPAG_ETSI_SP_UHB_MASK: enable SP in ETSI for UHB
+ * @IWL_PPAG_USA_LPI_UHB_MASK: enable LPI in USA for UHB
+ * @IWL_PPAG_USA_VLP_UHB_MASK: enable VLP in USA for UHB
+ * @IWL_PPAG_USA_SP_UHB_MASK: enable SP in USA for UHB
+ * @IWL_PPAG_CANADA_LPI_UHB_MASK: enable LPI in CANADA for UHB
+ * @IWL_PPAG_CANADA_VLP_UHB_MASK: enable VLP in CANADA for UHB
+ * @IWL_PPAG_CANADA_SP_UHB_MASK: enable SP in CANADA for UHB
+ */
+enum iwl_ppag_flags {
+ IWL_PPAG_ETSI_MASK = BIT(0),
+ IWL_PPAG_CHINA_MASK = BIT(1),
+ IWL_PPAG_ETSI_LPI_UHB_MASK = BIT(2),
+ IWL_PPAG_ETSI_VLP_UHB_MASK = BIT(3),
+ IWL_PPAG_ETSI_SP_UHB_MASK = BIT(4),
+ IWL_PPAG_USA_LPI_UHB_MASK = BIT(5),
+ IWL_PPAG_USA_VLP_UHB_MASK = BIT(6),
+ IWL_PPAG_USA_SP_UHB_MASK = BIT(7),
+ IWL_PPAG_CANADA_LPI_UHB_MASK = BIT(8),
+ IWL_PPAG_CANADA_VLP_UHB_MASK = BIT(9),
+ IWL_PPAG_CANADA_SP_UHB_MASK = BIT(10),
+};
+
+/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
* @v1: version 1
* @v2: version 2
- *
- * @flags: bit 0 - indicates enablement of PPAG for ETSI
- * bit 1 - indicates enablement of PPAG for CHINA BIOS
- * bit 1 can be used only in v3 (identical to v2)
+ * versions 3, 4 and 5 use the same structure as v2,
+ * but have a different format of the flags bitmap
+ * @flags: values from &enum iwl_ppag_flags
* @gain: table of antenna gain values per chain and sub-band
* @reserved: reserved
*/
@@ -529,6 +556,11 @@ union iwl_ppag_table_cmd {
} v2;
} __packed;
+#define IWL_PPAG_CMD_V4_MASK (IWL_PPAG_ETSI_MASK | IWL_PPAG_CHINA_MASK)
+#define IWL_PPAG_CMD_V5_MASK (IWL_PPAG_CMD_V4_MASK | \
+ IWL_PPAG_ETSI_LPI_UHB_MASK | \
+ IWL_PPAG_USA_LPI_UHB_MASK)
+
#define MCC_TO_SAR_OFFSET_TABLE_ROW_SIZE 26
#define MCC_TO_SAR_OFFSET_TABLE_COL_SIZE 13
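
IWL_PPAG_CMD_V4_MASK/IWL_PPAG_CMD_V5_MASK exist because older command versions understand only a subset of the BIOS flag bits, so the driver masks the flags down before sending. A tiny sketch of that per-version masking (hypothetical bit names mirroring a few of the enum values):

#include <stdint.h>
#include <stdio.h>

#define PPAG_ETSI	(1u << 0)
#define PPAG_CHINA	(1u << 1)
#define PPAG_ETSI_LPI	(1u << 2)
#define PPAG_USA_LPI	(1u << 5)

#define PPAG_V4_MASK	(PPAG_ETSI | PPAG_CHINA)
#define PPAG_V5_MASK	(PPAG_V4_MASK | PPAG_ETSI_LPI | PPAG_USA_LPI)

/* older command versions only understand a subset of the BIOS bits,
 * so the flags are masked down before being sent to the firmware */
static uint32_t ppag_flags_for_cmd(uint32_t bios_flags, int cmd_ver)
{
	return bios_flags & (cmd_ver >= 5 ? PPAG_V5_MASK : PPAG_V4_MASK);
}

int main(void)
{
	uint32_t bios = PPAG_ETSI | PPAG_ETSI_LPI;

	printf("v4 flags: 0x%x\n", ppag_flags_for_cmd(bios, 4));	/* 0x1 */
	printf("v5 flags: 0x%x\n", ppag_flags_for_cmd(bios, 5));	/* 0x5 */
	return 0;
}
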
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index d62fed543276..d7f8a276b683 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021, 2023 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -109,6 +109,7 @@ enum iwl_sta_flags {
 * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
* @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
* station info array (1 - n 1X mode)
+ * @STA_KEY_FLG_AMSDU_SPP: SPP (signaling and payload protected) A-MSDU
* @STA_KEY_FLG_KEYID_MSK: the index of the key
* @STA_KEY_FLG_KEYID_POS: key index bit position
* @STA_KEY_NOT_VALID: key is invalid
@@ -129,6 +130,7 @@ enum iwl_sta_key_flag {
STA_KEY_FLG_EN_MSK = (7 << 0),
STA_KEY_FLG_WEP_KEY_MAP = BIT(3),
+ STA_KEY_FLG_AMSDU_SPP = BIT(7),
STA_KEY_FLG_KEYID_POS = 8,
STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS),
STA_KEY_NOT_VALID = BIT(11),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 842360b1e995..d9e4c75403b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -76,6 +76,8 @@ enum iwl_tx_flags {
* to a secured STA
* @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
* selection, retry limits and BT kill
+ * @IWL_TX_FLAGS_RTS: firmware used an RTS
+ * @IWL_TX_FLAGS_CTS: firmware used CTS-to-self
*/
enum iwl_tx_cmd_flags {
IWL_TX_FLAGS_CMD_RATE = BIT(0),
@@ -884,6 +886,7 @@ struct iwl_tx_path_flush_cmd {
/**
* struct iwl_flush_queue_info - virtual flush queue info
+ * @tid: the tid to flush
* @queue_num: virtual queue id
* @read_before_flush: read pointer before flush
* @read_after_flush: read pointer after flush
@@ -897,6 +900,7 @@ struct iwl_flush_queue_info {
/**
* struct iwl_tx_path_flush_cmd_rsp -- queue/FIFO flush command response
+ * @sta_id: the station for which the queue was flushed
* @num_flushed_queues: number of queues in queues array
* @queues: all flushed queues
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 9c69d3674384..e6c0f928a6bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2019-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2019-2021, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -66,6 +66,16 @@ enum iwl_gen2_tx_fifo {
IWL_GEN2_TRIG_TX_FIFO_VO,
};
+enum iwl_bz_tx_fifo {
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+};
/**
* enum iwl_tx_queue_cfg_actions - TXQ config options
* @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index e27774e7ed74..db6d7013df66 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -19,7 +19,6 @@
* @fwrt_ptr: pointer to the buffer coming from fwrt
* @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
* transport's data.
- * @trans_len: length of the valid data in trans_ptr
* @fwrt_len: length of the valid data in fwrt_ptr
*/
struct iwl_fw_dump_ptrs {
@@ -1728,10 +1727,12 @@ iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
/**
* mask_apply_and_normalize - applies mask on val and normalize the result
*
- * The normalization is based on the first set bit in the mask
- *
* @val: value
* @mask: mask to apply and to normalize with
+ *
+ * The normalization is based on the first set bit in the mask
+ *
+ * Returns: the extracted value
*/
static u32 mask_apply_and_normalize(u32 val, u32 mask)
{
@@ -2200,15 +2201,16 @@ struct iwl_dump_ini_mem_ops {
};
/**
- * iwl_dump_ini_mem
- *
- * Creates a dump tlv and copy a memory region into it.
- * Returns the size of the current dump tlv or 0 if failed
+ * iwl_dump_ini_mem - dump memory region
*
* @fwrt: fw runtime struct
* @list: list to add the dump tlv to
* @reg_data: memory region
* @ops: memory dump operations
+ *
+ * Creates a dump tlv and copy a memory region into it.
+ *
+ * Returns: the size of the current dump tlv or 0 if failed
*/
static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
struct iwl_dump_ini_region_data *reg_data,
@@ -2427,9 +2429,12 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_debug_info_tlv *debug_info =
(void *)node->tlv.data;
+ BUILD_BUG_ON(sizeof(cfg_name->cfg_name) !=
+ sizeof(debug_info->debug_cfg_name));
+
cfg_name->image_type = debug_info->image_type;
cfg_name->cfg_name_len =
- cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME);
+ cpu_to_le32(sizeof(cfg_name->cfg_name));
memcpy(cfg_name->cfg_name, debug_info->debug_cfg_name,
sizeof(cfg_name->cfg_name));
cfg_name++;
@@ -2873,7 +2878,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
le32_to_cpu(desc->trig_desc.type));
- schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay));
+ queue_delayed_work(system_unbound_wq, &wk_data->wk,
+ usecs_to_jiffies(delay));
return 0;
}
@@ -3175,7 +3181,9 @@ int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
if (sync)
iwl_fw_dbg_collect_sync(fwrt, idx);
else
- schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));
+ queue_delayed_work(system_unbound_wq,
+ &fwrt->dump.wks[idx].wk,
+ usecs_to_jiffies(delay));
return 0;
}
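Both hunks above move the dump-collection work from schedule_delayed_work() to an explicit queue_delayed_work() on system_unbound_wq, so the potentially long collection is not bound to the CPU that scheduled it. For reference, schedule_delayed_work() is defined in <linux/workqueue.h> roughly as:

static inline bool schedule_delayed_work(struct delayed_work *dwork,
					 unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}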
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index eb38c686b5cb..98d56e778d99 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -306,8 +306,6 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync)
_iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync);
}
-void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);
-
static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
struct iwl_lmac_alive *lmac,
struct iwl_umac_alive *umac)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 06d6f7f66430..5c76e3b94968 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2014, 2018-2024 Intel Corporation
* Copyright (C) 2014-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -16,7 +16,7 @@
/**
* enum iwl_fw_error_dump_type - types of data in the dump file
* @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0
- * @IWL_FW_ERROR_DUMP_RXF:
+ * @IWL_FW_ERROR_DUMP_RXF: RX FIFO contents
* @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
* &struct iwl_fw_error_dump_txcmd packets
* @IWL_FW_ERROR_DUMP_DEV_FW_INFO: struct %iwl_fw_error_dump_info
@@ -24,21 +24,24 @@
* @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
* @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
* sections like this in a single file.
+ * @IWL_FW_ERROR_DUMP_TXF: TX FIFO contents
* @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
* @IWL_FW_ERROR_DUMP_MEM: chunk of memory
* @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
* Structured as &struct iwl_fw_error_dump_trigger_desc.
* @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
* &struct iwl_fw_error_dump_rb
- * @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were
+ * @IWL_FW_ERROR_DUMP_PAGING: UMAC's image memory segments which were
* paged to the DRAM.
* @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers.
+ * @IWL_FW_ERROR_DUMP_INTERNAL_TXF: internal TX FIFO data
* @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and
* for that reason is not in use in any other place in the Linux Wi-Fi
* stack.
* @IWL_FW_ERROR_DUMP_MEM_CFG: the addresses and sizes of fifos in the smem,
* which we get from the fw after ALIVE. The content is structured as
* &struct iwl_fw_error_dump_smem_cfg.
+ * @IWL_FW_ERROR_DUMP_D3_DEBUG_DATA: D3 debug data
*/
enum iwl_fw_error_dump_type {
/* 0 is deprecated */
@@ -59,8 +62,6 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */
IWL_FW_ERROR_DUMP_MEM_CFG = 16,
IWL_FW_ERROR_DUMP_D3_DEBUG_DATA = 17,
-
- IWL_FW_ERROR_DUMP_MAX,
};
/**
@@ -442,7 +443,7 @@ struct iwl_fw_ini_err_table_dump {
* struct iwl_fw_error_dump_rb - content of an Receive Buffer
* @index: the index of the Receive Buffer in the Rx queue
* @rxq: the RB's Rx queue
- * @reserved:
+ * @reserved: reserved
* @data: the content of the Receive Buffer
*/
struct iwl_fw_error_dump_rb {
@@ -488,7 +489,7 @@ struct iwl_fw_ini_special_device_memory {
* struct iwl_fw_error_dump_paging - content of the UMAC's image page
* block on DRAM
* @index: the index of the page block
- * @reserved:
+ * @reserved: reserved
* @data: the content of the page block
*/
struct iwl_fw_error_dump_paging {
@@ -511,6 +512,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
/**
* enum iwl_fw_dbg_trigger - triggers available
*
+ * @FW_DBG_TRIGGER_INVALID: invalid trigger value
* @FW_DBG_TRIGGER_USER: trigger log collection by user
* This should not be defined as a trigger to the driver, but a value the
* driver should set to indicate that the trigger was initiated by the
@@ -530,14 +532,15 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
* @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
* events.
* @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
- * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a
- * threshold.
- * @FW_DBG_TDLS: trigger log collection upon TDLS related events.
+ * @FW_DBG_TRIGGER_TX_LATENCY: trigger log collection when the tx latency
+ * goes above a threshold.
+ * @FW_DBG_TRIGGER_TDLS: trigger log collection upon TDLS related events.
* @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
* the firmware sends a tx reply.
* @FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if alive flow timeouts
* @FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure
* in the driver.
+ * @FW_DBG_TRIGGER_MAX: not a real trigger; the number of triggers, used for sizing arrays etc.
*/
enum iwl_fw_dbg_trigger {
FW_DBG_TRIGGER_INVALID = 0,
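Note that the IWL_FW_ERROR_DUMP_MAX sentinel is removed above: the dump types are fixed on-disk values, each carried explicitly in its TLV header, so the sentinel served no purpose and would silently change whenever an entry was appended. For reference, the framing these types travel in (fields as in the existing struct iwl_fw_error_dump_data):

struct iwl_fw_error_dump_data {
	__le32 type;	/* one of enum iwl_fw_error_dump_type */
	__le32 len;
	__u8 data[];
} __packed;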
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index bfc39bd5bbc6..f69d29e531c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2008-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2008-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -216,6 +216,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* ADD_MODIFY_STA_KEY_API_S_VER_2.
* @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement.
* @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
+ * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL: support for adaptive dwell in scanning
* @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
* @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
* indicating low latency direction.
@@ -239,14 +240,21 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S.
* @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of
* STA_CONTEXT_DOT11AX_API_S
+ * @IWL_UCODE_TLV_API_FTM_RTT_ACCURACY: version 7 of the range response API
+ * is supported by FW, this indicates the RTT confidence value
* @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different sar
* version tables.
* @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
- * SCAN_CONFIG_DB_CMD_API_S.
+ * SCAN_CONFIG_DB_CMD_API_S.
+ * @IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP: support for setting adaptive dwell
+ * number of APs in the 5 GHz band
+ * @IWL_UCODE_TLV_API_BAND_IN_RX_DATA: FW reports band number in RX notification
* @IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX: Firmware offloaded the station disable tx
* logic.
* @IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR: Firmware supports clearing the debug
* internal buffer
+ * @IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD: Firmware doesn't need the host to
+ * configure the smart fifo
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -287,6 +295,7 @@ enum iwl_ucode_tlv_api {
/* API Set 2 */
IWL_UCODE_TLV_API_NO_HOST_DISABLE_TX = (__force iwl_ucode_tlv_api_t)66,
IWL_UCODE_TLV_API_INT_DBG_BUF_CLEAR = (__force iwl_ucode_tlv_api_t)67,
+ IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD = (__force iwl_ucode_tlv_api_t)68,
NUM_IWL_UCODE_TLV_API
/*
@@ -383,6 +392,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* channels even when these are not enabled.
* @IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT: Support for indicating dump collection
* complete to FW.
+ * @IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT: Support SPP (signaling and payload
+ * protected) A-MSDU.
+ * @IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT: Support secure LTF measurement.
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -468,6 +480,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98,
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
+ IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT = (__force iwl_ucode_tlv_capa_t)103,
IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
IWL_UCODE_TLV_CAPA_DUMP_COMPLETE_SUPPORT = (__force iwl_ucode_tlv_capa_t)105,
IWL_UCODE_TLV_CAPA_SYNCED_TIME = (__force iwl_ucode_tlv_capa_t)106,
@@ -480,7 +493,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT = (__force iwl_ucode_tlv_capa_t)114,
IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116,
IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117,
-
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT = (__force iwl_ucode_tlv_capa_t)121,
NUM_IWL_UCODE_TLV_CAPA
/*
* This construction make both sparse (which cannot increment the previous
@@ -566,6 +579,7 @@ enum iwl_fw_dbg_reg_operator {
* struct iwl_fw_dbg_reg_op - an operation on a register
*
* @op: &enum iwl_fw_dbg_reg_operator
+ * @reserved: reserved
* @addr: offset of the register
* @val: value
*/
@@ -612,6 +626,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
* @version: version of the TLV - currently 0
* @monitor_mode: &enum iwl_fw_dbg_monitor_mode
* @size_power: buffer size will be 2^(size_power + 11)
+ * @reserved: reserved
* @base_reg: addr of the base addr register (PRPH)
* @end_reg: addr of the end addr register (PRPH)
* @write_ptr_reg: the addr of the reg of the write pointer
@@ -722,6 +737,8 @@ enum iwl_fw_dbg_trigger_vif_type {
* @trig_dis_ms: the time, in milliseconds, after an occurrence of this
* trigger in which another occurrence should be ignored.
* @flags: &enum iwl_fw_dbg_trigger_flags
+ * @reserved: reserved (for alignment)
+ * @data: trigger data
*/
struct iwl_fw_dbg_trigger_tlv {
__le32 id;
@@ -762,7 +779,7 @@ struct iwl_fw_dbg_trigger_missed_bcon {
/**
* struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW.
- * cmds: the list of commands to trigger the collection on
+ * @cmds: the list of commands to trigger the collection on
*/
struct iwl_fw_dbg_trigger_cmd {
struct cmd {
@@ -772,7 +789,7 @@ struct iwl_fw_dbg_trigger_cmd {
} __packed;
/**
- * iwl_fw_dbg_trigger_stats - configures trigger for statistics
+ * struct iwl_fw_dbg_trigger_stats - configures trigger for statistics
* @stop_offset: the offset of the value to be monitored
* @stop_threshold: the threshold above which to collect
* @start_offset: the offset of the value to be monitored
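The two new capability bits above (SPP A-MSDU, secure LTF) use the same sparse-checked pattern as the rest of the enum: the values are a __bitwise type, so plain integers cannot be mixed in without a __force cast. A minimal sketch of how such a bit is typically tested against a capability bitmap; the helper name is illustrative (the driver's real helper is fw_has_capa()):

static inline bool has_capa_sketch(const unsigned long *capa_bitmap,
				   iwl_ucode_tlv_capa_t capa)
{
	return test_bit((__force unsigned int)capa, capa_bitmap);
}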
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 650e4bde9c17..1195e708caa9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#include "iwl-drv.h"
@@ -12,6 +12,8 @@
#include "fw/api/alive.h"
#include "fw/uefi.h"
+#define IWL_PNVM_REDUCED_CAP_BIT BIT(25)
+
struct iwl_pnvm_section {
__le32 offset;
const u8 data[];
@@ -173,6 +175,7 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
while (len >= sizeof(*tlv)) {
u32 tlv_len, tlv_type;
+ u32 rf_type;
len -= sizeof(*tlv);
tlv = (const void *)data;
@@ -201,6 +204,16 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
+ trans->reduced_cap_sku = false;
+ rf_type = CSR_HW_RFID_TYPE(trans->hw_rf_id);
+ if ((trans->sku_id[0] & IWL_PNVM_REDUCED_CAP_BIT) &&
+ rf_type == IWL_CFG_RF_TYPE_FM)
+ trans->reduced_cap_sku = true;
+
+ IWL_DEBUG_FW(trans,
+ "Reduced SKU device %d\n",
+ trans->reduced_cap_sku);
+
if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
@@ -239,7 +252,7 @@ static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
}
new_len = pnvm->size;
- *data = kmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
+ *data = kvmemdup(pnvm->data, pnvm->size, GFP_KERNEL);
release_firmware(pnvm);
if (!*data)
@@ -255,21 +268,27 @@ static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len)
struct pnvm_sku_package *package;
u8 *image = NULL;
- /* First attempt to get the PNVM from BIOS */
- package = iwl_uefi_get_pnvm(trans_p, len);
- if (!IS_ERR_OR_NULL(package)) {
- if (*len >= sizeof(*package)) {
- /* we need only the data */
- *len -= sizeof(*package);
- image = kmemdup(package->data, *len, GFP_KERNEL);
+ /* Get PNVM from BIOS for non-Intel SKU */
+ if (trans_p->sku_id[2]) {
+ package = iwl_uefi_get_pnvm(trans_p, len);
+ if (!IS_ERR_OR_NULL(package)) {
+ if (*len >= sizeof(*package)) {
+ /* we need only the data */
+ *len -= sizeof(*package);
+ image = kvmemdup(package->data,
+ *len, GFP_KERNEL);
+ }
+ /*
+			 * free package regardless of whether kvmemdup
+			 * succeeded
+ */
+ kfree(package);
+ if (image)
+ return image;
}
- /* free package regardless of whether kmemdup succeeded */
- kfree(package);
- if (image)
- return image;
}
- /* If it's not available, try from the filesystem */
+ /* If it's not available, or for Intel SKU, try from the filesystem */
if (iwl_pnvm_get_from_fs(trans_p, &image, len))
return NULL;
return image;
@@ -314,7 +333,7 @@ static void iwl_pnvm_load_pnvm_to_trans(struct iwl_trans *trans,
set:
iwl_trans_set_pnvm(trans, capa);
free:
- kfree(data);
+ kvfree(data);
kfree(pnvm_data);
}
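The kmemdup()→kvmemdup() conversions above let large PNVM payloads fall back to vmalloc-backed memory when physically contiguous pages are scarce; the matching kfree()→kvfree() at the free site is mandatory, since allocation and free must come from the same family. A minimal sketch of the pairing:

static int copy_pnvm_image(const u8 *src, size_t len, u8 **out)
{
	*out = kvmemdup(src, len, GFP_KERNEL);	/* may be vmalloc-backed */
	if (!*out)
		return -ENOMEM;
	/* the caller must release with kvfree(*out), never plain kfree() */
	return 0;
}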
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
new file mode 100644
index 000000000000..36d506463e0e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <linux/dmi.h>
+#include "iwl-drv.h"
+#include "iwl-debug.h"
+#include "regulatory.h"
+#include "fw/runtime.h"
+#include "fw/uefi.h"
+
+#define GET_BIOS_TABLE(__name, ...) \
+do { \
+ int ret = -ENOENT; \
+ if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED) \
+ ret = iwl_uefi_get_ ## __name(__VA_ARGS__); \
+ if (ret < 0) \
+ ret = iwl_acpi_get_ ## __name(__VA_ARGS__); \
+ return ret; \
+} while (0)
+
+#define IWL_BIOS_TABLE_LOADER(__name) \
+int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt) \
+{GET_BIOS_TABLE(__name, fwrt); } \
+IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)
+
+#define IWL_BIOS_TABLE_LOADER_DATA(__name, data_type) \
+int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt, \
+ data_type * data) \
+{GET_BIOS_TABLE(__name, fwrt, data); } \
+IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)
+
+IWL_BIOS_TABLE_LOADER(wrds_table);
+IWL_BIOS_TABLE_LOADER(ewrd_table);
+IWL_BIOS_TABLE_LOADER(wgds_table);
+IWL_BIOS_TABLE_LOADER(ppag_table);
+IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data);
+IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
+IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
+IWL_BIOS_TABLE_LOADER_DATA(eckv, u32);
+
+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ },
+ },
+ { .ident = "GOOGLE-ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ },
+ },
+ { .ident = "RAZER",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
+ },
+ },
+ { .ident = "Honor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
+ },
+ },
+ {}
+};
+
+static const struct dmi_system_id dmi_tas_approved_list[] = {
+ { .ident = "HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ },
+ },
+ { .ident = "SAMSUNG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+ },
+ },
+ { .ident = "LENOVO",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "MSFT",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ },
+ },
+ { .ident = "Acer",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ },
+ },
+ { .ident = "ASUS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ },
+ },
+ { .ident = "GOOGLE-HP",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ },
+ },
+ { .ident = "MSI",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
+ },
+ },
+ { .ident = "Honor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
+ },
+ },
+ /* keep last */
+ {}
+};
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
+{
+ /*
+ * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
+ * earlier firmware versions. Unfortunately, we don't have a
+ * TLV API flag to rely on, so rely on the major version which
+ * is in the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 17. It was
+ * also backported to 29, but only for 7265D devices. The
+ * intention was to have it in 36 as well, but not all 8000
+ * family got this feature enabled. The 8000 family is the
+ * only one using version 36, so skip this version entirely.
+ */
+ return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
+
+int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles)
+{
+ int i, j;
+
+ if (!fwrt->geo_enabled)
+ return -ENODATA;
+
+ if (!iwl_sar_geo_support(fwrt))
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < n_profiles; i++) {
+ for (j = 0; j < n_bands; j++) {
+ struct iwl_per_chain_offset *chain =
+ &table[i * n_bands + j];
+
+ chain->max_tx_power =
+ cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
+ chain->chain_a =
+ fwrt->geo_profiles[i].bands[j].chains[0];
+ chain->chain_b =
+ fwrt->geo_profiles[i].bands[j].chains[1];
+ IWL_DEBUG_RADIO(fwrt,
+ "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+ i, j,
+ fwrt->geo_profiles[i].bands[j].chains[0],
+ fwrt->geo_profiles[i].bands[j].chains[1],
+ fwrt->geo_profiles[i].bands[j].max);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_geo_fill_table);
+
+static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int profs[BIOS_SAR_NUM_CHAINS] = { prof_a, prof_b };
+ int i, j;
+
+ for (i = 0; i < BIOS_SAR_NUM_CHAINS; i++) {
+ struct iwl_sar_profile *prof;
+
+ /* don't allow SAR to be disabled (profile 0 means disable) */
+ if (profs[i] == 0)
+ return -EPERM;
+
+ /* we are off by one, so allow up to BIOS_SAR_MAX_PROFILE_NUM */
+ if (profs[i] > BIOS_SAR_MAX_PROFILE_NUM)
+ return -EINVAL;
+
+ /* profiles go from 1 to 4, so decrement to access the array */
+ prof = &fwrt->sar_profiles[profs[i] - 1];
+
+ /* if the profile is disabled, do nothing */
+ if (!prof->enabled) {
+ IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
+ profs[i]);
+ /*
+ * if one of the profiles is disabled, we
+ * ignore all of them and return 1 to
+ * differentiate disabled from other failures.
+ */
+ return 1;
+ }
+
+ IWL_DEBUG_INFO(fwrt,
+ "SAR EWRD: chain %d profile index %d\n",
+ i, profs[i]);
+ IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
+ for (j = 0; j < n_subbands; j++) {
+ per_chain[i * n_subbands + j] =
+ cpu_to_le16(prof->chains[i].subbands[j]);
+ IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
+ j, prof->chains[i].subbands[j]);
+ }
+ }
+
+ return 0;
+}
+
+int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < n_tables; i++) {
+ ret = iwl_sar_fill_table(fwrt,
+ &per_chain[i * n_subbands * BIOS_SAR_NUM_CHAINS],
+ n_subbands, prof_a, prof_b);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_sar_fill_profile);
+
+static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
+ int subband)
+{
+ s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband];
+
+ if ((subband == 0 &&
+ (ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) ||
+ (subband != 0 &&
+ (ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val);
+ return false;
+ }
+ return true;
+}
+
+int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd, int *cmd_size)
+{
+ u8 cmd_ver;
+ int i, j, num_sub_bands;
+ s8 *gain;
+ bool send_ppag_always;
+
+ /* many firmware images for JF lie about this */
+ if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
+ CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
+ return -EOPNOTSUPP;
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG capability not supported by FW, command not sent.\n");
+ return -EINVAL;
+ }
+
+ cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(PHY_OPS_GROUP,
+ PER_PLATFORM_ANT_GAIN_CMD), 1);
+ /*
+	 * Starting from ver 4, the driver needs to send the PPAG CMD
+	 * regardless of whether PPAG is enabled/disabled or valid/invalid.
+ */
+ send_ppag_always = cmd_ver > 3;
+
+ /* Don't send PPAG if it is disabled */
+ if (!send_ppag_always && !fwrt->ppag_flags) {
+ IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
+ return -EINVAL;
+ }
+
+ /* The 'flags' field is the same in v1 and in v2 so we can just
+ * use v1 to access it.
+ */
+ cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
+
+ IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
+ if (cmd_ver == 1) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V1;
+ gain = cmd->v1.gain[0];
+ *cmd_size = sizeof(cmd->v1);
+ if (fwrt->ppag_ver >= 1) {
+ /* in this case FW supports revision 0 */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is %d, send truncated table\n",
+ fwrt->ppag_ver);
+ }
+ } else if (cmd_ver >= 2 && cmd_ver <= 5) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = cmd->v2.gain[0];
+ *cmd_size = sizeof(cmd->v2);
+ if (fwrt->ppag_ver == 0) {
+			/* in this case FW supports revisions 1, 2 or 3 */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table rev is 0, send padded table\n");
+ }
+ } else {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
+ return -EINVAL;
+ }
+
+ /* ppag mode */
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits were read from bios: %d\n",
+ le32_to_cpu(cmd->v1.flags));
+
+ if (cmd_ver == 5)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
+ else if (cmd_ver < 5)
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);
+
+ if ((cmd_ver == 1 &&
+ !fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
+ (cmd_ver == 2 && fwrt->ppag_ver >= 2)) {
+ cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
+ IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
+ } else {
+		IWL_DEBUG_RADIO(fwrt, "not masking ppag China bit\n");
+ }
+
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG MODE bits going to be sent: %d\n",
+ le32_to_cpu(cmd->v1.flags));
+
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
+ if (!send_ppag_always &&
+ !iwl_ppag_value_valid(fwrt, i, j))
+ return -EINVAL;
+
+ gain[i * num_sub_bands + j] =
+ fwrt->ppag_chains[i].subbands[j];
+ IWL_DEBUG_RADIO(fwrt,
+ "PPAG table: chain[%d] band[%d]: gain = %d\n",
+ i, j, gain[i * num_sub_bands + j]);
+ }
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fill_ppag_table);
+
+bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt)
+{
+ if (!dmi_check_system(dmi_ppag_approved_list)) {
+ IWL_DEBUG_RADIO(fwrt,
+ "System vendor '%s' is not in the approved list, disabling PPAG.\n",
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ fwrt->ppag_flags = 0;
+ return false;
+ }
+
+ return true;
+}
+IWL_EXPORT_SYMBOL(iwl_is_ppag_approved);
+
+bool iwl_is_tas_approved(void)
+{
+ return dmi_check_system(dmi_tas_approved_list);
+}
+IWL_EXPORT_SYMBOL(iwl_is_tas_approved);
+
+int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data,
+ const u32 tas_selection)
+{
+ u8 override_iec = u32_get_bits(tas_selection,
+ IWL_WTAS_OVERRIDE_IEC_MSK);
+ u8 enabled_iec = u32_get_bits(tas_selection, IWL_WTAS_ENABLE_IEC_MSK);
+ u8 usa_tas_uhb = u32_get_bits(tas_selection, IWL_WTAS_USA_UHB_MSK);
+ int enabled = tas_selection & IWL_WTAS_ENABLED_MSK;
+
+ IWL_DEBUG_RADIO(fwrt, "TAS selection as read from BIOS: 0x%x\n",
+ tas_selection);
+
+ tas_data->usa_tas_uhb_allowed = usa_tas_uhb;
+ tas_data->override_tas_iec = override_iec;
+ tas_data->enable_tas_iec = enabled_iec;
+
+ return enabled;
+}
+
+__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+{
+ int ret;
+ u32 val;
+ __le32 config_bitmap = 0;
+
+ switch (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id)) {
+ case IWL_CFG_RF_TYPE_HR1:
+ case IWL_CFG_RF_TYPE_HR2:
+ case IWL_CFG_RF_TYPE_JF1:
+ case IWL_CFG_RF_TYPE_JF2:
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_INDONESIA_5G2,
+ &val);
+
+ if (!ret && val == DSM_VALUE_INDONESIA_ENABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+ break;
+ default:
+ break;
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val);
+ if (!ret) {
+ if (val == DSM_VALUE_SRD_PASSIVE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+ else if (val == DSM_VALUE_SRD_DISABLE)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+ }
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_REGULATORY_CONFIG,
+ &val);
+ /*
+		 * Enable China 2022 regulatory support if the BIOS object
+		 * does not exist or if it is enabled in BIOS.
+ */
+ if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
+ config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
+ }
+
+ return config_bitmap;
+}
+IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap);
+
+int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value)
+{
+ GET_BIOS_TABLE(dsm, fwrt, func, value);
+}
+IWL_EXPORT_SYMBOL(iwl_bios_get_dsm);
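The GET_BIOS_TABLE() macro at the top of this new file encodes the lookup policy for every table: try the UEFI variable first (only when the WIFI GUID variables are locked), then fall back to ACPI. Expanded by hand, IWL_BIOS_TABLE_LOADER(wrds_table) yields roughly:

int iwl_bios_get_wrds_table(struct iwl_fw_runtime *fwrt)
{
	int ret = -ENOENT;

	if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED)
		ret = iwl_uefi_get_wrds_table(fwrt);
	if (ret < 0)
		ret = iwl_acpi_get_wrds_table(fwrt);
	return ret;
}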
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
new file mode 100644
index 000000000000..28e774766847
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2023 Intel Corporation
+ */
+
+#ifndef __fw_regulatory_h__
+#define __fw_regulatory_h__
+
+#include "fw/img.h"
+#include "fw/api/commands.h"
+#include "fw/api/power.h"
+#include "fw/api/phy.h"
+#include "fw/api/config.h"
+#include "iwl-trans.h"
+
+#define BIOS_SAR_MAX_PROFILE_NUM 4
+/*
+ * Each SAR profile has (up to, depends on the table revision) 4 chains:
+ * chain A, chain B, chain A when in CDB, chain B when in CDB
+ */
+#define BIOS_SAR_MAX_CHAINS_PER_PROFILE 4
+#define BIOS_SAR_NUM_CHAINS 2
+#define BIOS_SAR_MAX_SUB_BANDS_NUM 11
+
+#define BIOS_GEO_NUM_CHAINS 2
+#define BIOS_GEO_MAX_NUM_BANDS 3
+#define BIOS_GEO_MAX_PROFILE_NUM 8
+#define BIOS_GEO_MIN_PROFILE_NUM 3
+
+#define IWL_SAR_ENABLE_MSK BIT(0)
+
+/* PPAG gain value bounds in 1/8 dBm */
+#define IWL_PPAG_MIN_LB -16
+#define IWL_PPAG_MAX_LB 24
+#define IWL_PPAG_MIN_HB -16
+#define IWL_PPAG_MAX_HB 40
+
+#define IWL_PPAG_ETSI_CHINA_MASK 3
+#define IWL_PPAG_REV3_MASK 0x7FF
+
+#define IWL_WTAS_BLACK_LIST_MAX 16
+#define IWL_WTAS_ENABLED_MSK 0x1
+#define IWL_WTAS_OVERRIDE_IEC_MSK 0x2
+#define IWL_WTAS_ENABLE_IEC_MSK 0x4
+#define IWL_WTAS_USA_UHB_MSK BIT(16)
+
+/*
+ * The profile for revision 2 is a superset of revision 1, which is in
+ * turn a superset of revision 0. So we can store all revisions
+ * inside revision 2, which is what we represent here.
+ */
+
+/*
+ * struct iwl_sar_profile_chain - per-chain values of a SAR profile
+ * @subbands: the SAR value for each subband
+ */
+struct iwl_sar_profile_chain {
+ u8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM];
+};
+
+/*
+ * struct iwl_sar_profile - SAR profile from SAR tables
+ * @enabled: whether the profile is enabled or not
+ * @chains: per-chain SAR values
+ */
+struct iwl_sar_profile {
+ bool enabled;
+ struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE];
+};
+
+/* Same thing as with SAR, all revisions fit in revision 2 */
+
+/*
+ * struct iwl_geo_profile_band - per-band geo SAR offsets
+ * @max: the max tx power allowed for the band
+ * @chains: SAR offsets values for each chain
+ */
+struct iwl_geo_profile_band {
+ u8 max;
+ u8 chains[BIOS_GEO_NUM_CHAINS];
+};
+
+/*
+ * struct iwl_geo_profile - geo profile
+ * @bands: per-band table of the SAR offsets
+ */
+struct iwl_geo_profile {
+ struct iwl_geo_profile_band bands[BIOS_GEO_MAX_NUM_BANDS];
+};
+
+/* Same thing as with SAR, all revisions fit in revision 2 */
+struct iwl_ppag_chain {
+ s8 subbands[BIOS_SAR_MAX_SUB_BANDS_NUM];
+};
+
+struct iwl_tas_data {
+ __le32 block_list_size;
+ __le32 block_list_array[IWL_WTAS_BLACK_LIST_MAX];
+ u8 override_tas_iec;
+ u8 enable_tas_iec;
+ u8 usa_tas_uhb_allowed;
+};
+
+/* For DSM revision 0 and 4 */
+enum iwl_dsm_funcs {
+ DSM_FUNC_QUERY = 0,
+ DSM_FUNC_DISABLE_SRD = 1,
+ DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
+ DSM_FUNC_ENABLE_6E = 3,
+ DSM_FUNC_REGULATORY_CONFIG = 4,
+ DSM_FUNC_11AX_ENABLEMENT = 6,
+ DSM_FUNC_ENABLE_UNII4_CHAN = 7,
+ DSM_FUNC_ACTIVATE_CHANNEL = 8,
+ DSM_FUNC_FORCE_DISABLE_CHANNELS = 9,
+ DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10,
+ DSM_FUNC_RFI_CONFIG = 11,
+ DSM_FUNC_NUM_FUNCS = 12,
+};
+
+enum iwl_dsm_values_srd {
+ DSM_VALUE_SRD_ACTIVE,
+ DSM_VALUE_SRD_PASSIVE,
+ DSM_VALUE_SRD_DISABLE,
+ DSM_VALUE_SRD_MAX
+};
+
+enum iwl_dsm_values_indonesia {
+ DSM_VALUE_INDONESIA_DISABLE,
+ DSM_VALUE_INDONESIA_ENABLE,
+ DSM_VALUE_INDONESIA_RESERVED,
+ DSM_VALUE_INDONESIA_MAX
+};
+
+enum iwl_dsm_values_rfi {
+ DSM_VALUE_RFI_DLVR_DISABLE = BIT(0),
+ DSM_VALUE_RFI_DDR_DISABLE = BIT(1),
+};
+
+#define DSM_VALUE_RFI_DISABLE (DSM_VALUE_RFI_DLVR_DISABLE |\
+ DSM_VALUE_RFI_DDR_DISABLE)
+
+enum iwl_dsm_masks_reg {
+ DSM_MASK_CHINA_22_REG = BIT(2)
+};
+
+struct iwl_fw_runtime;
+
+bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
+
+int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles);
+
+int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b);
+
+int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
+ union iwl_ppag_table_cmd *cmd,
+ int *cmd_size);
+
+bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt);
+
+bool iwl_is_tas_approved(void);
+
+int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data,
+ const u32 tas_selection);
+
+int iwl_bios_get_wrds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_wgds_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_ppag_table(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
+
+int iwl_bios_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit);
+
+int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
+int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk);
+
+__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+
+int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value);
+
+static inline u32 iwl_bios_get_ppag_flags(const u32 ppag_modes,
+ const u8 ppag_ver)
+{
+ return ppag_modes & (ppag_ver < 3 ? IWL_PPAG_ETSI_CHINA_MASK :
+ IWL_PPAG_REV3_MASK);
+}
+#endif /* __fw_regulatory_h__ */
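A worked example of the iwl_bios_get_ppag_flags() inline above: for table revisions below 3 only the two ETSI/China mode bits survive, while revision 3 keeps the full 11-bit mask.

	u32 a = iwl_bios_get_ppag_flags(0x7FF, 2);	/* a == 0x003 */
	u32 b = iwl_bios_get_ppag_flags(0x7FF, 3);	/* b == 0x7FF */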
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 357727774db9..b2bc4fd37abf 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __iwl_fw_runtime_h__
#define __iwl_fw_runtime_h__
@@ -14,6 +14,7 @@
#include "fw/api/power.h"
#include "iwl-eeprom-parse.h"
#include "fw/acpi.h"
+#include "fw/regulatory.h"
struct iwl_fw_runtime_ops {
void (*dump_start)(void *ctx);
@@ -100,6 +101,11 @@ struct iwl_txf_iter_data {
* @dump: debug dump data
* @uats_enabled: VLP or AFC AP is enabled
* @uats_table: AP type table
+ * @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock:
+ * 0: Unlocked, 1 and 2: Locked.
+ *	The UEFI variables are read only when the lock is set.
+ * @sar_profiles: sar profiles as read from WRDS/EWRD BIOS tables
+ * @geo_profiles: geographic profiles as read from WGDS BIOS table
*/
struct iwl_fw_runtime {
struct iwl_trans *trans;
@@ -158,24 +164,22 @@ struct iwl_fw_runtime {
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool tpc_enabled;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
-#ifdef CONFIG_ACPI
- struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+ struct iwl_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM];
u8 sar_chain_a_profile;
u8 sar_chain_b_profile;
- struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3];
+ u8 reduced_power_flags;
+ struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM];
u32 geo_rev;
u32 geo_num_profiles;
bool geo_enabled;
struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
u32 ppag_flags;
- u32 ppag_ver;
- bool ppag_table_valid;
+ u8 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
- u8 reduced_power_flags;
- bool uats_enabled;
struct iwl_uats_table_cmd uats_table;
-#endif
+ u8 uefi_tables_lock_status;
+ bool uats_enabled;
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 2964c5fb11e9..e81fc0129b9d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2021-2023 Intel Corporation
+ * Copyright(c) 2021-2024 Intel Corporation
*/
#include "iwl-drv.h"
@@ -76,6 +76,42 @@ void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
return data;
}
+static
+void *iwl_uefi_get_verified_variable(struct iwl_trans *trans,
+ efi_char16_t *uefi_var_name,
+ char *var_name,
+ unsigned int expected_size,
+ unsigned long *size)
+{
+ void *var;
+ unsigned long var_size;
+
+ var = iwl_uefi_get_variable(uefi_var_name, &IWL_EFI_VAR_GUID,
+ &var_size);
+
+ if (IS_ERR(var)) {
+ IWL_DEBUG_RADIO(trans,
+ "%s UEFI variable not found 0x%lx\n", var_name,
+ PTR_ERR(var));
+ return var;
+ }
+
+ if (var_size < expected_size) {
+ IWL_DEBUG_RADIO(trans,
+ "Invalid %s UEFI variable len (%lu)\n",
+ var_name, var_size);
+ kfree(var);
+ return ERR_PTR(-EINVAL);
+ }
+
+ IWL_DEBUG_RADIO(trans, "%s from UEFI with size %lu\n", var_name,
+ var_size);
+
+ if (size)
+ *size = var_size;
+ return var;
+}
+
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data)
{
@@ -230,26 +266,13 @@ u8 *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
unsigned long package_size;
u8 *data;
- package = iwl_uefi_get_variable(IWL_UEFI_REDUCED_POWER_NAME,
- &IWL_EFI_VAR_GUID, &package_size);
-
- if (IS_ERR(package)) {
- IWL_DEBUG_FW(trans,
- "Reduced Power UEFI variable not found 0x%lx (len %lu)\n",
- PTR_ERR(package), package_size);
+ package = iwl_uefi_get_verified_variable(trans,
+ IWL_UEFI_REDUCED_POWER_NAME,
+ "Reduced Power",
+ sizeof(*package),
+ &package_size);
+ if (IS_ERR(package))
return ERR_CAST(package);
- }
-
- if (package_size < sizeof(*package)) {
- IWL_DEBUG_FW(trans,
- "Invalid Reduced Power UEFI variable len (%lu)\n",
- package_size);
- kfree(package);
- return ERR_PTR(-EINVAL);
- }
-
- IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
- package_size);
IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n",
package->rev, package->total_size, package->n_skus);
@@ -283,32 +306,15 @@ static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_dat
void iwl_uefi_get_step_table(struct iwl_trans *trans)
{
struct uefi_cnv_common_step_data *data;
- unsigned long package_size;
int ret;
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return;
- data = iwl_uefi_get_variable(IWL_UEFI_STEP_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
-
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "STEP UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_STEP_NAME,
+ "STEP", sizeof(*data), NULL);
+ if (IS_ERR(data))
return;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid STEP table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
- return;
- }
-
- IWL_DEBUG_FW(trans, "Read STEP from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_step_parse(data, trans);
if (ret < 0)
@@ -318,7 +324,6 @@ void iwl_uefi_get_step_table(struct iwl_trans *trans)
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_step_table);
-#ifdef CONFIG_ACPI
static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
struct iwl_fw_runtime *fwrt)
{
@@ -355,31 +360,15 @@ void iwl_uefi_get_sgom_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_wlan_sgom_data *data;
- unsigned long package_size;
int ret;
if (!fwrt->geo_enabled)
return;
- data = iwl_uefi_get_variable(IWL_UEFI_SGOM_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "SGOM UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
- return;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid SGOM table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_SGOM_NAME,
+ "SGOM", sizeof(*data), NULL);
+ if (IS_ERR(data))
return;
- }
-
- IWL_DEBUG_FW(trans, "Read SGOM from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_sgom_parse(data, fwrt);
if (ret < 0)
@@ -404,28 +393,12 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
struct iwl_fw_runtime *fwrt)
{
struct uefi_cnv_wlan_uats_data *data;
- unsigned long package_size;
int ret;
- data = iwl_uefi_get_variable(IWL_UEFI_UATS_NAME, &IWL_EFI_VAR_GUID,
- &package_size);
- if (IS_ERR(data)) {
- IWL_DEBUG_FW(trans,
- "UATS UEFI variable not found 0x%lx\n",
- PTR_ERR(data));
+ data = iwl_uefi_get_verified_variable(trans, IWL_UEFI_UATS_NAME,
+ "UATS", sizeof(*data), NULL);
+ if (IS_ERR(data))
return -EINVAL;
- }
-
- if (package_size < sizeof(*data)) {
- IWL_DEBUG_FW(trans,
- "Invalid UATS table UEFI variable len (%lu)\n",
- package_size);
- kfree(data);
- return -EINVAL;
- }
-
- IWL_DEBUG_FW(trans, "Read UATS from UEFI with size %lu\n",
- package_size);
ret = iwl_uefi_uats_parse(data, fwrt);
if (ret < 0) {
@@ -438,4 +411,298 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
return 0;
}
IWL_EXPORT_SYMBOL(iwl_uefi_get_uats_table);
-#endif /* CONFIG_ACPI */
+
+static void iwl_uefi_set_sar_profile(struct iwl_fw_runtime *fwrt,
+ struct uefi_sar_profile *uefi_sar_prof,
+ u8 prof_index, bool enabled)
+{
+ memcpy(&fwrt->sar_profiles[prof_index].chains, uefi_sar_prof,
+ sizeof(struct uefi_sar_profile));
+
+ fwrt->sar_profiles[prof_index].enabled = enabled & IWL_SAR_ENABLE_MSK;
+}
+
+int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_wrds *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDS_NAME,
+ "WRDS", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WRDS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDS revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ /* The profile from WRDS is officially profile 1, but goes
+ * into sar_profiles[0] (because we don't have a profile 0).
+ */
+ iwl_uefi_set_sar_profile(fwrt, &data->sar_profile, 0, data->mode);
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_ewrd *data;
+ int i, ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_EWRD_NAME,
+ "EWRD", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_EWRD_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI EWRD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->num_profiles >= BIOS_SAR_MAX_PROFILE_NUM) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < data->num_profiles; i++)
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+ * have profile 0). So in the array we start from 1.
+ */
+ iwl_uefi_set_sar_profile(fwrt, &data->sar_profiles[i], i + 1,
+ data->mode);
+
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_wgds *data;
+ int i, ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WGDS_NAME,
+ "WGDS", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WGDS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WGDS revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->num_profiles < BIOS_GEO_MIN_PROFILE_NUM ||
+ data->num_profiles > BIOS_GEO_MAX_PROFILE_NUM) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Invalid number of profiles in WGDS: %d\n",
+ data->num_profiles);
+ goto out;
+ }
+
+ fwrt->geo_rev = data->revision;
+ for (i = 0; i < data->num_profiles; i++)
+ memcpy(&fwrt->geo_profiles[i], &data->geo_profiles[i],
+ sizeof(struct iwl_geo_profile));
+
+ fwrt->geo_num_profiles = data->num_profiles;
+ fwrt->geo_enabled = true;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ struct uefi_cnv_var_ppag *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_PPAG_NAME,
+ "PPAG", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision < IWL_UEFI_MIN_PPAG_REV ||
+ data->revision > IWL_UEFI_MAX_PPAG_REV) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI PPAG revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ fwrt->ppag_ver = data->revision;
+ fwrt->ppag_flags = iwl_bios_get_ppag_flags(data->ppag_modes,
+ fwrt->ppag_ver);
+
+ BUILD_BUG_ON(sizeof(fwrt->ppag_chains) != sizeof(data->ppag_chains));
+ memcpy(&fwrt->ppag_chains, &data->ppag_chains,
+ sizeof(data->ppag_chains));
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *tas_data)
+{
+ struct uefi_cnv_var_wtas *uefi_tas;
+ int ret = 0, enabled, i;
+
+ uefi_tas = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WTAS_NAME,
+ "WTAS", sizeof(*uefi_tas), NULL);
+ if (IS_ERR(uefi_tas))
+ return -EINVAL;
+
+ if (uefi_tas->revision != IWL_UEFI_WTAS_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WTAS revision:%d\n",
+ uefi_tas->revision);
+ goto out;
+ }
+
+ enabled = iwl_parse_tas_selection(fwrt, tas_data,
+ uefi_tas->tas_selection);
+ if (!enabled) {
+ IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n");
+ ret = 0;
+ goto out;
+ }
+
+ IWL_DEBUG_RADIO(fwrt, "Reading TAS table revision %d\n",
+ uefi_tas->revision);
+ if (uefi_tas->black_list_size > IWL_WTAS_BLACK_LIST_MAX) {
+ IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %d\n",
+ uefi_tas->black_list_size);
+ ret = -EINVAL;
+ goto out;
+ }
+ tas_data->block_list_size = cpu_to_le32(uefi_tas->black_list_size);
+ IWL_DEBUG_RADIO(fwrt, "TAS array size %u\n", uefi_tas->black_list_size);
+
+ for (i = 0; i < uefi_tas->black_list_size; i++) {
+ tas_data->block_list_array[i] =
+ cpu_to_le32(uefi_tas->black_list[i]);
+ IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n",
+ uefi_tas->black_list[i]);
+ }
+out:
+ kfree(uefi_tas);
+ return ret;
+}
+
+int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
+{
+ struct uefi_cnv_var_splc *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_SPLC_NAME,
+ "SPLC", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_SPLC_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI SPLC revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *dflt_pwr_limit = data->default_pwr_limit;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
+{
+ struct uefi_cnv_var_wrdd *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WRDD_NAME,
+ "WRDD", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WRDD_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (data->mcc != UEFI_MCC_CHINA) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "UEFI WRDD is supported only for CN\n");
+ goto out;
+ }
+
+ mcc[0] = (data->mcc >> 8) & 0xff;
+ mcc[1] = data->mcc & 0xff;
+ mcc[2] = '\0';
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
+{
+ struct uefi_cnv_var_eckv *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_ECKV_NAME,
+ "ECKV", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_ECKV_REVISION) {
+ ret = -EINVAL;
+		IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI ECKV revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *extl_clk = data->ext_clock_valid;
+out:
+ kfree(data);
+ return ret;
+}
+
+int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value)
+{
+ struct uefi_cnv_var_general_cfg *data;
+ int ret = -EINVAL;
+
+	/* unsupported function index (5 is not a defined DSM function) */
+ if (func >= DSM_FUNC_NUM_FUNCS || func == 5)
+ return -EOPNOTSUPP;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_DSM_NAME,
+ "DSM", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_DSM_REVISION) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI DSM revision:%d\n",
+ data->revision);
+ goto out;
+ }
+
+ if (ARRAY_SIZE(data->functions) != UEFI_MAX_DSM_FUNCS) {
+ IWL_DEBUG_RADIO(fwrt, "Invalid size of DSM functions array\n");
+ goto out;
+ }
+
+ *value = data->functions[func];
+ ret = 0;
+out:
+ kfree(data);
+ return ret;
+}
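All of the new getters above share one shape around iwl_uefi_get_verified_variable(): the helper returns either a usable buffer or an ERR_PTR (callers test only IS_ERR()), leaving each getter to validate the revision and copy the payload out. A caller-side sketch, mapping the error to -EINVAL as the functions above do:

	data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_SPLC_NAME,
					      "SPLC", sizeof(*data), NULL);
	if (IS_ERR(data))
		return -EINVAL;
	/* ... check data->revision, copy the fields needed ... */
	kfree(data);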
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index bf61a8df1225..303cc299d1bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -5,15 +5,38 @@
#ifndef __iwl_fw_uefi__
#define __iwl_fw_uefi__
+#include "fw/regulatory.h"
+
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
#define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping"
#define IWL_UEFI_STEP_NAME L"UefiCnvCommonSTEP"
#define IWL_UEFI_UATS_NAME L"CnvUefiWlanUATS"
+#define IWL_UEFI_WRDS_NAME L"UefiCnvWlanWRDS"
+#define IWL_UEFI_EWRD_NAME L"UefiCnvWlanEWRD"
+#define IWL_UEFI_WGDS_NAME L"UefiCnvWlanWGDS"
+#define IWL_UEFI_PPAG_NAME L"UefiCnvWlanPPAG"
+#define IWL_UEFI_WTAS_NAME L"UefiCnvWlanWTAS"
+#define IWL_UEFI_SPLC_NAME L"UefiCnvWlanSPLC"
+#define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD"
+#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV"
+#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
+
#define IWL_SGOM_MAP_SIZE 339
#define IWL_UATS_MAP_SIZE 339
+#define IWL_UEFI_WRDS_REVISION 2
+#define IWL_UEFI_EWRD_REVISION 2
+#define IWL_UEFI_WGDS_REVISION 3
+#define IWL_UEFI_MIN_PPAG_REV 1
+#define IWL_UEFI_MAX_PPAG_REV 3
+#define IWL_UEFI_WTAS_REVISION 1
+#define IWL_UEFI_SPLC_REVISION 0
+#define IWL_UEFI_WRDD_REVISION 0
+#define IWL_UEFI_ECKV_REVISION 0
+#define IWL_UEFI_DSM_REVISION 4
+
struct pnvm_sku_package {
u8 rev;
u32 total_size;
@@ -42,6 +65,120 @@ struct uefi_cnv_common_step_data {
} __packed;
/*
+ * struct uefi_sar_profile - a SAR profile as defined in UEFI
+ *
+ * @chains: a per-chain table of SAR values
+ */
+struct uefi_sar_profile {
+ struct iwl_sar_profile_chain chains[BIOS_SAR_MAX_CHAINS_PER_PROFILE];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_wrds - WRDS table as defined in UEFI
+ *
+ * @revision: the revision of the table
+ * @mode: whether WRDS is enabled or disabled
+ * @sar_profile: sar profile #1
+ */
+struct uefi_cnv_var_wrds {
+ u8 revision;
+ u32 mode;
+ struct uefi_sar_profile sar_profile;
+} __packed;
+
+/*
+ * struct uefi_cnv_var_ewrd - EWRD table as defined in UEFI
+ * @revision: the revision of the table
+ * @mode: whether EWRD is enabled or disabled
+ * @num_profiles: how many additional profiles we have in this table (0-3)
+ * @sar_profiles: the additional SAR profiles (#2-#4)
+ */
+struct uefi_cnv_var_ewrd {
+ u8 revision;
+ u32 mode;
+ u32 num_profiles;
+ struct uefi_sar_profile sar_profiles[BIOS_SAR_MAX_PROFILE_NUM - 1];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_wgds - WGDS table as defined in UEFI
+ * @revision: the revision of the table
+ * @num_profiles: the number of geo profiles in the table.
+ *	The first 3 are mandatory, and there can be up to 8 in total.
+ * @geo_profiles: a per-profile table of the offsets to add to SAR values.
+ */
+struct uefi_cnv_var_wgds {
+ u8 revision;
+ u8 num_profiles;
+ struct iwl_geo_profile geo_profiles[BIOS_GEO_MAX_PROFILE_NUM];
+} __packed;
+
+/*
+ * struct uefi_cnv_var_ppag - PPAG table as defined in UEFI
+ * @revision: the revision of the table
+ * @ppag_modes: values from &enum iwl_ppag_flags
+ * @ppag_chains: the PPAG values per chain and band
+ */
+struct uefi_cnv_var_ppag {
+ u8 revision;
+ u32 ppag_modes;
+ struct iwl_ppag_chain ppag_chains[IWL_NUM_CHAIN_LIMITS];
+} __packed;
+
+/* struct uefi_cnv_var_wtas - WTAS table as defined in UEFI
+ * @revision: the revision of the table
+ * @tas_selection: bitmap of the TAS enablement options (IWL_WTAS_*_MSK)
+ * @black_list_size: the number of defined entries in the black list
+ * @black_list: a list of countries that are not allowed to use the TAS feature
+ */
+struct uefi_cnv_var_wtas {
+ u8 revision;
+ u32 tas_selection;
+ u8 black_list_size;
+ u16 black_list[IWL_WTAS_BLACK_LIST_MAX];
+} __packed;
+
+/* struct uefi_cnv_var_splc - SPLC table as defined in UEFI
+ * @revision: the revision of the table
+ * @default_pwr_limit: The default maximum power per device
+ */
+struct uefi_cnv_var_splc {
+ u8 revision;
+ u32 default_pwr_limit;
+} __packed;
+
+#define UEFI_MCC_CHINA 0x434e
+
+/* struct uefi_cnv_var_wrdd - WRDD table as defined in UEFI
+ * @revision: the revision of the table
+ * @mcc: country identifier as defined in ISO 3166-1 alpha-2 code
+ */
+struct uefi_cnv_var_wrdd {
+ u8 revision;
+ u32 mcc;
+} __packed;
+
+/* struct uefi_cnv_var_eckv - ECKV table as defined in UEFI
+ * @revision: the revision of the table
+ * @ext_clock_valid: indicates if the external 32 kHz clock is valid
+ */
+struct uefi_cnv_var_eckv {
+ u8 revision;
+ u32 ext_clock_valid;
+} __packed;
+
+#define UEFI_MAX_DSM_FUNCS 32
+
+/* struct uefi_cnv_var_general_cfg - DSM-like table as defined in UEFI
+ * @revision: the revision of the table
+ * @functions: payload of the different DSM functions
+ */
+struct uefi_cnv_var_general_cfg {
+ u8 revision;
+ u32 functions[UEFI_MAX_DSM_FUNCS];
+} __packed;
+
+/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
* disable the feature in old kernels.
@@ -55,6 +192,21 @@ int iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
void iwl_uefi_get_step_table(struct iwl_trans *trans);
int iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
u32 tlv_len, struct iwl_pnvm_image *pnvm_data);
+int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data);
+int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit);
+int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
+int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk);
+int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
+ u32 *value);
+void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
+int iwl_uefi_get_uats_table(struct iwl_trans *trans,
+ struct iwl_fw_runtime *fwrt);
#else /* CONFIG_EFI */
static inline void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
@@ -85,13 +237,56 @@ iwl_uefi_handle_tlv_mem_desc(struct iwl_trans *trans, const u8 *data,
{
return 0;
}
-#endif /* CONFIG_EFI */
-#if defined(CONFIG_EFI) && defined(CONFIG_ACPI)
-void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
-int iwl_uefi_get_uats_table(struct iwl_trans *trans,
- struct iwl_fw_runtime *fwrt);
-#else
+static inline int iwl_uefi_get_wrds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_ewrd_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_wgds_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_ppag_table(struct iwl_fw_runtime *fwrt)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_tas_table(struct iwl_fw_runtime *fwrt,
+ struct iwl_tas_data *data)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
+ u64 *dflt_pwr_limit)
+{
+ *dflt_pwr_limit = 0;
+ return 0;
+}
+
+static inline int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
+{
+ return -ENOENT;
+}
+
+static inline int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt,
+ enum iwl_dsm_funcs func, u32 *value)
+{
+ return -ENOENT;
+}
+
static inline
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt)
{
@@ -103,6 +298,5 @@ int iwl_uefi_get_uats_table(struct iwl_trans *trans,
{
return 0;
}
-
-#endif
+#endif /* CONFIG_EFI */
#endif /* __iwl_fw_uefi__ */
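When CONFIG_EFI is off, each new getter above collapses to a static inline stub returning -ENOENT; any negative value makes the ACPI fallback in GET_BIOS_TABLE() run, so EFI-less builds keep working. The one exception is iwl_uefi_get_pwr_limit(), which reports success with a zero default limit. Sketch of the stub shape:

#else /* !CONFIG_EFI */
static inline int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
{
	return -ENOENT;	/* lets GET_BIOS_TABLE() fall through to ACPI */
}
#endif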
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index ae6f1cd4d660..6aa4f7f9c708 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#ifndef __IWL_CONFIG_H__
#define __IWL_CONFIG_H__
@@ -12,6 +12,7 @@
#include <linux/ieee80211.h>
#include <linux/nl80211.h>
#include "iwl-csr.h"
+#include "iwl-drv.h"
enum iwl_device_family {
IWL_DEVICE_FAMILY_UNDEFINED,
@@ -418,6 +419,8 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_BZ 0x46
#define IWL_CFG_MAC_TYPE_GL 0x47
#define IWL_CFG_MAC_TYPE_SC 0x48
+#define IWL_CFG_MAC_TYPE_SC2 0x49
+#define IWL_CFG_MAC_TYPE_SC2F 0x4A
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -442,6 +445,9 @@ struct iwl_cfg {
#define IWL_CFG_NO_160 0x1
#define IWL_CFG_160 0x0
+#define IWL_CFG_NO_320 0x1
+#define IWL_CFG_320 0x0
+
#define IWL_CFG_CORES_BT 0x0
#define IWL_CFG_CORES_BT_GNSS 0x5
@@ -471,6 +477,15 @@ struct iwl_dev_info {
const char *name;
};
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+extern const struct iwl_dev_info iwl_dev_info_table[];
+extern const unsigned int iwl_dev_info_table_size;
+const struct iwl_dev_info *
+iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
+ u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
+ u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step);
+#endif
+
/*
* This list declares the config structures for all devices.
*/
@@ -526,7 +541,10 @@ extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
+extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];
+extern const char iwl_sc2_name[];
+extern const char iwl_sc2f_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -632,6 +650,8 @@ extern const struct iwl_cfg iwl_cfg_bz;
extern const struct iwl_cfg iwl_cfg_gl;
extern const struct iwl_cfg iwl_cfg_sc;
+extern const struct iwl_cfg iwl_cfg_sc2;
+extern const struct iwl_cfg iwl_cfg_sc2f;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 72075720969c..561d0c261123 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -64,21 +64,22 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,},
};
-static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
- struct list_head *list)
+/* add a new TLV node, returning it so it can be modified */
+static struct iwl_ucode_tlv *iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
+ struct list_head *list)
{
u32 len = le32_to_cpu(tlv->length);
struct iwl_dbg_tlv_node *node;
- node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
+ node = kzalloc(struct_size(node, tlv.data, len), GFP_KERNEL);
if (!node)
- return -ENOMEM;
+ return NULL;
memcpy(&node->tlv, tlv, sizeof(node->tlv));
memcpy(node->tlv.data, tlv->data, len);
list_add_tail(&node->list, list);
- return 0;
+ return &node->tlv;
}
static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
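[Note: the rework above changes iwl_dbg_tlv_add() to allocate the node with struct_size() and to return the embedded TLV copy instead of an error code, so callers can patch the copy in place. A minimal userspace sketch of the same shape, with simplified TLV types standing in for the driver's; the kernel version uses struct_size() from linux/overflow.h for the overflow-checked size:

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct tlv {
		uint32_t type;
		uint32_t length;
		uint8_t data[];         /* flexible array member */
	};

	struct tlv_node {
		struct tlv_node *next;
		struct tlv tlv;         /* kernel-style layout: data[] spills past the end */
	};

	/* Return the embedded copy so the caller can modify it in place,
	 * or NULL on allocation failure -- the same contract as above. */
	static struct tlv *tlv_list_add(const struct tlv *src, struct tlv_node **head)
	{
		/* kernel: kzalloc(struct_size(node, tlv.data, len), GFP_KERNEL) */
		struct tlv_node *node = calloc(1, sizeof(*node) + src->length);

		if (!node)
			return NULL;
		memcpy(&node->tlv, src, sizeof(node->tlv));
		memcpy(node->tlv.data, src->data, src->length);
		node->next = *head;
		*head = node;
		return &node->tlv;
	}
]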
@@ -103,10 +104,18 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
return -EINVAL;
+ /* we use this as a string, ensure input was NUL terminated */
+ if (strnlen(debug_info->debug_cfg_name,
+ sizeof(debug_info->debug_cfg_name)) ==
+ sizeof(debug_info->debug_cfg_name))
+ return -EINVAL;
+
IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
debug_info->debug_cfg_name);
- return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list))
+ return -ENOMEM;
+ return 0;
}
static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
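[Note: the strnlen() check added above rejects firmware-supplied names that fill the entire fixed-size field without a terminator, before the string ever reaches a %s format. The guard reduces to a single comparison:

	#include <string.h>
	#include <stdbool.h>

	/* True only if a NUL fits inside the fixed-width field, i.e. the
	 * untrusted buffer is safe to treat as a C string. */
	static bool name_is_terminated(const char *name, size_t field_size)
	{
		return strnlen(name, field_size) < field_size;
	}
]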
@@ -175,7 +184,9 @@ static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
return -EINVAL;
}
- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list))
+ return -ENOMEM;
+ return 0;
}
static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
@@ -246,11 +257,9 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
const struct iwl_ucode_tlv *tlv)
{
const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
- struct iwl_fw_ini_trigger_tlv *dup_trig;
u32 tp = le32_to_cpu(trig->time_point);
u32 rf = le32_to_cpu(trig->reset_fw);
- struct iwl_ucode_tlv *dup = NULL;
- int ret;
+ struct iwl_ucode_tlv *new_tlv;
if (le32_to_cpu(tlv->length) < sizeof(*trig))
return -EINVAL;
@@ -267,20 +276,18 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
"WRT: time point %u for trigger TLV with reset_fw %u\n",
tp, rf);
trans->dbg.last_tp_resetfw = 0xFF;
+
+ new_tlv = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+ if (!new_tlv)
+ return -ENOMEM;
+
if (!le32_to_cpu(trig->occurrences)) {
- dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
- GFP_KERNEL);
- if (!dup)
- return -ENOMEM;
- dup_trig = (void *)dup->data;
- dup_trig->occurrences = cpu_to_le32(-1);
- tlv = dup;
- }
+ struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new_tlv->data;
- ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
- kfree(dup);
+ new_trig->occurrences = cpu_to_le32(-1);
+ }
- return ret;
+ return 0;
}
static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
@@ -304,7 +311,9 @@ static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
return -EINVAL;
}
- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list);
+ if (!iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list))
+ return -ENOMEM;
+ return 0;
}
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
@@ -1148,7 +1157,9 @@ iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
if (!match) {
IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
le32_to_cpu(trig->time_point));
- return iwl_dbg_tlv_add(trig_tlv, trig_list);
+ if (!iwl_dbg_tlv_add(trig_tlv, trig_list))
+ return -ENOMEM;
+ return 0;
}
return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
@@ -1234,7 +1245,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
}
}
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n",
tp, dump_data.trig->reset_fw);
IWL_DEBUG_FW(fwrt,
@@ -1244,22 +1255,22 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
if (fwrt->trans->trans_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
- fwrt->trans->dbg.restart_required = TRUE;
+ fwrt->trans->dbg.restart_required = true;
} else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
fwrt->trans->dbg.last_tp_resetfw ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw = 0xFF;
IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n");
- fwrt->trans->dbg.restart_required = TRUE;
+ fwrt->trans->dbg.restart_required = true;
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
IWL_DEBUG_FW(fwrt,
"WRT: stop only and no reload firmware\n");
- fwrt->trans->dbg.restart_required = FALSE;
+ fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw =
le32_to_cpu(dump_data.trig->reset_fw);
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index ffe2670720c9..4696d73c8971 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -128,6 +128,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.ucode_capa.cmd_versions);
kfree(drv->fw.phy_integration_ver);
kfree(drv->trans->dbg.pc_data);
+ drv->trans->dbg.pc_data = NULL;
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
@@ -186,6 +187,7 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
case IWL_CFG_RF_TYPE_HR1:
case IWL_CFG_RF_TYPE_HR2:
rf = "hr";
+ rf_step = 'b';
break;
case IWL_CFG_RF_TYPE_GF:
rf = "gf";
@@ -1423,35 +1425,25 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
const struct iwl_op_mode_ops *ops = op->ops;
struct dentry *dbgfs_dir = NULL;
struct iwl_op_mode *op_mode = NULL;
- int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
/* also protects start/stop from racing against each other */
lockdep_assert_held(&iwlwifi_opmode_table_mtx);
- for (retry = 0; retry <= max_retry; retry++) {
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
- drv->dbgfs_op_mode = debugfs_create_dir(op->name,
- drv->dbgfs_drv);
- dbgfs_dir = drv->dbgfs_op_mode;
+ drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+ drv->dbgfs_drv);
+ dbgfs_dir = drv->dbgfs_op_mode;
#endif
- op_mode = ops->start(drv->trans, drv->trans->cfg,
- &drv->fw, dbgfs_dir);
-
- if (op_mode)
- return op_mode;
-
- if (test_bit(STATUS_TRANS_DEAD, &drv->trans->status))
- break;
-
- IWL_ERR(drv, "retry init count %d\n", retry);
+ op_mode = ops->start(drv->trans, drv->trans->cfg,
+ &drv->fw, dbgfs_dir);
+ if (op_mode)
+ return op_mode;
#ifdef CONFIG_IWLWIFI_DEBUGFS
- debugfs_remove_recursive(drv->dbgfs_op_mode);
- drv->dbgfs_op_mode = NULL;
+ debugfs_remove_recursive(drv->dbgfs_op_mode);
+ drv->dbgfs_op_mode = NULL;
#endif
- }
return NULL;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 3d1a27ba35c6..1549ff429549 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -6,6 +6,7 @@
#ifndef __iwl_drv_h__
#define __iwl_drv_h__
#include <linux/export.h>
+#include <kunit/visibility.h>
/* for all modules */
#define DRV_NAME "iwlwifi"
@@ -89,8 +90,13 @@ void iwl_drv_stop(struct iwl_drv *drv);
#define IWL_EXPORT_SYMBOL(sym)
#endif
-/* max retry for init flow */
-#define IWL_MAX_INIT_RETRY 2
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym)
+#define VISIBLE_IF_IWLWIFI_KUNIT
+#else
+#define EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(sym)
+#define VISIBLE_IF_IWLWIFI_KUNIT static
+#endif
#define FW_NAME_PRE_BUFSIZE 64
struct iwl_trans;
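[Note on the EXPORT_SYMBOL_IF_IWLWIFI_KUNIT() wrapper introduced above: EXPORT_SYMBOL_IF_KUNIT() places the symbol in the EXPORTED_FOR_KUNIT_TESTING namespace, so a test module must import that namespace before it can link against iwl_pci_find_dev_info() or the device table. A hypothetical consumer sketch; the suite and case names are illustrative, not part of this series:

	#include <kunit/test.h>
	#include "iwl-config.h"	/* declares iwl_dev_info_table_size in KUnit builds */

	MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);

	static void devinfo_table_size_test(struct kunit *test)
	{
		/* visible only when CONFIG_IWLWIFI_KUNIT_TESTS is enabled */
		KUNIT_EXPECT_GT(test, iwl_dev_info_table_size, 0u);
	}

	static struct kunit_case devinfo_cases[] = {
		KUNIT_CASE(devinfo_table_size_test),
		{}
	};

	static struct kunit_suite devinfo_suite = {
		.name = "iwlwifi-devinfo-sketch",
		.test_cases = devinfo_cases,
	};
	kunit_test_suite(devinfo_suite);
]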
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 5aab64c63a13..2b290fab1ef2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -270,7 +270,7 @@ enum iwl_eeprom_enhanced_txpwr_flags {
};
/**
- * struct iwl_eeprom_enhanced_txpwr
+ * struct iwl_eeprom_enhanced_txpwr - enhanced regulatory TX power limits
* @flags: entry flags
* @channel: channel number
* @chain_a_max: chain a max power in 1/2 dBm
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index e0400ba2ab74..6ba374efaacb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2023-2024 Intel Corporation
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fh_h__
@@ -570,18 +570,19 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
/**
* struct iwl_rb_status - reserve buffer status
* host memory mapped FH registers
- * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
- * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
- * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * @closed_rb_num: [0:11] Indicates the index of the RB which was closed
+ * @closed_fr_num: [0:11] Indicates the index of the RX Frame which was closed
+ * @finished_rb_num: [0:11] Indicates the index of the current RB
* in which the last frame was written to
- * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * @finished_fr_num: [0:11] Indicates the index of the RX Frame
* which was transferred
+ * @__spare: reserved
*/
struct iwl_rb_status {
__le16 closed_rb_num;
__le16 closed_fr_num;
__le16 finished_rb_num;
- __le16 finished_fr_nam;
+ __le16 finished_fr_num;
__le32 __spare;
} __packed;
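[Note: the hunks here normalize these comments to the kernel-doc form that scripts/kernel-doc actually parses: "@member: description" with a colon, and every field, including padding, documented. The target shape on an illustrative struct:

	/**
	 * struct example_status - status area shared with the device
	 * @closed_num: [0:11] index of the most recently closed element
	 * @__spare: reserved
	 */
	struct example_status {
		__le16 closed_num;
		__le32 __spare;
	} __packed;
]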
@@ -651,15 +652,15 @@ struct iwl_tfd_tb {
*
* This structure contains dma address and length of transmission address
*
- * @tb_len length of the tx buffer
- * @addr 64 bits dma address
+ * @tb_len: length of the tx buffer
+ * @addr: 64 bits dma address
*/
struct iwl_tfh_tb {
__le16 tb_len;
__le64 addr;
} __packed;
-/**
+/*
* Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
* Both driver and device share these circular buffers, each of which must be
* contiguous 256 TFDs.
@@ -698,10 +699,11 @@ struct iwl_tfd {
/**
* struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
- * @ num_tbs 0-4 number of active tbs
- * 5 -15 reserved
- * @ tbs[25] transmit frame buffer descriptors
- * @ __pad padding
+ * @num_tbs:
+ * 0-4 number of active tbs
+ * 5-15 reserved
+ * @tbs: transmit frame buffer descriptors
+ * @__pad: padding
*/
struct iwl_tfh_tfd {
__le16 num_tbs;
@@ -718,10 +720,12 @@ struct iwl_tfh_tfd {
* struct iwlagn_schedq_bc_tbl scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
* For devices up to 22000:
- * @tfd_offset 0-12 - tx command byte count
+ * @tfd_offset:
+ * For devices up to 22000:
+ * 0-12 - tx command byte count
* 12-16 - station index
- * For 22000:
- * @tfd_offset 0-12 - tx command byte count
+ * For 22000:
+ * 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 402896988686..baa39a18087a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -156,6 +156,8 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
* @NVM_CHANNEL_80MHZ: 80 MHz channel okay
* @NVM_CHANNEL_160MHZ: 160 MHz channel okay
* @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
+ * @NVM_CHANNEL_VLP: client support connection to UHB VLP AP
+ * @NVM_CHANNEL_AFC: client support connection to UHB AFC AP
*/
enum iwl_nvm_channel_flags {
NVM_CHANNEL_VALID = BIT(0),
@@ -170,6 +172,8 @@ enum iwl_nvm_channel_flags {
NVM_CHANNEL_80MHZ = BIT(10),
NVM_CHANNEL_160MHZ = BIT(11),
NVM_CHANNEL_DC_HIGH = BIT(12),
+ NVM_CHANNEL_VLP = BIT(13),
+ NVM_CHANNEL_AFC = BIT(14),
};
/**
@@ -309,7 +313,7 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
/* Note: already can print up to 101 characters, 110 is the limit! */
IWL_DEBUG_DEV(dev, level,
- "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
chan, flags,
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(IBSS),
@@ -322,7 +326,9 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
CHECK_AND_PRINT_I(40MHZ),
CHECK_AND_PRINT_I(80MHZ),
CHECK_AND_PRINT_I(160MHZ),
- CHECK_AND_PRINT_I(DC_HIGH));
+ CHECK_AND_PRINT_I(DC_HIGH),
+ CHECK_AND_PRINT_I(VLP),
+ CHECK_AND_PRINT_I(AFC));
#undef CHECK_AND_PRINT_I
}
@@ -366,6 +372,12 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
(flags & IEEE80211_CHAN_NO_IR))
flags |= IEEE80211_CHAN_IR_CONCURRENT;
+ /* Set the AP type for the UHB case. */
+ if (!(nvm_flags & NVM_CHANNEL_VLP))
+ flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;
+ if (!(nvm_flags & NVM_CHANNEL_AFC))
+ flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
+
return flags;
}
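[Note: the logic added above inverts the sense of the NVM bits: a 6 GHz channel permits VLP or AFC clients only when the corresponding bit is set, so a missing bit becomes a NO_* prohibition toward the stack. A userspace sketch with illustrative constants standing in for the NVM_CHANNEL_* and IEEE80211_CHAN_* values:

	#include <stdint.h>

	#define NVM_VLP     (1u << 13)
	#define NVM_AFC     (1u << 14)
	#define CHAN_NO_VLP (1u << 0)
	#define CHAN_NO_AFC (1u << 1)

	/* absence of a capability bit turns into a prohibition flag */
	static uint32_t uhb_ap_type_flags(uint32_t nvm_flags)
	{
		uint32_t flags = 0;

		if (!(nvm_flags & NVM_VLP))
			flags |= CHAN_NO_VLP;
		if (!(nvm_flags & NVM_AFC))
			flags |= CHAN_NO_AFC;
		return flags;
	}
]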
@@ -668,7 +680,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 |
@@ -696,10 +707,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI,
.phy_cap_info[5] =
+ FIELD_PREP_CONST(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US) |
IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP |
- IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP |
- IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
+ IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP,
.phy_cap_info[6] =
IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK |
IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP,
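[Note: FIELD_PREP_CONST(), used above in place of a bare flag, masks and shifts a value into a field while remaining a compile-time constant, which is what lets it sit inside this static initializer (plain FIELD_PREP() cannot be used in constant expressions). A sketch with made-up mask and value names:

	#include <linux/bitfield.h>

	#define EX_PKT_PAD_MASK  0x18u	/* illustrative 2-bit field, bits 3..4 */
	#define EX_PKT_PAD_16US  0x2u

	/* compile-time constant, so valid in a static initializer */
	static const u8 ex_phy_cap5 =
		FIELD_PREP_CONST(EX_PKT_PAD_MASK, EX_PKT_PAD_16US);
]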
@@ -733,6 +745,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
/*
* PPE thresholds for NSS = 2, and RU index bitmap set
* to 0xc.
+ * Note: just for stating what we want, not present in
+ * the transmitted data due to not including
+ * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT.
*/
.eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
},
@@ -745,7 +760,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.mac_cap_info[0] =
IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
- IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
@@ -793,7 +807,6 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
.has_eht = true,
.eht_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
IEEE80211_EHT_MAC_CAP0_OM_CONTROL |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2,
@@ -801,7 +814,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ |
IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI,
.phy_cap_info[5] =
- IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT,
+ FIELD_PREP_CONST(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US),
},
/* For all MCS and bandwidth, set 2 NSS for both Tx and
@@ -829,6 +843,9 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
/*
* PPE thresholds for NSS = 2, and RU index bitmap set
* to 0xc.
+ * Note: just for stating what we want, not present in
+ * the transmitted data due to not including
+ * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT.
*/
.eht_ppe_thres = {0xc1, 0x0e, 0xe0 }
},
@@ -892,8 +909,9 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
bool no_320;
- no_320 = !trans->trans_cfg->integrated &&
- trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB;
+ no_320 = (!trans->trans_cfg->integrated &&
+ trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB) ||
+ trans->reduced_cap_sku;
if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be)
iftype_data->eht_cap.has_eht = false;
@@ -1020,8 +1038,7 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
iftype_data->eht_cap.has_eht) {
iftype_data->eht_cap.eht_cap_elem.mac_cap_info[0] &=
- ~(IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
- IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
+ ~(IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 |
IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2);
iftype_data->eht_cap.eht_cap_elem.phy_cap_info[3] &=
~(IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO |
@@ -1059,6 +1076,26 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] &=
~IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
}
+
+ if (trans->step_urm) {
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs11_max_nss = 0;
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0;
+ }
+
+ if (trans->no_160)
+ iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &=
+ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
+ if (trans->reduced_cap_sku) {
+ memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0,
+ sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320));
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0;
+ iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0;
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &=
+ ~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA;
+ iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK;
+ }
}
static void iwl_init_he_hw_capab(struct iwl_trans *trans,
@@ -1575,7 +1612,8 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
struct iwl_reg_capa reg_capa,
- const struct iwl_cfg *cfg)
+ const struct iwl_cfg *cfg,
+ bool uats_enabled)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1620,6 +1658,16 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
flags &= ~NL80211_RRF_NO_IR;
}
}
+
+ /* Set the AP type for the UHB case. */
+ if (uats_enabled) {
+ if (!(nvm_flags & NVM_CHANNEL_VLP))
+ flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;
+
+ if (!(nvm_flags & NVM_CHANNEL_AFC))
+ flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
+ }
+
/*
* reg_capa is per regulatory domain so apply it for every channel
*/
@@ -1674,7 +1722,7 @@ static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver)
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u32 cap, u8 resp_ver)
+ u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled)
{
int ch_idx;
u16 ch_flags;
@@ -1740,7 +1788,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
ch_flags, reg_capa,
- cfg);
+ cfg, uats_enabled);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
@@ -2100,7 +2148,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
nvm->sku_cap_mimo_disabled =
!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
- if (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM)
+ if (CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM)
nvm->sku_cap_11be_enable = true;
/* Initialize PHY sku data */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index 651ed25b683b..fd9c3bed9407 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -50,7 +50,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u32 cap, u8 resp_ver);
+ u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 3dc618a7c70f..1ca82f3e4ebf 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -68,9 +68,11 @@ struct iwl_cfg;
* Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
- * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
+ * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
* the radio is killed. Return %true if the device should be stopped by
* the transport immediately after the call. May sleep.
+ * Note that this must not return %true for newer devices using gen2 PCIe
+ * transport.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index dd32c287b983..a7d44df06eab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -368,12 +368,19 @@ enum {
WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,
};
-#define CNVI_AUX_MISC_CHIP 0xA200B0
+#define CNVI_AUX_MISC_CHIP 0xA200B0
+#define CNVI_AUX_MISC_CHIP_MAC_STEP(_val) (((_val) & 0xf000000) >> 24)
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE(_val) ((_val) & 0xfff)
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U 0x930
+
#define CNVR_AUX_MISC_CHIP 0xA2B800
#define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890
#define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938
#define CNVI_SCU_SEQ_DATA_DW9 0xA27488
+#define CNVI_PMU_STEP_FLOW 0xA2D588
+#define CNVI_PMU_STEP_FLOW_FORCE_URM BIT(2)
+
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
/* device family 9000 WPROT register */
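[Note: the CNVI_AUX_MISC_CHIP_* accessors added above extract the MAC step and product type from a single 32-bit chip-info register readout. A standalone sketch of the same field extraction; the register value is illustrative:

	#include <stdint.h>
	#include <stdio.h>

	#define CHIP_MAC_STEP(v)  (((v) & 0x0f000000u) >> 24)
	#define CHIP_PROD_TYPE(v) ((v) & 0xfffu)

	int main(void)
	{
		uint32_t reg = 0x01000930;	/* example readout */

		printf("mac step %u, prod type 0x%x\n",
		       CHIP_MAC_STEP(reg), CHIP_PROD_TYPE(reg));
		return 0;
	}
]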
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 5789a8735976..b93cef7b2330 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -519,6 +519,7 @@ struct iwl_pnvm_image {
* Must be atomic
* @reclaim: free packet until ssn. Returns a list of freed packets.
* Must be atomic
+ * @set_q_ptrs: set queue pointers internally, after D3 when HW state changed
* @txq_enable: setup a queue. To setup an AC queue, use the
* iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
* this one. The op_mode must not configure the HCMD queue. The scheduler
@@ -528,6 +529,8 @@ struct iwl_pnvm_image {
* hardware scheduler bug. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
+ * @txq_alloc: Allocate a new TX queue, may sleep.
+ * @txq_free: Free a previously allocated TX queue.
* @txq_set_shared_mode: change Tx queue shared/unshared marking
* @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
* @wait_txq_empty: wait until specific tx queue is empty. May sleep.
@@ -547,23 +550,27 @@ struct iwl_pnvm_image {
* the op_mode. May be called several times before start_fw, can't be
* called after that.
* @set_pmi: set the power pmi state
+ * @sw_reset: trigger software reset of the NIC
* @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
* Sleeping is not allowed between grab_nic_access and
* release_nic_access.
* @release_nic_access: let the NIC go to sleep. The "flags" parameter
* must be the same one that was sent before to the grab_nic_access.
- * @set_bits_mask - set SRAM register according to value and mask.
+ * @set_bits_mask: set SRAM register according to value and mask.
* @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
* TX'ed commands and similar. The buffer will be vfree'd by the caller.
* Note that the transport must fill in the proper file headers.
* @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
* of the trans debugfs
+ * @sync_nmi: trigger a firmware NMI and wait for it to complete
* @load_pnvm: save the pnvm data in DRAM
* @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
* context info.
* @load_reduce_power: copy reduce power table to the corresponding DRAM memory
* @set_reduce_power: set reduce power table addresses in the scratch buffer
* @interrupts: disable/enable interrupts to transport
+ * @imr_dma_data: set up IMR DMA
+ * @rxq_dma_data: retrieve RX queue DMA data, see @struct iwl_trans_rxq_dma_data
*/
struct iwl_trans_ops {
@@ -775,7 +782,7 @@ struct iwl_self_init_dram {
* @imr_size: imr dram size received from fw
* @sram_addr: sram address from debug tlv
* @sram_size: sram size from debug tlv
- * @imr2sram_remainbyte`: size remained after each dma transfer
+ * @imr2sram_remainbyte: size remained after each dma transfer
* @imr_curr_addr: current dst address used during dma transfer
* @imr_base_addr: imr address received from fw
*/
@@ -822,12 +829,16 @@ struct iwl_pc_data {
* @fw_mon: DRAM buffer for firmware monitor
* @hw_error: equals true if hw error interrupt was received from the FW
* @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
+ * @unsupported_region_msk: unsupported regions out of active_regions
* @active_regions: active regions
* @debug_info_tlv_list: list of debug info TLVs
* @time_point: array of debug time points
* @periodic_trig_list: periodic triggers list
* @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
* @ucode_preset: preset based on ucode
+ * @restart_required: indicates debug restart is required
+ * @last_tp_resetfw: last handling of reset during debug timepoint
+ * @imr_data: IMR debug data allocation
* @dump_file_name_ext: dump file name extension
* @dump_file_name_ext_valid: dump file name extension if valid or not
* @num_pc: number of program counter for cpu
@@ -930,6 +941,7 @@ struct iwl_pcie_first_tb_buf {
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
+ * @block: queue is blocked
* @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
* @write_ptr: 1-st empty entry (index) host_w
* @read_ptr: last used entry (index) host_r
@@ -938,6 +950,8 @@ struct iwl_pcie_first_tb_buf {
* @id: queue id
* @low_mark: low watermark, resume queue if free space more than this
* @high_mark: high watermark, stop queue if free space less than this
+ * @overflow_q: overflow queue for handling frames that didn't fit on HW queue
+ * @overflow_tx: need to transmit from overflow
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
@@ -990,10 +1004,19 @@ struct iwl_txq {
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @page_offs: offset from skb->cb to mac header page pointer
* @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
- * @queue_used - bit mask of used queues
- * @queue_stopped - bit mask of stopped queues
+ * @queue_used: bit mask of used queues
+ * @queue_stopped: bit mask of stopped queues
+ * @txq: array of TXQ data structures representing the TXQs
* @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
* @queue_alloc_cmd_ver: queue allocation command version
+ * @bc_pool: bytecount DMA allocations pool
+ * @bc_tbl_size: bytecount table size
+ * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
+ * (and similar usage)
+ * @tfd: TFD data
+ * @tfd.max_tbs: max number of buffers per TFD
+ * @tfd.size: TFD size
+ * @tfd.addr_size: TFD/TB address size
*/
struct iwl_trans_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
@@ -1026,27 +1049,35 @@ struct iwl_trans_txqs {
/**
* struct iwl_trans - transport common data
*
- * @csme_own - true if we couldn't get ownership on the device
- * @ops - pointer to iwl_trans_ops
- * @op_mode - pointer to the op_mode
+ * @csme_own: true if we couldn't get ownership on the device
+ * @ops: pointer to iwl_trans_ops
+ * @op_mode: pointer to the op_mode
* @trans_cfg: the trans-specific configuration part
- * @cfg - pointer to the configuration
- * @drv - pointer to iwl_drv
+ * @cfg: pointer to the configuration
+ * @drv: pointer to iwl_drv
+ * @state: current device state
* @status: a bit-mask of transport status flags
- * @dev - pointer to struct device * that represents the device
+ * @dev: pointer to struct device * that represents the device
* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
* 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
- * @hw_rf_id a u32 with the device RF ID
- * @hw_crf_id a u32 with the device CRF ID
- * @hw_wfpm_id a u32 with the device wfpm ID
+ * @hw_rf_id: a u32 with the device RF ID
+ * @hw_cnv_id: a u32 with the device CNV ID
+ * @hw_crf_id: a u32 with the device CRF ID
+ * @hw_wfpm_id: a u32 with the device wfpm ID
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
+ * @sku_id: the SKU identifier (for PNVM matching)
+ * @pnvm_loaded: indicates PNVM was loaded
+ * @hw_rev: the revision data of the HW
* @hw_rev_step: The mac step of the HW
* @pm_support: set to true in start_hw if link pm is supported
* @ltr_enabled: set to true if the LTR is enabled
* @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
+ * @reduce_power_loaded: indicates reduced power section was loaded
* @failed_to_load_reduce_power_image: set to true if pnvm loading failed
+ * @command_groups: pointer to command group name list array
+ * @command_groups_size: array size of @command_groups
* @wide_cmd_header: true when ucode supports wide command header format
* @wait_command_queue: wait queue for sync commands
* @num_rx_queues: number of RX queues allocated by the transport;
@@ -1055,19 +1086,29 @@ struct iwl_trans_txqs {
* @iml: a pointer to the image loader itself
* @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
* The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @dev_cmd_pool_name: name for the TX command allocation pool
+ * @dbgfs_dir: iwlwifi debugfs base dir for this device
+ * @sync_cmd_lockdep_map: lockdep map for checking sync commands
* @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
+ * @dbg: additional debug data, see &struct iwl_trans_debug
+ * @init_dram: FW initialization DMA data
* @system_pm_mode: the system-wide power management mode in use.
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
+ * @name: the device name
* @txqs: transport tx queues data.
* @mbx_addr_0_step: step address data 0
* @mbx_addr_1_step: step address data 1
* @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
* only valid for discrete (not integrated) NICs
* @invalid_tx_cmd: invalid TX command buffer
+ * @reduced_cap_sku: reduced capability supported SKU
+ * @no_160: device not supporting 160 MHz
+ * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
+ * @trans_specific: data for the specific transport this is allocated for/with
*/
struct iwl_trans {
bool csme_own;
@@ -1090,6 +1131,8 @@ struct iwl_trans {
u32 hw_id;
char hw_id_str[52];
u32 sku_id[3];
+ bool reduced_cap_sku;
+ u8 no_160:1, step_urm:1;
u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 9fe1761691ec..535edb51d1c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -181,6 +181,9 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
struct iwl_mvm_sta *mvmsta;
u32 value;
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
if (!mvmsta)
return 0;
@@ -252,6 +255,124 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
swap(data->primary, data->secondary);
}
+static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return;
+
+ /* Done already */
+ if (mvmvif->bt_coex_esr_disabled == !enable)
+ return;
+
+ mvmvif->bt_coex_esr_disabled = !enable;
+
+ /* Nothing to do */
+ if (mvmvif->esr_active == enable)
+ return;
+
+ if (enable) {
+ /* Try to re-enable eSR */
+ /* Try to re-enable eSR */
+ iwl_mvm_mld_select_links(mvm, vif, false);
+ return;
+ }
+
+ /*
+ * Find the primary link, as we want to switch to it and drop the
+ * secondary one.
+ */
+ link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links);
+ WARN_ON(link_id < 0);
+
+ ieee80211_set_active_links_async(vif,
+ vif->active_links & BIT(link_id));
+}
+
+/*
+ * This function receives the LB link id and checks if eSR should be
+ * enabled or disabled (due to BT coex)
+ */
+bool
+iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id, int primary_link)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
+ bool have_wifi_loss_rate =
+ iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ BT_PROFILE_NOTIFICATION, 0) > 4;
+ s8 link_rssi = 0;
+ u8 wifi_loss_rate;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->last_bt_notif.wifi_loss_low_rssi == BT_OFF)
+ return true;
+
+ /* If LB link is the primary one we should always disable eSR */
+ if (link_id == primary_link)
+ return false;
+
+ /* The feature is not supported */
+ if (!have_wifi_loss_rate)
+ return true;
+
+ /*
+ * We might not have a link_info when checking whether we can
+ * (re)enable eSR - the LB link might not exist yet
+ */
+ if (link_info)
+ link_rssi = (s8)link_info->beacon_stats.avg_signal;
+
+ /*
+ * In case we don't know the RSSI - take the lower wifi loss,
+ * so we will more likely enter eSR, and if RSSI is low -
+ * we will get an update on this and exit eSR.
+ */
+ if (!link_rssi)
+ wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+
+ else if (!mvmvif->bt_coex_esr_disabled)
+ /* RSSI needs to get really low to disable eSR... */
+ wifi_loss_rate =
+ link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
+ mvm->last_bt_notif.wifi_loss_low_rssi :
+ mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+ else
+ /* ...And really high before we enable it back */
+ wifi_loss_rate =
+ link_rssi <= -IWL_MVM_BT_COEX_ENABLE_ESR_THRESH ?
+ mvm->last_bt_notif.wifi_loss_low_rssi :
+ mvm->last_bt_notif.wifi_loss_mid_high_rssi;
+
+ return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
+}
+
+void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id)
+{
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
+ usable_links);
+ bool enable;
+
+ /* Not assoc, not MLD vif or only one usable link */
+ if (primary_link < 0)
+ return;
+
+ enable = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
+ primary_link);
+
+ iwl_mvm_bt_coex_enable_esr(mvm, vif, enable);
+}
+
static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_bt_iterator_data *data,
@@ -297,6 +418,8 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
return;
}
+ iwl_mvm_bt_coex_update_vif_esr(mvm, vif, link_id);
+
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
else
@@ -432,6 +555,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
return;
}
+ /* When BT is off this will be 0 */
+ if (data->notif->wifi_loss_low_rssi == BT_OFF)
+ iwl_mvm_bt_coex_enable_esr(mvm, vif, true);
+
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
}
@@ -454,6 +581,11 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_bt_notif_iterator, &data);
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ rcu_read_unlock();
+ return;
+ }
+
iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
if (data.primary) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index c832068b5718..f5122c4678a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -11,6 +11,9 @@
#include "fw-api.h"
#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20
+#define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69
+#define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63
+#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
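[Note: iwl_mvm_bt_coex_calculate_esr_mode() above implements hysteresis using the two thresholds defined here: eSR is dropped only once the low-band link RSSI sinks to -69 dBm or below, and re-enabled only after it climbs above -63 dBm, so the link state does not flap around a single boundary. The decision skeleton, reduced to a userspace sketch (the wifi-loss-rate lookup is omitted):

	#include <stdbool.h>

	#define DISABLE_THRESH_DBM 69	/* leave eSR at/below -69 dBm */
	#define ENABLE_THRESH_DBM  63	/* rejoin only above -63 dBm */

	/* Next eSR state given the current one and the LB link RSSI;
	 * the gap between the two thresholds provides the hysteresis. */
	static bool esr_next_state(bool esr_enabled, int rssi_dbm)
	{
		if (esr_enabled)
			return rssi_dbm > -DISABLE_THRESH_DBM;
		return rssi_dbm > -ENABLE_THRESH_DBM;
	}
]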
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 4582afb149d7..553c6fffc7c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -450,9 +450,9 @@ static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
}
static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_TSC_RSC_PARAM,
IWL_FW_CMD_VER_UNKNOWN);
int ret;
@@ -461,16 +461,14 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
struct wowlan_key_rsc_v5_data data = {};
int i;
- data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL);
+ data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
if (!data.rsc)
return -ENOMEM;
- memset(data.rsc, 0xff, sizeof(*data.rsc));
-
for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++)
data.rsc->mcast_key_id_map[i] =
IWL_MCAST_KEY_MAP_INVALID;
- data.rsc->sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ data.rsc->sta_id = cpu_to_le32(mvm_link->ap_sta_id);
ieee80211_iter_keys(mvm->hw, vif,
iwl_mvm_wowlan_get_rsc_v5_data,
@@ -494,7 +492,7 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
if (ver == 4) {
size = sizeof(*data.rsc_tsc);
data.rsc_tsc->sta_id =
- cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ cpu_to_le32(mvm_link->ap_sta_id);
} else {
/* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */
size = sizeof(data.rsc_tsc->params);
@@ -668,10 +666,9 @@ static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
}
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link,
struct cfg80211_wowlan *wowlan)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_wowlan_patterns_cmd *pattern_cmd;
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
@@ -693,7 +690,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
pattern_cmd->n_patterns = wowlan->n_patterns;
if (ver >= 3)
- pattern_cmd->sta_id = mvmvif->deflink.ap_sta_id;
+ pattern_cmd->sta_id = mvm_link->ap_sta_id;
for (i = 0; i < wowlan->n_patterns; i++) {
int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
@@ -723,14 +720,15 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_chanctx_conf *ctx;
u8 chains_static, chains_dynamic;
- struct cfg80211_chan_def chandef;
+ struct cfg80211_chan_def chandef, ap_def;
int ret, i;
struct iwl_binding_cmd_v1 binding_cmd = {};
struct iwl_time_quota_cmd quota_cmd = {};
struct iwl_time_quota_data *quota;
u32 status;
- if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
+ if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm) ||
+ ieee80211_vif_is_mld(vif)))
return -EINVAL;
/* add back the PHY */
@@ -744,12 +742,13 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EINVAL;
}
chandef = ctx->def;
+ ap_def = ctx->ap;
chains_static = ctx->rx_chains_static;
chains_dynamic = ctx->rx_chains_dynamic;
rcu_read_unlock();
ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt, &chandef,
- chains_static, chains_dynamic);
+ &ap_def, chains_static, chains_dynamic);
if (ret)
return ret;
@@ -927,6 +926,9 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+ if (ap_sta->mfp)
+ wowlan_config_cmd->flags |= IS_11W_ASSOC;
+
if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 6) {
/* Query the last used seqno and set it */
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -987,7 +989,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
}
static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *mvm_link)
{
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -1016,7 +1019,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
return -EIO;
}
- ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif);
+ ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif, mvm_link);
if (ret)
return ret;
@@ -1030,7 +1033,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
if (ver == 2) {
size = sizeof(tkip_data.tkip);
tkip_data.tkip.sta_id =
- cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ cpu_to_le32(mvm_link->ap_sta_id);
} else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
} else {
@@ -1079,7 +1082,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
- kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->deflink.ap_sta_id);
+ kek_kck_cmd.sta_id = cpu_to_le32(mvm_link->ap_sta_id);
if (cmd_ver == 4) {
cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
@@ -1112,6 +1115,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
struct cfg80211_wowlan *wowlan,
struct iwl_wowlan_config_cmd *wowlan_config_cmd,
struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+ struct iwl_mvm_vif_link_info *mvm_link,
struct ieee80211_sta *ap_sta)
{
int ret;
@@ -1130,7 +1134,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
return ret;
}
- ret = iwl_mvm_wowlan_config_key_params(mvm, vif);
+ ret = iwl_mvm_wowlan_config_key_params(mvm, vif, mvm_link);
if (ret)
return ret;
@@ -1142,7 +1146,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
- ret = iwl_mvm_send_patterns(mvm, vif, wowlan);
+ ret = iwl_mvm_send_patterns(mvm, mvm_link, wowlan);
else
ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
if (ret)
@@ -1223,6 +1227,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct ieee80211_vif *vif = NULL;
struct iwl_mvm_vif *mvmvif = NULL;
struct ieee80211_sta *ap_sta = NULL;
+ struct iwl_mvm_vif_link_info *mvm_link;
struct iwl_d3_manager_config d3_cfg_cmd_data = {
/*
* Program the minimum sleep time to 10 seconds, as many
@@ -1237,7 +1242,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
.data[0] = &d3_cfg_cmd_data,
.len[0] = sizeof(d3_cfg_cmd_data),
};
- int ret;
+ int ret, primary_link;
int len __maybe_unused;
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -1251,21 +1256,46 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
return -EINVAL;
}
+ vif = iwl_mvm_get_bss_vif(mvm);
+ if (IS_ERR_OR_NULL(vif))
+ return 1;
+
+ if (ieee80211_vif_is_mld(vif) && vif->cfg.assoc) {
+ /*
+ * Select the 'best' link. May need to revisit, it seems
+ * better to not optimize for throughput but rather range,
+ * reliability and power here - and select 2.4 GHz ...
+ */
+ primary_link =
+ iwl_mvm_mld_get_primary_link(mvm, vif,
+ vif->active_links);
+
+ if (WARN_ONCE(primary_link < 0, "no primary link in 0x%x\n",
+ vif->active_links))
+ primary_link = __ffs(vif->active_links);
+
+ ret = ieee80211_set_active_links(vif, BIT(primary_link));
+ if (ret)
+ return ret;
+ } else {
+ primary_link = 0;
+ }
+
mutex_lock(&mvm->mutex);
set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
synchronize_net();
- vif = iwl_mvm_get_bss_vif(mvm);
- if (IS_ERR_OR_NULL(vif)) {
- ret = 1;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ mvm_link = mvmvif->link[primary_link];
+ if (WARN_ON_ONCE(!mvm_link)) {
+ ret = -EINVAL;
goto out_noreset;
}
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA) {
+ if (mvm_link->ap_sta_id == IWL_MVM_INVALID_STA) {
/* if we're not associated, this must be netdetect */
if (!wowlan->nd_config) {
ret = 1;
@@ -1279,24 +1309,31 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->net_detect = true;
} else {
- struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .offloading_tid = 0,
+ };
- wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id;
+ wowlan_config_cmd.sta_id = mvm_link->ap_sta_id;
ap_sta = rcu_dereference_protected(
- mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id],
+ mvm->fw_id_to_mac_id[mvm_link->ap_sta_id],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(ap_sta)) {
ret = -EINVAL;
goto out_noreset;
}
+ ret = iwl_mvm_sta_ensure_queue(
+ mvm, ap_sta->txq[wowlan_config_cmd.offloading_tid]);
+ if (ret)
+ goto out_noreset;
+
ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
vif, mvmvif, ap_sta);
if (ret)
goto out_noreset;
ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
- vif, mvmvif, ap_sta);
+ vif, mvmvif, mvm_link, ap_sta);
if (ret)
goto out;
@@ -1462,7 +1499,8 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
status->pattern_number;
if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
- IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH |
+ IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE))
wakeup.disconnect = true;
if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
@@ -1486,6 +1524,9 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
wakeup.tcp_match = true;
+ if (reasons & IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC)
+ wakeup.unprot_deauth_disassoc = true;
+
if (status->wake_packet) {
int pktsize = status->wake_packet_bufsize;
int pktlen = status->wake_packet_length;
@@ -1839,9 +1880,12 @@ iwl_mvm_d3_set_igtk_bigtk_ipn(const struct iwl_multicast_key_data *key,
memcpy(seq->aes_gmac.pn, key->ipn, sizeof(seq->aes_gmac.pn));
break;
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
BUILD_BUG_ON(sizeof(seq->aes_cmac.pn) != sizeof(key->ipn));
memcpy(seq->aes_cmac.pn, key->ipn, sizeof(seq->aes_cmac.pn));
break;
+ default:
+ WARN_ON(1);
}
}
@@ -1931,7 +1975,7 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
struct ieee80211_vif *vif,
struct iwl_mvm *mvm, u32 gtk_cipher)
{
- int i;
+ int i, j;
struct ieee80211_key_conf *key;
struct {
struct ieee80211_key_conf conf;
@@ -1939,6 +1983,7 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
} conf = {
.conf.cipher = gtk_cipher,
};
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
@@ -1972,10 +2017,18 @@ static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
memcpy(conf.conf.key, status->gtk[i].key,
sizeof(status->gtk[i].key));
- key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+ key = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
if (IS_ERR(key))
return false;
- iwl_mvm_set_key_rx_seq_idx(key, status, i);
+
+ for (j = 0; j < ARRAY_SIZE(status->gtk_seq); j++) {
+ if (!status->gtk_seq[j].valid ||
+ status->gtk_seq[j].key_id != key->keyidx)
+ continue;
+ iwl_mvm_set_key_rx_seq_idx(key, status, j);
+ break;
+ }
+ WARN_ON(j == ARRAY_SIZE(status->gtk_seq));
}
return true;
@@ -1995,6 +2048,7 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
.conf.keyidx = key_data->id,
};
struct ieee80211_key_seq seq;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
if (!key_data->len)
return true;
@@ -2020,17 +2074,17 @@ iwl_mvm_d3_igtk_bigtk_rekey_add(struct iwl_wowlan_status_data *status,
BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key));
memcpy(conf.conf.key, key_data->key, conf.conf.keylen);
- key_config = ieee80211_gtk_rekey_add(vif, &conf.conf);
+ key_config = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
if (IS_ERR(key_config))
return false;
ieee80211_set_key_rx_seq(key_config, 0, &seq);
if (key_config->keyidx == 4 || key_config->keyidx == 5) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
- struct iwl_mvm_vif_link_info *mvm_link =
- mvmvif->link[link_id];
+ struct iwl_mvm_vif_link_info *mvm_link;
+ link_id = link_id < 0 ? 0 : link_id;
+ mvm_link = mvmvif->link[link_id];
mvm_link->igtk = key_config;
}
@@ -2065,7 +2119,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
.status = status,
};
int i;
-
u32 disconnection_reasons =
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
@@ -2073,9 +2126,6 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
if (!status || !vif->bss_conf.bssid)
return false;
- if (status->wakeup_reasons & disconnection_reasons)
- return false;
-
if (iwl_mvm_lookup_wowlan_status_ver(mvm) > 6 ||
iwl_fw_lookup_notif_ver(mvm->fw, PROT_OFFLOAD_GROUP,
WOWLAN_INFO_NOTIFICATION,
@@ -2136,6 +2186,9 @@ out:
mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
}
+ if (status->wakeup_reasons & disconnection_reasons)
+ return false;
+
return true;
}
@@ -2193,7 +2246,10 @@ static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status,
static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
struct iwl_wowlan_igtk_status *data)
{
+ int i;
+
BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key));
+ BUILD_BUG_ON(sizeof(status->igtk.ipn) != sizeof(data->ipn));
if (!data->key_len)
return;
@@ -2205,7 +2261,10 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
+ WOWLAN_IGTK_MIN_INDEX;
memcpy(status->igtk.key, data->key, sizeof(data->key));
- memcpy(status->igtk.ipn, data->ipn, sizeof(data->ipn));
+
+ /* mac80211 expects big endian for memcmp() to work, convert */
+ for (i = 0; i < sizeof(data->ipn); i++)
+ status->igtk.ipn[i] = data->ipn[sizeof(data->ipn) - i - 1];
}
static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
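[Note: the loop added to iwl_mvm_convert_igtk() replaces the straight memcpy() because the firmware reports the IGTK packet number with the least significant byte first, while mac80211 compares receive PNs as big-endian byte strings using memcmp(). The conversion is a plain byte reversal:

	#include <stddef.h>
	#include <stdint.h>

	/* Reverse a little-endian packet number into the big-endian byte
	 * order that memcmp()-based replay checks expect. */
	static void pn_le_to_be(uint8_t *dst, const uint8_t *src, size_t len)
	{
		for (size_t i = 0; i < len; i++)
			dst[i] = src[len - 1 - i];
	}
]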
@@ -2839,6 +2898,9 @@ iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm,
u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA :
mvmvif->deflink.ap_sta_id;
+ /* bug - FW with MLO has status notification */
+ WARN_ON(ieee80211_vif_is_mld(vif));
+
d3_data->status = iwl_mvm_send_wowlan_get_status(mvm, sta_id);
}
@@ -2947,7 +3009,7 @@ static void iwl_mvm_nd_match_info_handler(struct iwl_mvm *mvm,
if (results->matched_profiles) {
memcpy(results->matches, notif->matches, matches_len);
- d3_data->nd_results_valid = TRUE;
+ d3_data->nd_results_valid = true;
}
/* no scan should be active at this point */
@@ -3345,6 +3407,7 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
+ unsigned long end = jiffies + 60 * HZ;
u32 pme_asserted;
while (true) {
@@ -3358,6 +3421,12 @@ static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
if (msleep_interruptible(100))
break;
+
+ if (time_is_before_jiffies(end)) {
+ IWL_ERR(mvm,
+ "ending pseudo-D3 with timeout after ~60 seconds\n");
+ return -ETIMEDOUT;
+ }
}
return 0;
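[Note: the hunk above bounds what was previously an unbounded poll loop in the d3_test debugfs read: the deadline is taken once, each iteration sleeps in an interruptible 100 ms slice, and time_is_before_jiffies() ends the wait with -ETIMEDOUT after roughly a minute. The pattern in isolation, as a kernel-context sketch with cond() as a stand-in:

	#include <linux/jiffies.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	/* Wait for cond() with a ~60 second upper bound. */
	static int wait_with_deadline(bool (*cond)(void))
	{
		unsigned long end = jiffies + 60 * HZ;

		while (!cond()) {
			if (msleep_interruptible(100))
				return -EINTR;		/* caller saw a signal */
			if (time_is_before_jiffies(end))
				return -ETIMEDOUT;	/* deadline passed */
		}
		return 0;
	}
]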
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index e8b881596baf..51b01f7528be 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -381,9 +381,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_bf(vif, param, value);
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
else
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -578,34 +578,47 @@ static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->mvm;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct iwl_mvm_phy_ctxt *phy_ctxt;
+ struct ieee80211_bss_conf *link_conf;
u16 value;
- int ret;
+ int link_id, ret = -EINVAL;
ret = kstrtou16(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&mvm->mutex);
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->bss_conf.chanctx_conf);
- /* make sure the channel context is assigned */
- if (!chanctx_conf) {
+ mvm->dbgfs_rx_phyinfo = value;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct cfg80211_chan_def min_def, ap_def;
+ struct iwl_mvm_phy_ctxt *phy_ctxt;
+ u8 chains_static, chains_dynamic;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ continue;
+ }
+ /* A command can't be sent with RCU lock held, so copy
+ * everything here and use it after unlocking
+ */
+ min_def = chanctx_conf->min_def;
+ ap_def = chanctx_conf->ap;
+ chains_static = chanctx_conf->rx_chains_static;
+ chains_dynamic = chanctx_conf->rx_chains_dynamic;
rcu_read_unlock();
- mutex_unlock(&mvm->mutex);
- return -EINVAL;
- }
- phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
- rcu_read_unlock();
+ phy_ctxt = mvmvif->link[link_id]->phy_ctxt;
+ if (!phy_ctxt)
+ continue;
- mvm->dbgfs_rx_phyinfo = value;
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &min_def, &ap_def,
+ chains_static, chains_dynamic);
+ }
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
- chanctx_conf->rx_chains_static,
- chanctx_conf->rx_chains_dynamic);
mutex_unlock(&mvm->mutex);
return ret ?: count;
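
The per-link loop above follows a common kernel pattern: snapshot the RCU-protected channel-context fields, drop the read lock, then issue the (sleeping) firmware command. A condensed sketch of just that pattern, kernel context assumed (not a standalone program):

static int update_one_link(struct iwl_mvm *mvm,
			   struct ieee80211_bss_conf *link_conf,
			   struct iwl_mvm_phy_ctxt *phy_ctxt)
{
	struct ieee80211_chanctx_conf *ctx;
	struct cfg80211_chan_def min_def, ap_def;
	u8 chains_static, chains_dynamic;

	rcu_read_lock();
	ctx = rcu_dereference(link_conf->chanctx_conf);
	if (!ctx) {
		rcu_read_unlock();
		return -ENOENT;
	}
	/* copy everything needed while still under RCU protection */
	min_def = ctx->min_def;
	ap_def = ctx->ap;
	chains_static = ctx->rx_chains_static;
	chains_dynamic = ctx->rx_chains_dynamic;
	rcu_read_unlock();

	/* safe to sleep now - no RCU read lock held */
	return iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &min_def, &ap_def,
					chains_static, chains_dynamic);
}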
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index edc8204f7c0e..79f4ac8cbc72 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -391,9 +391,7 @@ static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct file *file,
char buf[12];
u32 value;
- err = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_6E,
- &iwl_guid, &value);
+ err = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value);
if (err)
return err;
@@ -877,14 +875,14 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file,
le16_to_cpu(rsp->curr_mcc));
pos += scnprintf(pos, endpos - pos, "Block list entries:");
- for (i = 0; i < APCI_WTAS_BLACK_LIST_MAX; i++)
+ for (i = 0; i < IWL_WTAS_BLACK_LIST_MAX; i++)
pos += scnprintf(pos, endpos - pos, " 0x%x",
le16_to_cpu(rsp->block_list[i]));
pos += scnprintf(pos, endpos - pos, "\nOEM name: %s\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
pos += scnprintf(pos, endpos - pos, "\tVendor In Approved List: %s\n",
- iwl_mvm_is_vendor_in_approved_list() ? "YES" : "NO");
+ iwl_is_tas_approved() ? "YES" : "NO");
pos += scnprintf(pos, endpos - pos,
"\tDo TAS Support Dual Radio?: %s\n",
rsp->in_dual_radio ? "TRUE" : "FALSE");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 233ae81884a0..4863a3c74640 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@@ -821,9 +821,10 @@ iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* If secure LTF is turned off, replace the flag with PMF only
*/
flags = le32_to_cpu(target->initiator_ap_flags);
- if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
- !IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
- flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+ if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
+ if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
+ flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+
flags |= IWL_INITIATOR_AP_FLAGS_PMF;
target->initiator_ap_flags = cpu_to_le32(flags);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 8f10590f9cdd..8e760300a1ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -12,6 +12,9 @@ struct iwl_mvm_pasn_sta {
struct list_head list;
struct iwl_mvm_int_sta int_sta;
u8 addr[ETH_ALEN];
+
+	/* must be last as it is followed by the buffer holding the key */
+ struct ieee80211_key_conf keyconf;
};
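
The comment relies on keyconf being trailed by variably sized key storage, so a single kzalloc(sizeof(*sta) + tk_len) further down covers both. A simplified, runnable userspace sketch of that layout, using a C99 flexible array member directly (field names illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pasn_sta {
	unsigned char addr[6];
	unsigned int keylen;
	unsigned char key[];	/* trailing buffer - must be last */
};

int main(void)
{
	unsigned char tk[16] = { 0xaa, 0xbb };
	/* one allocation holds the struct and the key bytes behind it */
	struct pasn_sta *sta = calloc(1, sizeof(*sta) + sizeof(tk));

	if (!sta)
		return 1;
	sta->keylen = sizeof(tk);
	memcpy(sta->key, tk, sizeof(tk));
	printf("keylen=%u key[0]=0x%02x\n", sta->keylen, sta->key[0]);
	free(sta);
	return 0;
}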
struct iwl_mvm_pasn_hltk_data {
@@ -303,6 +306,10 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
{
list_del(&sta->list);
+ if (sta->keyconf.keylen)
+ iwl_mvm_sec_key_del_pasn(mvm, vif, BIT(sta->int_sta.sta_id),
+ &sta->keyconf);
+
if (iwl_mvm_has_mld_api(mvm->fw))
iwl_mvm_mld_rm_sta_id(mvm, sta->int_sta.sta_id);
else
@@ -342,6 +349,12 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
}
if (hltk && hltk_len) {
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT)) {
+ IWL_ERR(mvm, "No support for secure LTF measurement\n");
+ return -EINVAL;
+ }
+
hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher);
if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) {
IWL_ERR(mvm, "invalid cipher: %u\n", cipher);
@@ -352,12 +365,12 @@ int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
}
if (tk && tk_len) {
- sta = kzalloc(sizeof(*sta), GFP_KERNEL);
+ sta = kzalloc(sizeof(*sta) + tk_len, GFP_KERNEL);
if (!sta)
return -ENOBUFS;
ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr,
- cipher, tk, tk_len);
+ cipher, tk, tk_len, &sta->keyconf);
if (ret) {
kfree(sta);
return ret;
@@ -425,7 +438,7 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_read_unlock();
phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def,
+ ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def, &ctx.ap,
ctx.rx_chains_static,
ctx.rx_chains_dynamic);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 1252084662c6..e1c2b7fc92ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -16,6 +16,7 @@
#include "fw/acpi.h"
#include "fw/pnvm.h"
#include "fw/uefi.h"
+#include "fw/regulatory.h"
#include "mvm.h"
#include "fw/dbg.h"
@@ -487,7 +488,6 @@ static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
#endif /* CONFIG_ACPI */
}
-#if defined(CONFIG_ACPI) && defined(CONFIG_EFI)
static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
{
u8 cmd_ver;
@@ -567,17 +567,6 @@ static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
return ret;
}
-#else
-
-static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
-{
-}
-#endif
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
@@ -677,6 +666,11 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
NULL);
+ if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ)
+ mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans,
+ CNVI_PMU_STEP_FLOW) &
+ CNVI_PMU_STEP_FLOW_FORCE_URM);
+
/* Send init config command to mark that we are sending NVM access
* commands
*/
@@ -890,7 +884,6 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
sizeof(cmd), &cmd);
}
-#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
u32 cmd_id = REDUCE_TX_POWER_CMD;
@@ -931,9 +924,9 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
/* all structs have the same common part, add it */
len += sizeof(cmd.common);
- ret = iwl_sar_select_profile(&mvm->fwrt, per_chain,
- IWL_NUM_CHAIN_TABLES,
- n_subbands, prof_a, prof_b);
+ ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain,
+ IWL_NUM_CHAIN_TABLES,
+ n_subbands, prof_a, prof_b);
/* return on error or if the profile is disabled (positive number) */
if (ret)
@@ -989,7 +982,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
resp = (void *)cmd.resp_pkt->data;
ret = le32_to_cpu(resp->profile_idx);
- if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3))
+ if (WARN_ON(ret > BIOS_GEO_MAX_PROFILE_NUM))
ret = -EIO;
iwl_free_resp(&cmd);
@@ -1003,7 +996,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
u16 len;
u32 n_bands;
u32 n_profiles;
- u32 sk = 0;
+ __le32 sk = cpu_to_le32(0);
int ret;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
IWL_FW_CMD_VER_UNKNOWN);
@@ -1020,27 +1013,35 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
	/* the ops field is at the same spot for all versions, so set it in v1 */
cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+ /* Only set to South Korea if the table revision is 1 */
+ if (mvm->fwrt.geo_rev == 1)
+ sk = cpu_to_le32(1);
+
if (cmd_ver == 5) {
len = sizeof(cmd.v5);
n_bands = ARRAY_SIZE(cmd.v5.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
+ n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
+ cmd.v5.table_revision = sk;
} else if (cmd_ver == 4) {
len = sizeof(cmd.v4);
n_bands = ARRAY_SIZE(cmd.v4.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
+ n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
+ cmd.v4.table_revision = sk;
} else if (cmd_ver == 3) {
len = sizeof(cmd.v3);
n_bands = ARRAY_SIZE(cmd.v3.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ cmd.v3.table_revision = sk;
} else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
len = sizeof(cmd.v2);
n_bands = ARRAY_SIZE(cmd.v2.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
+ cmd.v2.table_revision = sk;
} else {
len = sizeof(cmd.v1);
n_bands = ARRAY_SIZE(cmd.v1.table[0]);
- n_profiles = ACPI_NUM_GEO_PROFILES;
+ n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
}
BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
@@ -1052,8 +1053,8 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
	/* the table is at the same position for all versions, so use v1 */
- ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0],
- n_bands, n_profiles);
+ ret = iwl_sar_geo_fill_table(&mvm->fwrt, &cmd.v1.table[0][0],
+ n_bands, n_profiles);
/*
* It is a valid scenario to not support SAR, or miss wgds table,
@@ -1062,27 +1063,6 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (ret)
return 0;
- /* Only set to South Korea if the table revision is 1 */
- if (mvm->fwrt.geo_rev == 1)
- sk = 1;
-
- /*
- * Set the table_revision to South Korea (1) or not (0). The
- * element name is misleading, as it doesn't contain the table
- * revision number, but whether the South Korea variation
- * should be used.
- * This must be done after calling iwl_sar_geo_init().
- */
- if (cmd_ver == 5)
- cmd.v5.table_revision = cpu_to_le32(sk);
- else if (cmd_ver == 4)
- cmd.v4.table_revision = cpu_to_le32(sk);
- else if (cmd_ver == 3)
- cmd.v3.table_revision = cpu_to_le32(sk);
- else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
- IWL_UCODE_TLV_API_SAR_TABLE_VER))
- cmd.v2.table_revision = cpu_to_le32(sk);
-
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
@@ -1091,7 +1071,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
union iwl_ppag_table_cmd cmd;
int ret, cmd_size;
- ret = iwl_read_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
+ ret = iwl_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
/* Not supporting PPAG table is a valid scenario */
if (ret < 0)
return 0;
@@ -1110,80 +1090,19 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
/* no need to read the table, done in INIT stage */
- if (!(iwl_acpi_is_ppag_approved(&mvm->fwrt)))
+ if (!(iwl_is_ppag_approved(&mvm->fwrt)))
return 0;
return iwl_mvm_ppag_send_cmd(mvm);
}
-static const struct dmi_system_id dmi_tas_approved_list[] = {
- { .ident = "HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HP"),
- },
- },
- { .ident = "SAMSUNG",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
- },
- },
- { .ident = "LENOVO",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- },
- },
- { .ident = "DELL",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- },
- },
- { .ident = "MSFT",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
- },
- },
- { .ident = "Acer",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- },
- },
- { .ident = "ASUS",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- },
- },
- { .ident = "GOOGLE-HP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Google"),
- DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
- },
- },
- { .ident = "MSI",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
- },
- },
- { .ident = "Honor",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
- },
- },
- /* keep last */
- {}
-};
-
-bool iwl_mvm_is_vendor_in_approved_list(void)
-{
- return dmi_check_system(dmi_tas_approved_list);
-}
-
static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
{
int i;
u32 size = le32_to_cpu(*le_size);
/* Verify that there is room for another country */
- if (size >= IWL_TAS_BLOCK_LIST_MAX)
+ if (size >= IWL_WTAS_BLACK_LIST_MAX)
return false;
for (i = 0; i < size; i++) {
@@ -1200,21 +1119,21 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
int ret;
- union iwl_tas_config_cmd cmd = {};
+ struct iwl_tas_data data = {};
+ struct iwl_tas_config_cmd cmd = {};
int cmd_size, fw_ver;
- BUILD_BUG_ON(ARRAY_SIZE(cmd.v3.block_list_array) <
- APCI_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
+ BUILD_BUG_ON(ARRAY_SIZE(cmd.common.block_list_array) !=
+ IWL_WTAS_BLACK_LIST_MAX);
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
return;
}
- fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
-
- ret = iwl_acpi_get_tas(&mvm->fwrt, &cmd, fw_ver);
+ ret = iwl_bios_get_tas_table(&mvm->fwrt, &data);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"TAS table invalid or unavailable. (%d)\n",
@@ -1225,16 +1144,16 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
if (ret == 0)
return;
- if (!iwl_mvm_is_vendor_in_approved_list()) {
+ if (!iwl_is_tas_approved()) {
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
- if ((!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
- &cmd.v4.block_list_size,
- IWL_MCC_US)) ||
- (!iwl_mvm_add_to_tas_block_list(cmd.v4.block_list_array,
- &cmd.v4.block_list_size,
- IWL_MCC_CANADA))) {
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
+ if ((!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_US)) ||
+ (!iwl_mvm_add_to_tas_block_list(data.block_list_array,
+ &data.block_list_size,
+ IWL_MCC_CANADA))) {
IWL_DEBUG_RADIO(mvm,
"Unable to add US/Canada to TAS block list, disabling TAS\n");
return;
@@ -1242,41 +1161,64 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
} else {
IWL_DEBUG_RADIO(mvm,
"System vendor '%s' is in the approved list.\n",
- dmi_get_system_info(DMI_SYS_VENDOR));
+ dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
}
- /* v4 is the same size as v3, so no need to differentiate here */
- cmd_size = fw_ver < 3 ?
- sizeof(struct iwl_tas_config_cmd_v2) :
- sizeof(struct iwl_tas_config_cmd_v3);
+ fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ memcpy(&cmd.common, &data, sizeof(struct iwl_tas_config_cmd_common));
+
+	/* Set the v3 or v4 specific parts; they will be truncated for fw_ver < 3 */
+ if (fw_ver == 4) {
+ cmd.v4.override_tas_iec = data.override_tas_iec;
+ cmd.v4.enable_tas_iec = data.enable_tas_iec;
+ cmd.v4.usa_tas_uhb_allowed = data.usa_tas_uhb_allowed;
+ } else {
+ cmd.v3.override_tas_iec = cpu_to_le16(data.override_tas_iec);
+ cmd.v3.enable_tas_iec = cpu_to_le16(data.enable_tas_iec);
+ }
+
+ cmd_size = sizeof(struct iwl_tas_config_cmd_common);
+ if (fw_ver >= 3)
+ /* v4 is the same size as v3 */
+ cmd_size += sizeof(struct iwl_tas_config_cmd_v3);
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
if (ret < 0)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
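
The command built above uses a fixed common prefix plus a version-specific tail, and simply shrinks cmd_size for older firmware instead of building separate structs. A standalone sketch of that layout with made-up field names (not the real firmware API):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cmd_common { uint32_t block_list_size; };		/* always sent */
struct cmd_v3_tail { uint16_t override_iec, enable_iec; };	/* v3+ only */

static size_t cmd_size_for(int fw_ver)
{
	size_t len = sizeof(struct cmd_common);

	if (fw_ver >= 3)	/* v4 is the same size as v3 */
		len += sizeof(struct cmd_v3_tail);
	return len;
}

int main(void)
{
	printf("v2: %zu bytes, v3: %zu bytes\n",
	       cmd_size_for(2), cmd_size_for(3));
	return 0;
}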
-static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
+static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
- u8 value;
- int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE,
- &iwl_rfi_guid, &value);
+ u32 value = 0;
+ /* default behaviour is disabled */
+ bool bios_enable_rfi = false;
+ int ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_RFI_CONFIG, &value);
+
if (ret < 0) {
IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);
+ return bios_enable_rfi;
+ }
- } else if (value >= DSM_VALUE_RFI_MAX) {
- IWL_DEBUG_RADIO(mvm, "DSM RFI got invalid value, ret=%d\n",
- value);
-
- } else if (value == DSM_VALUE_RFI_ENABLE) {
+ value &= DSM_VALUE_RFI_DISABLE;
+	/* The RFI BIOS config value can only be 0 or 3:
+	 * 0 means DDR and DLVR are enabled, 3 means both are disabled.
+	 * 1 and 2 are invalid BIOS configurations, so it is not possible
+	 * to disable DDR/DLVR separately.
+	 */
+ if (!value) {
IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
- return DSM_VALUE_RFI_ENABLE;
+ bios_enable_rfi = true;
+ } else if (value == DSM_VALUE_RFI_DISABLE) {
+ IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to disable\n");
+ } else {
+ IWL_DEBUG_RADIO(mvm,
+ "DSM RFI got invalid value, value=%d\n", value);
}
- IWL_DEBUG_RADIO(mvm, "DSM RFI is disabled\n");
-
- /* default behaviour is disabled */
- return DSM_VALUE_RFI_DISABLE;
+ return bios_enable_rfi;
}
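
The decoding above compresses to a small predicate. A runnable sketch under the same assumption (only 0 and 3 are valid BIOS values):

#include <stdbool.h>
#include <stdio.h>

#define DSM_VALUE_RFI_DISABLE 3	/* per the comment: only 0 or 3 are valid */

static bool rfi_enabled_from_bios(unsigned int value)
{
	value &= DSM_VALUE_RFI_DISABLE;
	if (value == 0)
		return true;		/* DDR and DLVR enabled */
	if (value != DSM_VALUE_RFI_DISABLE)
		fprintf(stderr, "invalid RFI config %u\n", value);
	return false;			/* disabled or invalid */
}

int main(void)
{
	unsigned int v;

	for (v = 0; v < 4; v++)
		printf("value=%u -> %s\n", v,
		       rfi_enabled_from_bios(v) ? "enable" : "disable");
	return 0;
}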
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
@@ -1288,43 +1230,34 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE), 1);
- cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
+ cmd.config_bitmap = iwl_get_lari_config_bitmap(&mvm->fwrt);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
if (!ret)
cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_UNII4_CHAN,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
if (!ret)
cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ACTIVATE_CHANNEL,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
if (!ret) {
if (cmd_ver < 8)
value &= ~ACTIVATE_5G2_IN_WW_MASK;
cmd.chan_state_active_bitmap = cpu_to_le32(value);
}
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENABLE_6E,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value);
if (!ret)
cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_FORCE_DISABLE_CHANNELS,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS,
+ &value);
if (!ret)
cmd.force_disable_channels_bitmap = cpu_to_le32(value);
- ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
- DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
- &iwl_guid, &value);
+ ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
+ &value);
if (!ret)
cmd.edt_bitmap = cpu_to_le32(value);
@@ -1390,15 +1323,17 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED ||
le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED)
- mvm->fwrt.uats_enabled = TRUE;
+ mvm->fwrt.uats_enabled = true;
}
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
+void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
{
int ret;
+ iwl_acpi_get_guid_lock_status(&mvm->fwrt);
+
/* read PPAG table */
- ret = iwl_acpi_get_ppag_table(&mvm->fwrt);
+ ret = iwl_bios_get_ppag_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"PPAG BIOS table invalid or unavailable. (%d)\n",
@@ -1406,7 +1341,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
/* read SAR tables */
- ret = iwl_sar_get_wrds_table(&mvm->fwrt);
+ ret = iwl_bios_get_wrds_table(&mvm->fwrt);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1415,7 +1350,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
* If not available, don't fail and don't bother with EWRD and
* WGDS */
- if (!iwl_sar_get_wgds_table(&mvm->fwrt)) {
+ if (!iwl_bios_get_wgds_table(&mvm->fwrt)) {
/*
* If basic SAR is not available, we check for WGDS,
* which should *not* be available either. If it is
@@ -1426,7 +1361,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
} else {
- ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
+ ret = iwl_bios_get_ewrd_table(&mvm->fwrt);
/* if EWRD is not available, we can still use
* WRDS, so don't fail */
if (ret < 0)
@@ -1436,7 +1371,7 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
/* read geo SAR table */
if (iwl_sar_geo_support(&mvm->fwrt)) {
- ret = iwl_sar_get_wgds_table(&mvm->fwrt);
+ ret = iwl_bios_get_wgds_table(&mvm->fwrt);
if (ret < 0)
IWL_DEBUG_RADIO(mvm,
"Geo SAR BIOS table invalid or unavailable. (%d)\n",
@@ -1446,59 +1381,18 @@ void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
}
iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters);
-}
-#else /* CONFIG_ACPI */
-inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
- int prof_a, int prof_b)
-{
- return 1;
+ if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid))
+ IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n");
}
-inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+static void iwl_mvm_disconnect_iterator(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- return -ENOENT;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ieee80211_hw_restart_disconnect(vif);
}
-static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
-{
- return -ENOENT;
-}
-
-static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
-{
- return 0;
-}
-
-static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
-{
-}
-
-static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
-{
-}
-
-bool iwl_mvm_is_vendor_in_approved_list(void)
-{
- return false;
-}
-
-static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
-{
- return DSM_VALUE_RFI_DISABLE;
-}
-
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
-{
-}
-
-#endif /* CONFIG_ACPI */
-
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
@@ -1543,10 +1437,15 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
/* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */
if (flags & ERROR_RECOVERY_UPDATE_DB) {
resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
- if (resp)
+ if (resp) {
IWL_ERR(mvm,
"Failed to send recovery cmd blob was invalid %d\n",
resp);
+
+ ieee80211_iterate_interfaces(mvm->hw, 0,
+ iwl_mvm_disconnect_iterator,
+ mvm);
+ }
}
}
@@ -1781,9 +1680,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (!mvm->ptp_data.ptp_clock)
iwl_mvm_ptp_init(mvm);
- if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
- IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");
-
ret = iwl_mvm_ppag_init(mvm);
if (ret)
goto error;
@@ -1803,7 +1699,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_uats_init(mvm);
if (iwl_rfi_supported(mvm)) {
- if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
+ if (iwl_mvm_eval_dsm_rfi(mvm))
iwl_rfi_send_config_cmd(mvm, NULL);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index be48b0fc9cb6..f13f13e6b71a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include "mvm.h"
#include "time-event.h"
@@ -53,6 +53,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
unsigned int link_id = link_conf->link_id;
struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
struct iwl_link_config_cmd cmd = {};
+ unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
if (WARN_ON_ONCE(!link_info))
return -EINVAL;
@@ -84,7 +86,8 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid)
memcpy(cmd.ibss_bssid_addr, link_conf->bssid, ETH_ALEN);
- cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
+ if (cmd_ver < 2)
+ cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD);
}
@@ -100,6 +103,8 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_link_config_cmd cmd = {};
u32 ht_flag, flags = 0, flags_mask = 0;
int ret;
+ unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);
if (WARN_ON_ONCE(!link_info ||
link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
@@ -190,12 +195,21 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
if (changes & LINK_CONTEXT_MODIFY_EHT_PARAMS) {
+ struct ieee80211_chanctx_conf *ctx;
+ struct cfg80211_chan_def *def = NULL;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(link_conf->chanctx_conf);
+ if (ctx)
+ def = iwl_mvm_chanctx_def(mvm, ctx);
+
if (iwlwifi_mod_params.disable_11be ||
- !link_conf->eht_support)
+ !link_conf->eht_support || !def ||
+ iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) >= 6)
changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS;
else
- cmd.puncture_mask =
- cpu_to_le16(link_conf->eht_puncturing);
+ cmd.puncture_mask = cpu_to_le16(def->punctured);
+ rcu_read_unlock();
}
cmd.bss_color = link_conf->he_bss_color.color;
@@ -224,7 +238,8 @@ send_cmd:
cmd.flags = cpu_to_le32(flags);
cmd.flags_mask = cpu_to_le32(flags_mask);
cmd.spec_link_id = link_conf->link_id;
- cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
+ if (cmd_ver < 2)
+ cmd.listen_lmac = cpu_to_le32(link_info->listen_lmac);
ret = iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_MODIFY);
if (!ret && (changes & LINK_CONTEXT_MODIFY_ACTIVE))
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index c4f96125cf33..228ede7b8957 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -31,6 +31,17 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
IWL_GEN2_TRIG_TX_FIFO_BK,
};
+const u8 iwl_mvm_ac_to_bz_tx_fifo[] = {
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+};
+
struct iwl_mvm_mac_iface_iterator_data {
struct iwl_mvm *mvm;
struct ieee80211_vif *vif;
@@ -455,7 +466,7 @@ void iwl_mvm_set_fw_protection_flags(struct iwl_mvm *mvm,
break;
case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
/* Protect when channel wider than 20MHz */
- if (link_conf->chandef.width > NL80211_CHAN_WIDTH_20)
+ if (link_conf->chanreq.oper.width > NL80211_CHAN_WIDTH_20)
*protection_flags |= cpu_to_le32(ht_flag);
break;
default:
@@ -494,7 +505,7 @@ void iwl_mvm_set_fw_qos_params(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (link_conf->qos)
*qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
- if (link_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
+ if (link_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)
*qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
}
@@ -910,8 +921,8 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct iwl_mvm *mvm,
link_conf = rcu_dereference(vif->link_conf[link_id]);
if (link_conf) {
basic = link_conf->basic_rates;
- if (link_conf->chandef.chan)
- band = link_conf->chandef.chan->band;
+ if (link_conf->chanreq.oper.chan)
+ band = link_conf->chanreq.oper.chan->band;
}
rcu_read_unlock();
}
@@ -1466,8 +1477,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
mvmvif->csa_countdown = true;
- if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
- int c = ieee80211_beacon_update_cntdwn(csa_vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
+ int c = ieee80211_beacon_update_cntdwn(csa_vif, 0);
iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif,
&csa_vif->bss_conf);
@@ -1486,7 +1497,7 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
}
} else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
/* we don't have CSA NoA scheduled yet, switch now */
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
RCU_INIT_POINTER(mvm->csa_vif, NULL);
}
}
@@ -1626,10 +1637,22 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
* TODO: the threshold should be adjusted based on latency conditions,
* and/or in case of a CS flow on one of the other AP vifs.
*/
- if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG)
- iwl_mvm_connection_loss(mvm, vif, "missed beacons");
- else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD)
- ieee80211_beacon_loss(vif);
+ if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) {
+ if (rx_missed_bcon_since_rx >= IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD) {
+ iwl_mvm_connection_loss(mvm, vif, "missed beacons");
+ } else {
+ IWL_WARN(mvm,
+				 "missed beacons exceed threshold, but receiving data. Staying connected; expect bugs.\n");
+ IWL_WARN(mvm,
+ "missed_beacons:%d, missed_beacons_since_rx:%d\n",
+ rx_missed_bcon, rx_missed_bcon_since_rx);
+ }
+ } else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ ieee80211_beacon_loss(vif);
+ else
+ ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC);
+ }
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
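
The new branch structure maps the two counters to three outcomes. A compact userspace sketch of the decision, with stand-in threshold values (the driver's real constants differ):

#include <stdio.h>

#define THOLD_LONG     16	/* illustrative thresholds only */
#define THOLD_SINCE_RX  8
#define THOLD           8

static const char *beacon_action(int missed, int missed_since_rx)
{
	if (missed >= THOLD_LONG)
		return missed_since_rx >= THOLD_SINCE_RX ?
			"connection loss" : "warn, stay connected";
	if (missed_since_rx > THOLD)
		return "report beacon loss";
	return "none";
}

int main(void)
{
	printf("%s\n", beacon_action(20, 10));	/* connection loss */
	printf("%s\n", beacon_action(20, 2));	/* warn, stay connected */
	printf("%s\n", beacon_action(5, 9));	/* report beacon loss */
	return 0;
}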
@@ -1832,7 +1855,7 @@ void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
csa_vif->bss_conf.beacon_int));
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
rcu_read_unlock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 7f13dff04b26..1935630d3def 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -138,7 +138,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
resp->channels,
__le16_to_cpu(resp->mcc),
__le16_to_cpu(resp->geo_info),
- le32_to_cpu(resp->cap), resp_ver);
+ le32_to_cpu(resp->cap), resp_ver,
+ mvm->fwrt.uats_enabled);
/* Store the return source id */
src_id = resp->source_id;
if (IS_ERR_OR_NULL(regd)) {
@@ -263,6 +264,9 @@ static const u8 tm_if_types_ext_capa_sta[] = {
__bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \
IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \
__bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY))
+#define IWL_MVM_MLD_CAPA_OPS FIELD_PREP_CONST( \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME)
static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
{
@@ -272,6 +276,7 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
.extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
/* relevant only if EHT is supported */
.eml_capabilities = IWL_MVM_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
},
{
.iftype = NL80211_IFTYPE_STATION,
@@ -280,6 +285,7 @@ static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = {
.extended_capabilities_len = sizeof(tm_if_types_ext_capa_sta),
/* relevant only if EHT is supported */
.eml_capabilities = IWL_MVM_EMLSR_CAPA,
+ .mld_capa_and_ops = IWL_MVM_MLD_CAPA_OPS,
},
};
@@ -490,6 +496,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_TIME_SYNC_BOTH_FTM_TM))
hw->wiphy->hw_timestamp_max_peers = 1;
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT);
+
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
hw->wiphy->features |=
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
@@ -695,6 +706,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
}
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(LOCATION_GROUP,
+ TOF_RANGE_REQ_CMD),
+ IWL_FW_CMD_VER_UNKNOWN) >= 11) {
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE);
+
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_SECURE_LTF);
+ }
+
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
#ifdef CONFIG_PM_SLEEP
@@ -1195,14 +1218,12 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
- int retry, max_retry = 0;
mutex_lock(&mvm->mutex);
/* we are starting the mac not in error flow, and restart is enabled */
if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
iwlwifi_mod_params.fw_restart) {
- max_retry = IWL_MAX_INIT_RETRY;
/*
* This will prevent mac80211 recovery flows to trigger during
* init failures
@@ -1210,13 +1231,7 @@ int iwl_mvm_mac_start(struct ieee80211_hw *hw)
set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
}
- for (retry = 0; retry <= max_retry; retry++) {
- ret = __iwl_mvm_mac_start(mvm);
- if (!ret || mvm->pldr_sync)
- break;
-
- IWL_ERR(mvm, "mac start retry %d\n", retry);
- }
+ ret = __iwl_mvm_mac_start(mvm);
clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
mutex_unlock(&mvm->mutex);
@@ -1350,6 +1365,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
* discover that its list is now empty.
*/
cancel_work_sync(&mvm->async_handlers_wk);
+ wiphy_work_cancel(hw->wiphy, &mvm->async_handlers_wiphy_wk);
}
struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
@@ -1433,7 +1449,7 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
if (ret)
goto out_unlock;
@@ -1454,7 +1470,8 @@ out_unlock:
}
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1600,7 +1617,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
*/
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC) {
- iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
ret = 0;
goto out;
}
@@ -1616,7 +1634,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_remove_mac;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
goto out_remove_mac;
@@ -1627,6 +1645,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
+ if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
+ vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_device_vif = vif;
@@ -1637,10 +1658,11 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
mvm->monitor_p80 =
- iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chandef);
+ iwl_mvm_chandef_get_primary_80(&vif->bss_conf.chanreq.oper);
}
- iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
@@ -2558,7 +2580,7 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
}
if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
@@ -2579,7 +2601,7 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
/* FIXME: need to update per link when FW API will
* support it
*/
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
if (ret)
IWL_ERR(mvm,
"failed to update CQM thresholds\n");
@@ -2606,9 +2628,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
if (changes & BSS_CHANGED_ASSOC && vif->cfg.assoc) {
if ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be))
+ !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id);
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
@@ -2617,10 +2637,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/* Update MU EDCA params */
if (changes & BSS_CHANGED_QOS && mvmvif->associated &&
vif->cfg.assoc &&
- ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be)))
+ (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->deflink.ap_sta_id);
/*
@@ -3416,16 +3433,16 @@ iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
.tolerated = true,
};
- if (WARN_ON_ONCE(!link_conf->chandef.chan ||
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan ||
!mvmvif->link[link_id]))
return;
- if (!(link_conf->chandef.chan->flags & IEEE80211_CHAN_RADAR)) {
+ if (!(link_conf->chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR)) {
mvmvif->link[link_id]->he_ru_2mhz_block = false;
return;
}
- cfg80211_bss_iter(hw->wiphy, &link_conf->chandef,
+ cfg80211_bss_iter(hw->wiphy, &link_conf->chanreq.oper,
iwl_mvm_check_he_obss_narrow_bw_ru_iter,
&iter_data);
@@ -3485,10 +3502,10 @@ static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm,
return;
/* FIXME: MEI needs to be updated for MLO */
- if (!vif->bss_conf.chandef.chan)
+ if (!vif->bss_conf.chanreq.oper.chan)
return;
- conn_info.channel = vif->bss_conf.chandef.chan->hw_value;
+ conn_info.channel = vif->bss_conf.chanreq.oper.chan->hw_value;
switch (mvm_sta->pairwise_cipher) {
case WLAN_CIPHER_SUITE_TKIP:
@@ -3685,6 +3702,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
NL80211_TDLS_SETUP);
}
+ if (ret)
+ return ret;
+
for_each_sta_active_link(vif, sta, link_sta, i)
link_sta->agg.max_rc_amsdu_len = 1;
@@ -3693,6 +3713,19 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
mvmvif->ap_sta = sta;
+ /*
+	 * Initialize the rates here already - this really only
+	 * tells the firmware what the supported legacy rates may
+	 * be, since it's initialized already from what the AP
+	 * advertised in the beacon/probe response. This will
+ * allow the firmware to send auth/assoc frames with one
+ * of the supported rates already, rather than having to
+ * use a mandatory rate.
+ * If we're the AP, we'll just assume mandatory rates at
+ * this point, but we know nothing about the STA anyway.
+ */
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
return 0;
}
@@ -3719,10 +3752,8 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
* the default bss_conf
*/
if (!mvm->mld_api_is_used &&
- ((vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax) ||
- (vif->bss_conf.eht_support &&
- !iwlwifi_mod_params.disable_11be)))
+ (vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax))
iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->deflink.sta_id);
} else if (vif->type == NL80211_IFTYPE_STATION) {
iwl_mvm_vif_set_he_support(hw, vif, sta, true);
@@ -3774,7 +3805,7 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
NL80211_TDLS_ENABLE_LINK);
} else {
/* enable beacon filtering */
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
mvmvif->authorized = 1;
@@ -3791,13 +3822,17 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
mvm_sta->authorized = true;
- iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
-
/* MFP is set by default before the station is authorized.
* Clear it here in case it's not used.
*/
- if (!sta->mfp)
- return callbacks->update_sta(mvm, vif, sta);
+ if (!sta->mfp) {
+ int ret = callbacks->update_sta(mvm, vif, sta);
+
+ if (ret)
+ return ret;
+ }
+
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
return 0;
}
@@ -3828,7 +3863,7 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
mvmvif->authorized = 0;
/* disable beacon filtering */
- iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ iwl_mvm_disable_beacon_filter(mvm, vif);
}
return 0;
@@ -4407,44 +4442,6 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
return true;
}
-#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
-#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
-#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
-#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
-#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
-
-static void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
- u32 duration_ms,
- u32 *duration_tu,
- u32 *delay)
-{
- u32 dtim_interval = vif->bss_conf.dtim_period *
- vif->bss_conf.beacon_int;
-
- *delay = AUX_ROC_MIN_DELAY;
- *duration_tu = MSEC_TO_TU(duration_ms);
-
- /*
- * If we are associated we want the delay time to be at least one
- * dtim interval so that the FW can wait until after the DTIM and
- * then start the time event, this will potentially allow us to
- * remain off-channel for the max duration.
- * Since we want to use almost a whole dtim interval we would also
- * like the delay to be for 2-3 dtim intervals, in case there are
- * other time events with higher priority.
- */
- if (vif->cfg.assoc) {
- *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
- /* We cannot remain off-channel longer than the DTIM interval */
- if (dtim_interval <= *duration_tu) {
- *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
- if (*duration_tu <= AUX_ROC_MIN_DURATION)
- *duration_tu = dtim_interval -
- AUX_ROC_MIN_SAFETY_BUFFER;
- }
- }
-}
-
static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
@@ -4542,48 +4539,6 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
return res;
}
-static int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
- struct ieee80211_channel *channel,
- struct ieee80211_vif *vif,
- int duration, u32 activity)
-{
- int res;
- u32 duration_tu, delay;
- struct iwl_roc_req roc_req = {
- .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
- .activity = cpu_to_le32(activity),
- .sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
- };
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Set the channel info data */
- iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
- channel->hw_value,
- iwl_mvm_phy_band_from_nl80211(channel->band),
- IWL_PHY_CHANNEL_MODE20, 0);
-
- iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu,
- &delay);
- roc_req.duration = cpu_to_le32(duration_tu);
- roc_req.max_delay = cpu_to_le32(delay);
-
- IWL_DEBUG_TE(mvm,
- "\t(requested = %ums, max_delay = %ums)\n",
- duration, delay);
- IWL_DEBUG_TE(mvm,
- "Requesting to remain on channel %u for %utu\n",
- channel->hw_value, duration_tu);
-
- /* Set the node address */
- memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
-
- res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- 0, sizeof(roc_req), &roc_req);
-
- return res;
-}
-
static int iwl_mvm_add_aux_sta_for_hs20(struct iwl_mvm *mvm, u32 lmac_id)
{
int ret = 0;
@@ -4700,7 +4655,7 @@ static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm,
cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
return iwl_mvm_phy_ctxt_add(mvm, mvmvif->deflink.phy_ctxt,
- &chandef, 1, 1);
+ &chandef, NULL, 1, 1);
}
/* Execute the common part for MLD and non-MLD modes */
@@ -4788,8 +4743,8 @@ static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac,
data->responder = true;
}
-static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
- struct ieee80211_chanctx_conf *ctx)
+bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm_ftm_responder_iter_data data = {
.responder = false,
@@ -4808,9 +4763,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
{
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt;
- bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
- iwl_mvm_enable_fils(mvm, ctx);
- struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def;
+ struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx);
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -4823,7 +4776,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
goto out;
}
- ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def,
+ ret = iwl_mvm_phy_ctxt_add(mvm, phy_ctxt, def, &ctx->ap,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
if (ret) {
@@ -4876,9 +4829,7 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
- iwl_mvm_enable_fils(mvm, ctx);
- struct cfg80211_chan_def *def = use_def ? &ctx->def : &ctx->min_def;
+ struct cfg80211_chan_def *def = iwl_mvm_chanctx_def(mvm, ctx);
if (WARN_ONCE((phy_ctxt->ref > 1) &&
(changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
@@ -4903,7 +4854,7 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
}
iwl_mvm_bt_coex_vif_change(mvm);
- iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def,
+ iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, &ctx->ap,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
@@ -5356,8 +5307,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
return -EINVAL;
if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
- return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ return iwl_mvm_enable_beacon_filter(mvm, vif);
+ return iwl_mvm_disable_beacon_filter(mvm, vif);
}
return -EOPNOTSUPP;
@@ -5441,7 +5392,7 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
iwl_mvm_csa_client_absent(mvm, vif);
if (mvmvif->bf_data.bf_enabled) {
- int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ int ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
return ret;
@@ -5601,8 +5552,16 @@ void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
if (chsw->count >= mvmvif->csa_count && chsw->block_tx) {
if (mvmvif->csa_misbehave) {
+ struct ieee80211_bss_conf *link_conf;
+
			/* Second time, give up on this AP */
- iwl_mvm_abort_channel_switch(hw, vif);
+
+ link_conf = wiphy_dereference(hw->wiphy,
+ vif->link_conf[chsw->link_id]);
+ if (WARN_ON(!link_conf))
+ return;
+
+ iwl_mvm_abort_channel_switch(hw, vif, link_conf);
ieee80211_chswitch_done(vif, false, 0);
mvmvif->csa_misbehave = false;
return;
@@ -6103,6 +6062,7 @@ void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
}
}
+#define SYNC_RX_QUEUE_TIMEOUT (HZ)
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
enum iwl_mvm_rxq_notif_type type,
bool sync,
@@ -6151,11 +6111,12 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
ret = wait_event_timeout(mvm->rx_sync_waitq,
READ_ONCE(mvm->queue_sync_state) == 0 ||
- iwl_mvm_is_radio_killed(mvm),
- HZ);
- WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm),
- "queue sync: failed to sync, state is 0x%lx\n",
- mvm->queue_sync_state);
+ iwl_mvm_is_radio_hw_killed(mvm),
+ SYNC_RX_QUEUE_TIMEOUT);
+ WARN_ONCE(!ret && !iwl_mvm_is_radio_hw_killed(mvm),
+ "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
+ mvm->queue_sync_state,
+ mvm->queue_sync_cookie);
}
out:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
index ea3e9e9c6e26..8a38fc4b0b0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <net/mac80211.h>
@@ -62,11 +62,13 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
struct ieee80211_key_conf *keyconf)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool pairwise = keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ bool igtk = keyconf->keyidx == 4 || keyconf->keyidx == 5;
u32 flags = 0;
lockdep_assert_held(&mvm->mutex);
- if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ if (!pairwise)
flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;
switch (keyconf->cipher) {
@@ -96,14 +98,19 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
if (!sta && vif->type == NL80211_IFTYPE_STATION)
sta = mvmvif->ap_sta;
- /* Set the MFP flag also for an AP interface where the key is an IGTK
- * key as in such a case the station would always be NULL
+ /*
+ * If we are installing an iGTK (in AP or STA mode), we need to tell
+ * the firmware this key will en/decrypt MGMT frames.
+ * Same goes if we are installing a pairwise key for an MFP station.
+	 * If we're installing a groupwise key (which is not an iGTK),
+	 * we will not use this key for MGMT frames.
*/
- if ((!IS_ERR_OR_NULL(sta) && sta->mfp) ||
- (vif->type == NL80211_IFTYPE_AP &&
- (keyconf->keyidx == 4 || keyconf->keyidx == 5)))
+ if ((!IS_ERR_OR_NULL(sta) && sta->mfp && pairwise) || igtk)
flags |= IWL_SEC_KEY_FLAG_MFP;
+ if (keyconf->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ flags |= IWL_SEC_KEY_FLAG_SPP_AMSDU;
+
return flags;
}
@@ -335,6 +342,21 @@ static int _iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
return ret;
}
+int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 sta_mask,
+ struct ieee80211_key_conf *keyconf)
+{
+ u32 key_flags = iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
+ IWL_SEC_KEY_FLAG_MFP;
+
+ if (WARN_ON(!sta_mask))
+ return -EINVAL;
+
+ return __iwl_mvm_sec_key_del(mvm, sta_mask, key_flags, keyconf->keyidx,
+ 0);
+}
+
int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
index f313a8d771e4..bb7851042177 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include "mvm.h"
@@ -167,7 +167,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_PROMISC |
- MAC_FILTER_IN_CONTROL_AND_MGMT |
+ MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT |
MAC_CFG_FILTER_ACCEPT_BEACON |
MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
MAC_CFG_FILTER_ACCEPT_GRP);
@@ -205,8 +205,11 @@ static int iwl_mvm_mld_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
cmd.p2p_dev.is_disc_extended =
iwl_mac_ctxt_p2p_dev_has_extended_disc(mvm, vif);
- /* Override the filter flags to accept only probe requests */
- cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ);
+ /* Override the filter flags to accept all management frames. This is
+ * needed to support both P2P device discovery using probe requests and
+	 * P2P service discovery using action frames.
+ */
+ cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT);
return iwl_mvm_mld_mac_ctxt_send_cmd(mvm, &cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 61170173f917..084314bf6f36 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include "mvm.h"
@@ -47,7 +47,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
goto out_unlock;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
goto out_remove_mac;
@@ -81,7 +81,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
}
- iwl_mvm_vif_dbgfs_add_link(mvm, vif);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_vif_dbgfs_add_link(mvm, vif);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
@@ -253,9 +254,6 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
if (!rcu_access_pointer(link_conf->chanctx_conf))
n_active++;
- if (n_active > iwl_mvm_max_active_links(mvm, vif))
- return -EOPNOTSUPP;
-
if (WARN_ON_ONCE(!mvmvif->link[link_id]))
return -EINVAL;
@@ -437,6 +435,9 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
mvmvif->ap_ibss_active = false;
}
+ iwl_mvm_link_changed(mvm, vif, link_conf,
+ LINK_CONTEXT_MODIFY_ACTIVE, false);
+
if (iwl_mvm_is_esr_supported(mvm->fwrt.trans) && n_active > 1) {
int ret = iwl_mvm_esr_mode_inactive(mvm, vif);
@@ -448,9 +449,6 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_MONITOR)
iwl_mvm_mld_rm_snif_sta(mvm, vif);
- iwl_mvm_link_changed(mvm, vif, link_conf,
- LINK_CONTEXT_MODIFY_ACTIVE, false);
-
if (switching_chanctx)
return;
mvmvif->link[link_id]->phy_ctxt = NULL;
@@ -606,6 +604,7 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw,
struct iwl_mvm_link_sel_data {
u8 link_id;
enum nl80211_band band;
+ enum nl80211_chan_width width;
bool active;
};
@@ -657,7 +656,8 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
continue;
data[n_data].link_id = link_id;
- data[n_data].band = link_conf->chandef.chan->band;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ data[n_data].width = link_conf->chanreq.oper.width;
data[n_data].active = vif->active_links & BIT(link_id);
n_data++;
}
@@ -752,8 +752,8 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
}
- /* Update EHT Puncturing info */
- if (changes & BSS_CHANGED_EHT_PUNCTURING && vif->cfg.assoc)
+ /* if associated, maybe puncturing changed - we'll check later */
+ if (vif->cfg.assoc)
link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
if (link_changes) {
@@ -1121,17 +1121,12 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
{
struct iwl_mvm_vif_link_info *new_link[IEEE80211_MLD_MAX_NUM_LINKS] = {};
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u16 removed = old_links & ~new_links;
u16 added = new_links & ~old_links;
int err, i;
- if (hweight16(new_links) > 1 &&
- n_active > iwl_mvm_max_active_links(mvm, vif))
- return -EOPNOTSUPP;
-
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
int r;
@@ -1223,6 +1218,146 @@ iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw,
return ret;
}
+/*
+ * This function receives a subset of the usable links bitmap and
+ * returns the primary link id, or -1 if such a link doesn't exist
+ * (e.g. non-MLO connection) or wasn't found.
+ */
+int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ unsigned long usable_links)
+{
+ struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+ u8 link_id, n_data = 0;
+
+ if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc)
+ return -1;
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ data[n_data].width = link_conf->chanreq.oper.width;
+ data[n_data].active = true;
+ n_data++;
+ }
+
+ if (n_data <= 1)
+ return -1;
+
+ /* The logic should be modified to handle more than 2 links */
+ WARN_ON_ONCE(n_data > 2);
+
+ /* The primary link is the one with the wider bandwidth or, on a tie, the higher band */
+ if (data[0].width > data[1].width)
+ return data[0].link_id;
+ if (data[0].width < data[1].width)
+ return data[1].link_id;
+ if (data[0].band >= data[1].band)
+ return data[0].link_id;
+
+ return data[1].link_id;
+}
+
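A minimal standalone sketch of the selection order implemented above, with simplified integer types (pick_primary and its parameters are illustrative, not driver code): the wider bandwidth wins outright, and only on equal widths does the higher band decide, with the first link returned on a full tie.

static int pick_primary(int width0, int band0, int id0,
			int width1, int band1, int id1)
{
	/* a wider channel wins outright */
	if (width0 != width1)
		return width0 > width1 ? id0 : id1;
	/* equal widths: prefer the higher band, first link on a tie */
	return band0 >= band1 ? id0 : id1;
}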
+/*
+ * This function receives a bitmap of usable links and checks whether we can
+ * enter eSR on those links.
+ */
+static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ unsigned long desired_links)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
+ desired_links);
+ const struct wiphy_iftype_ext_capab *ext_capa;
+ bool ret = true;
+ int link_id;
+
+ if (primary_link < 0)
+ return false;
+
+ if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP))
+ return false;
+
+ ext_capa = cfg80211_get_iftype_ext_capa(mvm->hw->wiphy,
+ ieee80211_vif_type_p2p(vif));
+ if (!ext_capa ||
+ !(ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP))
+ return false;
+
+ for_each_set_bit(link_id, &desired_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ /* BT Coex affects eSR mode only if one of the links is on LB */
+ if (link_conf->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
+ continue;
+
+ ret = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
+ primary_link);
+ /* Mark eSR as disabled for the next time */
+ if (!ret)
+ mvmvif->bt_coex_esr_disabled = true;
+ break;
+ }
+
+ return ret;
+}
+
+static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 desired_links)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int n_links = hweight16(desired_links);
+ bool ret = true;
+
+ if (n_links <= 1)
+ return true;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Check if HW supports the wanted number of links */
+ if (n_links > iwl_mvm_max_active_links(mvm, vif)) {
+ ret = false;
+ goto unlock;
+ }
+
+ /* If it is an eSR device, check that we can enter eSR */
+ if (iwl_mvm_is_esr_supported(mvm->fwrt.trans))
+ ret = iwl_mvm_can_enter_esr(mvm, vif, desired_links);
+unlock:
+ mutex_unlock(&mvm->mutex);
+ return ret;
+}
+
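A usage note, assuming mac80211's standard flow for this callback (the caller is not part of this patch): .can_activate_links is consulted before mac80211 activates the requested links, so returning false here vetoes the activation up front instead of unwinding it afterwards. Note that the n_links <= 1 fast path returns true without taking mvm->mutex.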
+static enum ieee80211_neg_ttlm_res
+iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u16 map;
+ u8 i;
+
+ /* Verify all TIDs are mapped to the same set of links */
+ map = neg_ttlm->downlink[0];
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] ||
+ neg_ttlm->uplink[i] != map)
+ return NEG_TTLM_RES_REJECT;
+ }
+
+ return NEG_TTLM_RES_ACCEPT;
+}
+
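A hedged worked example of the acceptance rule above (the link-set values are made up): a negotiated TTLM is accepted only when every TID maps to one identical link set in both directions.

/*
 * downlink[i] == uplink[i] == 0x3 for all TIDs  -> NEG_TTLM_RES_ACCEPT
 * downlink[0] == 0x3 but uplink[4] == 0x1       -> NEG_TTLM_RES_REJECT
 */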
const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
@@ -1317,4 +1452,6 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.change_vif_links = iwl_mvm_mld_change_vif_links,
.change_sta_links = iwl_mvm_mld_change_sta_links,
+ .can_activate_links = iwl_mvm_mld_can_activate_links,
+ .can_neg_ttlm = iwl_mvm_mld_can_neg_ttlm,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 40627961b834..a10b48947bca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -40,8 +40,9 @@
#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
+#define IWL_MVM_MISSED_BEACONS_SINCE_RX_THOLD 4
#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
-#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 19
/* A TimeUnit is 1024 microsecond */
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
@@ -105,6 +106,7 @@ struct iwl_mvm_phy_ctxt {
/* track for RLC config command */
u32 center_freq1;
bool rlc_disabled;
+ u32 channel_load_by_us;
};
struct iwl_mvm_time_event_data {
@@ -121,7 +123,7 @@ struct iwl_mvm_time_event_data {
* if the te is in the time event list or not (when id == TE_MAX)
*/
u32 id;
- u8 link_id;
+ s8 link_id;
};
/* Power management */
@@ -359,6 +361,7 @@ struct iwl_mvm_vif_link_info {
* @pm_enabled - indicate if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
+ * @bt_coex_esr_disabled: indicates if esr is disabled due to bt coex
* @low_latency: bit flags for low latency
* see enum &iwl_mvm_low_latency_cause for causes.
* @low_latency_actual: boolean, indicates low latency is set,
@@ -389,6 +392,7 @@ struct iwl_mvm_vif {
bool pm_enabled;
bool monitor_active;
bool esr_active;
+ bool bt_coex_esr_disabled;
u8 low_latency: 6;
u8 low_latency_actual: 1;
@@ -537,8 +541,8 @@ struct iwl_mvm_tt_mgmt {
#ifdef CONFIG_THERMAL
/**
- *struct iwl_mvm_thermal_device - thermal zone related data
- * @temp_trips: temperature thresholds for report
+ * struct iwl_mvm_thermal_device - thermal zone related data
+ * @trips: temperature thresholds to report
* @fw_trips_index: keep indexes to original array - temp_trips
* @tzone: thermal zone device data
*/
@@ -848,6 +852,9 @@ struct iwl_mvm {
spinlock_t async_handlers_lock;
struct work_struct async_handlers_wk;
+ /* For async rx handlers that require the wiphy lock */
+ struct wiphy_work async_handlers_wiphy_wk;
+
struct work_struct roc_done_wk;
unsigned long init_status;
@@ -1215,7 +1222,6 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
- * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
* @IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE: suppress one error log
* if this is set, when intentionally triggered
@@ -1230,7 +1236,6 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
IWL_MVM_STATUS_FIRMWARE_RUNNING,
- IWL_MVM_STATUS_NEED_FLUSH_P2P,
IWL_MVM_STATUS_IN_D3,
IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
IWL_MVM_STATUS_STARTING,
@@ -1567,13 +1572,17 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_trans *trans = mvm->fwrt.trans;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
if (vif->type == NL80211_IFTYPE_AP)
return mvm->fw->ucode_capa.num_beacons;
- if (iwl_mvm_is_esr_supported(trans) ||
- (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
- CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
+ if ((iwl_mvm_is_esr_supported(trans) &&
+ !mvmvif->bt_coex_esr_disabled) ||
+ ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
+ CSR_HW_RFID_IS_CDB(trans->hw_rf_id))))
return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM;
return 1;
@@ -1581,12 +1590,16 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
extern const u8 iwl_mvm_ac_to_tx_fifo[];
extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
+extern const u8 iwl_mvm_ac_to_bz_tx_fifo[];
static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
enum ieee80211_ac_numbers ac)
{
- return iwl_mvm_has_new_tx_api(mvm) ?
- iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac];
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return iwl_mvm_ac_to_bz_tx_fifo[ac];
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_ac_to_gen2_tx_fifo[ac];
+ return iwl_mvm_ac_to_tx_fifo[ac];
}
struct iwl_rate_info {
@@ -1801,18 +1814,20 @@ void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
/* MVM PHY */
struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm);
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic);
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic);
void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt);
int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
-u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
-u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef);
int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
u8 chains_static, u8 chains_dynamic);
@@ -2116,6 +2131,12 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
+bool iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id, int primary_link);
+void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id);
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -2129,11 +2150,9 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
{}
#endif
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags);
+ struct ieee80211_vif *vif);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags);
+ struct ieee80211_vif *vif);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
@@ -2366,7 +2385,7 @@ u64 iwl_mvm_ptp_get_adj_time(struct iwl_mvm *mvm, u64 base_time);
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm);
-void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm);
+void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_link_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2387,6 +2406,10 @@ int iwl_mvm_sec_key_del(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *keyconf);
+int iwl_mvm_sec_key_del_pasn(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 sta_mask,
+ struct ieee80211_key_conf *keyconf);
void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_vif_link_info *link,
@@ -2511,7 +2534,7 @@ static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm,
static inline void
iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
struct iwl_fw_channel_info *ci,
- struct cfg80211_chan_def *chandef)
+ const struct cfg80211_chan_def *chandef)
{
enum nl80211_band band = chandef->chan->band;
@@ -2601,7 +2624,6 @@ static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm,
void iwl_mvm_send_roaming_forbidden_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool forbidden);
-bool iwl_mvm_is_vendor_in_approved_list(void);
/* Callbacks for ieee80211_ops */
void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
@@ -2691,7 +2713,8 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf);
void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
@@ -2730,4 +2753,28 @@ bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
struct ieee80211_chanctx_conf *ctx);
void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool valid_links_changed);
+int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ unsigned long usable_links);
+
+bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
+ struct ieee80211_chanctx_conf *ctx);
+
+static inline struct cfg80211_chan_def *
+iwl_mvm_chanctx_def(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx)
+{
+ bool use_def = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx) ||
+ iwl_mvm_enable_fils(mvm, ctx);
+
+ return use_def ? &ctx->def : &ctx->min_def;
+}
+
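A reading note on the helper above, inferred from the code rather than from patch commentary: FTM responder and FILS operation need the full configured channel definition, so for those chanctxs the helper returns &ctx->def; all other users keep the narrower &ctx->min_def that mac80211 computed.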
+void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
+ u32 duration_ms,
+ u32 *duration_tu,
+ u32 *delay);
+int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration, u32 activity);
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index c0dd441e800e..ae8177222881 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -590,7 +590,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
return -EIO;
if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
- !iwl_acpi_get_mcc(mvm->dev, mcc)) {
+ !iwl_bios_get_mcc(&mvm->fwrt, mcc)) {
kfree(regd);
regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
MCC_SOURCE_BIOS, NULL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index adbbe19aeae5..a93981cb9714 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -161,9 +161,9 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
- if (!vif->bss_conf.chandef.chan ||
- vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ ||
- vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
+ if (!vif->bss_conf.chanreq.oper.chan ||
+ vif->bss_conf.chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
+ vif->bss_conf.chanreq.oper.width < NL80211_CHAN_WIDTH_40)
return;
if (!vif->cfg.assoc)
@@ -219,7 +219,7 @@ void iwl_mvm_update_link_smps(struct ieee80211_vif *vif,
return;
if (mvm->fw_static_smps_request &&
- link_conf->chandef.width == NL80211_CHAN_WIDTH_160 &&
+ link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_160 &&
link_conf->he_support)
mode = IEEE80211_SMPS_STATIC;
@@ -259,7 +259,7 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
}
/**
- * enum iwl_rx_handler_context context for Rx handler
+ * enum iwl_rx_handler_context: context for Rx handler
* @RX_HANDLER_SYNC : this means that it will be called in the Rx path
* which can't acquire mvm->mutex.
* @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
@@ -267,15 +267,19 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
* it will be called from a worker with mvm->mutex held.
* @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
* mutex itself, it will be called from a worker without mvm->mutex held.
+ * @RX_HANDLER_ASYNC_LOCKED_WIPHY: If the handler needs to hold the wiphy lock
+ * and mvm->mutex. Will be handled with the wiphy_work queue infra
+ * instead of the regular work queue.
*/
enum iwl_rx_handler_context {
RX_HANDLER_SYNC,
RX_HANDLER_ASYNC_LOCKED,
RX_HANDLER_ASYNC_UNLOCKED,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
};
/**
- * struct iwl_rx_handlers handler for FW notification
+ * struct iwl_rx_handlers: handler for FW notification
* @cmd_id: command id
* @min_size: minimum size to expect for the notification
* @context: see &iwl_rx_handler_context
@@ -316,7 +320,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
struct iwl_tlc_update_notif),
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
- RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif),
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_bt_coex_profile_notif),
RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
@@ -324,7 +329,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
iwl_mvm_handle_rx_system_oper_stats,
- RX_HANDLER_ASYNC_LOCKED,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
struct iwl_system_statistics_notif_oper),
RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
iwl_mvm_handle_rx_system_oper_part1_stats,
@@ -673,6 +678,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
/* this forward declaration can avoid to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *work);
static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
@@ -682,7 +689,7 @@ static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
if (!backoff)
return 0;
- dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
+ iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit);
while (backoff->pwr) {
if (dflt_pwr_limit >= backoff->pwr)
@@ -1194,7 +1201,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
&iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
- iwl_mvm_get_acpi_tables(mvm);
+ iwl_mvm_get_bios_tables(mvm);
iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
iwl_uefi_get_step_table(trans);
@@ -1265,6 +1272,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_LIST_HEAD(&mvm->add_stream_txqs);
spin_lock_init(&mvm->add_stream_lock);
+ wiphy_work_init(&mvm->async_handlers_wiphy_wk,
+ iwl_mvm_async_handlers_wiphy_wk);
init_waitqueue_head(&mvm->rx_sync_waitq);
mvm->queue_sync_state = 0;
@@ -1551,35 +1560,62 @@ void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
spin_unlock_bh(&mvm->async_handlers_lock);
}
-static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+/*
+ * This function receives a bitmap of rx async handler contexts
+ * (&iwl_rx_handler_context) to handle, and runs only the entries
+ * queued for those contexts.
+ */
+static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
+ u8 contexts)
{
- struct iwl_mvm *mvm =
- container_of(wk, struct iwl_mvm, async_handlers_wk);
struct iwl_async_handler_entry *entry, *tmp;
LIST_HEAD(local_list);
- /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
-
/*
- * Sync with Rx path with a lock. Remove all the entries from this list,
- * add them to a local one (lock free), and then handle them.
+ * Sync with Rx path with a lock. Remove all the entries of the
+ * wanted contexts from this list, add them to a local one (lock free),
+ * and then handle them.
*/
spin_lock_bh(&mvm->async_handlers_lock);
- list_splice_init(&mvm->async_handlers_list, &local_list);
+ list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+ if (!(BIT(entry->context) & contexts))
+ continue;
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &local_list);
+ }
spin_unlock_bh(&mvm->async_handlers_lock);
list_for_each_entry_safe(entry, tmp, &local_list, list) {
- if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
mutex_lock(&mvm->mutex);
entry->fn(mvm, &entry->rxb);
iwl_free_rxb(&entry->rxb);
list_del(&entry->list);
- if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
mutex_unlock(&mvm->mutex);
kfree(entry);
}
}
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) |
+ BIT(RX_HANDLER_ASYNC_UNLOCKED);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
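A minimal sketch of the dispatch rule shared by the two workers above (runs_in is illustrative, not driver code): each worker passes the bitmask of contexts it owns, and a queued entry is handled iff its context bit is set in that mask, so a notification can never run under the wrong worker.

static bool runs_in(enum iwl_rx_handler_context ctx, u8 contexts)
{
	return !!(BIT(ctx) & contexts);
}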
static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
@@ -1659,7 +1695,11 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
spin_lock(&mvm->async_handlers_lock);
list_add_tail(&entry->list, &mvm->async_handlers_list);
spin_unlock(&mvm->async_handlers_lock);
- schedule_work(&mvm->async_handlers_wk);
+ if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY)
+ wiphy_work_queue(mvm->hw->wiphy,
+ &mvm->async_handlers_wiphy_wk);
+ else
+ schedule_work(&mvm->async_handlers_wk);
break;
}
}
@@ -1788,12 +1828,8 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
{
- bool state = iwl_mvm_is_radio_killed(mvm);
-
- if (state)
- wake_up(&mvm->rx_sync_waitq);
-
- wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy,
+ iwl_mvm_is_radio_killed(mvm));
}
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
@@ -1818,10 +1854,12 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
bool unified = iwl_mvm_has_unified_ucode(mvm);
- if (state)
+ if (state) {
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- else
+ wake_up(&mvm->rx_sync_waitq);
+ } else {
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+ }
iwl_mvm_set_rfkill_state(mvm);
@@ -1955,7 +1993,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
ieee80211_restart_hw(mvm->hw);
} else if (mvm->fwrt.trans->dbg.restart_required) {
IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
- mvm->fwrt.trans->dbg.restart_required = FALSE;
+ mvm->fwrt.trans->dbg.restart_required = false;
ieee80211_restart_hw(mvm->hw);
} else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
ieee80211_restart_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 334d1f59f6e4..ce264b386029 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -9,7 +9,7 @@
#include "mvm.h"
/* Maps the driver specific channel width definition to the fw values */
-u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_channel_width(const struct cfg80211_chan_def *chandef)
{
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -33,7 +33,7 @@ u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
* Maps the driver specific control channel position (relative to the center
* freq) definitions to the fw values
*/
-u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+u8 iwl_mvm_get_ctrl_pos(const struct cfg80211_chan_def *chandef)
{
int offs = chandef->chan->center_freq - chandef->center_freq1;
int abs_offs = abs(offs);
@@ -116,7 +116,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd_v1 *cmd,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
struct iwl_phy_context_cmd_tail *tail =
@@ -137,7 +137,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd *cmd,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic)
{
cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm,
@@ -197,14 +197,18 @@ int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
*/
static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic,
u32 action)
{
int ret;
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1);
- if (ver == 3 || ver == 4) {
+ if (ver < 5 || !ap || !ap->chan)
+ ap = NULL;
+
+ if (ver >= 3 && ver <= 6) {
struct iwl_phy_context_cmd cmd = {};
/* Set the command header fields */
@@ -215,6 +219,14 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
chains_static,
chains_dynamic);
+ if (ap) {
+ cmd.sbb_bandwidth = iwl_mvm_get_channel_width(ap);
+ cmd.sbb_ctrl_channel_loc = iwl_mvm_get_ctrl_pos(ap);
+ }
+
+ if (ver == 6)
+ cmd.puncture_mask = cpu_to_le16(chandef->punctured);
+
ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
0, sizeof(cmd), &cmd);
} else if (ver < 3) {
@@ -254,7 +266,8 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
* Send a command to add a PHY context based on the current HW configuration.
*/
int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic)
{
int ret;
@@ -267,7 +280,7 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->width = chandef->width;
ctxt->center_freq1 = chandef->center_freq1;
- ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap,
chains_static, chains_dynamic,
FW_CTXT_ACTION_ADD);
@@ -300,7 +313,8 @@ void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
* changed.
*/
int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
- struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *chandef,
+ const struct cfg80211_chan_def *ap,
u8 chains_static, u8 chains_dynamic)
{
enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY;
@@ -324,7 +338,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
int ret;
/* ... remove it here ...*/
- ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, NULL,
chains_static, chains_dynamic,
FW_CTXT_ACTION_REMOVE);
if (ret)
@@ -338,7 +352,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->width = chandef->width;
ctxt->center_freq1 = chandef->center_freq1;
- return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+ return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef, ap,
chains_static, chains_dynamic,
action);
}
@@ -358,7 +372,7 @@ void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
cfg80211_chandef_create(&chandef, ctxt->channel, NL80211_CHAN_NO_HT);
- iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, 1, 1,
+ iwl_mvm_phy_ctxt_apply(mvm, ctxt, &chandef, NULL, 1, 1,
FW_CTXT_ACTION_REMOVE);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 1b9b06e0443f..41e68aa6bec8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -20,8 +20,7 @@
static
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd,
- u32 flags)
+ struct iwl_beacon_filter_cmd *cmd)
{
u16 len;
@@ -62,7 +61,7 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
len = offsetof(struct iwl_beacon_filter_cmd,
bf_threshold_absolute_low);
- return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+ return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, 0,
len, cmd);
}
@@ -813,8 +812,7 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_beacon_filter_cmd *cmd,
- u32 cmd_flags)
+ struct iwl_beacon_filter_cmd *cmd)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
@@ -825,7 +823,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd);
if (!ret)
mvmvif->bf_data.bf_enabled = true;
@@ -834,20 +832,18 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
}
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
struct iwl_beacon_filter_cmd cmd = {
IWL_BF_CMD_CONFIG_DEFAULTS,
.bf_enable_beacon_filter = cpu_to_le32(1),
};
- return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags);
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
}
static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
struct iwl_beacon_filter_cmd cmd = {};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -856,7 +852,7 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
mvmvif->bf_data.bf_enabled = false;
@@ -865,10 +861,9 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
}
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- u32 flags)
+ struct ieee80211_vif *vif)
{
- return _iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ return _iwl_mvm_disable_beacon_filter(mvm, vif);
}
static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
@@ -919,7 +914,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
!vif->cfg.ps ||
iwl_mvm_vif_low_latency(mvmvif));
- return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0);
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
}
int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 6cba8a353b53..00860feefa7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include "rs.h"
#include "fw-api.h"
@@ -479,9 +479,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
}
if (flags & IWL_TLC_NOTIF_FLAG_AMSDU && !mvm_link_sta->orig_amsdu_len) {
+ u32 enabled = le32_to_cpu(notif->amsdu_enabled);
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
+ if (size < 2000) {
+ size = 0;
+ enabled = 0;
+ }
+
if (link_sta->agg.max_amsdu_len < size) {
/*
* In debug link_sta->agg.max_amsdu_len < size
@@ -492,7 +498,7 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
goto out;
}
- mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
+ mvmsta->amsdu_enabled = enabled;
mvmsta->max_amsdu_len = size;
link_sta->agg.max_rc_amsdu_len = mvmsta->max_amsdu_len;
@@ -525,10 +531,10 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta,
const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
const struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
- if (WARN_ON_ONCE(!link_conf->chandef.chan))
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
- if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
switch (le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
@@ -538,7 +544,7 @@ u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta,
default:
return IEEE80211_MAX_MPDU_LEN_VHT_3895;
}
- } else if (link_conf->chandef.chan->band == NL80211_BAND_2GHZ &&
+ } else if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ &&
eht_cap->has_eht) {
switch (u8_get_bits(eht_cap->eht_cap_elem.mac_cap_info[0],
IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 481d68cbbbd8..a8c4e354e2ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -4161,6 +4161,8 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
* @mvm: The mvm component
* @mvmsta: The station
* @enable: Enable Tx protection?
+ *
+ * Returns: an error code
*/
int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 8caa971770c6..b1add7942c5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -752,6 +752,19 @@ iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le,
spin_unlock(&mvm->tcm.lock);
}
+static void iwl_mvm_handle_per_phy_stats(struct iwl_mvm *mvm,
+ struct iwl_stats_ntfy_per_phy *per_phy)
+{
+ int i;
+
+ for (i = 0; i < NUM_PHY_CTX; i++) {
+ if (!mvm->phy_ctxts[i].ref)
+ continue;
+ mvm->phy_ctxts[i].channel_load_by_us =
+ le32_to_cpu(per_phy[i].channel_load_by_us);
+ }
+}
+
static void
iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
struct iwl_statistics_operational_ntfy *stats)
@@ -766,6 +779,7 @@ iwl_mvm_stats_ver_15(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator_all_macs,
&data);
+ iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
}
static void
@@ -841,6 +855,7 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
struct iwl_stats_ntfy_per_link *link_stats;
struct ieee80211_bss_conf *bss_conf;
struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_vif_link_info *link_info;
int link_id;
int sig;
@@ -857,20 +872,26 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
continue;
mvmvif = iwl_mvm_vif_from_mac80211(bss_conf->vif);
- if (!mvmvif || !mvmvif->link[link_id])
+ link_info = mvmvif->link[link_id];
+ if (!link_info)
continue;
link_stats = &per_link[fw_link_id];
- mvmvif->link[link_id]->beacon_stats.num_beacons =
+ link_info->beacon_stats.num_beacons =
le32_to_cpu(link_stats->beacon_counter);
/* we basically just use the u8 to store 8 bits and then treat
* it as a s8 whenever we take it out to a different type.
*/
- mvmvif->link[link_id]->beacon_stats.avg_signal =
+ link_info->beacon_stats.avg_signal =
-le32_to_cpu(link_stats->beacon_average_energy);
+ if (link_info->phy_ctxt &&
+ link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
+ iwl_mvm_bt_coex_update_vif_esr(mvm, bss_conf->vif,
+ link_id);
+
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
*/
@@ -935,6 +956,7 @@ void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
average_energy);
+ iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
}
void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 886d00098528..1484eaedf452 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -282,6 +282,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
u32 status,
struct ieee80211_rx_status *stats)
{
+ struct wireless_dev *wdev;
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_vif *mvmvif;
u8 keyid;
@@ -303,9 +304,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
if (!ieee80211_is_beacon(hdr->frame_control))
return 0;
+ if (!sta)
+ return -1;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
/* key mismatch - will also report !MIC_OK but we shouldn't count it */
if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
- return -1;
+ goto report;
/* good cases */
if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
@@ -314,13 +321,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
return 0;
}
- if (!sta)
- return -1;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-
/*
* both keys will have the same cipher and MIC length, use
* whichever one is available
@@ -329,11 +329,11 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
if (!key) {
key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
if (!key)
- return -1;
+ goto report;
}
if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
- return -1;
+ goto report;
/* get the real key ID */
keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
@@ -347,7 +347,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
return -1;
key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
if (!key)
- return -1;
+ goto report;
}
/* Report status to mac80211 */
@@ -355,6 +355,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
ieee80211_key_mic_failure(key);
else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
ieee80211_key_replay(key);
+report:
+ wdev = ieee80211_vif_to_wdev(mvmsta->vif);
+ if (wdev->netdev)
+ cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len);
return -1;
}
@@ -397,8 +401,11 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
case IWL_RX_MPDU_STATUS_SEC_GCM:
BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
/* alg is CCM: check MIC only */
- if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+ if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) {
+ IWL_DEBUG_DROP(mvm,
+ "Dropping packet, bad MIC (CCM/GCM)\n");
return -1;
+ }
stats->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED;
*crypt_len = IEEE80211_CCMP_HDR_LEN;
@@ -505,6 +512,10 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
return false;
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (WARN_ON_ONCE(!mvm_sta->dup_data))
+ return false;
+
dup_data = &mvm_sta->dup_data[queue];
/*
@@ -512,11 +523,9 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
* (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
*/
if (ieee80211_is_ctl(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control) ||
- is_multicast_ether_addr(hdr->addr1)) {
- rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+ ieee80211_is_any_nullfunc(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
return false;
- }
if (ieee80211_is_data_qos(hdr->frame_control)) {
/* frame has qos control */
@@ -642,10 +651,8 @@ static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
rcu_read_lock();
ba_data = rcu_dereference(mvm->baid_map[baid]);
- if (!ba_data) {
- WARN(true, "BAID %d not found in map\n", baid);
+ if (WARN(!ba_data, "BAID %d not found in map\n", baid))
goto out;
- }
/* pick any STA ID to find the pointer */
sta_id = ffs(ba_data->sta_mask) - 1;
@@ -681,11 +688,11 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
len -= sizeof(*notif) + sizeof(*internal_notif);
- if (internal_notif->sync &&
- mvm->queue_sync_cookie != internal_notif->cookie) {
- WARN_ONCE(1, "Received expired RX queue sync message\n");
+ if (WARN_ONCE(internal_notif->sync &&
+ mvm->queue_sync_cookie != internal_notif->cookie,
+ "Received expired RX queue sync message (cookie %d but wanted %d, queue %d)\n",
+ internal_notif->cookie, mvm->queue_sync_cookie, queue))
return;
- }
switch (internal_notif->type) {
case IWL_MVM_RXQ_EMPTY:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 7b6f1cdca067..f3e3986b4c72 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -241,13 +241,11 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
return IWL_SCAN_TYPE_FRAGMENTED;
/*
- * in case of DCM with GO where BSS DTIM interval < 220msec
- * set all scan requests as fast-balance scan
+ * in case of DCM with P2P GO set all scan requests as
+ * fast-balance scan
*/
if (vif && vif->type == NL80211_IFTYPE_STATION &&
- data.is_dcm_with_p2p_go &&
- ((vif->bss_conf.beacon_int *
- vif->bss_conf.dtim_period) < 220))
+ data.is_dcm_with_p2p_go)
return IWL_SCAN_TYPE_FAST_BALANCE;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index 30d4233595e8..16285ae7cae9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2019, 2022-2023 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2019, 2022-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
*/
#include "mvm.h"
@@ -232,6 +232,9 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
};
struct ieee80211_sta *sta = NULL;
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SMART_FIFO_OFFLOAD))
+ return 0;
/*
* Ignore the call if we are in HW Restart flow, or if the handled
* vif is a p2p device.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 2a3ca9785974..491c449fd431 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2015, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -71,7 +71,7 @@ u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
mpdu_dens = link_sta->ht_cap.ampdu_density;
}
- if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
/* overwrite HT values on 6 GHz */
mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
@@ -208,7 +208,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
if (sta->deflink.ht_cap.ht_supported ||
- mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
+ mvm_sta->vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
add_sta_cmd.station_flags_msk |=
cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
STA_FLG_AGG_MPDU_DENS_MSK);
@@ -1502,6 +1502,34 @@ out_err:
return ret;
}
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
+ struct ieee80211_txq *txq)
+{
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ int ret = -EINVAL;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+ !txq->sta) {
+ return 0;
+ }
+
+ if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
+ set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ ret = 0;
+ }
+
+ local_bh_disable();
+ spin_lock(&mvm->add_stream_lock);
+ if (!list_empty(&mvmtxq->list))
+ list_del_init(&mvmtxq->list);
+ spin_unlock(&mvm->add_stream_lock);
+ local_bh_enable();
+
+ return ret;
+}
+
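A brief usage note, inferred from the lockdep assertion and the locking above rather than from a caller in this hunk: iwl_mvm_sta_ensure_queue() must run with mvm->mutex held; it allocates a queue on the spot when the TXQ isn't ready yet, and unlinks the TXQ from the add-stream list so the deferred iwl_mvm_add_new_dqa_stream_wk() worker cannot race with it.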
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
@@ -2989,16 +3017,6 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
kfree_rcu(baid_data, rcu_head);
IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
-
- /*
- * After we've deleted it, do another queue sync
- * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
- * running it won't find a new session in the old
- * BAID. It can find the NULL pointer for the BAID,
- * but we must not have it find a different session.
- */
- iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
- true, NULL, 0);
}
return 0;
@@ -3559,6 +3577,9 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
key_flags = cpu_to_le16(keyidx);
key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
+ if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
+ key_flags |= cpu_to_le16(STA_KEY_FLG_AMSDU_SPP);
+
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
@@ -4298,12 +4319,12 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len)
+ u8 *key, u32 key_len,
+ struct ieee80211_key_conf *keyconf)
{
int ret;
u16 queue;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct ieee80211_key_conf *keyconf;
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, false, false);
bool mld = iwl_mvm_has_mld_api(mvm->fw);
@@ -4328,12 +4349,6 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
goto out;
- keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
- if (!keyconf) {
- ret = -ENOBUFS;
- goto out;
- }
-
keyconf->cipher = cipher;
memcpy(keyconf->key, key, key_len);
keyconf->keylen = key_len;
@@ -4354,10 +4369,9 @@ int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
0, NULL, 0, 0, true);
}
- kfree(keyconf);
- return 0;
out:
- iwl_mvm_dealloc_int_sta(mvm, sta);
+ if (ret)
+ iwl_mvm_dealloc_int_sta(mvm, sta);
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index b33a0ce096d4..b3450569864e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -571,10 +571,12 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
bool disable);
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
- u8 *key, u32 key_len);
+ u8 *key, u32 key_len,
+ struct ieee80211_key_conf *key_conf_out);
void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 218fdf1ed530..a59d264a11c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
*/
@@ -45,32 +45,24 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
te_data->link_id = -1;
}
-void iwl_mvm_roc_done_wk(struct work_struct *wk)
+static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
{
- struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
-
/*
* Clear the ROC_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
- * in the case that the time event actually completed in the firmware
- * (which is handled in iwl_mvm_te_handle_notif).
- */
- clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
-
- synchronize_net();
-
- /*
- * Flush the offchannel queue -- this is called when the time
+ * in the case that the time event actually completed in the firmware.
+ *
+ * Also flush the offchannel queue -- this is called when the time
* event finishes or is canceled, so that frames queued for it
* won't get stuck on the queue and be transmitted in the next
* time event.
*/
-
- mutex_lock(&mvm->mutex);
- if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
struct iwl_mvm_vif *mvmvif;
+ synchronize_net();
+
/*
* NB: access to this pointer would be racy, but the flush bit
* can only be set when we had a P2P-Device VIF, and we have a
@@ -105,21 +97,16 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
}
}
- /*
- * Clear the ROC_AUX_RUNNING status bit.
- * This will cause the TX path to drop offchannel transmissions.
- * That would also be done by mac80211, but it is racy, in particular
- * in the case that the time event actually completed in the firmware
- * (which is handled in iwl_mvm_te_handle_notif).
- */
+ /* Do the same for AUX ROC */
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
- /* do the same in case of hot spot 2.0 */
+ synchronize_net();
+
iwl_mvm_flush_sta(mvm, mvm->aux_sta.sta_id,
mvm->aux_sta.tfd_queue_msk);
if (mvm->mld_api_is_used) {
iwl_mvm_mld_rm_aux_sta(mvm);
- goto out_unlock;
+ return;
}
/* In newer version of this command an aux station is added only
@@ -128,8 +115,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
if (iwl_mvm_has_new_station_api(mvm->fw))
iwl_mvm_rm_aux_sta(mvm);
}
+}
-out_unlock:
+void iwl_mvm_roc_done_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_cleanup_roc(mvm);
mutex_unlock(&mvm->mutex);
}
@@ -163,12 +156,12 @@ static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
* So we just do nothing here and the switch
* will be performed on the last TBTT.
*/
- if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif, 0)) {
IWL_WARN(mvm, "CSA NOA started too early\n");
goto out_unlock;
}
- ieee80211_csa_finish(csa_vif);
+ ieee80211_csa_finish(csa_vif, 0);
rcu_read_unlock();
@@ -294,18 +287,6 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
}
}
-static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm)
-{
- /*
- * If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the
- * roc_done_wk is already scheduled or running, so don't schedule it
- * again to avoid a race where the roc_done_wk clears this bit after
- * it is set here, affecting the next run of the roc_done_wk.
- */
- if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status))
- iwl_mvm_roc_finished(mvm);
-}
-
/*
* Handles a FW notification for an event that is known to the driver.
*
@@ -357,7 +338,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
switch (te_data->vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
ieee80211_remain_on_channel_expired(mvm->hw);
- iwl_mvm_p2p_roc_finished(mvm);
+ iwl_mvm_roc_finished(mvm);
break;
case NL80211_IFTYPE_STATION:
/*
@@ -692,7 +673,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
/* Determine whether mac or link id should be used, and validate the link id */
static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 link_id)
+ s8 link_id)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
@@ -706,8 +687,7 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
"Invalid link ID for session protection: %u\n", link_id))
return -EINVAL;
- if (WARN(ieee80211_vif_is_mld(vif) &&
- !(vif->active_links & BIT(link_id)),
+ if (WARN(!mvmvif->link[link_id]->active,
"Session Protection on an inactive link: %u\n", link_id))
return -EINVAL;
@@ -716,7 +696,7 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 id, u32 link_id)
+ u32 id, s8 link_id)
{
int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
struct iwl_mvm_session_prot_cmd cmd = {
@@ -745,7 +725,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif = te_data->vif;
struct iwl_mvm_vif *mvmvif;
enum nl80211_iftype iftype;
- unsigned int link_id;
+ s8 link_id;
if (!vif)
return false;
@@ -783,7 +763,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
iwl_mvm_cancel_session_protection(mvm, vif, id,
link_id);
if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
- iwl_mvm_p2p_roc_finished(mvm);
+ iwl_mvm_roc_finished(mvm);
}
}
return false;
@@ -929,7 +909,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (WARN(ver > 2 && mvmvif->time_event_data.link_id >= 0 &&
mvmvif->time_event_data.link_id != notif_link_id,
- "SESION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
+ "SESSION_PROTECTION_NOTIF was received for link %u, while the current time event is on link %u\n",
notif_link_id, mvmvif->time_event_data.link_id))
goto out_unlock;
@@ -972,7 +952,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
/* End TE, notify mac80211 */
mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
- iwl_mvm_p2p_roc_finished(mvm);
+ mvmvif->time_event_data.link_id = -1;
+ iwl_mvm_roc_finished(mvm);
ieee80211_remain_on_channel_expired(mvm->hw);
} else if (le32_to_cpu(notif->start)) {
if (WARN_ON(mvmvif->time_event_data.id !=
@@ -986,6 +967,86 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
rcu_read_unlock();
}
+#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
+#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
+#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
+#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
+#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
+
+void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
+ u32 duration_ms,
+ u32 *duration_tu,
+ u32 *delay)
+{
+ u32 dtim_interval = vif->bss_conf.dtim_period *
+ vif->bss_conf.beacon_int;
+
+ *delay = AUX_ROC_MIN_DELAY;
+ *duration_tu = MSEC_TO_TU(duration_ms);
+
+ /*
+ * If we are associated, we want the delay time to be at least one
+ * dtim interval so that the FW can wait until after the DTIM and
+ * then start the time event; this will potentially allow us to
+ * remain off-channel for the max duration.
+ * Since we want to use almost a whole dtim interval, we would also
+ * like the delay to be for 2-3 dtim intervals, in case there are
+ * other time events with higher priority.
+ */
+ if (vif->cfg.assoc) {
+ *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
+ /* We cannot remain off-channel longer than the DTIM interval */
+ if (dtim_interval <= *duration_tu) {
+ *duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
+ if (*duration_tu <= AUX_ROC_MIN_DURATION)
+ *duration_tu = dtim_interval -
+ AUX_ROC_MIN_SAFETY_BUFFER;
+ }
+ }
+}
+
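As a worked illustration of the clamping above, here is a standalone sketch (assuming iwlwifi's usual MSEC_TO_TU(x) = x * 1000 / 1024 conversion; all input values are made up):

	/* Hedged sketch of iwl_mvm_roc_duration_and_delay()'s clamping,
	 * with hypothetical inputs: dtim_period = 2, beacon_int = 100 TU,
	 * requested duration = 300 ms, station associated.
	 */
	#include <stdio.h>

	#define MSEC_TO_TU(m)			((m) * 1000 / 1024)	/* assumed */
	#define AUX_ROC_MIN_DURATION		MSEC_TO_TU(100)
	#define AUX_ROC_MIN_DELAY		MSEC_TO_TU(200)
	#define AUX_ROC_MAX_DELAY		MSEC_TO_TU(600)
	#define AUX_ROC_SAFETY_BUFFER		MSEC_TO_TU(20)
	#define AUX_ROC_MIN_SAFETY_BUFFER	MSEC_TO_TU(10)

	int main(void)
	{
		unsigned int dtim_interval = 2 * 100;		/* 200 TU */
		unsigned int duration_tu = MSEC_TO_TU(300);	/* 292 TU */
		unsigned int delay;

		/* associated: wait 2-3 DTIMs, capped at AUX_ROC_MAX_DELAY */
		delay = dtim_interval * 3 < AUX_ROC_MAX_DELAY ?
			dtim_interval * 3 : AUX_ROC_MAX_DELAY;	/* 585 TU */

		/* can't stay off-channel past a DTIM: clamp with a buffer */
		if (dtim_interval <= duration_tu) {
			duration_tu = dtim_interval - AUX_ROC_SAFETY_BUFFER;
			if (duration_tu <= AUX_ROC_MIN_DURATION)
				duration_tu = dtim_interval -
					      AUX_ROC_MIN_SAFETY_BUFFER;
		}
		printf("duration=%u TU delay=%u TU\n", duration_tu, delay);
		return 0;	/* prints: duration=181 TU delay=585 TU */
	}
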
+int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration, u32 activity)
+{
+ int res;
+ u32 duration_tu, delay;
+ struct iwl_roc_req roc_req = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ .activity = cpu_to_le32(activity),
+ .sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
+ channel->hw_value,
+ iwl_mvm_phy_band_from_nl80211(channel->band),
+ IWL_PHY_CHANNEL_MODE20, 0);
+
+ iwl_mvm_roc_duration_and_delay(vif, duration, &duration_tu,
+ &delay);
+ roc_req.duration = cpu_to_le32(duration_tu);
+ roc_req.max_delay = cpu_to_le32(delay);
+
+ IWL_DEBUG_TE(mvm,
+ "\t(requested = %ums, max_delay = %ums)\n",
+ duration, delay);
+ IWL_DEBUG_TE(mvm,
+ "Requesting to remain on channel %u for %utu\n",
+ channel->hw_value, duration_tu);
+
+ /* Set the node address */
+ memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
+
+ res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
+ 0, sizeof(roc_req), &roc_req);
+
+ return res;
+}
+
static int
iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -1163,18 +1224,22 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ te_data = &mvmvif->time_event_data;
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) {
+ IWL_DEBUG_TE(mvm,
+ "No remain on channel event\n");
+ return;
+ }
+
iwl_mvm_cancel_session_protection(mvm, vif,
- mvmvif->time_event_data.id,
- mvmvif->time_event_data.link_id);
- iwl_mvm_p2p_roc_finished(mvm);
+ te_data->id,
+ te_data->link_id);
} else {
iwl_mvm_roc_station_remove(mvm, mvmvif);
- iwl_mvm_roc_finished(mvm);
}
-
- return;
+ goto cleanup_roc;
}
te_data = iwl_mvm_get_roc_te(mvm);
@@ -1185,13 +1250,21 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
- iwl_mvm_p2p_roc_finished(mvm);
- } else {
+ else
iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
- iwl_mvm_roc_finished(mvm);
- }
+
+cleanup_roc:
+ /*
+ * In case we get here before the ROC event started
+ * (so the status bit isn't set), set it here so that
+ * iwl_mvm_cleanup_roc() will clean things up properly
+ */
+ set_bit(vif->type == NL80211_IFTYPE_P2P_DEVICE ?
+ IWL_MVM_STATUS_ROC_RUNNING : IWL_MVM_STATUS_ROC_AUX_RUNNING,
+ &mvm->status);
+ iwl_mvm_cleanup_roc(mvm);
}
void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
@@ -1296,7 +1369,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
- int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
+ int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, (s8)link_id);
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index db986bfc4dc3..782ddc8c296b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -520,13 +520,49 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
}
}
+static bool iwl_mvm_use_host_rate(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvmsta,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info)
+{
+ if (unlikely(!mvmsta))
+ return true;
+
+ if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
+ return true;
+
+ if (likely(ieee80211_is_data(hdr->frame_control) &&
+ mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED))
+ return false;
+
+ /*
+ * Not a data frame: use the host rate if on an old device that
+ * can't possibly be doing MLO (firmware may be selecting a
+ * bad rate). If we might be doing MLO we need to let the FW
+ * pick (since we don't necessarily know the link), but there
+ * FW rate selection was fixed.
+ */
+ return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ;
+}
+
+static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
+ const u8 *addr3_override)
+{
+ struct ieee80211_hdr *out_hdr = cmd;
+
+ memcpy(cmd, hdr, hdrlen);
+ if (addr3_override)
+ memcpy(out_hdr->addr3, addr3_override, ETH_ALEN);
+}
+
/*
* Allocates and sets the Tx cmd the driver data pointers in the skb
*/
static struct iwl_device_tx_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info, int hdrlen,
- struct ieee80211_sta *sta, u8 sta_id)
+ struct ieee80211_sta *sta, u8 sta_id,
+ const u8 *addr3_override)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_device_tx_cmd *dev_cmd;
@@ -556,12 +592,12 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
/*
- * For data and mgmt packets rate info comes from the fw. Only
+ * For data and mgmt packets rate info comes from the fw (for
+ * new devices, older FW is somewhat broken for this). Only
* set rate/antenna for injected frames with fixed rate, or
- * when no sta is given.
+ * when no sta is given, or with older firmware.
*/
- if (unlikely(!sta ||
- info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
+ if (unlikely(iwl_mvm_use_host_rate(mvm, mvmsta, hdr, info))) {
flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags =
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
@@ -584,7 +620,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->len = cpu_to_le16((u16)skb->len);
/* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le16(flags);
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
@@ -599,7 +635,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->len = cpu_to_le16((u16)skb->len);
/* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le32(flags);
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
@@ -617,7 +653,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
/* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override);
out:
return dev_cmd;
@@ -820,7 +856,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
- dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id,
+ NULL);
if (!dev_cmd)
return -1;
@@ -869,10 +906,10 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
if (WARN_ON(!link_conf))
band = NL80211_BAND_2GHZ;
else
- band = link_conf->chandef.chan->band;
+ band = link_conf->chanreq.oper.chan->band;
rcu_read_unlock();
} else {
- band = mvmsta->vif->bss_conf.chandef.chan->band;
+ band = mvmsta->vif->bss_conf.chanreq.oper.chan->band;
}
lmac = iwl_mvm_get_lmac_id(mvm, band);
@@ -914,9 +951,15 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
next = skb_gso_segment(skb, netdev_flags);
skb_shinfo(skb)->gso_size = mss;
skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
- if (WARN_ON_ONCE(IS_ERR(next)))
- return -EINVAL;
- else if (next)
+
+ if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM)
+ return -ENOMEM;
+
+ if (WARN_ONCE(IS_ERR(next),
+ "skb_gso_segment error: %d\n", (int)PTR_ERR(next)))
+ return PTR_ERR(next);
+
+ if (next)
consume_skb(skb);
skb_list_walk_safe(next, tmp, next) {
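The rework above stops WARNing on -ENOMEM while still reporting other errors loudly; a minimal kernel-context sketch of the distinction (hypothetical wrapper, not the driver's code):

	/* Hedged sketch: skb_gso_segment() encodes a negative errno in the
	 * returned pointer; -ENOMEM is transient, other errors warn once.
	 */
	#include <linux/skbuff.h>
	#include <linux/netdevice.h>

	static int segment_or_err(struct sk_buff *skb, netdev_features_t features)
	{
		struct sk_buff *segs = skb_gso_segment(skb, features);

		if (IS_ERR(segs) && PTR_ERR(segs) == -ENOMEM)
			return -ENOMEM;			/* transient: no warning */
		if (WARN_ONCE(IS_ERR(segs), "gso error %ld\n", PTR_ERR(segs)))
			return PTR_ERR(segs);		/* unexpected: warn once */
		if (segs)
			consume_skb(skb);		/* original replaced by list */
		return 0;
	}
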
@@ -972,8 +1015,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
u8 tid;
- snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
- tcp_hdrlen(skb);
+ snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
if (!mvmsta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
@@ -1140,7 +1182,8 @@ static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
*/
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta,
+ const u8 *addr3_override)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
@@ -1172,7 +1215,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_probe_resp_set_noa(mvm, skb);
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
- sta, mvmsta->deflink.sta_id);
+ sta, mvmsta->deflink.sta_id,
+ addr3_override);
if (!dev_cmd)
goto drop;
@@ -1294,9 +1338,11 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_tx_info info;
struct sk_buff_head mpdus_skbs;
+ struct ieee80211_vif *vif;
unsigned int payload_len;
int ret;
struct sk_buff *orig_skb = skb;
+ const u8 *addr3;
if (WARN_ON_ONCE(!mvmsta))
return -1;
@@ -1307,26 +1353,59 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
memcpy(&info, skb->cb, sizeof(info));
if (!skb_is_gso(skb))
- return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
tcp_hdrlen(skb) + skb->data_len;
if (payload_len <= skb_shinfo(skb)->gso_size)
- return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
__skb_queue_head_init(&mpdus_skbs);
+ vif = info.control.vif;
+ if (!vif)
+ return -1;
+
ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
if (ret)
return ret;
WARN_ON(skb_queue_empty(&mpdus_skbs));
+ /*
+ * As described in IEEE Std 802.11-2020, table 9-30 (Address
+ * field contents), A-MSDU address 3 should contain the BSSID
+ * address.
+ * Pass address 3 down to iwl_mvm_tx_mpdu() and further to set it
+ * in the command header. We need to preserve the original
+ * address 3 in the skb header to correctly create all the
+ * A-MSDU subframe headers from it.
+ */
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ addr3 = vif->cfg.ap_addr;
+ break;
+ case NL80211_IFTYPE_AP:
+ addr3 = vif->addr;
+ break;
+ default:
+ addr3 = NULL;
+ break;
+ }
+
while (!skb_queue_empty(&mpdus_skbs)) {
+ struct ieee80211_hdr *hdr;
+ bool amsdu;
+
skb = __skb_dequeue(&mpdus_skbs);
+ hdr = (void *)skb->data;
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
- ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta,
+ amsdu ? addr3 : NULL);
if (ret) {
/* Free skbs created as part of TSO logic that have not yet been dequeued */
__skb_queue_purge(&mpdus_skbs);
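Distilled from the switch above, a minimal sketch of the address-3 rule (standard mac80211 types assumed; an illustration, not the driver's helper):

	/* Hedged sketch: per IEEE Std 802.11-2020 table 9-30, A-MSDU
	 * address 3 carries the BSSID: the AP's address as seen by a STA,
	 * or the interface address when we are the AP.
	 */
	#include <net/mac80211.h>

	static const u8 *amsdu_addr3_override(const struct ieee80211_vif *vif)
	{
		switch (vif->type) {
		case NL80211_IFTYPE_STATION:
			return vif->cfg.ap_addr;	/* the BSSID we joined */
		case NL80211_IFTYPE_AP:
			return vif->addr;		/* we are the BSSID */
		default:
			return NULL;			/* leave header unchanged */
		}
	}
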
@@ -1587,12 +1666,18 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
* of the batch. This is why the SSN of the SCD is written at the end of the
* whole struct at a variable offset. This function knows how to cope with the
* variable offset and returns the SSN of the SCD.
+ *
+ * For 22000-series and lower, this is just 12 bits. For later devices,
+ * it is 16 bits.
*/
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp)
{
- return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
- tx_resp->frame_count) & 0xfff;
+ u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+ tx_resp->frame_count);
+
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return val & 0xFFFF;
+ return val & 0xFFF;
}
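A small runnable example of the width difference (the raw value is made up):

	/* Hedged sketch: the same raw SCD value truncated at the two widths. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t raw = 0x1ABCD;	/* hypothetical firmware value */

		printf("AX210+: 0x%04x\n", raw & 0xFFFF);	/* 0xabcd */
		printf("older:  0x%03x\n", raw & 0xFFF);	/* 0xbcd  */
		return 0;
	}
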
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
@@ -2125,6 +2210,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
tfd_cnt, pkt_len))
return;
+ IWL_DEBUG_TX_REPLY(mvm,
+ "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+ sta_id, le32_to_cpu(ba_res->flags),
+ le16_to_cpu(ba_res->txed),
+ le16_to_cpu(ba_res->done));
+
rcu_read_lock();
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
@@ -2160,12 +2251,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
rcu_read_unlock();
-
- IWL_DEBUG_TX_REPLY(mvm,
- "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
- sta_id, le32_to_cpu(ba_res->flags),
- le16_to_cpu(ba_res->txed),
- le16_to_cpu(ba_res->done));
return;
}
@@ -2197,9 +2282,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
rcu_read_unlock();
- iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
- tid_data->rate_n_flags, false);
-
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from %pM, sta_id = %d\n",
ba_notif->sta_addr, ba_notif->sta_id);
@@ -2212,6 +2294,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
ba_notif->reduced_txp);
+
+ iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+ tid_data->rate_n_flags, false);
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 91286018a69d..ab56ff87c6f9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -249,6 +249,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
* This is the special case in which init is set, and we call a callback
* to clear the state indicating that station creation is in progress.
+ *
+ * Returns: 0 on success, or a negative error code on failure
*/
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index fa4a14546860..c8fc8b4fd85c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -119,7 +119,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
prph_sc_ctrl->version.version = 0;
prph_sc_ctrl->version.mac_id =
- cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ cpu_to_le16((u16)trans->hw_rev);
prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
control_flags |= IWL_PRPH_SCRATCH_MTR_MODE;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 5f55efe64bf5..0fa92704cd14 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -180,7 +180,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
ctxt_info->version.version = 0;
ctxt_info->version.mac_id =
- cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+ cpu_to_le16((u16)trans->hw_rev);
/* size is in DWs */
ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 2c9b98c8184b..4a657036b9d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -502,12 +502,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* Bz devices */
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
/* Sc devices */
{IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0xE340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0xD340, PCI_ANY_ID, iwl_sc_trans_cfg)},
+ {IWL_PCI_DEVICE(0x6E70, PCI_ANY_ID, iwl_sc_trans_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -526,7 +530,7 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
IWL_CFG_ANY, _cfg, _name)
-static const struct iwl_dev_info iwl_dev_info_table[] = {
+VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
/* 9000 */
IWL_DEV_INFO(0x2526, 0x1550, iwl9260_2ac_cfg, iwl9260_killer_1550_name),
@@ -1008,8 +1012,13 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_gl, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_gl, iwl_mtp_name),
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1115,8 +1124,24 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
iwl_cfg_sc, iwl_sc_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2, iwl_sc2_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_sc2f, iwl_sc2f_name),
#endif /* CONFIG_IWLMVM */
};
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table);
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+const unsigned int iwl_dev_info_table_size = ARRAY_SIZE(iwl_dev_info_table);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table_size);
+#endif
/*
* Read rf id and cdb info from prph register and store it
@@ -1143,6 +1168,20 @@ static void get_crf_id(struct iwl_trans *iwl_trans)
iwl_trans->hw_cnv_id =
iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
+ /* In BZ, the MAC step must be read from the CNVI aux register */
+ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
+ u8 step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id);
+
+ /* For BZ-U, take B step also when A step is indicated */
+ if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) ==
+ CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
+ step == SILICON_A_STEP)
+ step = SILICON_B_STEP;
+
+ iwl_trans->hw_rev_step = step;
+ iwl_trans->hw_rev |= step;
+ }
+
/* Read cdb info (also contains the jacket info, if needed in the future) */
iwl_trans->hw_wfpm_id =
iwl_read_umac_prph_no_grab(iwl_trans, WFPM_OTP_CFG1_ADDR);
@@ -1236,7 +1275,7 @@ out:
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
-static const struct iwl_dev_info *
+VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step)
@@ -1299,6 +1338,7 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
return NULL;
}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_pci_find_dev_info);
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -1382,6 +1422,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (dev_info) {
iwl_trans->cfg = dev_info->cfg;
iwl_trans->name = dev_info->name;
+ iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160;
}
#if IS_ENABLED(CONFIG_IWLMVM)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 63e13577aff8..6c76b2dd6878 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1484,12 +1484,9 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
state ? "disabled" : "enabled");
- if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
- if (trans->trans_cfg->gen2)
- _iwl_trans_pcie_gen2_stop_device(trans);
- else
- _iwl_trans_pcie_stop_device(trans, from_irq);
- }
+ if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&
+ !WARN_ON(trans->trans_cfg->gen2))
+ _iwl_trans_pcie_stop_device(trans, from_irq);
}
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
@@ -1718,6 +1715,7 @@ enable_msi:
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
+#if defined(CONFIG_SMP)
int iter_rx_q, i, ret, cpu, offset;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1738,6 +1736,7 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
"Failed to set affinity mask for IRQ %d\n",
trans_pcie->msix_entries[i].vector);
}
+#endif
}
static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 6c2b37e56c78..fa8eba47dc4c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1331,7 +1331,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
trans->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
- ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ ip_hdrlen = skb_network_header_len(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
amsdu_pad = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
index ca74b1b63cac..33973a60d0bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -271,9 +271,10 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
meta = NULL;
goto unmap;
}
- IWL_WARN(trans,
- "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
- len, (unsigned long long)oldphys, (unsigned long long)phys);
+ IWL_DEBUG_TX(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys,
+ (unsigned long long)phys);
ret = 0;
unmap:
@@ -352,7 +353,7 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
&dev_cmd->hdr, start_len, 0);
- ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ ip_hdrlen = skb_network_header_len(skb);
snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
amsdu_pad = 0;
@@ -1601,8 +1602,8 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (read_ptr == tfd_num)
goto out;
- IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
- txq_id, txq->read_ptr, tfd_num, ssn);
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
+ txq_id, read_ptr, txq->read_ptr, tfd_num, ssn);
/* Since we free until index _not_ inclusive, the one before index is
 * the last we will free. This one must be used. */
@@ -1630,7 +1631,8 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
struct sk_buff *skb = txq->entries[read_ptr].skb;
- if (WARN_ON_ONCE(!skb))
+ if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
+ read_ptr, txq->read_ptr, txq_id))
continue;
iwl_txq_free_tso_page(trans, skb);
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
new file mode 100644
index 000000000000..5658471bdf0a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+iwlwifi-tests-y += module.o devinfo.o
+
+ccflags-y += -I$(srctree)/$(src)/../
+
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlwifi-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
new file mode 100644
index 000000000000..7aa47fce6e2d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * KUnit tests for the iwlwifi device info table
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <kunit/test.h>
+#include "iwl-drv.h"
+#include "iwl-config.h"
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di)
+{
+ printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n",
+ pfx, di->device, di->subdevice, di->mac_type, di->mac_step,
+ di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160,
+ di->cores);
+}
+
+static void devinfo_table_order(struct kunit *test)
+{
+ int idx;
+
+ for (idx = 0; idx < iwl_dev_info_table_size; idx++) {
+ const struct iwl_dev_info *di = &iwl_dev_info_table[idx];
+ const struct iwl_dev_info *ret;
+
+ ret = iwl_pci_find_dev_info(di->device, di->subdevice,
+ di->mac_type, di->mac_step,
+ di->rf_type, di->cdb,
+ di->jacket, di->rf_id,
+ di->no_160, di->cores, di->rf_step);
+ if (ret != di) {
+ iwl_pci_print_dev_info("searched: ", di);
+ iwl_pci_print_dev_info("found: ", ret);
+ KUNIT_FAIL(test,
+ "unusable entry at index %d (found index %d instead)\n",
+ idx, (int)(ret - iwl_dev_info_table));
+ }
+ }
+}
+
+static struct kunit_case devinfo_test_cases[] = {
+ KUNIT_CASE(devinfo_table_order),
+ {}
+};
+
+static struct kunit_suite iwlwifi_devinfo = {
+ .name = "iwlwifi-devinfo",
+ .test_cases = devinfo_test_cases,
+};
+
+kunit_test_suite(iwlwifi_devinfo);
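For reference, a hypothetical extra case in the same style, showing the KUNIT_EXPECT_* flavour of assertion next to the KUNIT_FAIL used above (not part of the patch):

	/* Hedged sketch: a trivial extra case that would slot into
	 * devinfo_test_cases[] as KUNIT_CASE(devinfo_nonempty).
	 */
	static void devinfo_nonempty(struct kunit *test)
	{
		KUNIT_EXPECT_GT(test, iwl_dev_info_table_size, 0u);
	}
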
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/module.c b/drivers/net/wireless/intel/iwlwifi/tests/module.c
new file mode 100644
index 000000000000..0c54f818e5a7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/tests/module.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Module boilerplate for the iwlwifi kunit module.
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("kunit tests for iwlwifi");
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index c6084683aedd..687841b2fa2a 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -704,6 +704,10 @@ static void p54_set_coverage_class(struct ieee80211_hw *dev,
}
static const struct ieee80211_ops p54_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = p54_tx_80211,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = p54_start,
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index ce0179b8ab36..0073b5e0f9c9 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -700,6 +700,7 @@ static struct spi_driver p54spi_driver = {
module_spi_driver(p54spi_driver);
+MODULE_DESCRIPTION("Prism54 SPI wireless driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
MODULE_ALIAS("spi:cx3110x");
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 104d2b6dc9af..5a525da434c2 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -1132,7 +1132,7 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
if (!cmdarray[i].cmdbuf) {
lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
ret = -1;
- goto done;
+ goto free_cmd_array;
}
}
@@ -1140,8 +1140,17 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
init_waitqueue_head(&cmdarray[i].cmdwait_q);
lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]);
}
- ret = 0;
+ return 0;
+free_cmd_array:
+ for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
+ if (cmdarray[i].cmdbuf) {
+ kfree(cmdarray[i].cmdbuf);
+ cmdarray[i].cmdbuf = NULL;
+ }
+ }
+ kfree(priv->cmd_array);
+ priv->cmd_array = NULL;
done:
return ret;
}
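The fix above replaces a leaky early exit with full unwinding of the partially built array; a generic kernel-context sketch of the pattern, with hypothetical names:

	/* Hedged sketch: allocate N buffers, freeing every earlier one
	 * if any allocation fails part-way through.
	 */
	#include <linux/slab.h>

	static int alloc_all(void **bufs, int n, size_t sz)
	{
		int i;

		for (i = 0; i < n; i++) {
			bufs[i] = kzalloc(sz, GFP_KERNEL);
			if (!bufs[i])
				goto unwind;
		}
		return 0;

	unwind:
		while (--i >= 0) {
			kfree(bufs[i]);
			bufs[i] = NULL;
		}
		return -ENOMEM;
	}
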
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 199d33ed3bb9..9cca69fe04d7 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -473,6 +473,10 @@ static int lbtf_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static const struct ieee80211_ops lbtf_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = lbtf_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = lbtf_op_start,
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index da211372a481..b90f922f1cdc 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -288,6 +288,6 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
mwifiex_dbg(priv->adapter, MSG,
"indicating channel switch completion to kernel\n");
wiphy_lock(priv->wdev.wiphy);
- cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0, 0);
+ cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0);
wiphy_unlock(priv->wdev.wiphy);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 90e401100898..c0c635e74bc5 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -392,12 +392,10 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
chan_list =
(struct mwifiex_ie_types_chan_list_param_set *) *buffer;
- memset(chan_list, 0,
- sizeof(struct mwifiex_ie_types_chan_list_param_set));
+ memset(chan_list, 0, struct_size(chan_list, chan_scan_param, 1));
chan_list->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
- chan_list->header.len = cpu_to_le16(
- sizeof(struct mwifiex_ie_types_chan_list_param_set) -
- sizeof(struct mwifiex_ie_types_header));
+ chan_list->header.len =
+ cpu_to_le16(sizeof(struct mwifiex_chan_scan_param_set));
chan_list->chan_scan_param[0].chan_number =
bss_desc->bcn_ht_oper->primary_chan;
chan_list->chan_scan_param[0].radio_type =
@@ -411,8 +409,8 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
(bss_desc->bcn_ht_oper->ht_param &
IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
- *buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set);
- ret_len += sizeof(struct mwifiex_ie_types_chan_list_param_set);
+ *buffer += struct_size(chan_list, chan_scan_param, 1);
+ ret_len += struct_size(chan_list, chan_scan_param, 1);
}
if (bss_desc->bcn_bss_co_2040) {
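With chan_scan_param now a flexible array, struct_size() from <linux/overflow.h> computes header-plus-n-elements with overflow checking; a minimal sketch:

	/* Hedged sketch: equivalent of the open-coded sizeof arithmetic
	 * the patch removes, but overflow-safe.
	 */
	#include <linux/overflow.h>

	static size_t chan_list_bytes(struct mwifiex_ie_types_chan_list_param_set *p,
				      size_t n_chans)
	{
		/* sizeof(*p) covers the header; n_chans scales the flex array */
		return struct_size(p, chan_scan_param, n_chans);
	}
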
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 3604abcbcff9..b909a7665e9c 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -3359,7 +3359,7 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
}
if (!wowlan->patterns[i].pkt_offset) {
- if (!(byte_seq[0] & 0x01) &&
+ if (is_unicast_ether_addr(byte_seq) &&
(byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) {
mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST;
continue;
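The conversion above swaps an open-coded bit test for the named helper; a small equivalence sketch (helpers from <linux/etherdevice.h>):

	/* Hedged sketch: the I/G (multicast) bit is bit 0 of octet 0, so a
	 * unicast address is exactly one with that bit clear.
	 */
	#include <linux/etherdevice.h>

	static bool unicast_old_way(const u8 *addr)
	{
		return !(addr[0] & 0x01);		/* what the code spelled out */
	}

	static bool unicast_new_way(const u8 *addr)
	{
		return is_unicast_ether_addr(addr);	/* same test, by name */
	}
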
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index f9c9fec7c792..9deaf59dcb62 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -566,14 +566,8 @@ mwifiex_verext_write(struct file *file, const char __user *ubuf,
int ret;
u32 versionstrsel;
struct mwifiex_private *priv = (void *)file->private_data;
- char buf[16];
- memset(buf, 0, sizeof(buf));
-
- if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
- return -EFAULT;
-
- ret = kstrtou32(buf, 10, &versionstrsel);
+ ret = kstrtou32_from_user(ubuf, count, 10, &versionstrsel);
if (ret)
return ret;
@@ -874,19 +868,14 @@ mwifiex_timeshare_coex_write(struct file *file, const char __user *ubuf,
{
bool timeshare_coex;
struct mwifiex_private *priv = file->private_data;
- char kbuf[16];
int ret;
if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15)
return -EOPNOTSUPP;
- memset(kbuf, 0, sizeof(kbuf));
-
- if (copy_from_user(&kbuf, ubuf, min_t(size_t, sizeof(kbuf) - 1, count)))
- return -EFAULT;
-
- if (kstrtobool(kbuf, &timeshare_coex))
- return -EINVAL;
+ ret = kstrtobool_from_user(ubuf, count, &timeshare_coex);
+ if (ret)
+ return ret;
ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
HostCmd_ACT_GEN_SET, 0, &timeshare_coex, true);
@@ -970,9 +959,6 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name,
mwifiex_dfs_dir);
- if (!priv->dfs_dev_dir)
- return;
-
MWIFIEX_DFS_ADD_FILE(info);
MWIFIEX_DFS_ADD_FILE(debug);
MWIFIEX_DFS_ADD_FILE(getlog);
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 62f3c9a52a1d..3adc447b715f 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -770,7 +770,7 @@ struct mwifiex_chan_scan_param_set {
struct mwifiex_ie_types_chan_list_param_set {
struct mwifiex_ie_types_header header;
- struct mwifiex_chan_scan_param_set chan_scan_param[1];
+ struct mwifiex_chan_scan_param_set chan_scan_param[];
} __packed;
struct mwifiex_ie_types_rxba_sync {
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 318b42b1896f..175882485a19 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -28,11 +28,9 @@
#include <linux/inetdevice.h>
#include <linux/devcoredump.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index a2ddac363b10..0326b121747c 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -664,15 +664,14 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* Copy the current channel TLV to the command being
prepared */
- memcpy(chan_tlv_out->chan_scan_param + tlv_idx,
+ memcpy(&chan_tlv_out->chan_scan_param[tlv_idx],
tmp_chan_list,
- sizeof(chan_tlv_out->chan_scan_param));
+ sizeof(*chan_tlv_out->chan_scan_param));
/* Increment the TLV header length by the size
appended */
le16_unaligned_add_cpu(&chan_tlv_out->header.len,
- sizeof(
- chan_tlv_out->chan_scan_param));
+ sizeof(*chan_tlv_out->chan_scan_param));
/*
* The tlv buffer length is set to the number of bytes
@@ -2369,12 +2368,11 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
chan_idx < MWIFIEX_BG_SCAN_CHAN_MAX &&
bgscan_cfg_in->chan_list[chan_idx].chan_number;
chan_idx++) {
- temp_chan = chan_list_tlv->chan_scan_param + chan_idx;
+ temp_chan = &chan_list_tlv->chan_scan_param[chan_idx];
/* Increment the TLV header length by size appended */
le16_unaligned_add_cpu(&chan_list_tlv->header.len,
- sizeof(
- chan_list_tlv->chan_scan_param));
+ sizeof(*chan_list_tlv->chan_scan_param));
temp_chan->chan_number =
bgscan_cfg_in->chan_list[chan_idx].chan_number;
@@ -2413,7 +2411,7 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
chan_scan_param);
le16_unaligned_add_cpu(&chan_list_tlv->header.len,
chan_num *
- sizeof(chan_list_tlv->chan_scan_param[0]));
+ sizeof(*chan_list_tlv->chan_scan_param));
}
tlv_pos += (sizeof(chan_list_tlv->header)
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index 00a5679b5c51..8558995e8fc7 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -871,7 +871,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
}
} else {
memcpy(ra, skb->data, ETH_ALEN);
- if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
+ if (is_multicast_ether_addr(ra) || mwifiex_is_skb_mgmt_frame(skb))
eth_broadcast_addr(ra);
ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
}
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 13bcb123d122..ce8fea76dbb2 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -5610,6 +5610,10 @@ static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops mwl8k_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mwl8k_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = mwl8k_start,
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index d6575fe18c6b..f7f2d9a8ab0f 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_MT792x_USB) += mt792x-usb.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
- tx.o agg-rx.o mcu.o
+ tx.o agg-rx.o mcu.o wed.o
mt76-$(CONFIG_PCI) += pci.o
mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 10cbd9e560e7..07c386c7b4d0 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -122,7 +122,7 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
struct ieee80211_bar *bar = mt76_skb_get_hdr(skb);
struct mt76_wcid *wcid = status->wcid;
struct mt76_rx_tid *tid;
- u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
+ u8 tidno;
u16 seqno;
if (!ieee80211_is_ctl(bar->frame_control))
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 00230f106294..72a7bd5a8576 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -197,9 +197,8 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
q->tail = q->head;
}
-static void
-__mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
- bool reset_idx)
+void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx)
{
if (!q || !q->ndesc)
return;
@@ -219,8 +218,7 @@ __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
mt76_dma_sync_idx(dev, q);
}
-static void
-mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
__mt76_dma_queue_reset(dev, q, true);
}
@@ -632,9 +630,8 @@ free_skb:
return ret;
}
-static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
- bool allow_direct)
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ bool allow_direct)
{
int len = SKB_WITH_OVERHEAD(q->buf_size);
int frames = 0;
@@ -681,81 +678,6 @@ done:
return frames;
}
-int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
-{
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
- int ret = 0, type, ring;
- u16 flags;
-
- if (!q || !q->ndesc)
- return -EINVAL;
-
- flags = q->flags;
- if (!q->wed || !mtk_wed_device_active(q->wed))
- q->flags &= ~MT_QFLAG_WED;
-
- if (!(q->flags & MT_QFLAG_WED))
- return 0;
-
- type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
- ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
-
- switch (type) {
- case MT76_WED_Q_TX:
- ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
- reset);
- if (!ret)
- q->wed_regs = q->wed->tx_ring[ring].reg_base;
- break;
- case MT76_WED_Q_TXFREE:
- /* WED txfree queue needs ring to be initialized before setup */
- q->flags = 0;
- mt76_dma_queue_reset(dev, q);
- mt76_dma_rx_fill(dev, q, false);
-
- ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
- if (!ret)
- q->wed_regs = q->wed->txfree_ring.reg_base;
- break;
- case MT76_WED_Q_RX:
- ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
- reset);
- if (!ret)
- q->wed_regs = q->wed->rx_ring[ring].reg_base;
- break;
- case MT76_WED_RRO_Q_DATA:
- q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
- mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
- q->head = q->ndesc - 1;
- q->queued = q->head;
- break;
- case MT76_WED_RRO_Q_MSDU_PG:
- q->flags &= ~MT_QFLAG_WED;
- __mt76_dma_queue_reset(dev, q, false);
- mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
- q->head = q->ndesc - 1;
- q->queued = q->head;
- break;
- case MT76_WED_RRO_Q_IND:
- q->flags &= ~MT_QFLAG_WED;
- mt76_dma_queue_reset(dev, q);
- mt76_dma_rx_fill(dev, q, false);
- mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- q->flags = flags;
-
- return ret;
-#else
- return 0;
-#endif
-}
-EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);
-
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
@@ -800,7 +722,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (ret)
return ret;
- ret = mt76_dma_wed_setup(dev, q, false);
+ ret = mt76_wed_dma_setup(dev, q, false);
if (ret)
return ret;
@@ -863,7 +785,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
mt76_dma_rx_cleanup(dev, q);
/* reset WED rx queues */
- mt76_dma_wed_setup(dev, q, true);
+ mt76_wed_dma_setup(dev, q, true);
if (mt76_queue_is_wed_tx_free(q))
return;
@@ -1054,20 +976,6 @@ void mt76_dma_attach(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);
-void mt76_dma_wed_reset(struct mt76_dev *dev)
-{
- struct mt76_mmio *mmio = &dev->mmio;
-
- if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
- return;
-
- complete(&mmio->wed_reset);
-
- if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
- dev_err(dev->dev, "wed reset complete timeout\n");
-}
-EXPORT_SYMBOL_GPL(mt76_dma_wed_reset);
-
void mt76_dma_cleanup(struct mt76_dev *dev)
{
int i;
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index c479cc6388ef..1de5a2b20f74 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -79,15 +79,18 @@ enum mt76_dma_wed_ind_reason {
int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
-int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
-void mt76_dma_wed_reset(struct mt76_dev *dev);
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ bool allow_direct);
+void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset_idx);
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q);
static inline void
mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
dev->queue_ops->reset_q(dev, q);
if (mtk_wed_device_active(&dev->mmio.wed))
- mt76_dma_wed_setup(dev, q, true);
+ mt76_wed_dma_setup(dev, q, true);
}
static inline void
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 8a3a90d1bfac..068206e48aec 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -579,13 +579,18 @@ EXPORT_SYMBOL_GPL(mt76_unregister_phy);
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
{
+ bool is_qrx = mt76_queue_is_rx(dev, q);
struct page_pool_params pp_params = {
.order = 0,
.flags = 0,
.nid = NUMA_NO_NODE,
.dev = dev->dma_dev,
};
- int idx = q - dev->q_rx;
+ int idx = is_qrx ? q - dev->q_rx : -1;
+
+ /* Allocate page_pools just for rx/wed_tx_free queues */
+ if (!is_qrx && !mt76_queue_is_wed_tx_free(q))
+ return 0;
switch (idx) {
case MT_RXQ_MAIN:
@@ -604,6 +609,9 @@ int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
pp_params.dma_dir = DMA_FROM_DEVICE;
pp_params.max_len = PAGE_SIZE;
pp_params.offset = 0;
+ /* NAPI is available just for rx queues */
+ if (idx >= 0 && idx < ARRAY_SIZE(dev->napi))
+ pp_params.napi = &dev->napi[idx];
}
q->page_pool = page_pool_create(&pp_params);
@@ -1613,8 +1621,8 @@ EXPORT_SYMBOL_GPL(mt76_get_sar_power);
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
- ieee80211_csa_finish(vif);
+ if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif, 0))
+ ieee80211_csa_finish(vif, 0);
}
void mt76_csa_finish(struct mt76_dev *dev)
@@ -1638,7 +1646,7 @@ __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.csa_active)
return;
- dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
+ dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif, 0);
}
void mt76_csa_check(struct mt76_dev *dev)
@@ -1854,19 +1862,3 @@ enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
-
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct net_device *netdev, enum tc_setup_type type,
- void *type_data)
-{
- struct mt76_phy *phy = hw->priv;
- struct mtk_wed_device *wed = &phy->dev->mmio.wed;
-
- if (!mtk_wed_device_active(wed))
- return -EOPNOTSUPP;
-
- return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
-}
-EXPORT_SYMBOL_GPL(mt76_net_setup_tc);
-#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index c3e0e23e0161..cd2e9737c3bf 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -85,113 +85,6 @@ void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
}
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
- int i;
-
- for (i = 0; i < dev->rx_token_size; i++) {
- struct mt76_txwi_cache *t;
-
- t = mt76_rx_token_release(dev, i);
- if (!t || !t->ptr)
- continue;
-
- mt76_put_page_pool_buf(t->ptr, false);
- t->ptr = NULL;
-
- mt76_put_rxwi(dev, t);
- }
-
- mt76_free_pending_rxwi(dev);
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_release_rx_buf);
-
-u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
- struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- int i, len = SKB_WITH_OVERHEAD(q->buf_size);
- struct mt76_txwi_cache *t = NULL;
-
- for (i = 0; i < size; i++) {
- enum dma_data_direction dir;
- dma_addr_t addr;
- u32 offset;
- int token;
- void *buf;
-
- t = mt76_get_rxwi(dev);
- if (!t)
- goto unmap;
-
- buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
- if (!buf)
- goto unmap;
-
- addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
- dir = page_pool_get_dma_dir(q->page_pool);
- dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
-
- desc->buf0 = cpu_to_le32(addr);
- token = mt76_rx_token_consume(dev, buf, t, addr);
- if (token < 0) {
- mt76_put_page_pool_buf(buf, false);
- goto unmap;
- }
-
- token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
-#endif
- desc->token |= cpu_to_le32(token);
- desc++;
- }
-
- return 0;
-
-unmap:
- if (t)
- mt76_put_rxwi(dev, t);
- mt76_mmio_wed_release_rx_buf(wed);
-
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_init_rx_buf);
-
-int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-
- spin_lock_bh(&dev->token_lock);
- dev->token_size = wed->wlan.token_start;
- spin_unlock_bh(&dev->token_lock);
-
- return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_enable);
-
-void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-
- spin_lock_bh(&dev->token_lock);
- dev->token_size = dev->drv->token_size;
- spin_unlock_bh(&dev->token_lock);
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_disable);
-
-void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed)
-{
- struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
-
- complete(&dev->mmio.wed_reset_complete);
-}
-EXPORT_SYMBOL_GPL(mt76_mmio_wed_reset_complete);
-#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
-
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
{
static const struct mt76_bus_ops mt76_mmio_ops = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index b20c34d5a0f7..a91f6ddacbd9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -210,6 +210,8 @@ struct mt76_queue {
u16 first;
u16 head;
u16 tail;
+ u8 hw_idx;
+ u8 ep;
int ndesc;
int queued;
int buf_size;
@@ -217,7 +219,6 @@ struct mt76_queue {
bool blocked;
u8 buf_offset;
- u8 hw_idx;
u16 flags;
struct mtk_wed_device *wed;
@@ -1081,12 +1082,6 @@ bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct net_device *netdev, enum tc_setup_type type,
- void *type_data);
-#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
-
static inline u16 mt76_chip(struct mt76_dev *dev)
{
return dev->rev >> 16;
@@ -1097,13 +1092,34 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
return dev->rev & 0xffff;
}
+void mt76_wed_release_rx_buf(struct mtk_wed_device *wed);
+void mt76_wed_offload_disable(struct mtk_wed_device *wed);
+void mt76_wed_reset_complete(struct mtk_wed_device *wed);
+void mt76_wed_dma_reset(struct mt76_dev *dev);
+int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
-u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
-void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed);
-int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed);
-void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed);
-void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed);
-#endif /*CONFIG_NET_MEDIATEK_SOC_WED */
+u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
+int mt76_wed_offload_enable(struct mtk_wed_device *wed);
+int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
+#else
+static inline u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+ return 0;
+}
+
+static inline int mt76_wed_offload_enable(struct mtk_wed_device *wed)
+{
+ return 0;
+}
+
+static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
+ bool reset)
+{
+ return 0;
+}
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
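The #else branch above is the usual Kconfig-stub idiom; a generic sketch with hypothetical names:

	/* Hedged sketch: when CONFIG_FOO is off, static inline no-op stubs
	 * keep call sites free of #ifdefs and compile away entirely.
	 */
	#ifdef CONFIG_FOO				/* hypothetical option */
	int foo_setup(struct foo_dev *dev);		/* real implementation */
	#else
	static inline int foo_setup(struct foo_dev *dev)
	{
		return 0;				/* success, nothing to do */
	}
	#endif
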
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
@@ -1470,13 +1486,6 @@ static inline bool mt76u_urb_error(struct urb *urb)
urb->status != -ENOENT;
}
-/* Map hardware queues to usb endpoints */
-static inline u8 q2ep(u8 qid)
-{
- /* TODO: take management packets to queue 5 */
- return qid + 1;
-}
-
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
int timeout, int ep)
@@ -1598,6 +1607,18 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
struct mt76_power_limits *dest,
s8 target_power);
+static inline bool mt76_queue_is_rx(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
+ if (q == &dev->q_rx[i])
+ return true;
+ }
+
+ return false;
+}
+
static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
{
return (q->flags & MT_QFLAG_WED) &&
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 89d738deea62..9b49267b1eab 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -701,6 +701,10 @@ static void mt7603_tx(struct ieee80211_hw *hw,
}
const struct ieee80211_ops mt7603_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7603_tx,
.start = mt7603_start,
.stop = mt7603_stop,
@@ -728,6 +732,7 @@ const struct ieee80211_ops mt7603_ops = {
.set_sar_specs = mt7603_set_sar_specs,
};
+MODULE_DESCRIPTION("MediaTek MT7603E and MT76x8 wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
static int __init mt7603_init(void)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index dab16b5fc386..0971c164b57e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -1375,4 +1375,5 @@ const struct ieee80211_ops mt7615_ops = {
};
EXPORT_SYMBOL_GPL(mt7615_ops);
+MODULE_DESCRIPTION("MediaTek MT7615E and MT7663E wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index ae34d019e588..c807bd8d928d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -353,7 +353,7 @@ static void
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
if (vif->bss_conf.csa_active)
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index ac036a072439..87a956ea3ad7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -270,4 +270,5 @@ static void __exit mt7615_exit(void)
module_init(mt7615_init);
module_exit(mt7615_exit);
+MODULE_DESCRIPTION("MediaTek MT7615E MMIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 67cedd2555f9..9692890ba51b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -253,4 +253,5 @@ module_sdio_driver(mt7663s_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7663S (SDIO) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 04963b9f7498..df737e1ff27b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -281,4 +281,5 @@ module_usb_driver(mt7663u_driver);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7663U (USB) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index 0052d103e276..820b39590027 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -349,4 +349,5 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT7663 SDIO/USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index fdde3d70b300..98d64d3d2993 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -227,6 +227,11 @@ static inline bool is_mt7992(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7992;
}
+static inline bool is_mt799x(struct mt76_dev *dev)
+{
+ return is_mt7996(dev) || is_mt7992(dev);
+}
+
static inline bool is_mt7622(struct mt76_dev *dev)
{
if (!IS_ENABLED(CONFIG_MT7622_WMAC))
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
index bd2a92467a97..5f132115ebfc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
@@ -32,6 +32,11 @@ enum {
MT_LMAC_PSMP0,
};
+enum {
+ MT_TXS_MPDU_FMT = 0,
+ MT_TXS_PPDU_FMT = 2,
+};
+
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_COUNT GENMASK(12, 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index c7914643e9c0..b841bf628d02 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -544,7 +544,7 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD5_PID, pid);
if (pid >= MT_PACKET_ID_FIRST) {
val |= MT_TXD5_TX_STATUS_HOST;
- amsdu_en = amsdu_en && !is_mt7921(dev);
+ amsdu_en = 0;
}
txwi[5] = cpu_to_le32(val);
@@ -579,6 +579,8 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
spe_idx = 24 + phy_idx;
txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, spe_idx));
}
+
+ txwi[7] &= ~cpu_to_le32(MT_TXD7_HW_AMSDU);
}
}
EXPORT_SYMBOL_GPL(mt76_connac2_mac_write_txwi);
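[Editor's note] The hunk above forces amsdu_en off whenever a host tx-status report is requested and additionally clears the HW A-MSDU bit in the TXD7 descriptor word. A minimal sketch of that little-endian bit clear; the bit position and the byte-swap helper are illustrative stand-ins, not the real descriptor layout:

#include <stdint.h>
#include <stdio.h>

#define TXD7_HW_AMSDU (1u << 10)  /* illustrative bit position */

/* little-endian host assumed, so the swap is a no-op */
static uint32_t cpu_to_le32(uint32_t v) { return v; }

int main(void)
{
	uint32_t txwi7 = cpu_to_le32(TXD7_HW_AMSDU | 0x5);

	txwi7 &= ~cpu_to_le32(TXD7_HW_AMSDU); /* disable HW A-MSDU */
	printf("0x%08x\n", txwi7);            /* prints 0x00000005 */
	return 0;
}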
@@ -714,6 +716,9 @@ bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct sk_buff_head list;
struct sk_buff *skb;
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == MT_TXS_PPDU_FMT)
+ return false;
+
mt76_tx_status_lock(dev, &list);
skb = mt76_tx_status_skb_get(dev, wcid, pid, &list);
if (skb) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 96494ba2fdf7..af0c2b2aacb0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -66,7 +66,7 @@ int mt76_connac_mcu_init_download(struct mt76_dev *dev, u32 addr, u32 len,
if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
(is_mt7921(dev) && addr == 0x900000) ||
- (is_mt7925(dev) && addr == 0x900000) ||
+ (is_mt7925(dev) && (addr == 0x900000 || addr == 0xe0002800)) ||
(is_mt7996(dev) && addr == 0x900000) ||
(is_mt7992(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
@@ -283,6 +283,9 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
};
struct sk_buff *skb;
+ if (is_mt799x(dev) && !wcid->sta)
+ hdr.muar_idx = 0xe;
+
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
&hdr.wlan_idx_hi);
skb = mt76_mcu_msg_alloc(dev, NULL, len);
@@ -2101,7 +2104,7 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
int j, msg_len, num_ch;
struct sk_buff *skb;
- num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len;
+ num_ch = i == batch_size - 1 ? n_chan - i * batch_len : batch_len;
msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
if (!skb) {
@@ -3160,4 +3163,5 @@ exit:
EXPORT_SYMBOL_GPL(mt76_connac2_mcu_fill_message);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT76x connac layer helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index ae6d0179727d..657a4d1f856b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -808,6 +808,7 @@ enum {
STA_REC_MLD = 0x20,
STA_REC_EHT = 0x22,
STA_REC_PN_INFO = 0x26,
+ STA_REC_KEY_V3 = 0x27,
STA_REC_HDRT = 0x28,
STA_REC_HDR_TRANS = 0x2B,
STA_REC_MAX_NUM
@@ -935,6 +936,9 @@ enum {
PHY_TYPE_INDEX_NUM
};
+#define HR_DSSS_ERP_BASIC_RATE GENMASK(3, 0)
+#define OFDM_BASIC_RATE (BIT(6) | BIT(8) | BIT(10))
+
#define PHY_TYPE_BIT_HR_DSSS BIT(PHY_TYPE_HR_DSSS_INDEX)
#define PHY_TYPE_BIT_ERP BIT(PHY_TYPE_ERP_INDEX)
#define PHY_TYPE_BIT_OFDM BIT(PHY_TYPE_OFDM_INDEX)
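[Editor's note] The two basic-rate bitmaps added above are built from the kernel's BIT()/GENMASK() macros: GENMASK(3, 0) covers the four low rate-table entries used on 2 GHz, while the OFDM bitmap ORs three individual entries (bits 6, 8 and 10). A host-side sketch with reimplemented macros, just to show the resulting mask values:

#include <stdio.h>

#define BIT(n)        (1u << (n))
#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

#define HR_DSSS_ERP_BASIC_RATE GENMASK(3, 0)
#define OFDM_BASIC_RATE        (BIT(6) | BIT(8) | BIT(10))

int main(void)
{
	printf("2 GHz basic rates: 0x%04x\n", HR_DSSS_ERP_BASIC_RATE); /* 0x000f */
	printf("OFDM basic rates:  0x%04x\n", OFDM_BASIC_RATE);        /* 0x0540 */
	return 0;
}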
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index c3a392a1a659..bcd24c9072ec 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -342,4 +342,5 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
return 0;
}
+MODULE_DESCRIPTION("MediaTek MT76x EEPROM helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 9277ff38b7a2..79b7996ad1a8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -59,6 +59,10 @@ mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static const struct ieee80211_ops mt76x0e_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x0e_start,
.stop = mt76x0e_stop,
@@ -302,6 +306,7 @@ static const struct pci_device_id mt76x0e_device_table[] = {
MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
MODULE_FIRMWARE(MT7610E_FIRMWARE);
MODULE_FIRMWARE(MT7650E_FIRMWARE);
+MODULE_DESCRIPTION("MediaTek MT76x0E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
static struct pci_driver mt76x0e_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 0422c332354a..bba44f289b4e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -118,6 +118,10 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops mt76x0u_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x0u_start,
.stop = mt76x0u_stop,
@@ -336,6 +340,7 @@ err:
MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
MODULE_FIRMWARE(MT7610E_FIRMWARE);
MODULE_FIRMWARE(MT7610U_FIRMWARE);
+MODULE_DESCRIPTION("MediaTek MT76x0U (USB) wireless driver");
MODULE_LICENSE("GPL");
static struct usb_driver mt76x0_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 85a78dea4085..29b9a15f8dbe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -67,7 +67,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
- int pid, len = tx_info->skb->len, ep = q2ep(dev->mphy.q_tx[qid]->hw_idx);
+ int pid, len = tx_info->skb->len, ep = dev->mphy.q_tx[qid]->ep;
struct mt76x02_txwi *txwi;
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index 02da543dfc5c..b2cc44914294 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -293,4 +293,5 @@ void mt76x02u_init_mcu(struct mt76_dev *dev)
EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x02 MCU helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 8a0e8124b894..8020446be37b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -696,4 +696,5 @@ void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x02_config_mac_addr_list);
+MODULE_DESCRIPTION("MediaTek MT76x02 helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index 8c01855885ce..1fe5f5a02f93 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -506,4 +506,5 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76x2_eeprom_init);
+MODULE_DESCRIPTION("MediaTek MT76x2 EEPROM helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index df85ebc6e1df..30959746e924 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -165,6 +165,7 @@ mt76x2e_resume(struct pci_dev *pdev)
MODULE_DEVICE_TABLE(pci, mt76x2e_device_table);
MODULE_FIRMWARE(MT7662_FIRMWARE);
MODULE_FIRMWARE(MT7662_ROM_PATCH);
+MODULE_DESCRIPTION("MediaTek MT76x2E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
static struct pci_driver mt76pci_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index b38bb7a2362b..bfc8c69f43fa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -132,6 +132,10 @@ static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
}
const struct ieee80211_ops mt76x2_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x2_start,
.stop = mt76x2_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index 55068f3252ef..e92bb871f231 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -18,6 +18,7 @@ static const struct usb_device_id mt76x2u_device_table[] = {
{ USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
{ USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */
{ USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */
+ { USB_DEVICE(0x0846, 0x9014) }, /* Netgear WNDA3100v3 */
{ USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
{ USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
{ USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */
@@ -147,4 +148,5 @@ static struct usb_driver mt76x2u_driver = {
module_usb_driver(mt76x2u_driver);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x2U (USB) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index ac07ed1f63a3..9fe390fdd730 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -103,6 +103,10 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
}
const struct ieee80211_ops mt76x2u_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt76x02_tx,
.start = mt76x2u_start,
.stop = mt76x2u_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index c91a1c54027f..0baa82c8df5a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -614,7 +614,7 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
mtk_wed_device_dma_reset(wed);
mt7915_dma_disable(dev, force);
- mt76_dma_wed_reset(&dev->mt76);
+ mt76_wed_dma_reset(&dev->mt76);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index b01edbed969c..e45361111f9b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1520,12 +1520,6 @@ void mt7915_mac_reset_work(struct work_struct *work)
if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
return;
- if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
- mtk_wed_device_stop(&dev->mt76.mmio.wed);
- if (!is_mt798x(&dev->mt76))
- mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
- }
-
ieee80211_stop_queues(mt76_hw(dev));
if (ext_phy)
ieee80211_stop_queues(ext_phy->hw);
@@ -1545,6 +1539,9 @@ void mt7915_mac_reset_work(struct work_struct *work)
mutex_lock(&dev->mt76.mutex);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mtk_wed_device_stop(&dev->mt76.mmio.wed);
+
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index df2d4279790d..3709d18da0e6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -1708,6 +1708,6 @@ const struct ieee80211_ops mt7915_ops = {
.set_radar_background = mt7915_set_radar_background,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
.net_fill_forward_path = mt7915_net_fill_forward_path,
- .net_setup_tc = mt76_net_setup_tc,
+ .net_setup_tc = mt76_wed_net_setup_tc,
#endif
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index c67c4f6ca2aa..d90f98c50039 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -228,7 +228,7 @@ mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION)
return;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
static void
@@ -463,10 +463,10 @@ static bool mt7915_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
.tolerated = true,
};
- if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ if (!(vif->bss_conf.chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR))
return false;
- cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chanreq.oper,
mt7915_check_he_obss_narrow_bw_ru_iter,
&iter_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index aff4f21e843d..d6ecd698cdcd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -490,6 +490,11 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
return dev->reg.map[i].maps + ofs;
}
+ return 0;
+}
+
+static u32 __mt7915_reg_remap_addr(struct mt7915_dev *dev, u32 addr)
+{
if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
(addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
@@ -514,15 +519,30 @@ void mt7915_memcpy_fromio(struct mt7915_dev *dev, void *buf, u32 offset,
{
u32 addr = __mt7915_reg_addr(dev, offset);
- memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ if (addr) {
+ memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ memcpy_fromio(buf, dev->mt76.mmio.regs +
+ __mt7915_reg_remap_addr(dev, offset), len);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- u32 addr = __mt7915_reg_addr(dev, offset);
+ u32 addr = __mt7915_reg_addr(dev, offset), val;
- return dev->bus_ops->rr(mdev, addr);
+ if (addr)
+ return dev->bus_ops->rr(mdev, addr);
+
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rr(mdev, __mt7915_reg_remap_addr(dev, offset));
+ spin_unlock_bh(&dev->reg_lock);
+
+ return val;
}
static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
@@ -530,7 +550,14 @@ static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
u32 addr = __mt7915_reg_addr(dev, offset);
- dev->bus_ops->wr(mdev, addr, val);
+ if (addr) {
+ dev->bus_ops->wr(mdev, addr, val);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ dev->bus_ops->wr(mdev, __mt7915_reg_remap_addr(dev, offset), val);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
@@ -538,7 +565,14 @@ static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
u32 addr = __mt7915_reg_addr(dev, offset);
- return dev->bus_ops->rmw(mdev, addr, mask, val);
+ if (addr)
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rmw(mdev, __mt7915_reg_remap_addr(dev, offset), mask, val);
+ spin_unlock_bh(&dev->reg_lock);
+
+ return val;
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
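[Editor's note] The mt7915 accessor changes above split register access into a lock-free fast path (addresses with a static mapping, where __mt7915_reg_addr() returns non-zero) and a slow path that reprograms a shared remap window under dev->reg_lock. A hypothetical userspace model of that fast/slow split, using a pthread mutex in place of the kernel spinlock:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t regs[0x1000];

static uint32_t fixed_map(uint32_t addr)
{
	/* pretend only the low window has a static mapping */
	return addr < 0x800 ? addr : 0;
}

static uint32_t remap(uint32_t addr)
{
	/* stands in for reprogramming the shared remap register */
	return addr % 0x1000;
}

static uint32_t reg_read(uint32_t addr)
{
	uint32_t mapped = fixed_map(addr), val;

	if (mapped)
		return regs[mapped];        /* fast path, no lock */

	pthread_mutex_lock(&reg_lock);      /* slow path via remap window */
	val = regs[remap(addr)];
	pthread_mutex_unlock(&reg_lock);
	return val;
}

int main(void)
{
	regs[0x10] = 0xcafe;
	printf("0x%x\n", reg_read(0x10));
	return 0;
}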
@@ -672,13 +706,13 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
}
wed->wlan.init_buf = mt7915_wed_init_buf;
- wed->wlan.offload_enable = mt76_mmio_wed_offload_enable;
- wed->wlan.offload_disable = mt76_mmio_wed_offload_disable;
- wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf;
- wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf;
+ wed->wlan.offload_enable = mt76_wed_offload_enable;
+ wed->wlan.offload_disable = mt76_wed_offload_disable;
+ wed->wlan.init_rx_buf = mt76_wed_init_rx_buf;
+ wed->wlan.release_rx_buf = mt76_wed_release_rx_buf;
wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
wed->wlan.reset = mt7915_mmio_wed_reset;
- wed->wlan.reset_complete = mt76_mmio_wed_reset_complete;
+ wed->wlan.reset_complete = mt76_wed_reset_complete;
dev->mt76.rx_token_size = wed->wlan.rx_npkt;
@@ -707,6 +741,7 @@ static int mt7915_mmio_init(struct mt76_dev *mdev,
dev = container_of(mdev, struct mt7915_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
+ spin_lock_init(&dev->reg_lock);
switch (device_id) {
case 0x7915:
@@ -958,4 +993,5 @@ static void __exit mt7915_exit(void)
module_init(mt7915_init);
module_exit(mt7915_exit);
+MODULE_DESCRIPTION("MediaTek MT7915E MMIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 4727d9c7b11d..6e79bc65f5a5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -287,6 +287,7 @@ struct mt7915_dev {
struct list_head sta_rc_list;
struct list_head twt_list;
+ spinlock_t reg_lock;
u32 hw_pattern;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
index 8b4809703efc..f5b99917c08e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -516,7 +516,8 @@ static int mt798x_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie)
if (ret)
return ret;
- if (version == 0x8a00 || version == 0x8a10 || version == 0x8b00) {
+ if (version == 0x8a00 || version == 0x8a10 ||
+ version == 0x8b00 || version == 0x8c10) {
rg_xo_01 = 0x1d59080f;
rg_xo_03 = 0x34c00fe0;
} else {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index 48433c6d5e7d..ef0c721d26e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -138,9 +138,14 @@ mt7921_regd_notifier(struct wiphy *wiphy,
if (pm->suspended)
return;
+ dev->regd_in_progress = true;
+
mt792x_mutex_acquire(dev);
mt7921_regd_update(dev);
mt792x_mutex_release(dev);
+
+ dev->regd_in_progress = false;
+ wake_up(&dev->wait);
}
int mt7921_mac_init(struct mt792x_dev *dev)
@@ -261,6 +266,7 @@ int mt7921_register_device(struct mt792x_dev *dev)
spin_lock_init(&dev->pm.wake.lock);
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
+ init_waitqueue_head(&dev->wait);
if (mt76_is_sdio(&dev->mt76))
init_waitqueue_head(&dev->mt76.sdio.wait);
spin_lock_init(&dev->pm.txq_lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 0645417e0582..ca36de34171b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -325,6 +325,19 @@ static void mt7921_roc_iter(void *priv, u8 *mac,
mt7921_mcu_abort_roc(phy, mvif, phy->roc_token_id);
}
+void mt7921_roc_abort_sync(struct mt792x_dev *dev)
+{
+ struct mt792x_phy *phy = &dev->phy;
+
+ del_timer_sync(&phy->roc_timer);
+ cancel_work_sync(&phy->roc_work);
+ if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ ieee80211_iterate_active_interfaces(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_roc_iter, (void *)phy);
+}
+EXPORT_SYMBOL_GPL(mt7921_roc_abort_sync);
+
void mt7921_roc_work(struct work_struct *work)
{
struct mt792x_phy *phy;
@@ -1418,5 +1431,6 @@ const struct ieee80211_ops mt7921_ops = {
};
EXPORT_SYMBOL_GPL(mt7921_ops);
+MODULE_DESCRIPTION("MediaTek MT7921 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index f5582477c7e4..8b4ce32a2cd1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -1272,7 +1272,7 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
.mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, alpha2),
};
int ret, valid_cnt = 0;
- u16 buf_len = 0;
+ u32 buf_len = 0;
u8 *pos;
if (!clc)
@@ -1283,7 +1283,7 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
if (mt76_find_power_limits_node(&dev->mt76))
req.cap |= CLC_CAP_DTS_EN;
- buf_len = le16_to_cpu(clc->len) - sizeof(*clc);
+ buf_len = le32_to_cpu(clc->len) - sizeof(*clc);
pos = clc->data;
while (buf_len > 16) {
struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 1cb21133992b..3016636d18c6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -322,4 +322,5 @@ int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
enum mt7921_roc_req type, u8 token_id);
int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
u8 token_id);
+void mt7921_roc_abort_sync(struct mt792x_dev *dev);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 57903c6e4f11..cda853e86676 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/of.h>
#include "mt7921.h"
#include "../mt76_connac2_mac.h"
@@ -369,6 +370,9 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free_irq;
+ if (of_property_read_bool(dev->mt76.dev->of_node, "wakeup-source"))
+ device_init_wakeup(dev->mt76.dev, true);
+
return 0;
err_free_irq:
@@ -386,7 +390,11 @@ static void mt7921_pci_remove(struct pci_dev *pdev)
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ if (of_property_read_bool(dev->mt76.dev->of_node, "wakeup-source"))
+ device_init_wakeup(dev->mt76.dev, false);
+
mt7921e_unregister_device(dev);
+ set_bit(MT76_REMOVED, &mdev->phy.state);
devm_free_irq(&pdev->dev, pdev->irq, dev);
mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);
@@ -405,10 +413,15 @@ static int mt7921_pci_suspend(struct device *device)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ mt7921_roc_abort_sync(dev);
+
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto restore_suspend;
+ wait_event_timeout(dev->wait,
+ !dev->regd_in_progress, 5 * HZ);
+
err = mt76_connac_mcu_set_hif_suspend(mdev, true);
if (err)
goto restore_suspend;
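[Editor's note] The suspend path above now waits on dev->wait until the regdomain notifier clears regd_in_progress, the counterpart of the flag/wake_up() pair added in mt7921/init.c. A userspace pthread model of that waitqueue handshake; names and timings are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;
static bool regd_in_progress;

static void *regd_notifier(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	regd_in_progress = true;
	pthread_mutex_unlock(&lock);

	usleep(100 * 1000);               /* the actual regdomain update */

	pthread_mutex_lock(&lock);
	regd_in_progress = false;
	pthread_cond_broadcast(&wait_cv); /* wake_up(&dev->wait) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, regd_notifier, NULL);
	pthread_mutex_lock(&lock);
	while (regd_in_progress)          /* wait_event(): sleep until clear */
		pthread_cond_wait(&wait_cv, &lock);
	pthread_mutex_unlock(&lock);
	puts("safe to suspend");
	return pthread_join(t, NULL);
}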
@@ -544,4 +557,5 @@ MODULE_FIRMWARE(MT7922_FIRMWARE_WM);
MODULE_FIRMWARE(MT7922_ROM_PATCH);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7921E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
index 7591e54d2897..004d942ee11a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -216,6 +216,8 @@ static int mt7921s_suspend(struct device *__dev)
cancel_delayed_work_sync(&pm->ps_work);
cancel_work_sync(&pm->wake_work);
+ mt7921_roc_abort_sync(dev);
+
err = mt792x_mcu_drv_pmctrl(dev);
if (err < 0)
goto restore_suspend;
@@ -323,5 +325,6 @@ static struct sdio_driver mt7921s_driver = {
.drv.pm = pm_sleep_ptr(&mt7921s_pm_ops),
};
module_sdio_driver(mt7921s_driver);
+MODULE_DESCRIPTION("MediaTek MT7921S (SDIO) wireless driver");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index e5258c74fc07..8b7c03c47598 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -336,5 +336,6 @@ static struct usb_driver mt7921u_driver = {
};
module_usb_driver(mt7921u_driver);
+MODULE_DESCRIPTION("MediaTek MT7921U (USB) wireless driver");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index 8f9b7a2f376c..c4cbc8976046 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -2,11 +2,61 @@
/* Copyright (C) 2023 MediaTek Inc. */
#include <linux/etherdevice.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/thermal.h>
#include <linux/firmware.h>
#include "mt7925.h"
#include "mac.h"
#include "mcu.h"
+static ssize_t mt7925_thermal_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ switch (to_sensor_dev_attr(attr)->index) {
+ case 0: {
+ struct mt792x_phy *phy = dev_get_drvdata(dev);
+ struct mt792x_dev *mdev = phy->dev;
+ int temperature;
+
+ mt792x_mutex_acquire(mdev);
+ temperature = mt7925_mcu_get_temperature(phy);
+ mt792x_mutex_release(mdev);
+
+ if (temperature < 0)
+ return temperature;
+ /* display in millidegree Celsius */
+ return sprintf(buf, "%u\n", temperature * 1000);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7925_thermal_temp, 0);
+
+static struct attribute *mt7925_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mt7925_hwmon);
+
+static int mt7925_thermal_init(struct mt792x_phy *phy)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ struct device *hwmon;
+ const char *name;
+
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+	name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7925_%s",
+			      wiphy_name(wiphy));
+	if (!name)
+		return -ENOMEM;
+
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
+ mt7925_hwmon_groups);
+ return PTR_ERR_OR_ZERO(hwmon);
+}
static void
mt7925_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *req)
@@ -142,6 +192,12 @@ static void mt7925_init_work(struct work_struct *work)
return;
}
+ ret = mt7925_thermal_init(&dev->phy);
+ if (ret) {
+ dev_err(dev->mt76.dev, "thermal init failed\n");
+ return;
+ }
+
/* we support chip reset now */
dev->hw_init_done = true;
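[Editor's note] hwmon's temp*_input attributes report millidegrees Celsius, which is why the show() callback above multiplies the firmware reading (whole degrees) by 1000 and passes negative values through as error codes. A minimal sketch of that conversion and output format:

#include <stdio.h>

static int format_temp_input(char *buf, size_t len, int temp_c)
{
	if (temp_c < 0)
		return temp_c;              /* propagate firmware error codes */
	/* hwmon convention: millidegree Celsius */
	return snprintf(buf, len, "%u\n", temp_c * 1000);
}

int main(void)
{
	char buf[16];

	if (format_temp_input(buf, sizeof(buf), 42) > 0)
		fputs(buf, stdout);         /* prints "42000" */
	return 0;
}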
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 8f1075da4903..6179798a8845 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -359,6 +359,7 @@ mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mvif->sta.vif = mvif;
mt76_wcid_init(&mvif->sta.wcid);
mt7925_mac_wtbl_update(dev, idx,
@@ -526,7 +527,7 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (cmd == SET_KEY && !mvif->mt76.cipher) {
struct mt792x_phy *phy = mt792x_hw_phy(hw);
- mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mvif->mt76.cipher = mt7925_mcu_get_cipher(key->cipher);
mt7925_mcu_add_bss_info(phy, mvif->mt76.ctx, vif, sta, true);
}
@@ -710,7 +711,7 @@ static void mt7925_bss_info_changed(struct ieee80211_hw *hw,
if (slottime != phy->slottime) {
phy->slottime = slottime;
- mt792x_mac_set_timeing(phy);
+ mt7925_mcu_set_timing(phy, vif);
}
}
@@ -1274,6 +1275,25 @@ mt7925_channel_switch_beacon(struct ieee80211_hw *hw,
}
static int
+mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ static const u8 mq_to_aci[] = {
+ [IEEE80211_AC_VO] = 3,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ };
+
+ /* firmware uses access class index */
+ mvif->queue_params[mq_to_aci[queue]] = *params;
+
+ return 0;
+}
+
+static int
mt7925_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
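[Editor's note] mt7925_conf_tx() above remaps mac80211's queue order (VO, VI, BE, BK as indices 0..3) onto the WMM access-class-index order the firmware expects (BE=0, BK=1, VI=2, VO=3). A standalone sketch of that table-driven remap:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };        /* mac80211 queue order */

static const unsigned char mq_to_aci[] = {
	[AC_VO] = 3,
	[AC_VI] = 2,
	[AC_BE] = 0,
	[AC_BK] = 1,
};

int main(void)
{
	static const char * const name[] = { "VO", "VI", "BE", "BK" };

	for (int q = 0; q < 4; q++)
		printf("mac80211 queue %d (%s) -> ACI %d\n",
		       q, name[q], mq_to_aci[q]);
	return 0;
}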
@@ -1396,7 +1416,7 @@ const struct ieee80211_ops mt7925_ops = {
.add_interface = mt7925_add_interface,
.remove_interface = mt792x_remove_interface,
.config = mt7925_config,
- .conf_tx = mt792x_conf_tx,
+ .conf_tx = mt7925_conf_tx,
.configure_filter = mt7925_configure_filter,
.bss_info_changed = mt7925_bss_info_changed,
.start_ap = mt7925_start_ap,
@@ -1450,4 +1470,5 @@ const struct ieee80211_ops mt7925_ops = {
EXPORT_SYMBOL_GPL(mt7925_ops);
MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT7925 core driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index c5fd7116929b..bd37cb8d734b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -656,6 +656,42 @@ int mt7925_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl)
return ret;
}
+int mt7925_mcu_get_temperature(struct mt792x_phy *phy)
+{
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 _rsv2[4];
+ } __packed req = {
+ .tag = cpu_to_le16(0x0),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ };
+ struct mt7925_thermal_evt {
+ u8 rsv[4];
+ __le32 temperature;
+	} __packed *evt;
+ struct mt792x_dev *dev = phy->dev;
+ int temperature, ret;
+ struct sk_buff *skb;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_WM_UNI_CMD_QUERY(THERMAL),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ skb_pull(skb, 4 + sizeof(struct tlv));
+ evt = (struct mt7925_thermal_evt *)skb->data;
+
+ temperature = le32_to_cpu(evt->temperature);
+
+ dev_kfree_skb(skb);
+
+ return temperature;
+}
+
static void
mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data)
{
@@ -814,6 +850,7 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct sta_rec_hdr_trans *hdr_trans;
struct mt76_wcid *wcid;
struct tlv *tlv;
@@ -827,7 +864,11 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
else
hdr_trans->from_ds = true;
- wcid = (struct mt76_wcid *)sta->drv_priv;
+ if (sta)
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ else
+ wcid = &mvif->sta.wcid;
+
if (!wcid)
return;
@@ -895,7 +936,7 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
e = (struct edca *)tlv;
e->set = WMM_PARAM_SET;
- e->queue = ac + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
+ e->queue = ac;
e->aifs = q->aifs;
e->txop = cpu_to_le16(q->txop);
@@ -921,61 +962,67 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
+ struct mt792x_sta *msta = container_of(wcid, struct mt792x_sta, wcid);
struct sta_rec_sec_uni *sec;
+ struct mt792x_vif *mvif = msta->vif;
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
struct tlv *tlv;
- tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+ sta = msta == &mvif->sta ?
+ NULL :
+ container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V3, sizeof(*sec));
sec = (struct sta_rec_sec_uni *)tlv;
- sec->add = cmd;
+ sec->bss_idx = mvif->mt76.idx;
+ sec->is_authenticator = 0;
+ sec->mgmt_prot = 0;
+ sec->wlan_idx = (u8)wcid->idx;
+
+ if (sta) {
+ sec->tx_key = 1;
+ sec->key_type = 1;
+ memcpy(sec->peer_addr, sta->addr, ETH_ALEN);
+ } else {
+ memcpy(sec->peer_addr, vif->bss_conf.bssid, ETH_ALEN);
+ }
if (cmd == SET_KEY) {
- struct sec_key_uni *sec_key;
u8 cipher;
- cipher = mt76_connac_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
+ sec->add = 1;
+ cipher = mt7925_mcu_get_cipher(key->cipher);
+ if (cipher == CONNAC3_CIPHER_NONE)
return -EOPNOTSUPP;
- sec_key = &sec->key[0];
- sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = sta_key_conf->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, sta_key_conf->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
- sec->n_cipher = 2;
+ if (cipher == CONNAC3_CIPHER_BIP_CMAC_128) {
+ sec->cipher_id = CONNAC3_CIPHER_BIP_CMAC_128;
+ sec->key_id = sta_key_conf->keyidx;
+ sec->key_len = 32;
+ memcpy(sec->key, sta_key_conf->key, 16);
+ memcpy(sec->key + 16, key->key, 16);
} else {
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
+ sec->cipher_id = cipher;
+ sec->key_id = key->keyidx;
+ sec->key_len = key->keylen;
+ memcpy(sec->key, key->key, key->keylen);
- if (cipher == MCU_CIPHER_TKIP) {
+ if (cipher == CONNAC3_CIPHER_TKIP) {
/* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
+ memcpy(sec->key + 16, key->key + 24, 8);
+ memcpy(sec->key + 24, key->key + 16, 8);
}
/* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
+ if (cipher == CONNAC3_CIPHER_AES_CCMP) {
memcpy(sta_key_conf->key, key->key, key->keylen);
sta_key_conf->keyidx = key->keyidx;
}
-
- sec->n_cipher = 1;
}
} else {
- sec->n_cipher = 0;
+ sec->add = 0;
}
return 0;
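[Editor's note] The TKIP branch above keeps the long-standing MIC-key swap: mac80211 supplies the key as [16B temporal key][8B Tx MIC][8B Rx MIC], while the firmware wants the two MIC halves in the opposite order, so bytes 16..23 and 24..31 trade places in the command buffer. A minimal sketch of that copy:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_tkip_key(uint8_t dst[32], const uint8_t src[32])
{
	memcpy(dst, src, 16);           /* temporal key, unchanged */
	memcpy(dst + 16, src + 24, 8);  /* Rx MIC into the first MIC slot */
	memcpy(dst + 24, src + 16, 8);  /* Tx MIC into the second MIC slot */
}

int main(void)
{
	uint8_t src[32], dst[32];

	for (int i = 0; i < 32; i++)
		src[i] = (uint8_t)i;
	copy_tkip_key(dst, src);
	printf("%u %u\n", dst[16], dst[24]); /* prints "24 16" */
	return 0;
}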
@@ -1460,12 +1507,10 @@ mt7925_mcu_sta_phy_tlv(struct sk_buff *skb,
struct tlv *tlv;
u8 af = 0, mm = 0;
- if (!sta->deflink.ht_cap.ht_supported && !sta->deflink.he_6ghz_capa.capa)
- return;
-
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
phy = (struct sta_rec_phy *)tlv;
phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, chandef->chan->band, sta);
+ phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
if (sta->deflink.ht_cap.ht_supported) {
af = sta->deflink.ht_cap.ampdu_factor;
mm = sta->deflink.ht_cap.ampdu_density;
@@ -1573,8 +1618,6 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
{
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
- struct wtbl_req_hdr *wtbl_hdr;
- struct tlv *sta_wtbl;
struct sk_buff *skb;
skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid,
@@ -1598,30 +1641,11 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
mt7925_mcu_sta_state_v2_tlv(phy, skb, info->sta,
info->vif, info->rcpi,
info->state);
- mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta);
mt7925_mcu_sta_mld_tlv(skb, info->vif, info->sta);
}
- sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
- sizeof(struct tlv));
-
- wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, info->wcid,
- WTBL_RESET_AND_SET,
- sta_wtbl, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- if (info->enable) {
- mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif,
- info->sta, sta_wtbl,
- wtbl_hdr);
- mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid,
- sta_wtbl, wtbl_hdr);
- if (info->sta)
- mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
- sta_wtbl, wtbl_hdr,
- true, true);
- }
+ if (info->enable)
+ mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta);
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
}
@@ -2049,9 +2073,9 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
enum nl80211_band band = chandef->chan->band;
struct mt76_connac_bss_basic_tlv *basic_req;
- u8 idx, basic_phy;
struct tlv *tlv;
int conn_type;
+ u8 idx;
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*basic_req));
basic_req = (struct mt76_connac_bss_basic_tlv *)tlv;
@@ -2062,8 +2086,10 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, sta);
- basic_phy = mt76_connac_get_phy_mode_v2(phy, vif, band, sta);
- basic_req->nonht_basic_phy = cpu_to_le16(basic_phy);
+ if (band == NL80211_BAND_2GHZ)
+ basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_ERP_INDEX);
+ else
+ basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_OFDM_INDEX);
memcpy(basic_req->bssid, vif->bss_conf.bssid, ETH_ALEN);
basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, sta);
@@ -2122,21 +2148,21 @@ mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
sec = (struct bss_sec_tlv *)tlv;
switch (mvif->cipher) {
- case MCU_CIPHER_GCMP_256:
- case MCU_CIPHER_GCMP:
+ case CONNAC3_CIPHER_GCMP_256:
+ case CONNAC3_CIPHER_GCMP:
sec->mode = MODE_WPA3_SAE;
sec->status = 8;
break;
- case MCU_CIPHER_AES_CCMP:
+ case CONNAC3_CIPHER_AES_CCMP:
sec->mode = MODE_WPA2_PSK;
sec->status = 6;
break;
- case MCU_CIPHER_TKIP:
+ case CONNAC3_CIPHER_TKIP:
sec->mode = MODE_WPA2_PSK;
sec->status = 4;
break;
- case MCU_CIPHER_WEP104:
- case MCU_CIPHER_WEP40:
+ case CONNAC3_CIPHER_WEP104:
+ case CONNAC3_CIPHER_WEP40:
sec->mode = MODE_SHARED;
sec->status = 0;
break;
@@ -2167,6 +2193,11 @@ mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy,
bmc = (struct bss_rate_tlv *)tlv;
+ if (band == NL80211_BAND_2GHZ)
+ bmc->basic_rate = cpu_to_le16(HR_DSSS_ERP_BASIC_RATE);
+ else
+ bmc->basic_rate = cpu_to_le16(OFDM_BASIC_RATE);
+
bmc->short_preamble = (band == NL80211_BAND_2GHZ);
bmc->bc_fixed_rate = idx;
bmc->mc_fixed_rate = idx;
@@ -2249,6 +2280,38 @@ mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
vif->bss_conf.he_bss_color.color : 0;
}
+static void
+mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_phy *phy = mvif->phy;
+ struct bss_ifs_time_tlv *ifs_time;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_IFS_TIME, sizeof(*ifs_time));
+ ifs_time = (struct bss_ifs_time_tlv *)tlv;
+ ifs_time->slot_valid = true;
+ ifs_time->slot_time = cpu_to_le16(phy->slottime);
+}
+
+int mt7925_mcu_set_timing(struct mt792x_phy *phy,
+ struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_dev *dev = phy->dev;
+ struct sk_buff *skb;
+
+ skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ MT7925_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7925_mcu_bss_ifs_tlv(skb, vif);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
struct ieee80211_vif *vif,
@@ -2273,6 +2336,7 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, vif, sta);
mt7925_mcu_bss_qos_tlv(skb, vif);
mt7925_mcu_bss_mld_tlv(skb, vif, sta);
+ mt7925_mcu_bss_ifs_tlv(skb, vif);
if (vif->bss_conf.he_support) {
mt7925_mcu_bss_he_tlv(skb, vif, phy);
@@ -2845,12 +2909,16 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (cmd & __MCU_CMD_FIELD_UNI) {
uni_txd = (struct mt76_connac2_mcu_uni_txd *)txd;
uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
- uni_txd->option = MCU_CMD_UNI_EXT_ACK;
uni_txd->cid = cpu_to_le16(mcu_cmd);
uni_txd->s2d_index = MCU_S2D_H2N;
uni_txd->pkt_type = MCU_PKT_ID;
uni_txd->seq = seq;
+ if (cmd & __MCU_CMD_FIELD_QUERY)
+ uni_txd->option = MCU_CMD_UNI_QUERY_ACK;
+ else
+ uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+
goto exit;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index 3c41e21303b1..2a0bbfe7bfa5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -159,6 +159,20 @@ enum {
UNI_EVENT_SCAN_DONE_NLO = 3,
};
+enum connac3_mcu_cipher_type {
+ CONNAC3_CIPHER_NONE = 0,
+ CONNAC3_CIPHER_WEP40 = 1,
+ CONNAC3_CIPHER_TKIP = 2,
+ CONNAC3_CIPHER_AES_CCMP = 4,
+ CONNAC3_CIPHER_WEP104 = 5,
+ CONNAC3_CIPHER_BIP_CMAC_128 = 6,
+ CONNAC3_CIPHER_WEP128 = 7,
+ CONNAC3_CIPHER_WAPI = 8,
+ CONNAC3_CIPHER_CCMP_256 = 10,
+ CONNAC3_CIPHER_GCMP = 11,
+ CONNAC3_CIPHER_GCMP_256 = 12,
+};
+
struct mt7925_mcu_scan_chinfo_event {
u8 nr_chan;
u8 alpha2[3];
@@ -208,7 +222,7 @@ struct scan_req_tlv {
__le16 channel_dwell_time; /* channel Dwell interval */
__le16 timeout_value;
__le16 probe_delay_time;
- u8 func_mask_ext;
+ __le32 func_mask_ext;
};
struct scan_ssid_tlv {
@@ -334,7 +348,8 @@ struct bss_req_hdr {
struct bss_rate_tlv {
__le16 tag;
__le16 len;
- u8 __rsv1[4];
+ u8 __rsv1[2];
+ __le16 basic_rate;
__le16 bc_trans;
__le16 mc_trans;
u8 short_preamble;
@@ -382,25 +397,22 @@ struct sta_rec_eht {
u8 _rsv2[3];
} __packed;
-struct sec_key_uni {
- __le16 wlan_idx;
- u8 mgmt_prot;
- u8 cipher_id;
- u8 cipher_len;
- u8 key_id;
- u8 key_len;
- u8 need_resp;
- u8 key[32];
-} __packed;
-
struct sta_rec_sec_uni {
__le16 tag;
__le16 len;
u8 add;
- u8 n_cipher;
- u8 rsv[2];
-
- struct sec_key_uni key[2];
+ u8 tx_key;
+ u8 key_type;
+ u8 is_authenticator;
+ u8 peer_addr[6];
+ u8 bss_idx;
+ u8 cipher_id;
+ u8 key_id;
+ u8 key_len;
+ u8 wlan_idx;
+ u8 mgmt_prot;
+ u8 key[32];
+ u8 key_rsc[16];
} __packed;
struct sta_rec_hdr_trans {
@@ -428,6 +440,22 @@ struct sta_rec_mld {
} __packed link[2];
} __packed;
+struct bss_ifs_time_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 slot_valid;
+ u8 sifs_valid;
+ u8 rifs_valid;
+ u8 eifs_valid;
+ __le16 slot_time;
+ __le16 sifs_time;
+ __le16 rifs_time;
+ __le16 eifs_time;
+ u8 eifs_cck_valid;
+ u8 rsv;
+ __le16 eifs_cck_time;
+} __packed;
+
#define MT7925_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct sta_rec_basic) + \
sizeof(struct sta_rec_bf) + \
@@ -440,7 +468,7 @@ struct sta_rec_mld {
sizeof(struct sta_rec_bfee) + \
sizeof(struct sta_rec_phy) + \
sizeof(struct sta_rec_ra) + \
- sizeof(struct sta_rec_sec) + \
+ sizeof(struct sta_rec_sec_uni) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct sta_rec_eht) + \
@@ -455,6 +483,7 @@ struct sta_rec_mld {
sizeof(struct bss_mld_tlv) + \
sizeof(struct bss_info_uni_he) + \
sizeof(struct bss_info_uni_bss_color) + \
+ sizeof(struct bss_ifs_time_tlv) + \
sizeof(struct tlv))
#define MT_CONNAC3_SKU_POWER_LIMIT 449
@@ -509,6 +538,33 @@ struct mt7925_wow_pattern_tlv {
u8 rsv[4];
} __packed;
+static inline enum connac3_mcu_cipher_type
+mt7925_mcu_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return CONNAC3_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return CONNAC3_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return CONNAC3_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return CONNAC3_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return CONNAC3_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return CONNAC3_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return CONNAC3_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return CONNAC3_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return CONNAC3_CIPHER_WAPI;
+ default:
+ return CONNAC3_CIPHER_NONE;
+ }
+}
+
int mt7925_mcu_set_dbdc(struct mt76_phy *phy);
int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req);
@@ -525,6 +581,8 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
int enable);
+int mt7925_mcu_set_timing(struct mt792x_phy *phy,
+ struct ieee80211_vif *vif);
int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
int mt7925_mcu_set_channel_domain(struct mt76_phy *phy);
int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 33785f526acf..8a4a71f6bcb6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -271,6 +271,7 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7925_mcu_config_sniffer(struct mt792x_vif *vif,
struct ieee80211_chanctx_conf *ctx);
+int mt7925_mcu_get_temperature(struct mt792x_phy *phy);
int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 734f31ee40d3..07b74d492ce1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -386,6 +386,8 @@ static int mt7925_pci_probe(struct pci_dev *pdev,
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mt76_rmw_field(dev, MT_HW_EMI_CTL, MT_HW_EMI_CTL_SLPPROT_EN, 1);
+
ret = mt792x_wfsys_reset(dev);
if (ret)
goto err_free_dev;
@@ -425,6 +427,7 @@ static void mt7925_pci_remove(struct pci_dev *pdev)
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
mt7925e_unregister_device(dev);
+ set_bit(MT76_REMOVED, &mdev->phy.state);
devm_free_irq(&pdev->dev, pdev->irq, dev);
mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);
@@ -583,4 +586,5 @@ MODULE_FIRMWARE(MT7925_FIRMWARE_WM);
MODULE_FIRMWARE(MT7925_ROM_PATCH);
MODULE_AUTHOR("Deren Wu <deren.wu@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7925E (PCIe) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
index 9b885c5b3ed5..1e0f094fc905 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
@@ -329,4 +329,5 @@ static struct usb_driver mt7925u_driver = {
module_usb_driver(mt7925u_driver);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT7925U (USB) wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 3c897b34aaa7..a8556de3d480 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -186,6 +186,8 @@ struct mt792x_dev {
bool hw_init_done:1;
bool fw_assert:1;
bool has_eht:1;
+ bool regd_in_progress:1;
+ wait_queue_head_t wait;
struct work_struct init_work;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
index e7afea87e82e..9317f8ff2070 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
@@ -66,13 +66,15 @@ free:
}
/* MTCL : Country List Table for 6G band */
-static void
+static int
mt792x_asar_acpi_read_mtcl(struct mt792x_dev *dev, u8 **table, u8 *version)
{
- if (mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL) < 0)
- *version = 1;
- else
- *version = 2;
+ int ret;
+
+	ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL);
+	*version = ret < 0 ? 1 : 2;
+
+ return ret;
}
/* MTDS : Dynamic SAR Power Table */
@@ -166,16 +168,16 @@ int mt792x_init_acpi_sar(struct mt792x_dev *dev)
if (!asar)
return -ENOMEM;
- mt792x_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
+ ret = mt792x_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
+ if (ret) {
+ devm_kfree(dev->mt76.dev, asar->countrylist);
+ asar->countrylist = NULL;
+ }
- /* MTDS is mandatory. Return error if table is invalid */
ret = mt792x_asar_acpi_read_mtds(dev, (u8 **)&asar->dyn, asar->ver);
if (ret) {
devm_kfree(dev->mt76.dev, asar->dyn);
- devm_kfree(dev->mt76.dev, asar->countrylist);
- devm_kfree(dev->mt76.dev, asar);
-
- return ret;
+ asar->dyn = NULL;
}
/* MTGS is optional */
@@ -290,7 +292,7 @@ int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, bool set_default)
const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
int i;
- if (!phy->acpisar)
+ if (!phy->acpisar || !((struct mt792x_acpi_sar *)phy->acpisar)->dyn)
return 0;
/* When ACPI SAR enabled in HW, we should apply rules for .frp
@@ -353,11 +355,15 @@ static u8
mt792x_acpi_get_mtcl_map(int row, int column, struct mt792x_asar_cl *cl)
{
u8 config = 0;
+ u8 mode_6g, mode_5g9;
+
+ mode_6g = (cl->mode_6g > 0x02) ? 0 : cl->mode_6g;
+ mode_5g9 = (cl->mode_5g9 > 0x01) ? 0 : cl->mode_5g9;
- if (cl->cl6g[row] & BIT(column))
- config |= (cl->mode_6g & 0x3) << 2;
+ if ((cl->cl6g[row] & BIT(column)) || cl->mode_6g == 0x02)
+ config |= (mode_6g & 0x3) << 2;
if (cl->version > 1 && cl->cl5g9[row] & BIT(column))
- config |= (cl->mode_5g9 & 0x3);
+ config |= (mode_5g9 & 0x3);
return config;
}
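[Editor's note] The MTCL fix above clamps out-of-range ACPI mode values to 0 before packing them into the 2-bit config fields, so a corrupted table can no longer enable unsupported 6 GHz / 5.9 GHz modes; mode 0x02 for 6 GHz also applies regardless of the per-country bitmap. A simplified standalone model of that packing:

#include <stdint.h>
#include <stdio.h>

static uint8_t pack_config(uint8_t mode_6g, uint8_t mode_5g9,
			   int hit6g, int hit5g9)
{
	uint8_t config = 0;
	uint8_t m6 = (mode_6g > 0x02) ? 0 : mode_6g;    /* clamp bogus values */
	uint8_t m59 = (mode_5g9 > 0x01) ? 0 : mode_5g9;

	if (hit6g || mode_6g == 0x02)      /* mode 2 applies regardless */
		config |= (m6 & 0x3) << 2;
	if (hit5g9)
		config |= m59 & 0x3;
	return config;
}

int main(void)
{
	printf("0x%x\n", pack_config(0x7, 0x1, 1, 1)); /* 0x1: bogus 6G mode clamped */
	return 0;
}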
@@ -374,7 +380,7 @@ u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
"AT", "BE", "BG", "CY", "CZ", "HR", "DK", "EE",
"FI", "FR", "DE", "GR", "HU", "IS", "IE", "IT",
"LV", "LI", "LT", "LU", "MT", "NL", "NO", "PL",
- "PT", "RO", "MT", "SK", "SI", "ES", "CH",
+ "PT", "RO", "SK", "SI", "ES", "SE", "CH",
};
struct mt792x_acpi_sar *sar = phy->acpisar;
struct mt792x_asar_cl *cl;
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index 502be22dbe36..a405af8d9052 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -354,6 +354,7 @@ static const char mt792x_gstrings_stats[][ETH_GSTRING_LEN] = {
"v_tx_bw_40",
"v_tx_bw_80",
"v_tx_bw_160",
+ "v_tx_bw_320",
"v_tx_mcs_0",
"v_tx_mcs_1",
"v_tx_mcs_2",
@@ -684,9 +685,10 @@ mt792x_get_mac80211_ops(struct device *dev,
if (!(*fw_features & MT792x_FW_CAP_CNM)) {
ops->remain_on_channel = NULL;
ops->cancel_remain_on_channel = NULL;
- ops->add_chanctx = NULL;
- ops->remove_chanctx = NULL;
- ops->change_chanctx = NULL;
+ ops->add_chanctx = ieee80211_emulate_add_chanctx;
+ ops->remove_chanctx = ieee80211_emulate_remove_chanctx;
+ ops->change_chanctx = ieee80211_emulate_change_chanctx;
+ ops->switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx;
ops->assign_vif_chanctx = NULL;
ops->unassign_vif_chanctx = NULL;
ops->mgd_prepare_tx = NULL;
@@ -862,5 +864,6 @@ int mt792x_load_firmware(struct mt792x_dev *dev)
}
EXPORT_SYMBOL_GPL(mt792x_load_firmware);
+MODULE_DESCRIPTION("MediaTek MT792x core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
index 488326ce5ed4..5cc2d59b774a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
@@ -12,6 +12,8 @@ irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
{
struct mt792x_dev *dev = dev_instance;
+ if (test_bit(MT76_REMOVED, &dev->mt76.phy.state))
+ return IRQ_NONE;
mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
@@ -123,14 +125,13 @@ static void mt792x_dma_prefetch(struct mt792x_dev *dev)
int mt792x_dma_enable(struct mt792x_dev *dev)
{
- if (is_mt7925(&dev->mt76))
- mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
-
/* configure prefetch settings */
mt792x_dma_prefetch(dev);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+ if (is_mt7925(&dev->mt76))
+ mt76_wr(dev, MT_WFDMA0_RST_DRX_PTR, ~0);
/* configure delay interrupt */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
@@ -140,12 +141,20 @@ int mt792x_dma_enable(struct mt792x_dev *dev)
MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ FIELD_PREP(MT_WFDMA0_GLO_CFG_DMA_SIZE, 3) |
+ MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK |
+ MT_WFDMA0_GLO_CFG_RX_WB_DDONE |
MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ if (is_mt7925(&dev->mt76)) {
+ mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
+ mt76_set(dev, MT_WFDMA0_INT_RX_PRI, 0x0F00);
+ mt76_set(dev, MT_WFDMA0_INT_TX_PRI, 0x7F00);
+ }
mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
/* enable interrupts for TX/RX rings */
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
index a99af23e4b56..458cfd0260b1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
@@ -292,9 +292,12 @@
#define MT_WFDMA0_GLO_CFG_TX_DMA_BUSY BIT(1)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA0_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WFDMA0_GLO_CFG_DMA_SIZE GENMASK(5, 4)
#define MT_WFDMA0_GLO_CFG_TX_WB_DDONE BIT(6)
#define MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL BIT(9)
+#define MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK BIT(11)
#define MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MT_WFDMA0_GLO_CFG_RX_WB_DDONE BIT(13)
#define MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN BIT(15)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
@@ -322,6 +325,8 @@
#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
#define MT_WFDMA0_RST_DRX_PTR MT_WFDMA0(0x280)
+#define MT_WFDMA0_INT_RX_PRI MT_WFDMA0(0x298)
+#define MT_WFDMA0_INT_TX_PRI MT_WFDMA0(0x29c)
#define MT_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0)
#define MT_WFDMA0_CSR_TX_DMASHDL_ENABLE BIT(6)
#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
@@ -389,6 +394,9 @@
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
+#define MT_HW_EMI_CTL 0x18011100
+#define MT_HW_EMI_CTL_SLPPROT_EN BIT(1)
+
#define MT_PCIE_MAC_BASE 0x10000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
index 2dd283caed36..b49668a4b784 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
@@ -121,44 +121,25 @@ static void mt792xu_uhw_wr(struct mt76_dev *dev, u32 addr, u32 val)
static void mt792xu_dma_prefetch(struct mt792x_dev *dev)
{
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(0),
- MT_WPDMA0_BASE_PTR_MASK, 0x80);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(1),
- MT_WPDMA0_BASE_PTR_MASK, 0xc0);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(2),
- MT_WPDMA0_BASE_PTR_MASK, 0x100);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(3),
- MT_WPDMA0_BASE_PTR_MASK, 0x140);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(4),
- MT_WPDMA0_BASE_PTR_MASK, 0x180);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(16),
- MT_WPDMA0_BASE_PTR_MASK, 0x280);
-
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
- MT_WPDMA0_MAX_CNT_MASK, 4);
- mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL(17),
- MT_WPDMA0_BASE_PTR_MASK, 0x2c0);
+#define DMA_PREFETCH_CONF(_idx_, _cnt_, _base_) \
+ mt76_rmw(dev, MT_UWFDMA0_TX_RING_EXT_CTRL((_idx_)), \
+ MT_WPDMA0_MAX_CNT_MASK | MT_WPDMA0_BASE_PTR_MASK, \
+ FIELD_PREP(MT_WPDMA0_MAX_CNT_MASK, (_cnt_)) | \
+ FIELD_PREP(MT_WPDMA0_BASE_PTR_MASK, (_base_)))
+
+ DMA_PREFETCH_CONF(0, 4, 0x080);
+ DMA_PREFETCH_CONF(1, 4, 0x0c0);
+ DMA_PREFETCH_CONF(2, 4, 0x100);
+ DMA_PREFETCH_CONF(3, 4, 0x140);
+ DMA_PREFETCH_CONF(4, 4, 0x180);
+ DMA_PREFETCH_CONF(16, 4, 0x280);
+ DMA_PREFETCH_CONF(17, 4, 0x2c0);
}
static void mt792xu_wfdma_init(struct mt792x_dev *dev)
{
+ int i;
+
mt792xu_dma_prefetch(dev);
mt76_clear(dev, MT_UWFDMA0_GLO_CFG, MT_WFDMA0_GLO_CFG_OMIT_RX_INFO);
@@ -169,10 +150,27 @@ static void mt792xu_wfdma_init(struct mt792x_dev *dev)
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- /* disable dmashdl */
- mt76_clear(dev, MT_UWFDMA0_GLO_CFG_EXT0,
- MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
- mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
+ mt76_rmw(dev, MT_DMASHDL_REFILL, MT_DMASHDL_REFILL_MASK, 0xffe00000);
+ mt76_clear(dev, MT_DMASHDL_PAGE, MT_DMASHDL_GROUP_SEQ_ORDER);
+ mt76_rmw(dev, MT_DMASHDL_PKT_MAX_SIZE,
+ MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+ FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 0));
+ for (i = 0; i < 5; i++)
+ mt76_wr(dev, MT_DMASHDL_GROUP_QUOTA(i),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0xfff));
+ for (i = 5; i < 16; i++)
+ mt76_wr(dev, MT_DMASHDL_GROUP_QUOTA(i),
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x0) |
+ FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x0));
+ mt76_wr(dev, MT_DMASHDL_Q_MAP(0), 0x32013201);
+ mt76_wr(dev, MT_DMASHDL_Q_MAP(1), 0x32013201);
+ mt76_wr(dev, MT_DMASHDL_Q_MAP(2), 0x55555444);
+ mt76_wr(dev, MT_DMASHDL_Q_MAP(3), 0x55555444);
+
+ mt76_wr(dev, MT_DMASHDL_SCHED_SET(0), 0x76540132);
+ mt76_wr(dev, MT_DMASHDL_SCHED_SET(1), 0xFEDCBA98);
mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
}
@@ -314,5 +312,6 @@ void mt792xu_disconnect(struct usb_interface *usb_intf)
}
EXPORT_SYMBOL_GPL(mt792xu_disconnect);
+MODULE_DESCRIPTION("MediaTek MT792x USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index 483ad81b6eec..73e633d0d700 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -237,7 +237,8 @@ void mt7996_dma_start(struct mt7996_dev *dev, bool reset, bool wed_reset)
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 |
+ MT_WFDMA0_GLO_CFG_EXT_EN);
if (dev->hif2)
mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
@@ -694,7 +695,7 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);
mt7996_dma_disable(dev, force);
- mt76_dma_wed_reset(&dev->mt76);
+ mt76_wed_dma_reset(&dev->mt76);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 0cf0d1fe420a..283df84f1b43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -493,7 +493,7 @@ static void mt7996_mac_init_basic_rates(struct mt7996_dev *dev)
void mt7996_mac_init(struct mt7996_dev *dev)
{
-#define HIF_TXD_V2_1 4
+#define HIF_TXD_V2_1 0x21
int i;
mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
@@ -507,11 +507,6 @@ void mt7996_mac_init(struct mt7996_dev *dev)
mt76_rmw_field(dev, i, MT_LED_GPIO_SEL_MASK, 4);
}
- /* txs report queue */
- mt76_rmw_field(dev, MT_DMA_TCRF1(0), MT_DMA_TCRF1_QIDX, 0);
- mt76_rmw_field(dev, MT_DMA_TCRF1(1), MT_DMA_TCRF1_QIDX, 6);
- mt76_rmw_field(dev, MT_DMA_TCRF1(2), MT_DMA_TCRF1_QIDX, 0);
-
/* rro module init */
if (is_mt7996(&dev->mt76))
mt7996_mcu_set_rro(dev, UNI_RRO_SET_PLATFORM_TYPE, 2);
@@ -1012,11 +1007,12 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3;
- if (vif != NL80211_IFTYPE_AP)
+ if (!(vif == NL80211_IFTYPE_AP || vif == NL80211_IFTYPE_STATION))
return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
- elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+ if (vif == NL80211_IFTYPE_AP)
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 53258488d49f..0384fb059ddf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -732,6 +732,9 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
txwi[2] |= cpu_to_le32(val);
+
+ if (wcid->amsdu)
+ txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}
static void
@@ -862,8 +865,6 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
val |= MT_TXD3_PROTECT_FRAME;
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
val |= MT_TXD3_NO_ACK;
- if (wcid->amsdu)
- val |= MT_TXD3_HW_AMSDU;
txwi[3] = cpu_to_le32(val);
txwi[4] = 0;
@@ -1188,25 +1189,28 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_tx_info *info;
struct sk_buff_head list;
struct rate_info rate = {};
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
bool cck = false;
u32 txrate, txs, mode, stbc;
txs = le32_to_cpu(txs_data[0]);
mt76_tx_status_lock(mdev, &list);
- skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
- if (skb) {
- info = IEEE80211_SKB_CB(skb);
- if (!(txs & MT_TXS0_ACK_ERROR_MASK))
- info->flags |= IEEE80211_TX_STAT_ACK;
+ /* only report MPDU TXS */
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
+ skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
+ if (skb) {
+ info = IEEE80211_SKB_CB(skb);
+ if (!(txs & MT_TXS0_ACK_ERROR_MASK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ampdu_len = 1;
- info->status.ampdu_ack_len =
- !!(info->flags & IEEE80211_TX_STAT_ACK);
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len =
+ !!(info->flags & IEEE80211_TX_STAT_ACK);
- info->status.rates[0].idx = -1;
+ info->status.rates[0].idx = -1;
+ }
}
if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
@@ -2527,6 +2531,34 @@ static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
return 0;
}
+static bool
+mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
+ struct ieee80211_twt_params *twt_agrt)
+{
+ u16 type = le16_to_cpu(twt_agrt->req_type);
+ u8 exp;
+ int i;
+
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
+ for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
+ struct mt7996_twt_flow *f;
+
+ if (!(msta->twt.flowid_mask & BIT(i)))
+ continue;
+
+ f = &msta->twt.flow[i];
+ if (f->duration == twt_agrt->min_twt_dur &&
+ f->mantissa == twt_agrt->mantissa &&
+ f->exp == exp &&
+ f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
+ f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
+ f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
+ return true;
+ }
+
+ return false;
+}
+
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_twt_setup *twt)
@@ -2538,8 +2570,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
enum ieee80211_twt_setup_cmd sta_setup_cmd;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_twt_flow *flow;
- int flowid, table_id;
- u8 exp;
+ u8 flowid, table_id, exp;
if (mt7996_mac_check_twt_req(twt))
goto out;
@@ -2552,9 +2583,19 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
goto unlock;
+ if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
+ setup_cmd = TWT_SETUP_CMD_DICTATE;
+ twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
+ goto unlock;
+ }
+
+ if (mt7996_mac_twt_param_equal(msta, twt_agrt))
+ goto unlock;
+
flowid = ffs(~msta->twt.flowid_mask) - 1;
- le16p_replace_bits(&twt_agrt->req_type, flowid,
- IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type |= le16_encode_bits(flowid,
+ IEEE80211_TWT_REQTYPE_FLOWID);
table_id = ffs(~dev->twt.table_mask) - 1;
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
@@ -2601,10 +2642,10 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
unlock:
mutex_unlock(&dev->mt76.mutex);
out:
- le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
- IEEE80211_TWT_REQTYPE_SETUP_CMD);
- twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
- (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt_agrt->req_type |=
+ le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt->control &= IEEE80211_TWT_CONTROL_RX_DISABLED;
}
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
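
Several hunks above replace le16p_replace_bits() with an explicit clear-then-encode sequence on the little-endian req_type word; the two forms are equivalent. A sketch of the open-coded variant (mask value per the 802.11ax TWT request-type layout, reproduced here for illustration only):

	#include <linux/bitfield.h>
	#include <asm/byteorder.h>

	#define TWT_REQTYPE_FLOWID	GENMASK(9, 7)

	/* Update one field of an on-air __le16 in place: clear the field,
	 * then OR in the new value converted with le16_encode_bits(). */
	static void twt_set_flowid(__le16 *req_type, u8 flowid)
	{
		*req_type &= ~cpu_to_le16(TWT_REQTYPE_FLOWID);
		*req_type |= le16_encode_bits(flowid, TWT_REQTYPE_FLOWID);
	}
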
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 51deea84b642..f7da8d6dd903 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -350,9 +350,12 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_SMS4:
+ break;
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
- break;
+ if (key->keyidx == 6 || key->keyidx == 7)
+ break;
+ fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
default:
@@ -1450,6 +1453,10 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw,
#endif
const struct ieee80211_ops mt7996_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7996_tx,
.start = mt7996_start,
.stop = mt7996_stop,
@@ -1495,6 +1502,6 @@ const struct ieee80211_ops mt7996_ops = {
.set_radar_background = mt7996_set_radar_background,
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
.net_fill_forward_path = mt7996_net_fill_forward_path,
- .net_setup_tc = mt76_net_setup_tc,
+ .net_setup_tc = mt76_wed_net_setup_tc,
#endif
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 3c729b563edc..b44abe2acc81 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -341,7 +341,7 @@ mt7996_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION)
return;
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
static void
@@ -732,13 +732,10 @@ void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb)
static struct tlv *
mt7996_mcu_add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
{
- struct tlv *ptlv, tlv = {
- .tag = cpu_to_le16(tag),
- .len = cpu_to_le16(len),
- };
+ struct tlv *ptlv = skb_put(skb, len);
- ptlv = skb_put(skb, len);
- memcpy(ptlv, &tlv, sizeof(tlv));
+ ptlv->tag = cpu_to_le16(tag);
+ ptlv->len = cpu_to_le16(len);
return ptlv;
}
@@ -1240,6 +1237,9 @@ mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
static void
mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_vif *vif = container_of((void *)msta->vif,
+ struct ieee80211_vif, drv_priv);
struct ieee80211_eht_mcs_nss_supp *mcs_map;
struct ieee80211_eht_cap_elem_fixed *elem;
struct sta_rec_eht *eht;
@@ -1259,8 +1259,17 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info);
eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
- memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20));
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) {
+ memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz,
+ sizeof(eht->mcs_map_bw20));
+ return;
+ }
+
memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80));
memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160));
memcpy(eht->mcs_map_bw320, &mcs_map->bw._320, sizeof(eht->mcs_map_bw320));
@@ -2510,7 +2519,7 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
info = IEEE80211_SKB_CB(skb);
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
- len = sizeof(*bcn) + MT_TXD_SIZE + skb->len;
+ len = ALIGN(sizeof(*bcn) + MT_TXD_SIZE + skb->len, 4);
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_BCN_CONTENT, len);
bcn = (struct bss_bcn_content_tlv *)tlv;
bcn->enable = en;
@@ -2579,8 +2588,7 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev,
info->band = band;
info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->mt76->band_idx);
- len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
-
+ len = ALIGN(sizeof(*discov) + MT_TXD_SIZE + skb->len, 4);
tlv = mt7996_mcu_add_uni_tlv(rskb, UNI_BSS_INFO_OFFLOAD, len);
discov = (struct bss_inband_discovery_tlv *)tlv;
@@ -3539,7 +3547,7 @@ int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
u32 addr = le32_to_cpu(*(__le32 *)(skb->data + 12));
u8 *buf = (u8 *)dev->mt76.eeprom.data + addr;
- skb_pull(skb, 64);
+ skb_pull(skb, 48);
memcpy(buf, skb->data, MT7996_EEPROM_BLOCK_SIZE);
}
@@ -4477,7 +4485,8 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
skb_put_data(skb, &req, sizeof(req));
/* cck and ofdm */
- skb_put_data(skb, &la.cck, sizeof(la.cck) + sizeof(la.ofdm));
+ skb_put_data(skb, &la.cck, sizeof(la.cck));
+ skb_put_data(skb, &la.ofdm, sizeof(la.ofdm));
/* ht20 */
skb_put_data(skb, &la.mcs[0], 8);
/* ht40 */
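
mt7996_mcu_add_uni_tlv() now fills the TLV header directly in the skb tail room rather than building it on the stack and memcpy()ing it in, and callers pad the TLV length to 4 bytes with ALIGN(). A self-contained sketch of the pattern (header layout as in the mt76 mcu code, the rest is a simplified sketch):

	#include <linux/skbuff.h>
	#include <asm/byteorder.h>

	struct tlv {
		__le16 tag;
		__le16 len;
	};

	/* Reserve the TLV in the skb and write tag/len in place; the
	 * caller passes a length already padded to a 4-byte boundary. */
	static struct tlv *add_uni_tlv(struct sk_buff *skb, u16 tag, u16 len)
	{
		struct tlv *ptlv = skb_put(skb, len);

		ptlv->tag = cpu_to_le16(tag);
		ptlv->len = cpu_to_le16(len);
		return ptlv;
	}
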
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 36cacc495c75..43468bcaffc6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -800,10 +800,10 @@ enum {
sizeof(struct sta_rec_hdr_trans) + \
sizeof(struct tlv))
-#define MT7996_MAX_BEACON_SIZE 1342
+#define MT7996_MAX_BEACON_SIZE 1338
#define MT7996_BEACON_UPDATE_SIZE (sizeof(struct bss_req_hdr) + \
sizeof(struct bss_bcn_content_tlv) + \
- MT_TXD_SIZE + \
+ 4 + MT_TXD_SIZE + \
sizeof(struct bss_bcn_cntdwn_tlv) + \
sizeof(struct bss_bcn_mbss_tlv))
#define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index c50d89a445e9..304e5fd14803 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -140,7 +140,6 @@ static u32 mt7996_reg_map_l1(struct mt7996_dev *dev, u32 addr)
u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
- dev->reg_l1_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L1);
dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L1,
MT_HIF_REMAP_L1_MASK,
FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
@@ -155,7 +154,6 @@ static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
- dev->reg_l2_backup = dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
MT_HIF_REMAP_L2_MASK,
FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
@@ -165,26 +163,10 @@ static u32 mt7996_reg_map_l2(struct mt7996_dev *dev, u32 addr)
return MT_HIF_REMAP_BASE_L2 + offset;
}
-static void mt7996_reg_remap_restore(struct mt7996_dev *dev)
-{
- /* remap to ori status */
- if (unlikely(dev->reg_l1_backup)) {
- dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L1, dev->reg_l1_backup);
- dev->reg_l1_backup = 0;
- }
-
- if (dev->reg_l2_backup) {
- dev->bus_ops->wr(&dev->mt76, MT_HIF_REMAP_L2, dev->reg_l2_backup);
- dev->reg_l2_backup = 0;
- }
-}
-
static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
{
int i;
- mt7996_reg_remap_restore(dev);
-
if (addr < 0x100000)
return addr;
@@ -201,6 +183,11 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
return dev->reg.map[i].mapped + ofs;
}
+ return 0;
+}
+
+static u32 __mt7996_reg_remap_addr(struct mt7996_dev *dev, u32 addr)
+{
if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
(addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
(addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
@@ -225,28 +212,60 @@ void mt7996_memcpy_fromio(struct mt7996_dev *dev, void *buf, u32 offset,
{
u32 addr = __mt7996_reg_addr(dev, offset);
- memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ if (addr) {
+ memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ memcpy_fromio(buf, dev->mt76.mmio.regs +
+ __mt7996_reg_remap_addr(dev, offset), len);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7996_rr(struct mt76_dev *mdev, u32 offset)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ u32 addr = __mt7996_reg_addr(dev, offset), val;
+
+ if (addr)
+ return dev->bus_ops->rr(mdev, addr);
- return dev->bus_ops->rr(mdev, __mt7996_reg_addr(dev, offset));
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rr(mdev, __mt7996_reg_remap_addr(dev, offset));
+ spin_unlock_bh(&dev->reg_lock);
+
+ return val;
}
static void mt7996_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ u32 addr = __mt7996_reg_addr(dev, offset);
- dev->bus_ops->wr(mdev, __mt7996_reg_addr(dev, offset), val);
+ if (addr) {
+ dev->bus_ops->wr(mdev, addr, val);
+ return;
+ }
+
+ spin_lock_bh(&dev->reg_lock);
+ dev->bus_ops->wr(mdev, __mt7996_reg_remap_addr(dev, offset), val);
+ spin_unlock_bh(&dev->reg_lock);
}
static u32 mt7996_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+ u32 addr = __mt7996_reg_addr(dev, offset);
+
+ if (addr)
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+
+ spin_lock_bh(&dev->reg_lock);
+ val = dev->bus_ops->rmw(mdev, __mt7996_reg_remap_addr(dev, offset), mask, val);
+ spin_unlock_bh(&dev->reg_lock);
- return dev->bus_ops->rmw(mdev, __mt7996_reg_addr(dev, offset), mask, val);
+ return val;
}
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
@@ -391,13 +410,13 @@ int mt7996_mmio_wed_init(struct mt7996_dev *dev, void *pdev_ptr,
wed->wlan.amsdu_max_len = 1536;
wed->wlan.init_buf = mt7996_wed_init_buf;
- wed->wlan.init_rx_buf = mt76_mmio_wed_init_rx_buf;
- wed->wlan.release_rx_buf = mt76_mmio_wed_release_rx_buf;
- wed->wlan.offload_enable = mt76_mmio_wed_offload_enable;
- wed->wlan.offload_disable = mt76_mmio_wed_offload_disable;
+ wed->wlan.init_rx_buf = mt76_wed_init_rx_buf;
+ wed->wlan.release_rx_buf = mt76_wed_release_rx_buf;
+ wed->wlan.offload_enable = mt76_wed_offload_enable;
+ wed->wlan.offload_disable = mt76_wed_offload_disable;
if (!hif2) {
wed->wlan.reset = mt7996_mmio_wed_reset;
- wed->wlan.reset_complete = mt76_mmio_wed_reset_complete;
+ wed->wlan.reset_complete = mt76_wed_reset_complete;
}
if (mtk_wed_device_attach(wed))
@@ -421,6 +440,7 @@ static int mt7996_mmio_init(struct mt76_dev *mdev,
dev = container_of(mdev, struct mt7996_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
+ spin_lock_init(&dev->reg_lock);
switch (device_id) {
case 0x7990:
@@ -650,4 +670,5 @@ static void __exit mt7996_exit(void)
module_init(mt7996_init);
module_exit(mt7996_exit);
+MODULE_DESCRIPTION("MediaTek MT7996 MMIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
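
The mmio.c change above splits register access into a lock-free fast path for statically mapped ranges and a spinlock-guarded slow path that programs the shared remap window, replacing the old backup/restore scheme. A sketch of the idiom, with hypothetical window registers:

	#include <linux/io.h>
	#include <linux/spinlock.h>

	#define REMAP_WINDOW	0x10000	/* hypothetical window inside the BAR */
	#define REMAP_CTRL	0x00f0	/* hypothetical window-base register */

	struct mapped_dev {
		void __iomem *regs;
		spinlock_t reg_lock;	/* serializes the shared remap window */
	};

	/* Addresses below 1 MiB are mapped 1:1 and need no locking. */
	static u32 addr_fixed(u32 addr)
	{
		return addr < 0x100000 ? addr : 0;
	}

	/* Everything else goes through a single movable window: program
	 * the base, then access the offset. Base write plus access must
	 * not interleave with another CPU, hence the caller-held lock. */
	static u32 addr_remap(struct mapped_dev *dev, u32 addr)
	{
		writel(addr & ~0xffff, dev->regs + REMAP_CTRL);
		return REMAP_WINDOW + (addr & 0xffff);
	}

	static u32 dev_rr(struct mapped_dev *dev, u32 addr)
	{
		u32 fixed = addr_fixed(addr), val;

		if (fixed)
			return readl(dev->regs + fixed);

		spin_lock_bh(&dev->reg_lock);
		val = readl(dev->regs + addr_remap(dev, addr));
		spin_unlock_bh(&dev->reg_lock);

		return val;
	}
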
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index bc73bcb47bf0..36d1f247d55a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -53,6 +53,7 @@
#define MT7996_MAX_TWT_AGRT 16
#define MT7996_MAX_STA_TWT_AGRT 8
+#define MT7996_MIN_TWT_DUR 64
#define MT7996_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 3)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
@@ -320,12 +321,11 @@ struct mt7996_dev {
struct rchan *relay_fwlog;
struct {
- u8 table_mask;
+ u16 table_mask;
u8 n_agrt;
} twt;
- u32 reg_l1_backup;
- u32 reg_l2_backup;
+ spinlock_t reg_lock;
u8 wtbl_size_group;
};
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index c52d550f0c32..3e88798df017 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -672,4 +672,5 @@ EXPORT_SYMBOL_GPL(mt76s_init);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("MediaTek MT76x SDIO helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 1584665fe3cb..342c3aea549d 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -767,7 +767,7 @@ static void mt76u_status_worker(struct mt76_worker *w)
if (!test_bit(MT76_STATE_RUNNING, &dev->phy.state))
return;
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
q = dev->phy.q_tx[i];
if (!q)
continue;
@@ -872,9 +872,8 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
if (err < 0)
return err;
- mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
- q->entry[idx].urb, mt76u_complete_tx,
- &q->entry[idx]);
+ mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q->ep, q->entry[idx].urb,
+ mt76u_complete_tx, &q->entry[idx]);
q->head = (q->head + 1) % q->ndesc;
q->entry[idx].skb = tx_info.skb;
@@ -906,9 +905,13 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
}
}
-static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
+static void
+mt76u_ac_to_hwq(struct mt76_dev *dev, struct mt76_queue *q, u8 qid)
{
- if (mt76_chip(dev) == 0x7663) {
+ u8 ac = qid < IEEE80211_NUM_ACS ? qid : IEEE80211_AC_BE;
+
+ switch (mt76_chip(dev)) {
+ case 0x7663: {
static const u8 lmac_queue_map[] = {
/* ac to lmac mapping */
[IEEE80211_AC_BK] = 0,
@@ -917,33 +920,36 @@ static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
[IEEE80211_AC_VO] = 4,
};
- if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
- return 1; /* BE */
-
- return lmac_queue_map[ac];
+ q->hw_idx = lmac_queue_map[ac];
+ q->ep = q->hw_idx + 1;
+ break;
+ }
+ case 0x7961:
+ case 0x7925:
+ q->hw_idx = mt76_ac_to_hwq(ac);
+ q->ep = qid == MT_TXQ_PSD ? MT_EP_OUT_HCCA : q->hw_idx + 1;
+ break;
+ default:
+ q->hw_idx = mt76_ac_to_hwq(ac);
+ q->ep = q->hw_idx + 1;
+ break;
}
-
- return mt76_ac_to_hwq(ac);
}
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
- struct mt76_queue *q;
- int i, j, err;
+ int i;
for (i = 0; i <= MT_TXQ_PSD; i++) {
- if (i >= IEEE80211_NUM_ACS) {
- dev->phy.q_tx[i] = dev->phy.q_tx[0];
- continue;
- }
+ struct mt76_queue *q;
+ int j, err;
q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
if (!q)
return -ENOMEM;
spin_lock_init(&q->lock);
- q->hw_idx = mt76u_ac_to_hwq(dev, i);
-
+ mt76u_ac_to_hwq(dev, q, i);
dev->phy.q_tx[i] = q;
q->entry = devm_kcalloc(dev->dev,
@@ -969,7 +975,7 @@ static void mt76u_free_tx(struct mt76_dev *dev)
mt76_worker_teardown(&dev->usb.status_worker);
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
struct mt76_queue *q;
int j;
@@ -999,7 +1005,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
dev_err(dev->dev, "timed out waiting for pending tx\n");
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
q = dev->phy.q_tx[i];
if (!q)
continue;
@@ -1013,7 +1019,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
/* On device removal we might queue skbs, but mt76u_tx_kick()
* will fail to submit the urb; clean up those skbs manually.
*/
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
q = dev->phy.q_tx[i];
if (!q)
continue;
@@ -1128,4 +1134,5 @@ int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
EXPORT_SYMBOL_GPL(mt76u_init);
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
+MODULE_DESCRIPTION("MediaTek MT76x USB helpers");
MODULE_LICENSE("Dual BSD/GPL");
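
The usb.c rework iterates up to MT_TXQ_PSD and gives each chip its own queue/endpoint mapping instead of aliasing the PSD queue to queue 0. A sketch of the shape, with hypothetical chip IDs and endpoint numbers:

	#include <linux/types.h>

	enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS, TXQ_PSD = NUM_ACS };

	struct usb_queue {
		u8 hw_idx;
		u8 ep;
	};

	/* Clamp non-AC queues (e.g. PSD) to best effort, then let each
	 * chip override the endpoint assignment. */
	static void queue_map(u16 chip, struct usb_queue *q, u8 qid)
	{
		u8 ac = qid < NUM_ACS ? qid : AC_BE;

		q->hw_idx = ac;		/* default: hw queue == AC index */
		q->ep = q->hw_idx + 1;	/* default: endpoint follows hw queue */

		switch (chip) {
		case 0x7961:
			if (qid == TXQ_PSD)
				q->ep = 5;	/* hypothetical dedicated ep */
			break;
		default:
			break;
		}
	}
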
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index fc76c66ff1a5..d6c01a2dd198 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -138,4 +138,5 @@ int __mt76_worker_fn(void *ptr)
}
EXPORT_SYMBOL_GPL(__mt76_worker_fn);
+MODULE_DESCRIPTION("MediaTek MT76x helpers");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/wed.c b/drivers/net/wireless/mediatek/mt76/wed.c
new file mode 100644
index 000000000000..f89e4537555c
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/wed.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2023 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include "mt76.h"
+#include "dma.h"
+
+void mt76_wed_release_rx_buf(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ int i;
+
+ for (i = 0; i < dev->rx_token_size; i++) {
+ struct mt76_txwi_cache *t;
+
+ t = mt76_rx_token_release(dev, i);
+ if (!t || !t->ptr)
+ continue;
+
+ mt76_put_page_pool_buf(t->ptr, false);
+ t->ptr = NULL;
+
+ mt76_put_rxwi(dev, t);
+ }
+
+ mt76_free_pending_rxwi(dev);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_release_rx_buf);
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+ struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i, len = SKB_WITH_OVERHEAD(q->buf_size);
+ struct mt76_txwi_cache *t = NULL;
+
+ for (i = 0; i < size; i++) {
+ enum dma_data_direction dir;
+ dma_addr_t addr;
+ u32 offset;
+ int token;
+ void *buf;
+
+ t = mt76_get_rxwi(dev);
+ if (!t)
+ goto unmap;
+
+ buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ if (!buf)
+ goto unmap;
+
+ addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+ dir = page_pool_get_dma_dir(q->page_pool);
+ dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
+
+ desc->buf0 = cpu_to_le32(addr);
+ token = mt76_rx_token_consume(dev, buf, t, addr);
+ if (token < 0) {
+ mt76_put_page_pool_buf(buf, false);
+ goto unmap;
+ }
+
+ token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
+#endif
+ desc->token |= cpu_to_le32(token);
+ desc++;
+ }
+
+ return 0;
+
+unmap:
+ if (t)
+ mt76_put_rxwi(dev, t);
+ mt76_wed_release_rx_buf(wed);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(mt76_wed_init_rx_buf);
+
+int mt76_wed_offload_enable(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ spin_lock_bh(&dev->token_lock);
+ dev->token_size = wed->wlan.token_start;
+ spin_unlock_bh(&dev->token_lock);
+
+ return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_offload_enable);
+
+int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
+{
+ int ret = 0, type, ring;
+ u16 flags;
+
+ if (!q || !q->ndesc)
+ return -EINVAL;
+
+ flags = q->flags;
+ if (!q->wed || !mtk_wed_device_active(q->wed))
+ q->flags &= ~MT_QFLAG_WED;
+
+ if (!(q->flags & MT_QFLAG_WED))
+ return 0;
+
+ type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
+ ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);
+
+ switch (type) {
+ case MT76_WED_Q_TX:
+ ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
+ reset);
+ if (!ret)
+ q->wed_regs = q->wed->tx_ring[ring].reg_base;
+ break;
+ case MT76_WED_Q_TXFREE:
+ /* WED txfree queue needs ring to be initialized before setup */
+ q->flags = 0;
+ mt76_dma_queue_reset(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+
+ ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
+ if (!ret)
+ q->wed_regs = q->wed->txfree_ring.reg_base;
+ break;
+ case MT76_WED_Q_RX:
+ ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
+ reset);
+ if (!ret)
+ q->wed_regs = q->wed->rx_ring[ring].reg_base;
+ break;
+ case MT76_WED_RRO_Q_DATA:
+ q->flags &= ~MT_QFLAG_WED;
+ __mt76_dma_queue_reset(dev, q, false);
+ mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
+ q->head = q->ndesc - 1;
+ q->queued = q->head;
+ break;
+ case MT76_WED_RRO_Q_MSDU_PG:
+ q->flags &= ~MT_QFLAG_WED;
+ __mt76_dma_queue_reset(dev, q, false);
+ mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
+ q->head = q->ndesc - 1;
+ q->queued = q->head;
+ break;
+ case MT76_WED_RRO_Q_IND:
+ q->flags &= ~MT_QFLAG_WED;
+ mt76_dma_queue_reset(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+ mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ q->flags = flags;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76_wed_dma_setup);
+#endif /* CONFIG_NET_MEDIATEK_SOC_WED */
+
+void mt76_wed_offload_disable(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ spin_lock_bh(&dev->token_lock);
+ dev->token_size = dev->drv->token_size;
+ spin_unlock_bh(&dev->token_lock);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_offload_disable);
+
+void mt76_wed_reset_complete(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ complete(&dev->mmio.wed_reset_complete);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_reset_complete);
+
+int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct mt76_phy *phy = hw->priv;
+ struct mtk_wed_device *wed = &phy->dev->mmio.wed;
+
+ if (!mtk_wed_device_active(wed))
+ return -EOPNOTSUPP;
+
+ return mtk_wed_device_setup_tc(wed, netdev, type, type_data);
+}
+EXPORT_SYMBOL_GPL(mt76_wed_net_setup_tc);
+
+void mt76_wed_dma_reset(struct mt76_dev *dev)
+{
+ struct mt76_mmio *mmio = &dev->mmio;
+
+ if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
+ return;
+
+ complete(&mmio->wed_reset);
+
+ if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
+ dev_err(dev->dev, "wed reset complete timeout\n");
+}
+EXPORT_SYMBOL_GPL(mt76_wed_dma_reset);
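
mt76_wed_dma_reset() above pairs a complete() on wed_reset with a timed wait on wed_reset_complete. A sketch of that two-completion rendezvous, names hypothetical:

	#include <linux/completion.h>
	#include <linux/device.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	struct reset_sync {
		struct completion reached;	/* this side is quiescent */
		struct completion acked;	/* peer finished its part */
	};

	static int reset_rendezvous(struct device *dev, struct reset_sync *rs)
	{
		/* Tell the peer we are parked... */
		complete(&rs->reached);

		/* ...and give it bounded time to do its half of the reset. */
		if (!wait_for_completion_timeout(&rs->acked, 3 * HZ)) {
			dev_err(dev, "reset handshake timeout\n");
			return -ETIMEDOUT;
		}
		return 0;
	}
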
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index c8d332456a6b..a7330576486b 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -405,6 +405,10 @@ out:
}
const struct ieee80211_ops mt7601u_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7601u_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = mt7601u_start,
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index ad2509d8c99a..089102ed9ae5 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -356,7 +356,7 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
memcpy(vif->auth.ssid.ssid, sme->ssid, sme->ssid_len);
vif->auth.ssid.ssid_len = sme->ssid_len;
}
- vif->auth.key_mgmt_suite = cpu_to_be32(sme->crypto.akm_suites[0]);
+ vif->auth.key_mgmt_suite = sme->crypto.akm_suites[0];
ether_addr_copy(vif->auth.bssid, sme->bssid);
break;
@@ -1518,7 +1518,7 @@ static struct wilc_vif *wilc_get_vif_from_type(struct wilc *wl, int type)
{
struct wilc_vif *vif;
- list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ wilc_for_each_vif(wl, vif) {
if (vif->iftype == type)
return vif;
}
@@ -1609,7 +1609,6 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
cfg80211_unregister_netdevice(vif->ndev);
vif->monitor_flag = 0;
- wilc_set_operation_mode(vif, 0, 0, 0);
mutex_lock(&wl->vif_mutex);
list_del_rcu(&vif->list);
wl->vif_num--;
@@ -1804,15 +1803,24 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
INIT_LIST_HEAD(&wl->rxq_head.list);
INIT_LIST_HEAD(&wl->vif_list);
+ wl->hif_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ wiphy_name(wl->wiphy));
+ if (!wl->hif_workqueue) {
+ ret = -ENOMEM;
+ goto free_cfg;
+ }
vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE,
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto free_cfg;
+ goto free_hq;
}
return 0;
+free_hq:
+ destroy_workqueue(wl->hif_workqueue);
+
free_cfg:
wilc_wlan_cfg_deinit(wl);
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index 839f142663e8..f1085ccb7eed 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -107,7 +107,7 @@ static struct wilc_vif *wilc_get_vif_from_idx(struct wilc *wilc, int idx)
if (index < 0 || index >= WILC_NUM_CONCURRENT_IFC)
return NULL;
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (vif->idx == index)
return vif;
}
@@ -377,38 +377,49 @@ struct wilc_join_bss_param *
wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto)
{
- struct wilc_join_bss_param *param;
- struct ieee80211_p2p_noa_attr noa_attr;
- u8 rates_len = 0;
- const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
+ const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
+ struct ieee80211_p2p_noa_attr noa_attr;
+ const struct cfg80211_bss_ies *ies;
+ struct wilc_join_bss_param *param;
+ u8 rates_len = 0, ies_len;
int ret;
- const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies);
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
return NULL;
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ ies_data = kmemdup(ies->data, ies->len, GFP_ATOMIC);
+ if (!ies_data) {
+ rcu_read_unlock();
+ kfree(param);
+ return NULL;
+ }
+ ies_len = ies->len;
+ rcu_read_unlock();
+
param->beacon_period = cpu_to_le16(bss->beacon_interval);
param->cap_info = cpu_to_le16(bss->capability);
param->bss_type = WILC_FW_BSS_TYPE_INFRA;
param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq);
ether_addr_copy(param->bssid, bss->bssid);
- ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+ ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies_data, ies_len);
if (ssid_elm) {
if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN)
memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]);
}
- tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
+ tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies_data, ies_len);
if (tim_elm && tim_elm[1] >= 2)
param->dtim_period = tim_elm[3];
memset(param->p_suites, 0xFF, 3);
memset(param->akm_suites, 0xFF, 3);
- rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
+ rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies_data, ies_len);
if (rates_ie) {
rates_len = rates_ie[1];
if (rates_len > WILC_MAX_RATES_SUPPORTED)
@@ -419,7 +430,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
if (rates_len < WILC_MAX_RATES_SUPPORTED) {
supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
- ies->data, ies->len);
+ ies_data, ies_len);
if (supp_rates_ie) {
u8 ext_rates = supp_rates_ie[1];
@@ -434,11 +445,11 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
}
- ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
+ ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies_data, ies_len);
if (ht_ie)
param->ht_capable = true;
- ret = cfg80211_get_p2p_attr(ies->data, ies->len,
+ ret = cfg80211_get_p2p_attr(ies_data, ies_len,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *)&noa_attr, sizeof(noa_attr));
if (ret > 0) {
@@ -462,7 +473,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wmm_ie) {
struct ieee80211_wmm_param_ie *ie;
@@ -477,13 +488,13 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wpa_ie) {
param->mode_802_11i = 1;
param->rsn_found = true;
}
- rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
+ rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies_data, ies_len);
if (rsn_ie) {
int rsn_ie_len = sizeof(struct element) + rsn_ie[1];
int offset = 8;
@@ -517,6 +528,7 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
param->akm_suites[i] = crypto->akm_suites[i] & 0xFF;
}
+ kfree(ies_data);
return (void *)param;
}
@@ -1555,26 +1567,28 @@ int wilc_deinit(struct wilc_vif *vif)
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- int result;
- struct host_if_msg *msg;
- int id;
struct host_if_drv *hif_drv;
+ struct host_if_msg *msg;
struct wilc_vif *vif;
+ int srcu_idx;
+ int result;
+ int id;
id = get_unaligned_le32(&buffer[length - 4]);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
- return;
- hif_drv = vif->hif_drv;
+ goto out;
+ hif_drv = vif->hif_drv;
if (!hif_drv) {
netdev_err(vif->ndev, "driver not init[%p]\n", hif_drv);
- return;
+ goto out;
}
msg = wilc_alloc_work(vif, handle_rcvd_ntwrk_info, false);
if (IS_ERR(msg))
- return;
+ goto out;
msg->body.net_info.frame_len = get_unaligned_le16(&buffer[6]) - 1;
msg->body.net_info.rssi = buffer[8];
@@ -1583,7 +1597,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
GFP_KERNEL);
if (!msg->body.net_info.mgmt) {
kfree(msg);
- return;
+ goto out;
}
result = wilc_enqueue_work(msg);
@@ -1592,43 +1606,41 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg->body.net_info.mgmt);
kfree(msg);
}
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- int result;
- struct host_if_msg *msg;
- int id;
struct host_if_drv *hif_drv;
+ struct host_if_msg *msg;
struct wilc_vif *vif;
+ int srcu_idx;
+ int result;
+ int id;
mutex_lock(&wilc->deinit_lock);
id = get_unaligned_le32(&buffer[length - 4]);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
- if (!vif) {
- mutex_unlock(&wilc->deinit_lock);
- return;
- }
+ if (!vif)
+ goto out;
hif_drv = vif->hif_drv;
- if (!hif_drv) {
- mutex_unlock(&wilc->deinit_lock);
- return;
- }
+ if (!hif_drv)
+ goto out;
if (!hif_drv->conn_info.conn_result) {
netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__);
- mutex_unlock(&wilc->deinit_lock);
- return;
+ goto out;
}
msg = wilc_alloc_work(vif, handle_rcvd_gnrl_async_info, false);
- if (IS_ERR(msg)) {
- mutex_unlock(&wilc->deinit_lock);
- return;
- }
+ if (IS_ERR(msg))
+ goto out;
msg->body.mac_info.status = buffer[7];
result = wilc_enqueue_work(msg);
@@ -1636,32 +1648,36 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__);
kfree(msg);
}
-
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
mutex_unlock(&wilc->deinit_lock);
}
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
{
- int result;
- int id;
struct host_if_drv *hif_drv;
struct wilc_vif *vif;
+ int srcu_idx;
+ int result;
+ int id;
id = get_unaligned_le32(&buffer[length - 4]);
+ srcu_idx = srcu_read_lock(&wilc->srcu);
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif)
- return;
- hif_drv = vif->hif_drv;
+ goto out;
- if (!hif_drv)
- return;
+ hif_drv = vif->hif_drv;
+ if (!hif_drv)
+ goto out;
if (hif_drv->usr_scan_req.scan_result) {
struct host_if_msg *msg;
msg = wilc_alloc_work(vif, handle_scan_complete, false);
if (IS_ERR(msg))
- return;
+ goto out;
result = wilc_enqueue_work(msg);
if (result) {
@@ -1670,6 +1686,8 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
kfree(msg);
}
}
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan,
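
wilc_parse_join_bss_param() now snapshots the RCU-protected IE blob with kmemdup(..., GFP_ATOMIC) inside rcu_read_lock() and parses the copy afterwards, instead of dereferencing bss->ies without the read lock held. A sketch of the copy-out pattern, with hypothetical types:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct blob {
		size_t len;
		u8 data[];
	};

	/* GFP_ATOMIC because the allocation happens under rcu_read_lock();
	 * the returned copy outlives the read-side critical section. */
	static u8 *blob_dup(struct blob __rcu **slot, size_t *lenp)
	{
		const struct blob *b;
		u8 *copy = NULL;

		rcu_read_lock();
		b = rcu_dereference(*slot);
		if (b) {
			copy = kmemdup(b->data, b->len, GFP_ATOMIC);
			if (copy)
				*lenp = b->len;
		}
		rcu_read_unlock();

		return copy;	/* caller kfree()s the copy */
	}
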
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 91d71e0f7ef2..710e29bea560 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -96,7 +96,7 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
struct wilc_vif *vif;
struct ieee80211_hdr *h = (struct ieee80211_hdr *)mac_header;
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (vif->iftype == WILC_STATION_MODE)
if (ether_addr_equal_unaligned(h->addr2, vif->bssid)) {
ndev = vif->ndev;
@@ -132,7 +132,7 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (!is_zero_ether_addr(vif->bssid))
ret_val++;
}
@@ -140,6 +140,19 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
return ret_val;
}
+static void wilc_wake_tx_queues(struct wilc *wl)
+{
+ int srcu_idx;
+ struct wilc_vif *ifc;
+
+ srcu_idx = srcu_read_lock(&wl->srcu);
+ wilc_for_each_vif(wl, ifc) {
+ if (ifc->mac_opened && netif_queue_stopped(ifc->ndev))
+ netif_wake_queue(ifc->ndev);
+ }
+ srcu_read_unlock(&wl->srcu, srcu_idx);
+}
+
static int wilc_txq_task(void *vp)
{
int ret;
@@ -160,17 +173,7 @@ static int wilc_txq_task(void *vp)
do {
ret = wilc_wlan_handle_txq(wl, &txq_count);
if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
- int srcu_idx;
- struct wilc_vif *ifc;
-
- srcu_idx = srcu_read_lock(&wl->srcu);
- list_for_each_entry_rcu(ifc, &wl->vif_list,
- list) {
- if (ifc->mac_opened &&
- netif_queue_stopped(ifc->ndev))
- netif_wake_queue(ifc->ndev);
- }
- srcu_read_unlock(&wl->srcu, srcu_idx);
+ wilc_wake_tx_queues(wl);
}
if (ret != WILC_VMM_ENTRY_FULL_RETRY)
break;
@@ -284,7 +287,7 @@ static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif)
if (!wilc_wlan_cfg_set(vif, 0, WID_11G_OPERATING_MODE, &b, 1, 0, 0))
goto fail;
- b = WILC_FW_PREAMBLE_SHORT;
+ b = WILC_FW_PREAMBLE_AUTO;
if (!wilc_wlan_cfg_set(vif, 0, WID_PREAMBLE, &b, 1, 0, 0))
goto fail;
@@ -416,7 +419,7 @@ static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif)
b = 1;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_IMMEDIATE_BA_ENABLED, &b, 1,
- 1, 1))
+ 1, 0))
goto fail;
return 0;
@@ -665,7 +668,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
/* Verify MAC Address is not already in use: */
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, tmp_vif) {
wilc_get_mac_address(tmp_vif, mac_addr);
if (ether_addr_equal(addr->sa_data, mac_addr)) {
if (vif != tmp_vif) {
@@ -768,7 +771,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
if (vif->mac_opened)
netif_stop_queue(vif->ndev);
}
@@ -811,19 +814,21 @@ static int wilc_mac_close(struct net_device *ndev)
void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
u32 pkt_offset)
{
- unsigned int frame_len = 0;
- int stats;
unsigned char *buff_to_send = NULL;
- struct sk_buff *skb;
struct net_device *wilc_netdev;
+ unsigned int frame_len = 0;
struct wilc_vif *vif;
+ struct sk_buff *skb;
+ int srcu_idx;
+ int stats;
if (!wilc)
return;
+ srcu_idx = srcu_read_lock(&wilc->srcu);
wilc_netdev = get_if_handler(wilc, buff);
if (!wilc_netdev)
- return;
+ goto out;
buff += pkt_offset;
vif = netdev_priv(wilc_netdev);
@@ -834,7 +839,7 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
skb = dev_alloc_skb(frame_len);
if (!skb)
- return;
+ goto out;
skb->dev = wilc_netdev;
@@ -847,6 +852,8 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
stats = netif_rx(skb);
netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats);
}
+out:
+ srcu_read_unlock(&wilc->srcu, srcu_idx);
}
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
@@ -855,7 +862,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
struct wilc_vif *vif;
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ wilc_for_each_vif(wilc, vif) {
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff;
u16 type = le16_to_cpup((__le16 *)buff);
u32 type_bit = BIT(type >> 4);
@@ -890,8 +897,7 @@ static const struct net_device_ops wilc_netdev_ops = {
void wilc_netdev_cleanup(struct wilc *wilc)
{
- struct wilc_vif *vif;
- int srcu_idx, ifc_cnt = 0;
+ struct wilc_vif *vif, *vif_tmp;
if (!wilc)
return;
@@ -901,32 +907,19 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc->firmware = NULL;
}
- srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ list_for_each_entry_safe(vif, vif_tmp, &wilc->vif_list, list) {
+ mutex_lock(&wilc->vif_mutex);
+ list_del_rcu(&vif->list);
+ wilc->vif_num--;
+ mutex_unlock(&wilc->vif_mutex);
+ synchronize_srcu(&wilc->srcu);
if (vif->ndev)
unregister_netdev(vif->ndev);
}
- srcu_read_unlock(&wilc->srcu, srcu_idx);
wilc_wfi_deinit_mon_interface(wilc, false);
destroy_workqueue(wilc->hif_workqueue);
- while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) {
- mutex_lock(&wilc->vif_mutex);
- if (wilc->vif_num <= 0) {
- mutex_unlock(&wilc->vif_mutex);
- break;
- }
- vif = wilc_get_wl_to_vif(wilc);
- if (!IS_ERR(vif))
- list_del_rcu(&vif->list);
-
- wilc->vif_num--;
- mutex_unlock(&wilc->vif_mutex);
- synchronize_srcu(&wilc->srcu);
- ifc_cnt++;
- }
-
wilc_wlan_cfg_deinit(wilc);
wlan_deinit_locks(wilc);
wiphy_unregister(wilc->wiphy);
@@ -941,7 +934,7 @@ static u8 wilc_get_available_idx(struct wilc *wl)
int srcu_idx;
srcu_idx = srcu_read_lock(&wl->srcu);
- list_for_each_entry_rcu(vif, &wl->vif_list, list) {
+ wilc_for_each_vif(wl, vif) {
if (vif->idx == 0)
idx = 1;
else
@@ -989,13 +982,6 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
goto error;
}
- wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM,
- ndev->name);
- if (!wl->hif_workqueue) {
- ret = -ENOMEM;
- goto unregister_netdev;
- }
-
ndev->needs_free_netdev = true;
vif->iftype = vif_type;
vif->idx = wilc_get_available_idx(wl);
@@ -1008,15 +994,15 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
return vif;
-unregister_netdev:
+error:
if (rtnl_locked)
cfg80211_unregister_netdevice(ndev);
else
unregister_netdev(ndev);
- error:
free_netdev(ndev);
return ERR_PTR(ret);
}
+MODULE_DESCRIPTION("Atmel WILC1000 core wireless driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(WILC1000_FW(WILC1000_API_VER));
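
The wilc_netdev_cleanup() rewrite above settles on the canonical teardown order for an SRCU-protected list: unlink each entry under the writer mutex, synchronize_srcu(), and only then release the object. A sketch with hypothetical types:

	#include <linux/list.h>
	#include <linux/rculist.h>
	#include <linux/srcu.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct item {
		struct list_head list;
	};

	static void remove_all(struct mutex *lock, struct srcu_struct *srcu,
			       struct list_head *head)
	{
		struct item *it, *tmp;

		list_for_each_entry_safe(it, tmp, head, list) {
			mutex_lock(lock);
			list_del_rcu(&it->list);
			mutex_unlock(lock);
			synchronize_srcu(srcu);	/* no reader still sees 'it' */
			kfree(it);
		}
	}
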
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index aafe3dc44ac6..5937d6d45695 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -13,6 +13,7 @@
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/gpio/consumer.h>
+#include <linux/rculist.h>
#include "hif.h"
#include "wlan.h"
@@ -29,6 +30,11 @@
#define TX_BACKOFF_WEIGHT_MS 1
+#define wilc_for_each_vif(w, v) \
+ struct wilc *_w = w; \
+ list_for_each_entry_srcu(v, &_w->vif_list, list, \
+ srcu_read_lock_held(&_w->srcu))
+
struct wilc_wfi_stats {
unsigned long rx_packets;
unsigned long tx_packets;
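
wilc_for_each_vif() bundles list_for_each_entry_srcu() with the lockdep expression for the wilc SRCU domain; callers still open the read-side section themselves. A sketch of a full read side, types hypothetical:

	#include <linux/srcu.h>
	#include <linux/rculist.h>

	struct item {
		struct list_head list;
		int id;
	};

	/* SRCU rather than plain RCU, so readers may sleep inside. */
	static bool find_id(struct srcu_struct *srcu, struct list_head *head,
			    int id)
	{
		struct item *it;
		bool found = false;
		int idx;

		idx = srcu_read_lock(srcu);
		list_for_each_entry_srcu(it, head, list,
					 srcu_read_lock_held(srcu)) {
			if (it->id == id) {
				found = true;
				break;
			}
		}
		srcu_read_unlock(srcu, idx);

		return found;
	}
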
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 0d13e3e46e98..d6d394693090 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -984,4 +984,5 @@ static struct sdio_driver wilc_sdio_driver = {
module_driver(wilc_sdio_driver,
sdio_register_driver,
sdio_unregister_driver);
+MODULE_DESCRIPTION("Atmel WILC1000 SDIO wireless driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 77b4cdff73c3..61c3572ce321 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(enable_crc16,
#define WILC_SPI_RSP_HDR_EXTRA_DATA 8
struct wilc_spi {
- bool isinit; /* true if SPI protocol has been configured */
+ bool isinit; /* true if wilc_spi_init was successful */
bool probing_crc; /* true if we're probing chip's CRC config */
bool crc7_enabled; /* true if crc7 is currently enabled */
bool crc16_enabled; /* true if crc16 is currently enabled */
@@ -55,6 +55,8 @@ struct wilc_spi {
static const struct wilc_hif_func wilc_hif_spi;
static int wilc_spi_reset(struct wilc *wilc);
+static int wilc_spi_configure_bus_protocol(struct wilc *wilc);
+static int wilc_validate_chipid(struct wilc *wilc);
/********************************************
*
@@ -192,11 +194,11 @@ static void wilc_wlan_power(struct wilc *wilc, bool on)
/* assert ENABLE: */
gpiod_set_value(gpios->enable, 1);
mdelay(5);
- /* assert RESET: */
- gpiod_set_value(gpios->reset, 1);
- } else {
/* deassert RESET: */
gpiod_set_value(gpios->reset, 0);
+ } else {
+ /* assert RESET: */
+ gpiod_set_value(gpios->reset, 1);
/* deassert ENABLE: */
gpiod_set_value(gpios->enable, 0);
}
@@ -232,8 +234,27 @@ static int wilc_bus_probe(struct spi_device *spi)
}
clk_prepare_enable(wilc->rtc_clk);
+ dev_info(&spi->dev, "Selected CRC config: crc7=%s, crc16=%s\n",
+ enable_crc7 ? "on" : "off", enable_crc16 ? "on" : "off");
+
+ /* we need power to configure the bus protocol and to read the chip id: */
+
+ wilc_wlan_power(wilc, true);
+
+ ret = wilc_spi_configure_bus_protocol(wilc);
+ if (ret)
+ goto power_down;
+
+ ret = wilc_validate_chipid(wilc);
+ if (ret)
+ goto power_down;
+
+ wilc_wlan_power(wilc, false);
return 0;
+power_down:
+ clk_disable_unprepare(wilc->rtc_clk);
+ wilc_wlan_power(wilc, false);
netdev_cleanup:
wilc_netdev_cleanup(wilc);
free:
@@ -273,6 +294,7 @@ static struct spi_driver wilc_spi_driver = {
.remove = wilc_bus_remove,
};
module_spi_driver(wilc_spi_driver);
+MODULE_DESCRIPTION("Atmel WILC1000 SPI wireless driver");
MODULE_LICENSE("GPL");
static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
@@ -300,7 +322,6 @@ static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
- msg.spi = spi;
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
@@ -343,7 +364,6 @@ static int wilc_spi_rx(struct wilc *wilc, u8 *rb, u32 rlen)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
- msg.spi = spi;
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
@@ -381,8 +401,6 @@ static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen)
memset(&msg, 0, sizeof(msg));
spi_message_init(&msg);
- msg.spi = spi;
-
spi_message_add_tail(&tr, &msg);
ret = spi_sync(spi, &msg);
if (ret < 0)
@@ -476,7 +494,7 @@ static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz)
********************************************/
static u8 wilc_get_crc7(u8 *buffer, u32 len)
{
- return crc7_be(0xfe, buffer, len);
+ return crc7_be(0xfe, buffer, len) | 0x01;
}
static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
@@ -1105,26 +1123,34 @@ static int wilc_spi_deinit(struct wilc *wilc)
static int wilc_spi_init(struct wilc *wilc, bool resume)
{
- struct spi_device *spi = to_spi_device(wilc->dev);
struct wilc_spi *spi_priv = wilc->bus_data;
- u32 reg;
- u32 chipid;
- int ret, i;
+ int ret;
if (spi_priv->isinit) {
/* Confirm we can read chipid register without error: */
- ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid);
- if (ret == 0)
+ if (wilc_validate_chipid(wilc) == 0)
return 0;
-
- dev_err(&spi->dev, "Fail cmd read chip id...\n");
}
wilc_wlan_power(wilc, true);
- /*
- * configure protocol
- */
+ ret = wilc_spi_configure_bus_protocol(wilc);
+ if (ret) {
+ wilc_wlan_power(wilc, false);
+ return ret;
+ }
+
+ spi_priv->isinit = true;
+
+ return 0;
+}
+
+static int wilc_spi_configure_bus_protocol(struct wilc *wilc)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ u32 reg;
+ int ret, i;
/*
* Infer the CRC settings that are currently in effect. This
@@ -1176,6 +1202,15 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
spi_priv->probing_crc = false;
+ return 0;
+}
+
+static int wilc_validate_chipid(struct wilc *wilc)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ u32 chipid;
+ int ret;
+
/*
* make sure we can read the chip id without a protocol error
*/
@@ -1184,9 +1219,10 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
dev_err(&spi->dev, "Fail cmd read chip id...\n");
return ret;
}
-
- spi_priv->isinit = true;
-
+ if (!is_wilc1000(chipid)) {
+ dev_err(&spi->dev, "Unknown chip id 0x%x\n", chipid);
+ return -ENODEV;
+ }
return 0;
}
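
The wilc_get_crc7() change above ORs in 0x01: crc7_be() returns the 7-bit CRC left-aligned in the byte, and SD/MMC-style command framing expects bit 0 set as an end bit. (The framing rationale is an inference from the common SPI command format, not from the WILC datasheet.) Minimal sketch:

	#include <linux/crc7.h>

	/* CRC trailer byte: 7-bit CRC in bits 7..1, end bit in bit 0. */
	static u8 cmd_crc_byte(const u8 *frame, size_t len)
	{
		return crc7_be(0xfe, frame, len) | 0x01;
	}
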
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 9eb115c79c90..a9e872a7b2c3 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -12,11 +12,6 @@
#define WAKE_UP_TRIAL_RETRY 10000
-static inline bool is_wilc1000(u32 id)
-{
- return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID;
-}
-
static inline void acquire_bus(struct wilc *wilc, enum bus_acquire acquire)
{
mutex_lock(&wilc->hif_cs);
@@ -730,7 +725,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
mutex_lock(&wilc->txq_add_to_head_cs);
srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list)
+ wilc_for_each_vif(wilc, vif)
wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev);
srcu_read_unlock(&wilc->srcu, srcu_idx);
@@ -1198,27 +1193,32 @@ int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif)
acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
- if (ret) {
- netdev_err(vif->ndev, "Error while reading reg\n");
+ ret = wilc->hif_func->hif_read_reg(wilc, GLOBAL_MODE_CONTROL, &reg);
+ if (ret)
goto release;
- }
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
- (reg | WILC_ABORT_REQ_BIT));
- if (ret) {
- netdev_err(vif->ndev, "Error while writing reg\n");
+ reg &= ~WILC_GLOBAL_MODE_ENABLE_WIFI;
+ ret = wilc->hif_func->hif_write_reg(wilc, GLOBAL_MODE_CONTROL, reg);
+ if (ret)
goto release;
- }
- ret = wilc->hif_func->hif_read_reg(wilc, WILC_FW_HOST_COMM, &reg);
+ ret = wilc->hif_func->hif_read_reg(wilc, PWR_SEQ_MISC_CTRL, &reg);
+ if (ret)
+ goto release;
+
+ reg &= ~WILC_PWR_SEQ_ENABLE_WIFI_SLEEP;
+ ret = wilc->hif_func->hif_write_reg(wilc, PWR_SEQ_MISC_CTRL, reg);
+ if (ret)
+ goto release;
+
+ ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg);
if (ret) {
netdev_err(vif->ndev, "Error while reading reg\n");
goto release;
}
- reg = BIT(0);
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_FW_HOST_COMM, reg);
+ ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0,
+ (reg | WILC_ABORT_REQ_BIT));
if (ret) {
netdev_err(vif->ndev, "Error while writing reg\n");
goto release;
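
The rewritten wilc_wlan_stop() sequence above repeats one read-modify-write per register: clear WILC_GLOBAL_MODE_ENABLE_WIFI in GLOBAL_MODE_CONTROL, then WILC_PWR_SEQ_ENABLE_WIFI_SLEEP in PWR_SEQ_MISC_CTRL, before raising the abort request. A hedged sketch of that pattern; reg_read()/reg_write() are stand-ins for the driver's hif_read_reg()/hif_write_reg() bus accessors:

	#include <stdint.h>

	int reg_read(uint32_t addr, uint32_t *val);	/* assumed bus accessor */
	int reg_write(uint32_t addr, uint32_t val);	/* assumed bus accessor */

	static int reg_clear_bits(uint32_t addr, uint32_t mask)
	{
		uint32_t reg;
		int ret;

		ret = reg_read(addr, &reg);
		if (ret)
			return ret;	/* propagate bus errors, as the diff does */

		reg &= ~mask;
		return reg_write(addr, reg);
	}
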
@@ -1410,7 +1410,7 @@ static int init_chip(struct net_device *dev)
struct wilc_vif *vif = netdev_priv(dev);
struct wilc *wilc = vif->wilc;
- acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
chipid = wilc_get_chipid(wilc, true);
@@ -1440,7 +1440,7 @@ static int init_chip(struct net_device *dev)
}
release:
- release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
return ret;
}
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index a72cd5cac81d..54643d8fef04 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -156,6 +156,12 @@
#define WILC_GP_REG_0 0x149c
#define WILC_GP_REG_1 0x14a0
+#define GLOBAL_MODE_CONTROL 0x1614
+#define PWR_SEQ_MISC_CTRL 0x3008
+
+#define WILC_GLOBAL_MODE_ENABLE_WIFI BIT(0)
+#define WILC_PWR_SEQ_ENABLE_WIFI_SLEEP BIT(28)
+
#define WILC_HAVE_SDIO_IRQ_GPIO BIT(0)
#define WILC_HAVE_USE_PMU BIT(1)
#define WILC_HAVE_SLEEP_CLK_SRC_RTC BIT(2)
@@ -403,6 +409,11 @@ struct wilc_cfg_rsp {
struct wilc_vif;
+static inline bool is_wilc1000(u32 id)
+{
+ return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID;
+}
+
int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
u32 buffer_size);
int wilc_wlan_start(struct wilc *wilc);
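
The relocated is_wilc1000() helper masks off the chip-revision bits before comparing against the WILC1000 base id, so any silicon revision of the same part passes. An illustrative standalone version (the mask and id values below are placeholders, not the real WILC constants):

	#include <stdbool.h>
	#include <stdint.h>

	#define CHIP_REV_FIELD	0x00000fffu	/* placeholder revision mask */
	#define CHIP_BASE_ID	0x00100000u	/* placeholder base id */

	static inline bool is_expected_chip(uint32_t id)
	{
		/* ignore the revision field, compare the rest */
		return (id & ~CHIP_REV_FIELD) == CHIP_BASE_ID;
	}
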
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
index 506d2f31efb5..641f847d47ab 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -7,7 +7,6 @@
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/usb.h>
-#include <linux/gpio.h>
#include <linux/jiffies.h>
#include <net/ieee80211_radiotap.h>
@@ -685,6 +684,10 @@ static int plfxlc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
}
static const struct ieee80211_ops plfxlc_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = plfxlc_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = plfxlc_op_start,
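
The four .add_chanctx/.remove_chanctx/.change_chanctx/.switch_vif_chanctx lines recur in every ieee80211_ops structure touched below: drivers without native channel-context support now opt in to mac80211's emulation explicitly rather than leaving the hooks NULL. The pattern, sketched for a hypothetical driver foo:

	#include <net/mac80211.h>

	static const struct ieee80211_ops foo_ops = {
		/* let mac80211 emulate channel contexts for this driver */
		.add_chanctx = ieee80211_emulate_add_chanctx,
		.remove_chanctx = ieee80211_emulate_remove_chanctx,
		.change_chanctx = ieee80211_emulate_change_chanctx,
		.switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
		/* ...the driver's own callbacks follow... */
	};
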
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 3b283e93a13e..76b07db284f8 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -478,7 +478,7 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
continue;
wiphy_lock(priv_to_wiphy(vif->mac));
- cfg80211_ch_switch_notify(vif->netdev, &chandef, 0, 0);
+ cfg80211_ch_switch_notify(vif->netdev, &chandef, 0);
wiphy_unlock(priv_to_wiphy(vif->mac));
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index 13dd672b825e..42e21e9f303b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1705,6 +1705,10 @@ static int rt2400pci_tx_last_beacon(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops rt2400pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index ecddda4c471e..36ddc5a69fa4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -2003,6 +2003,10 @@ static int rt2500pci_tx_last_beacon(struct ieee80211_hw *hw)
}
static const struct ieee80211_ops rt2500pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
index 13fdcff0ad66..09923765e2db 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c
@@ -1794,6 +1794,10 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
}
static const struct ieee80211_ops rt2500usb_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index aaf31857ae1e..3bb81bcff0ac 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -10946,13 +10946,13 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
/* Apparently the data is read from end to start */
reg = rt2800_register_read_lock(rt2x00dev, efuse_data3_reg);
/* The returned value is in CPU order, but eeprom is le */
- *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+ *(__le32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
reg = rt2800_register_read_lock(rt2x00dev, efuse_data2_reg);
- *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
+ *(__le32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
reg = rt2800_register_read_lock(rt2x00dev, efuse_data1_reg);
- *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
+ *(__le32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
reg = rt2800_register_read_lock(rt2x00dev, efuse_data0_reg);
- *(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
+ *(__le32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
mutex_unlock(&rt2x00dev->csr_mutex);
}
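
The rt2800 efuse change is annotation-only: the eeprom buffer holds little-endian words, so typing the store target as __le32 lets sparse verify that the cpu_to_le32() conversion is present; the generated code is unchanged. A userspace analogue using <endian.h>:

	#include <endian.h>
	#include <stdint.h>
	#include <string.h>

	/* store a CPU-order word into a little-endian byte buffer */
	static void eeprom_store_le32(uint8_t *buf, uint32_t reg)
	{
		uint32_t le = htole32(reg);

		memcpy(buf, &le, sizeof(le));	/* memcpy also avoids alignment traps */
	}
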
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index dcb56f708a5f..14c45aba836f 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -287,6 +287,10 @@ static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
}
static const struct ieee80211_ops rt2800pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index 7118d4f9038d..701ba54bf3e5 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -132,6 +132,10 @@ static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev,
}
static const struct ieee80211_ops rt2800soc_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index b2a8e75a901b..160bef79acdb 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -629,6 +629,10 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
}
static const struct ieee80211_ops rt2800usb_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c b/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
index ad95f9eba301..1000fbfb94b8 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
@@ -197,10 +197,7 @@ void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
transfer += header_length;
} else {
skb_push(skb, iv_len + align);
- if (align < icv_len)
- skb_put(skb, icv_len - align);
- else if (align > icv_len)
- skb_trim(skb, rxdesc->size + iv_len + icv_len);
+ skb_put(skb, icv_len - align);
/* Move ieee80211 header */
memmove(skb->data + transfer,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 483723bf514b..d1cd5694e3c7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -2872,6 +2872,10 @@ static u64 rt61pci_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops rt61pci_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index dfa9d5213898..b79dda952a33 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -2291,6 +2291,10 @@ static u64 rt73usb_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops rt73usb_mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rt2x00mac_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rt2x00mac_start,
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index f6c25a52b69a..77b6cb7e1f6b 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -1607,6 +1607,10 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
}
static const struct ieee80211_ops rtl8180_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtl8180_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rtl8180_start,
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 04945f905d6d..78d99afa373d 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1377,6 +1377,10 @@ static int rtl8187_conf_tx(struct ieee80211_hw *dev,
static const struct ieee80211_ops rtl8187_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtl8187_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rtl8187_start,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 4695fb4e2d2d..fd92d23c43d9 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -6,6 +6,7 @@
*/
#include <asm/byteorder.h>
+#include <linux/average.h>
#define RTL8XXXU_DEBUG_REG_WRITE 0x01
#define RTL8XXXU_DEBUG_REG_READ 0x02
@@ -498,6 +499,7 @@ struct rtl8xxxu_txdesc40 {
#define DESC_RATE_ID_SHIFT 16
#define DESC_RATE_ID_MASK 0xf
#define TXDESC_NAVUSEHDR BIT(20)
+#define TXDESC_EN_DESC_ID BIT(21)
#define TXDESC_SEC_RC4 0x00400000
#define TXDESC_SEC_AES 0x00c00000
#define TXDESC_PKT_OFFSET_SHIFT 26
@@ -1774,6 +1776,8 @@ struct rtl8xxxu_cfo_tracking {
#define RTL8XXXU_HW_LED_CONTROL 2
#define RTL8XXXU_MAX_MAC_ID_NUM 128
#define RTL8XXXU_BC_MC_MACID 0
+#define RTL8XXXU_BC_MC_MACID1 1
+#define RTL8XXXU_MAX_SEC_CAM_NUM 64
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
@@ -1855,6 +1859,8 @@ struct rtl8xxxu_priv {
int next_mbox;
int nr_out_eps;
+ /* Ensure no stations are added or deleted while iterating */
+ struct mutex sta_mutex;
struct mutex h2c_mutex;
/* Protect the indirect register accesses of RTL8710BU. */
struct mutex syson_indirect_access_mutex;
@@ -1889,18 +1895,14 @@ struct rtl8xxxu_priv {
u8 pi_enabled:1;
u8 no_pape:1;
u8 int_buf[USB_INTR_CONTENT_LENGTH];
- u8 rssi_level;
DECLARE_BITMAP(tx_aggr_started, IEEE80211_NUM_TIDS);
DECLARE_BITMAP(tid_tx_operational, IEEE80211_NUM_TIDS);
- /*
- * Only one virtual interface permitted because only STA mode
- * is supported and no iface_combinations are provided.
- */
- struct ieee80211_vif *vif;
+
+ struct ieee80211_vif *vifs[2];
struct delayed_work ra_watchdog;
struct work_struct c2hcmd_work;
struct sk_buff_head c2hcmd_queue;
- struct work_struct update_beacon_work;
+ struct delayed_work update_beacon_work;
struct rtl8xxxu_btcoex bt_coex;
struct rtl8xxxu_ra_report ra_report;
struct rtl8xxxu_cfo_tracking cfo_tracking;
@@ -1910,13 +1912,23 @@ struct rtl8xxxu_priv {
char led_name[32];
struct led_classdev led_cdev;
DECLARE_BITMAP(mac_id_map, RTL8XXXU_MAX_MAC_ID_NUM);
+ DECLARE_BITMAP(cam_map, RTL8XXXU_MAX_SEC_CAM_NUM);
};
+DECLARE_EWMA(rssi, 10, 16);
+
struct rtl8xxxu_sta_info {
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
u8 macid;
+ struct ewma_rssi avg_rssi;
+ u8 rssi_level;
+};
+
+struct rtl8xxxu_vif {
+ int port_num;
+ u8 hw_key_idx;
};
struct rtl8xxxu_rx_urb {
@@ -1986,11 +1998,13 @@ struct rtl8xxxu_fileops {
u8 init_reg_rxfltmap:1;
u8 init_reg_pkt_life_time:1;
u8 init_reg_hmtfr:1;
+ u8 supports_concurrent:1;
u8 ampdu_max_time;
u8 ustime_tsf_edca;
u16 max_aggr_num;
u8 supports_ap:1;
u16 max_macid_num;
+ u16 max_sec_cam_num;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
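
DECLARE_EWMA(rssi, 10, 16) from <linux/average.h> generates struct ewma_rssi plus the ewma_rssi_init/add/read helpers, giving each station its own smoothed RSSI in place of the removed single priv->rssi_level. A hedged sketch of the generated API as the rx path below uses it (magnitudes are stored positive and negated on read):

	#include <linux/average.h>

	DECLARE_EWMA(rssi, 10, 16);	/* struct ewma_rssi + init/add/read */

	static int example_avg_rssi(void)
	{
		struct ewma_rssi avg;

		ewma_rssi_init(&avg);
		ewma_rssi_add(&avg, 40);	/* a -40 dBm sample */
		ewma_rssi_add(&avg, 60);	/* a -60 dBm sample */

		return -(int)ewma_rssi_read(&avg);	/* smoothed dBm */
	}
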
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
index 6d0f975f891b..afe9cc1b49dc 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
@@ -1699,7 +1699,7 @@ void rtl8188e_handle_ra_tx_report2(struct rtl8xxxu_priv *priv, struct sk_buff *s
/* We only use macid 0, so only the first item is relevant.
* AP mode will use more of them if it's ever implemented.
*/
- if (!priv->vif || priv->vif->type == NL80211_IFTYPE_STATION)
+ if (!priv->vifs[0] || priv->vifs[0]->type == NL80211_IFTYPE_STATION)
items = 1;
for (macid = 0; macid < items; macid++) {
@@ -1882,6 +1882,7 @@ struct rtl8xxxu_fileops rtl8188eu_fops = {
.has_tx_report = 1,
.init_reg_pkt_life_time = 1,
.gen2_thermal_meter = 1,
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
/*
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
index 1e1c8fa194cb..464216d007ce 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
@@ -1751,6 +1751,8 @@ struct rtl8xxxu_fileops rtl8188fu_fops = {
.max_aggr_num = 0x0c14,
.supports_ap = 1,
.max_macid_num = 16,
+ .max_sec_cam_num = 16,
+ .supports_concurrent = 1,
.adda_1t_init = 0x03c00014,
.adda_1t_path_on = 0x03c00014,
.trxff_boundary = 0x3f7f,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
index b30a9a513cb8..3ee7d8f87da6 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -613,6 +613,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
.adda_2t_path_on_a = 0x04db25a4,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 47bcaec6f2db..63b73ace27ec 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1769,6 +1769,7 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.needs_full_init = 1,
.supports_ap = 1,
.max_macid_num = 128,
+ .max_sec_cam_num = 64,
.adda_1t_init = 0x0fc01616,
.adda_1t_path_on = 0x0fc01616,
.adda_2t_path_on_a = 0x0fc01616,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
index 28e93835e05a..21e4204769d0 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
@@ -2014,26 +2014,40 @@ static int rtl8192fu_led_brightness_set(struct led_classdev *led_cdev,
struct rtl8xxxu_priv *priv = container_of(led_cdev,
struct rtl8xxxu_priv,
led_cdev);
- u16 ledcfg;
+ u32 ledcfg;
/* Values obtained by observing the USB traffic from the Windows driver. */
rtl8xxxu_write32(priv, REG_SW_GPIO_SHARE_CTRL_0, 0x20080);
rtl8xxxu_write32(priv, REG_SW_GPIO_SHARE_CTRL_1, 0x1b0000);
- ledcfg = rtl8xxxu_read16(priv, REG_LEDCFG0);
+ ledcfg = rtl8xxxu_read32(priv, REG_LEDCFG0);
+
+ /* Comfast CF-826F uses LED1. Asus USB-N13 C1 uses LED0. Set both. */
+
+ u32p_replace_bits(&ledcfg, LED_GPIO_ENABLE, LEDCFG0_LED2EN);
+ u32p_replace_bits(&ledcfg, LED_IO_MODE_OUTPUT, LEDCFG0_LED0_IO_MODE);
+ u32p_replace_bits(&ledcfg, LED_IO_MODE_OUTPUT, LEDCFG0_LED1_IO_MODE);
if (brightness == LED_OFF) {
- /* Value obtained like above. */
- ledcfg = BIT(1) | BIT(7);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED0CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED0SV);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED1CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED1SV);
} else if (brightness == LED_ON) {
- /* Value obtained like above. */
- ledcfg = BIT(1) | BIT(7) | BIT(11);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED0CM);
+ u32p_replace_bits(&ledcfg, LED_SW_ON, LEDCFG0_LED0SV);
+ u32p_replace_bits(&ledcfg, LED_MODE_SW_CTRL, LEDCFG0_LED1CM);
+ u32p_replace_bits(&ledcfg, LED_SW_ON, LEDCFG0_LED1SV);
} else if (brightness == RTL8XXXU_HW_LED_CONTROL) {
- /* Value obtained by brute force. */
- ledcfg = BIT(8) | BIT(9);
+ u32p_replace_bits(&ledcfg, LED_MODE_TX_OR_RX_EVENTS,
+ LEDCFG0_LED0CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED0SV);
+ u32p_replace_bits(&ledcfg, LED_MODE_TX_OR_RX_EVENTS,
+ LEDCFG0_LED1CM);
+ u32p_replace_bits(&ledcfg, LED_SW_OFF, LEDCFG0_LED1SV);
}
- rtl8xxxu_write16(priv, REG_LEDCFG0, ledcfg);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, ledcfg);
return 0;
}
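
The LED rework above replaces magic BIT() values with named fields manipulated by u32p_replace_bits() from <linux/bitfield.h>, which writes a value into the bits selected by a constant mask while leaving the rest of the register image intact. A minimal sketch with an illustrative field:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define LED0_MODE	GENMASK(2, 0)	/* illustrative 3-bit field */

	static u32 set_led0_mode(u32 ledcfg, u32 mode)
	{
		u32p_replace_bits(&ledcfg, mode, LED0_MODE);	/* field <- mode */
		return ledcfg;
	}
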
@@ -2081,6 +2095,7 @@ struct rtl8xxxu_fileops rtl8192fu_fops = {
.max_aggr_num = 0x1f1f,
.supports_ap = 1,
.max_macid_num = 128,
+ .max_sec_cam_num = 64,
.trxff_boundary = 0x3f3f,
.pbp_rx = PBP_PAGE_SIZE_256,
.pbp_tx = PBP_PAGE_SIZE_256,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
index 871b8cca8a18..46d57510e9fc 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
@@ -1877,6 +1877,7 @@ struct rtl8xxxu_fileops rtl8710bu_fops = {
.max_aggr_num = 0x0c14,
.supports_ap = 1,
.max_macid_num = 16,
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x03c00016,
.adda_1t_path_on = 0x03c00016,
.trxff_boundary = 0x3f7f,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
index 15a30e496221..ad1bb9377ca2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -510,6 +510,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .max_sec_cam_num = 32,
.adda_1t_init = 0x0b1b25a0,
.adda_1t_path_on = 0x0bdb25a0,
.adda_2t_path_on_a = 0x04db25a4,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 954369ed6226..9640c841d20a 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1744,6 +1744,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.max_aggr_num = 0x0c14,
.supports_ap = 1,
.max_macid_num = 128,
+ .max_sec_cam_num = 64,
.adda_1t_init = 0x01c00014,
.adda_1t_path_on = 0x01c00014,
.adda_2t_path_on_a = 0x01c00014,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 180907319e8c..4a49f8f9d80f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1633,33 +1633,41 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
}
static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
- enum nl80211_iftype linktype)
+ enum nl80211_iftype linktype, int port_num)
{
- u8 val8;
-
- val8 = rtl8xxxu_read8(priv, REG_MSR);
- val8 &= ~MSR_LINKTYPE_MASK;
+ u8 val8, type;
switch (linktype) {
case NL80211_IFTYPE_UNSPECIFIED:
- val8 |= MSR_LINKTYPE_NONE;
+ type = MSR_LINKTYPE_NONE;
break;
case NL80211_IFTYPE_ADHOC:
- val8 |= MSR_LINKTYPE_ADHOC;
+ type = MSR_LINKTYPE_ADHOC;
break;
case NL80211_IFTYPE_STATION:
- val8 |= MSR_LINKTYPE_STATION;
+ type = MSR_LINKTYPE_STATION;
break;
case NL80211_IFTYPE_AP:
- val8 |= MSR_LINKTYPE_AP;
+ type = MSR_LINKTYPE_AP;
break;
default:
- goto out;
+ return;
+ }
+
+ switch (port_num) {
+ case 0:
+ val8 = rtl8xxxu_read8(priv, REG_MSR) & 0x0c;
+ val8 |= type;
+ break;
+ case 1:
+ val8 = rtl8xxxu_read8(priv, REG_MSR) & 0x03;
+ val8 |= type << 2;
+ break;
+ default:
+ return;
}
rtl8xxxu_write8(priv, REG_MSR, val8);
-out:
- return;
}
static void
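
REG_MSR packs one 2-bit network-type field per port into a single byte: port 0 in bits 1:0 and port 1 in bits 3:2, which is why the switch above masks with 0x0c or 0x03 to preserve the other port before OR-ing the new type in. A standalone model of that packing:

	#include <stdint.h>

	static uint8_t msr_set_port_type(uint8_t msr, int port, uint8_t type)
	{
		if (port == 0)
			return (msr & 0x0c) | (type & 0x03);	/* keep port 1 bits */
		return (msr & 0x03) | ((type & 0x03) << 2);	/* keep port 0 bits */
	}
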
@@ -3572,27 +3580,47 @@ void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
}
-static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv)
+static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv, int port_num)
{
int i;
u16 reg;
- reg = REG_MACID;
+ switch (port_num) {
+ case 0:
+ reg = REG_MACID;
+ break;
+ case 1:
+ reg = REG_MACID1;
+ break;
+ default:
+ WARN_ONCE(1, "%s: invalid port_num\n", __func__);
+ return -EINVAL;
+ }
for (i = 0; i < ETH_ALEN; i++)
- rtl8xxxu_write8(priv, reg + i, priv->mac_addr[i]);
+ rtl8xxxu_write8(priv, reg + i, priv->vifs[port_num]->addr[i]);
return 0;
}
-static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid)
+static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid, int port_num)
{
int i;
u16 reg;
dev_dbg(&priv->udev->dev, "%s: (%pM)\n", __func__, bssid);
- reg = REG_BSSID;
+ switch (port_num) {
+ case 0:
+ reg = REG_BSSID;
+ break;
+ case 1:
+ reg = REG_BSSID1;
+ break;
+ default:
+ WARN_ONCE(1, "%s: invalid port_num\n", __func__);
+ return -EINVAL;
+ }
for (i = 0; i < ETH_ALEN; i++)
rtl8xxxu_write8(priv, reg + i, bssid[i]);
@@ -4025,10 +4053,13 @@ static inline u8 rtl8xxxu_get_macid(struct rtl8xxxu_priv *priv,
{
struct rtl8xxxu_sta_info *sta_info;
- if (!priv->vif || priv->vif->type == NL80211_IFTYPE_STATION || !sta)
+ if (!sta)
return 0;
sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ if (!sta_info)
+ return 0;
+
return sta_info->macid;
}
@@ -4235,9 +4266,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
}
- rtl8xxxu_set_mac(priv);
- rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION);
-
/*
* Configure initial WMAC settings
*/
@@ -4511,6 +4539,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8188e_ra_info_init_all(&priv->ra_info);
set_bit(RTL8XXXU_BC_MC_MACID, priv->mac_id_map);
+ set_bit(RTL8XXXU_BC_MC_MACID1, priv->mac_id_map);
exit:
return ret;
@@ -4530,8 +4559,10 @@ static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
* This is a bit of a hack - the lower bits of the cipher
* suite selector happens to match the cipher index in the CAM
*/
- addr = key->keyidx << CAM_CMD_KEY_SHIFT;
+ addr = key->hw_key_idx << CAM_CMD_KEY_SHIFT;
ctrl = (key->cipher & 0x0f) << 2 | key->keyidx | CAM_WRITE_VALID;
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ ctrl |= BIT(6);
for (j = 5; j >= 0; j--) {
switch (j) {
@@ -4574,7 +4605,7 @@ static int rtl8xxxu_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
{
struct rtl8xxxu_priv *priv = hw->priv;
- schedule_work(&priv->update_beacon_work);
+ schedule_delayed_work(&priv->update_beacon_work, 0);
return 0;
}
@@ -4839,10 +4870,9 @@ static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
dev_dbg(&priv->udev->dev, "%s: rates %08x\n", __func__, rate_cfg);
- while (rate_cfg) {
- rate_cfg = (rate_cfg >> 1);
- rate_idx++;
- }
+ if (rate_cfg)
+ rate_idx = __fls(rate_cfg);
+
rtl8xxxu_write8(priv, REG_INIRTS_RATE_SEL, rate_idx);
}
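
The removed shift loop in rtl8xxxu_set_basic_rates() counted how many shifts it takes to empty rate_cfg, i.e. fls(), which (assuming rate_idx starts at zero) is one past the 0-based index of the highest set bit that __fls() returns, so the replacement also nudges the written index down by one. A userspace demo of the two computations:

	#include <stdio.h>

	static int loop_fls(unsigned int v)	/* the removed loop */
	{
		int n = 0;

		while (v) {
			v >>= 1;
			n++;
		}
		return n;
	}

	int main(void)
	{
		unsigned int rate_cfg = 0x150;	/* illustrative rate mask */

		/* __fls() modelled with a gcc/clang builtin in userspace */
		printf("loop: %d, __fls: %d\n", loop_fls(rate_cfg),
		       31 - __builtin_clz(rate_cfg));
		return 0;	/* prints "loop: 9, __fls: 8" */
	}
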
@@ -4888,14 +4918,20 @@ static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time)
u8 aifs, aifsn, sifs;
int i;
- if (priv->vif) {
+ for (i = 0; i < ARRAY_SIZE(priv->vifs); i++) {
struct ieee80211_sta *sta;
+ if (!priv->vifs[i])
+ continue;
+
rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, priv->vif->bss_conf.bssid);
+ sta = ieee80211_find_sta(priv->vifs[i], priv->vifs[i]->bss_conf.bssid);
if (sta)
wireless_mode = rtl8xxxu_wireless_mode(priv->hw, sta);
rcu_read_unlock();
+
+ if (wireless_mode)
+ break;
}
if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ ||
@@ -4952,19 +4988,21 @@ static void
rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u64 changed)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
+ struct rtl8xxxu_sta_info *sta_info;
struct ieee80211_sta *sta;
struct rtl8xxxu_ra_report *rarpt;
+ u8 val8, macid;
u32 val32;
- u8 val8;
rarpt = &priv->ra_report;
if (changed & BSS_CHANGED_ASSOC) {
dev_dbg(dev, "Changed ASSOC: %i!\n", vif->cfg.assoc);
- rtl8xxxu_set_linktype(priv, vif->type);
+ rtl8xxxu_set_linktype(priv, vif->type, rtlvif->port_num);
if (vif->cfg.assoc) {
u32 ramask;
@@ -4980,6 +5018,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rcu_read_unlock();
goto error;
}
+ macid = rtl8xxxu_get_macid(priv, sta);
if (sta->deflink.ht_cap.ht_supported)
dev_info(dev, "%s: HT supported\n", __func__);
@@ -5000,19 +5039,20 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
bw = RATE_INFO_BW_40;
else
bw = RATE_INFO_BW_20;
+
+ sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ sta_info->rssi_level = RTL8XXXU_RATR_STA_INIT;
rcu_read_unlock();
rtl8xxxu_update_ra_report(rarpt, highest_rate, sgi, bw);
- priv->vif = vif;
- priv->rssi_level = RTL8XXXU_RATR_STA_INIT;
-
priv->fops->update_rate_mask(priv, ramask, 0, sgi,
- bw == RATE_INFO_BW_40, 0);
+ bw == RATE_INFO_BW_40, macid);
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
- rtl8xxxu_stop_tx_beacon(priv);
+ if (rtlvif->port_num == 0)
+ rtl8xxxu_stop_tx_beacon(priv);
/* joinbss sequence */
rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
@@ -5054,7 +5094,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & BSS_CHANGED_BSSID) {
dev_dbg(dev, "Changed BSSID!\n");
- rtl8xxxu_set_bssid(priv, bss_conf->bssid);
+ rtl8xxxu_set_bssid(priv, bss_conf->bssid, rtlvif->port_num);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
@@ -5070,7 +5110,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (changed & BSS_CHANGED_BEACON)
- schedule_work(&priv->update_beacon_work);
+ schedule_delayed_work(&priv->update_beacon_work, 0);
error:
return;
@@ -5079,11 +5119,12 @@ error:
static int rtl8xxxu_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
dev_dbg(dev, "Start AP mode\n");
- rtl8xxxu_set_bssid(priv, vif->bss_conf.bssid);
+ rtl8xxxu_set_bssid(priv, vif->bss_conf.bssid, rtlvif->port_num);
rtl8xxxu_write16(priv, REG_BCN_INTERVAL, vif->bss_conf.beacon_int);
priv->fops->report_connect(priv, RTL8XXXU_BC_MC_MACID, 0, true);
@@ -5509,13 +5550,14 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct rtl8xxxu_tx_urb *tx_urb;
struct ieee80211_sta *sta = NULL;
struct ieee80211_vif *vif = tx_info->control.vif;
+ struct rtl8xxxu_vif *rtlvif = vif ? (struct rtl8xxxu_vif *)vif->drv_priv : NULL;
struct device *dev = &priv->udev->dev;
u32 queue, rts_rate;
u16 pktlen = skb->len;
int tx_desc_size = priv->fops->tx_desc_size;
u8 macid;
int ret;
- bool ampdu_enable, sgi = false, short_preamble = false;
+ bool ampdu_enable, sgi = false, short_preamble = false, bmc = false;
if (skb_headroom(skb) < tx_desc_size) {
dev_warn(dev,
@@ -5557,10 +5599,14 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
tx_desc->txdw0 =
TXDESC_OWN | TXDESC_FIRST_SEGMENT | TXDESC_LAST_SEGMENT;
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
- is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
+ bmc = true;
+ }
+
tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
+ macid = rtl8xxxu_get_macid(priv, sta);
if (tx_info->control.hw_key) {
switch (tx_info->control.hw_key->cipher) {
@@ -5575,6 +5621,10 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
default:
break;
}
+ if (bmc && rtlvif && rtlvif->hw_key_idx != 0xff) {
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC_EN_DESC_ID);
+ macid = rtlvif->hw_key_idx;
+ }
}
/* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */
@@ -5618,7 +5668,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
else
rts_rate = 0;
- macid = rtl8xxxu_get_macid(priv, sta);
priv->fops->fill_txdesc(hw, hdr, tx_info, tx_desc, sgi, short_preamble,
ampdu_enable, rts_rate, macid);
@@ -5680,18 +5729,44 @@ static void rtl8xxxu_send_beacon_frame(struct ieee80211_hw *hw,
static void rtl8xxxu_update_beacon_work_callback(struct work_struct *work)
{
struct rtl8xxxu_priv *priv =
- container_of(work, struct rtl8xxxu_priv, update_beacon_work);
+ container_of(work, struct rtl8xxxu_priv, update_beacon_work.work);
struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_vif *vif = priv->vif;
+ struct ieee80211_vif *vif = priv->vifs[0];
if (!vif) {
WARN_ONCE(true, "no vif to update beacon\n");
return;
}
+ if (vif->bss_conf.csa_active) {
+ if (ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
+ ieee80211_csa_finish(vif, 0);
+ return;
+ }
+ schedule_delayed_work(&priv->update_beacon_work,
+ msecs_to_jiffies(vif->bss_conf.beacon_int));
+ }
rtl8xxxu_send_beacon_frame(hw, vif);
}
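
Converting update_beacon_work to a delayed work lets the beacon updater re-arm itself once per beacon interval while a channel-switch countdown is active, calling ieee80211_csa_finish() when mac80211 reports the countdown complete. The shape of that pattern for a hypothetical driver foo (foo_priv and foo_send_beacon are assumptions; the mac80211 calls are the ones used above):

	#include <net/mac80211.h>

	struct foo_priv {
		struct ieee80211_hw *hw;
		struct ieee80211_vif *vif;
		struct delayed_work beacon_work;
	};

	void foo_send_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif);

	static void foo_beacon_work(struct work_struct *work)
	{
		struct foo_priv *priv = container_of(work, struct foo_priv,
						     beacon_work.work);
		struct ieee80211_vif *vif = priv->vif;

		if (vif->bss_conf.csa_active) {
			if (ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
				ieee80211_csa_finish(vif, 0);	/* switch done */
				return;
			}
			/* try again after one more beacon interval */
			schedule_delayed_work(&priv->beacon_work,
				msecs_to_jiffies(vif->bss_conf.beacon_int));
		}
		foo_send_beacon(priv->hw, vif);
	}
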
+static inline bool rtl8xxxu_is_packet_match_bssid(struct rtl8xxxu_priv *priv,
+ struct ieee80211_hdr *hdr,
+ int port_num)
+{
+ return priv->vifs[port_num] &&
+ priv->vifs[port_num]->type == NL80211_IFTYPE_STATION &&
+ priv->vifs[port_num]->cfg.assoc &&
+ ether_addr_equal(priv->vifs[port_num]->bss_conf.bssid, hdr->addr2);
+}
+
+static inline bool rtl8xxxu_is_sta_sta(struct rtl8xxxu_priv *priv)
+{
+ return (priv->vifs[0] && priv->vifs[0]->cfg.assoc &&
+ priv->vifs[0]->type == NL80211_IFTYPE_STATION) &&
+ (priv->vifs[1] && priv->vifs[1]->cfg.assoc &&
+ priv->vifs[1]->type == NL80211_IFTYPE_STATION);
+}
+
void rtl8723au_rx_parse_phystats(struct rtl8xxxu_priv *priv,
struct ieee80211_rx_status *rx_status,
struct rtl8723au_phy_stats *phy_stats,
@@ -5708,12 +5783,11 @@ void rtl8723au_rx_parse_phystats(struct rtl8xxxu_priv *priv,
rx_status->signal = priv->fops->cck_rssi(priv, phy_stats);
} else {
bool parse_cfo = priv->fops->set_crystal_cap &&
- priv->vif &&
- priv->vif->type == NL80211_IFTYPE_STATION &&
- priv->vif->cfg.assoc &&
!crc_icv_err &&
!ieee80211_is_ctl(hdr->frame_control) &&
- ether_addr_equal(priv->vif->bss_conf.bssid, hdr->addr2);
+ !rtl8xxxu_is_sta_sta(priv) &&
+ (rtl8xxxu_is_packet_match_bssid(priv, hdr, 0) ||
+ rtl8xxxu_is_packet_match_bssid(priv, hdr, 1));
if (parse_cfo) {
priv->cfo_tracking.cfo_tail[0] = phy_stats->path_cfotail[0];
@@ -5748,12 +5822,11 @@ static void jaguar2_rx_parse_phystats_type1(struct rtl8xxxu_priv *priv,
bool crc_icv_err)
{
bool parse_cfo = priv->fops->set_crystal_cap &&
- priv->vif &&
- priv->vif->type == NL80211_IFTYPE_STATION &&
- priv->vif->cfg.assoc &&
!crc_icv_err &&
!ieee80211_is_ctl(hdr->frame_control) &&
- ether_addr_equal(priv->vif->bss_conf.bssid, hdr->addr2);
+ !rtl8xxxu_is_sta_sta(priv) &&
+ (rtl8xxxu_is_packet_match_bssid(priv, hdr, 0) ||
+ rtl8xxxu_is_packet_match_bssid(priv, hdr, 1));
u8 pwdb_max = 0;
int rx_path;
@@ -6029,18 +6102,20 @@ void rtl8723bu_update_bt_link_info(struct rtl8xxxu_priv *priv, u8 bt_info)
btcoex->bt_busy = false;
}
+static inline bool rtl8xxxu_is_assoc(struct rtl8xxxu_priv *priv)
+{
+ return (priv->vifs[0] && priv->vifs[0]->cfg.assoc) ||
+ (priv->vifs[1] && priv->vifs[1]->cfg.assoc);
+}
+
static
void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
{
- struct ieee80211_vif *vif;
struct rtl8xxxu_btcoex *btcoex;
- bool wifi_connected;
- vif = priv->vif;
btcoex = &priv->bt_coex;
- wifi_connected = (vif && vif->cfg.assoc);
- if (!wifi_connected) {
+ if (!rtl8xxxu_is_assoc(priv)) {
rtl8723bu_set_ps_tdma(priv, 0x8, 0x0, 0x0, 0x0, 0x0);
rtl8723bu_set_coex_with_type(priv, 0);
} else if (btcoex->has_sco || btcoex->has_hid || btcoex->has_a2dp) {
@@ -6058,15 +6133,11 @@ void rtl8723bu_handle_bt_inquiry(struct rtl8xxxu_priv *priv)
static
void rtl8723bu_handle_bt_info(struct rtl8xxxu_priv *priv)
{
- struct ieee80211_vif *vif;
struct rtl8xxxu_btcoex *btcoex;
- bool wifi_connected;
- vif = priv->vif;
btcoex = &priv->bt_coex;
- wifi_connected = (vif && vif->cfg.assoc);
- if (wifi_connected) {
+ if (rtl8xxxu_is_assoc(priv)) {
u32 val32 = 0;
u32 high_prio_tx = 0, high_prio_rx = 0;
@@ -6249,6 +6320,76 @@ static void rtl8188e_c2hcmd_callback(struct work_struct *work)
}
}
+#define rtl8xxxu_iterate_vifs_atomic(priv, iterator, data) \
+ ieee80211_iterate_active_interfaces_atomic((priv)->hw, \
+ IEEE80211_IFACE_ITER_NORMAL, iterator, data)
+
+struct rtl8xxxu_rx_update_rssi_data {
+ struct rtl8xxxu_priv *priv;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_rx_status *rx_status;
+ u8 *bssid;
+};
+
+static void rtl8xxxu_rx_update_rssi_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtl8xxxu_rx_update_rssi_data *iter_data = data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr = iter_data->hdr;
+ struct rtl8xxxu_priv *priv = iter_data->priv;
+ struct rtl8xxxu_sta_info *sta_info;
+ struct ieee80211_rx_status *rx_status = iter_data->rx_status;
+ u8 *bssid = iter_data->bssid;
+
+ if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
+ return;
+
+ if (!(ether_addr_equal(vif->addr, hdr->addr1) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ return;
+
+ sta = ieee80211_find_sta_by_ifaddr(priv->hw, hdr->addr2,
+ vif->addr);
+ if (!sta)
+ return;
+
+ sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ ewma_rssi_add(&sta_info->avg_rssi, -rx_status->signal);
+}
+
+static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
+{
+ __le16 fc = hdr->frame_control;
+ u8 *bssid;
+
+ if (ieee80211_has_tods(fc))
+ bssid = hdr->addr1;
+ else if (ieee80211_has_fromds(fc))
+ bssid = hdr->addr2;
+ else
+ bssid = hdr->addr3;
+
+ return bssid;
+}
+
+static void rtl8xxxu_rx_update_rssi(struct rtl8xxxu_priv *priv,
+ struct ieee80211_rx_status *rx_status,
+ struct ieee80211_hdr *hdr)
+{
+ struct rtl8xxxu_rx_update_rssi_data data = {};
+
+ if (ieee80211_is_ctl(hdr->frame_control))
+ return;
+
+ data.priv = priv;
+ data.hdr = hdr;
+ data.rx_status = rx_status;
+ data.bssid = get_hdr_bssid(hdr);
+
+ rtl8xxxu_iterate_vifs_atomic(priv, rtl8xxxu_rx_update_rssi_iter, &data);
+}
+
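
rtl8xxxu_rx_update_rssi() above uses the standard mac80211 idiom for walking interfaces from atomic context: pack everything the callback needs into a stack struct, hand it to ieee80211_iterate_active_interfaces_atomic(), and keep the callback non-sleeping since it runs under RCU. The generic shape, for a hypothetical driver foo:

	#include <linux/etherdevice.h>
	#include <net/mac80211.h>

	struct foo_priv {
		struct ieee80211_hw *hw;
	};

	struct foo_iter_data {
		struct foo_priv *priv;
		const u8 *bssid;
	};

	static void foo_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
	{
		struct foo_iter_data *iter = data;

		if (!ether_addr_equal(vif->bss_conf.bssid, iter->bssid))
			return;
		/* ...per-matching-vif work; atomic, must not sleep... */
	}

	static void foo_for_each_vif(struct foo_priv *priv, const u8 *bssid)
	{
		struct foo_iter_data data = { .priv = priv, .bssid = bssid };

		ieee80211_iterate_active_interfaces_atomic(priv->hw,
				IEEE80211_IFACE_ITER_NORMAL, foo_vif_iter, &data);
	}
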
int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
{
struct ieee80211_hw *hw = priv->hw;
@@ -6308,18 +6449,26 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
skb_queue_tail(&priv->c2hcmd_queue, skb);
schedule_work(&priv->c2hcmd_work);
} else {
+ struct ieee80211_hdr *hdr;
+
phy_stats = (struct rtl8723au_phy_stats *)skb->data;
skb_pull(skb, drvinfo_sz + desc_shift);
skb_trim(skb, pkt_len);
- if (rx_desc->phy_stats)
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (rx_desc->phy_stats) {
priv->fops->parse_phystats(
priv, rx_status, phy_stats,
rx_desc->rxmcs,
- (struct ieee80211_hdr *)skb->data,
+ hdr,
rx_desc->crc32 || rx_desc->icverr);
+ if (!rx_desc->crc32 && !rx_desc->icverr)
+ rtl8xxxu_rx_update_rssi(priv,
+ rx_status,
+ hdr);
+ }
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
@@ -6416,10 +6565,15 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
} else {
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- if (rx_desc->phy_stats)
+ if (rx_desc->phy_stats) {
priv->fops->parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs, hdr,
rx_desc->crc32 || rx_desc->icverr);
+ if (!rx_desc->crc32 && !rx_desc->icverr)
+ rtl8xxxu_rx_update_rssi(priv,
+ rx_status,
+ hdr);
+ }
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
@@ -6563,29 +6717,123 @@ error:
return ret;
}
+static void rtl8xxxu_switch_ports(struct rtl8xxxu_priv *priv)
+{
+ u8 macid[ETH_ALEN], bssid[ETH_ALEN], macid_1[ETH_ALEN], bssid_1[ETH_ALEN];
+ u8 msr, bcn_ctrl, bcn_ctrl_1, atimwnd[2], atimwnd_1[2];
+ struct rtl8xxxu_vif *rtlvif;
+ struct ieee80211_vif *vif;
+ u8 tsftr[8], tsftr_1[8];
+ int i;
+
+ msr = rtl8xxxu_read8(priv, REG_MSR);
+ bcn_ctrl = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+ bcn_ctrl_1 = rtl8xxxu_read8(priv, REG_BEACON_CTRL_1);
+
+ for (i = 0; i < ARRAY_SIZE(atimwnd); i++)
+ atimwnd[i] = rtl8xxxu_read8(priv, REG_ATIMWND + i);
+ for (i = 0; i < ARRAY_SIZE(atimwnd_1); i++)
+ atimwnd_1[i] = rtl8xxxu_read8(priv, REG_ATIMWND_1 + i);
+
+ for (i = 0; i < ARRAY_SIZE(tsftr); i++)
+ tsftr[i] = rtl8xxxu_read8(priv, REG_TSFTR + i);
+ for (i = 0; i < ARRAY_SIZE(tsftr); i++)
+ tsftr_1[i] = rtl8xxxu_read8(priv, REG_TSFTR1 + i);
+
+ for (i = 0; i < ARRAY_SIZE(macid); i++)
+ macid[i] = rtl8xxxu_read8(priv, REG_MACID + i);
+
+ for (i = 0; i < ARRAY_SIZE(bssid); i++)
+ bssid[i] = rtl8xxxu_read8(priv, REG_BSSID + i);
+
+ for (i = 0; i < ARRAY_SIZE(macid_1); i++)
+ macid_1[i] = rtl8xxxu_read8(priv, REG_MACID1 + i);
+
+ for (i = 0; i < ARRAY_SIZE(bssid_1); i++)
+ bssid_1[i] = rtl8xxxu_read8(priv, REG_BSSID1 + i);
+
+ /* disable beacon function, disable TSF update */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, (bcn_ctrl &
+ (~BEACON_FUNCTION_ENABLE)) | BEACON_DISABLE_TSF_UPDATE);
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, (bcn_ctrl_1 &
+ (~BEACON_FUNCTION_ENABLE)) | BEACON_DISABLE_TSF_UPDATE);
+
+ /* switch msr */
+ msr = (msr & 0xf0) | ((msr & 0x03) << 2) | ((msr & 0x0c) >> 2);
+ rtl8xxxu_write8(priv, REG_MSR, msr);
+
+ /* write port0 */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, bcn_ctrl_1 & ~BEACON_FUNCTION_ENABLE);
+ for (i = 0; i < ARRAY_SIZE(atimwnd_1); i++)
+ rtl8xxxu_write8(priv, REG_ATIMWND + i, atimwnd_1[i]);
+ for (i = 0; i < ARRAY_SIZE(tsftr_1); i++)
+ rtl8xxxu_write8(priv, REG_TSFTR + i, tsftr_1[i]);
+ for (i = 0; i < ARRAY_SIZE(macid_1); i++)
+ rtl8xxxu_write8(priv, REG_MACID + i, macid_1[i]);
+ for (i = 0; i < ARRAY_SIZE(bssid_1); i++)
+ rtl8xxxu_write8(priv, REG_BSSID + i, bssid_1[i]);
+
+ /* write port1 */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, bcn_ctrl & ~BEACON_FUNCTION_ENABLE);
+ for (i = 0; i < ARRAY_SIZE(atimwnd); i++)
+ rtl8xxxu_write8(priv, REG_ATIMWND_1 + i, atimwnd[i]);
+ for (i = 0; i < ARRAY_SIZE(tsftr); i++)
+ rtl8xxxu_write8(priv, REG_TSFTR1 + i, tsftr[i]);
+ for (i = 0; i < ARRAY_SIZE(macid); i++)
+ rtl8xxxu_write8(priv, REG_MACID1 + i, macid[i]);
+ for (i = 0; i < ARRAY_SIZE(bssid); i++)
+ rtl8xxxu_write8(priv, REG_BSSID1 + i, bssid[i]);
+
+ /* write bcn ctl */
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, bcn_ctrl_1);
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, bcn_ctrl);
+
+ vif = priv->vifs[0];
+ priv->vifs[0] = priv->vifs[1];
+ priv->vifs[1] = vif;
+
+ /* priv->vifs[0] is NULL here, based on how this function is currently
+ * called from rtl8xxxu_add_interface().
+ * If this function is ever used in a different scenario, check
+ * whether vifs[0] or vifs[1] can be NULL and, if necessary, add code
+ * to set port_num = 1.
+ */
+ rtlvif = (struct rtl8xxxu_vif *)priv->vifs[1]->drv_priv;
+ rtlvif->port_num = 1;
+}
+
static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
- int ret;
+ int port_num;
u8 val8;
- if (!priv->vif)
- priv->vif = vif;
+ if (!priv->vifs[0])
+ port_num = 0;
+ else if (!priv->vifs[1])
+ port_num = 1;
else
return -EOPNOTSUPP;
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- rtl8xxxu_stop_tx_beacon(priv);
+ if (port_num == 0) {
+ rtl8xxxu_stop_tx_beacon(priv);
- val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
- val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
- BEACON_DISABLE_TSF_UPDATE;
- rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
- ret = 0;
+ val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+ val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
+ BEACON_DISABLE_TSF_UPDATE;
+ rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+ }
break;
case NL80211_IFTYPE_AP:
+ if (port_num == 1) {
+ rtl8xxxu_switch_ports(priv);
+ port_num = 0;
+ }
+
rtl8xxxu_write8(priv, REG_BEACON_CTRL,
BEACON_DISABLE_TSF_UPDATE | BEACON_CTRL_MBSSID);
rtl8xxxu_write8(priv, REG_ATIMWND, 0x0c); /* 12ms */
@@ -6602,29 +6850,31 @@ static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
val8 = rtl8xxxu_read8(priv, REG_CCK_CHECK);
val8 &= ~BIT_BCN_PORT_SEL;
rtl8xxxu_write8(priv, REG_CCK_CHECK, val8);
-
- ret = 0;
break;
default:
- ret = -EOPNOTSUPP;
+ return -EOPNOTSUPP;
}
- rtl8xxxu_set_linktype(priv, vif->type);
+ priv->vifs[port_num] = vif;
+ rtlvif->port_num = port_num;
+ rtlvif->hw_key_idx = 0xff;
+
+ rtl8xxxu_set_linktype(priv, vif->type, port_num);
ether_addr_copy(priv->mac_addr, vif->addr);
- rtl8xxxu_set_mac(priv);
+ rtl8xxxu_set_mac(priv, port_num);
- return ret;
+ return 0;
}
static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
dev_dbg(&priv->udev->dev, "%s\n", __func__);
- if (priv->vif)
- priv->vif = NULL;
+ priv->vifs[rtlvif->port_num] = NULL;
}
static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
@@ -6746,8 +6996,8 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
else
rcr |= RCR_CHECK_BSSID_BEACON | RCR_CHECK_BSSID_MATCH;
- if (priv->vif && priv->vif->type == NL80211_IFTYPE_AP)
- rcr &= ~RCR_CHECK_BSSID_MATCH;
+ if (priv->vifs[0] && priv->vifs[0]->type == NL80211_IFTYPE_AP)
+ rcr &= ~(RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON);
if (*total_flags & FIF_CONTROL)
rcr |= RCR_ACCEPT_CTRL_FRAME;
@@ -6784,11 +7034,19 @@ static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts)
return 0;
}
+static int rtl8xxxu_get_free_sec_cam(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+
+ return find_first_zero_bit(priv->cam_map, priv->fops->max_sec_cam_num);
+}
+
static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
u8 mac_addr[ETH_ALEN];
@@ -6800,9 +7058,6 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
dev_dbg(dev, "%s: cmd %02x, cipher %08x, index %i\n",
__func__, cmd, key->cipher, key->keyidx);
- if (vif->type != NL80211_IFTYPE_STATION)
- return -EOPNOTSUPP;
-
if (key->keyidx > 3)
return -EOPNOTSUPP;
@@ -6826,7 +7081,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
ether_addr_copy(mac_addr, sta->addr);
} else {
dev_dbg(dev, "%s: group key\n", __func__);
- eth_broadcast_addr(mac_addr);
+ ether_addr_copy(mac_addr, vif->bss_conf.bssid);
}
val16 = rtl8xxxu_read16(priv, REG_CR);
@@ -6840,16 +7095,28 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- key->hw_key_idx = key->keyidx;
+
+ retval = rtl8xxxu_get_free_sec_cam(hw);
+ if (retval < 0)
+ return -EOPNOTSUPP;
+
+ key->hw_key_idx = retval;
+
+ if (vif->type == NL80211_IFTYPE_AP && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ rtlvif->hw_key_idx = key->hw_key_idx;
+
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
rtl8xxxu_cam_write(priv, key, mac_addr);
+ set_bit(key->hw_key_idx, priv->cam_map);
retval = 0;
break;
case DISABLE_KEY:
rtl8xxxu_write32(priv, REG_CAM_WRITE, 0x00000000);
val32 = CAM_CMD_POLLING | CAM_CMD_WRITE |
- key->keyidx << CAM_CMD_KEY_SHIFT;
+ key->hw_key_idx << CAM_CMD_KEY_SHIFT;
rtl8xxxu_write32(priv, REG_CAM_CMD, val32);
+ rtlvif->hw_key_idx = 0xff;
+ clear_bit(key->hw_key_idx, priv->cam_map);
retval = 0;
break;
default:
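
set_key now allocates hardware CAM slots from a bitmap (find_first_zero_bit() on cam_map, set_bit() on SET_KEY, clear_bit() on DISABLE_KEY) and records the slot in key->hw_key_idx instead of reusing key->keyidx directly, so independent keys never collide in the cache. A userspace model of that bookkeeping for up to 64 slots (the driver uses the kernel bitmap API; this mimics it):

	#include <stdint.h>

	#define MAX_SEC_CAM_NUM	64

	static uint64_t cam_map;

	static int cam_alloc(void)
	{
		for (int i = 0; i < MAX_SEC_CAM_NUM; i++) {
			if (!(cam_map & (1ULL << i))) {
				cam_map |= 1ULL << i;	/* set_bit */
				return i;		/* becomes hw_key_idx */
			}
		}
		return -1;	/* no free slot: fall back to software crypto */
	}

	static void cam_free(int idx)
	{
		cam_map &= ~(1ULL << idx);		/* clear_bit */
	}
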
@@ -6930,6 +7197,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
int signal, struct ieee80211_sta *sta,
bool force)
{
+ struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
struct ieee80211_hw *hw = priv->hw;
u16 wireless_mode;
u8 rssi_level, ratr_idx;
@@ -6938,7 +7206,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
u8 go_up_gap = 5;
u8 macid = rtl8xxxu_get_macid(priv, sta);
- rssi_level = priv->rssi_level;
+ rssi_level = sta_info->rssi_level;
snr = rtl8xxxu_signal_to_snr(signal);
snr_thresh_high = RTL8XXXU_SNR_THRESH_HIGH;
snr_thresh_low = RTL8XXXU_SNR_THRESH_LOW;
@@ -6963,18 +7231,16 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
else
rssi_level = RTL8XXXU_RATR_STA_LOW;
- if (rssi_level != priv->rssi_level || force) {
+ if (rssi_level != sta_info->rssi_level || force) {
int sgi = 0;
u32 rate_bitmap = 0;
- rcu_read_lock();
rate_bitmap = (sta->deflink.supp_rates[0] & 0xfff) |
(sta->deflink.ht_cap.mcs.rx_mask[0] << 12) |
(sta->deflink.ht_cap.mcs.rx_mask[1] << 20);
if (sta->deflink.ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
sgi = 1;
- rcu_read_unlock();
wireless_mode = rtl8xxxu_wireless_mode(hw, sta);
switch (wireless_mode) {
@@ -7055,7 +7321,7 @@ static void rtl8xxxu_refresh_rate_mask(struct rtl8xxxu_priv *priv,
break;
}
- priv->rssi_level = rssi_level;
+ sta_info->rssi_level = rssi_level;
priv->fops->update_rate_mask(priv, rate_bitmap, ratr_idx, sgi, txbw_40mhz, macid);
}
}
@@ -7085,7 +7351,7 @@ static void rtl8xxxu_track_cfo(struct rtl8xxxu_priv *priv)
int cfo_khz_a, cfo_khz_b, cfo_average;
int crystal_cap;
- if (!priv->vif || !priv->vif->cfg.assoc) {
+ if (!rtl8xxxu_is_assoc(priv)) {
/* Reset */
cfo->adjust = true;
@@ -7148,41 +7414,64 @@ static void rtl8xxxu_track_cfo(struct rtl8xxxu_priv *priv)
rtl8xxxu_set_atc_status(priv, abs(cfo_average) >= CFO_TH_ATC);
}
-static void rtl8xxxu_watchdog_callback(struct work_struct *work)
+static void rtl8xxxu_ra_iter(void *data, struct ieee80211_sta *sta)
{
- struct ieee80211_vif *vif;
- struct rtl8xxxu_priv *priv;
+ struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ struct rtl8xxxu_priv *priv = data;
+ int signal = -ewma_rssi_read(&sta_info->avg_rssi);
- priv = container_of(work, struct rtl8xxxu_priv, ra_watchdog.work);
- vif = priv->vif;
+ priv->fops->report_rssi(priv, rtl8xxxu_get_macid(priv, sta),
+ rtl8xxxu_signal_to_snr(signal));
+ rtl8xxxu_refresh_rate_mask(priv, signal, sta, false);
+}
- if (vif && vif->type == NL80211_IFTYPE_STATION) {
- int signal;
- struct ieee80211_sta *sta;
+struct rtl8xxxu_stas_entry {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+};
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!sta) {
- struct device *dev = &priv->udev->dev;
+struct rtl8xxxu_iter_stas_data {
+ struct rtl8xxxu_priv *priv;
+ struct list_head list;
+};
- dev_dbg(dev, "%s: no sta found\n", __func__);
- rcu_read_unlock();
- goto out;
- }
- rcu_read_unlock();
+static void rtl8xxxu_collect_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtl8xxxu_iter_stas_data *iter_stas = data;
+ struct rtl8xxxu_stas_entry *stas_entry;
- signal = ieee80211_ave_rssi(vif);
+ stas_entry = kmalloc(sizeof(*stas_entry), GFP_ATOMIC);
+ if (!stas_entry)
+ return;
- priv->fops->report_rssi(priv, 0,
- rtl8xxxu_signal_to_snr(signal));
+ stas_entry->sta = sta;
+ list_add_tail(&stas_entry->list, &iter_stas->list);
+}
- if (priv->fops->set_crystal_cap)
- rtl8xxxu_track_cfo(priv);
+static void rtl8xxxu_watchdog_callback(struct work_struct *work)
+{
- rtl8xxxu_refresh_rate_mask(priv, signal, sta, false);
+ struct rtl8xxxu_iter_stas_data iter_data;
+ struct rtl8xxxu_stas_entry *sta_entry, *tmp;
+ struct rtl8xxxu_priv *priv;
+
+ priv = container_of(work, struct rtl8xxxu_priv, ra_watchdog.work);
+ iter_data.priv = priv;
+ INIT_LIST_HEAD(&iter_data.list);
+
+ mutex_lock(&priv->sta_mutex);
+ ieee80211_iterate_stations_atomic(priv->hw, rtl8xxxu_collect_sta_iter,
+ &iter_data);
+ list_for_each_entry_safe(sta_entry, tmp, &iter_data.list, list) {
+ list_del_init(&sta_entry->list);
+ rtl8xxxu_ra_iter(priv, sta_entry->sta);
+ kfree(sta_entry);
}
+ mutex_unlock(&priv->sta_mutex);
+
+ if (priv->fops->set_crystal_cap)
+ rtl8xxxu_track_cfo(priv);
-out:
schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
}
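
The watchdog rewrite snapshots the station list first: ieee80211_iterate_stations_atomic() may not sleep, so entries are collected with GFP_ATOMIC and the sleepable rate/RSSI work runs afterwards, serialized against sta_add/sta_remove by the new sta_mutex. The collection half of that pattern, for a hypothetical driver foo:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <net/mac80211.h>

	struct foo_entry {
		struct list_head list;
		struct ieee80211_sta *sta;
	};

	static void foo_collect_sta(void *data, struct ieee80211_sta *sta)
	{
		struct list_head *list = data;
		struct foo_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

		if (!e)
			return;	/* skip this station until the next round */

		e->sta = sta;
		list_add_tail(&e->list, list);
	}
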
@@ -7304,7 +7593,9 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
if (priv->usb_interrupts)
rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+ cancel_work_sync(&priv->c2hcmd_work);
cancel_delayed_work_sync(&priv->ra_watchdog);
+ cancel_delayed_work_sync(&priv->update_beacon_work);
rtl8xxxu_free_rx_resources(priv);
rtl8xxxu_free_tx_resources(priv);
@@ -7315,16 +7606,34 @@ static int rtl8xxxu_sta_add(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
+ struct rtl8xxxu_vif *rtlvif = (struct rtl8xxxu_vif *)vif->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
+ mutex_lock(&priv->sta_mutex);
+ ewma_rssi_init(&sta_info->avg_rssi);
if (vif->type == NL80211_IFTYPE_AP) {
+ sta_info->rssi_level = RTL8XXXU_RATR_STA_INIT;
sta_info->macid = rtl8xxxu_acquire_macid(priv);
- if (sta_info->macid >= RTL8XXXU_MAX_MAC_ID_NUM)
+ if (sta_info->macid >= RTL8XXXU_MAX_MAC_ID_NUM) {
+ mutex_unlock(&priv->sta_mutex);
return -ENOSPC;
+ }
rtl8xxxu_refresh_rate_mask(priv, 0, sta, true);
priv->fops->report_connect(priv, sta_info->macid, H2C_MACID_ROLE_STA, true);
+ } else {
+ switch (rtlvif->port_num) {
+ case 0:
+ sta_info->macid = RTL8XXXU_BC_MC_MACID;
+ break;
+ case 1:
+ sta_info->macid = RTL8XXXU_BC_MC_MACID1;
+ break;
+ default:
+ break;
+ }
}
+ mutex_unlock(&priv->sta_mutex);
return 0;
}
@@ -7336,13 +7645,19 @@ static int rtl8xxxu_sta_remove(struct ieee80211_hw *hw,
struct rtl8xxxu_sta_info *sta_info = (struct rtl8xxxu_sta_info *)sta->drv_priv;
struct rtl8xxxu_priv *priv = hw->priv;
+ mutex_lock(&priv->sta_mutex);
if (vif->type == NL80211_IFTYPE_AP)
rtl8xxxu_release_macid(priv, sta_info->macid);
+ mutex_unlock(&priv->sta_mutex);
return 0;
}
static const struct ieee80211_ops rtl8xxxu_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtl8xxxu_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.add_interface = rtl8xxxu_add_interface,
@@ -7476,6 +7791,20 @@ static void rtl8xxxu_deinit_led(struct rtl8xxxu_priv *priv)
led_classdev_unregister(led);
}
+static const struct ieee80211_iface_limit rtl8xxxu_limits[] = {
+ { .max = 2, .types = BIT(NL80211_IFTYPE_STATION), },
+ { .max = 1, .types = BIT(NL80211_IFTYPE_AP), },
+};
+
+static const struct ieee80211_iface_combination rtl8xxxu_combinations[] = {
+ {
+ .limits = rtl8xxxu_limits,
+ .n_limits = ARRAY_SIZE(rtl8xxxu_limits),
+ .max_interfaces = 2,
+ .num_different_channels = 1,
+ },
+};
+
static int rtl8xxxu_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -7522,7 +7851,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
untested = 0;
break;
case 0x2357:
- if (id->idProduct == 0x0109)
+ if (id->idProduct == 0x0109 || id->idProduct == 0x0135)
untested = 0;
break;
case 0x0b05:
@@ -7555,13 +7884,14 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
mutex_init(&priv->usb_buf_mutex);
mutex_init(&priv->syson_indirect_access_mutex);
mutex_init(&priv->h2c_mutex);
+ mutex_init(&priv->sta_mutex);
INIT_LIST_HEAD(&priv->tx_urb_free_list);
spin_lock_init(&priv->tx_urb_lock);
INIT_LIST_HEAD(&priv->rx_urb_pending_list);
spin_lock_init(&priv->rx_urb_lock);
INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback);
- INIT_WORK(&priv->update_beacon_work, rtl8xxxu_update_beacon_work_callback);
+ INIT_DELAYED_WORK(&priv->update_beacon_work, rtl8xxxu_update_beacon_work_callback);
skb_queue_head_init(&priv->c2hcmd_queue);
usb_set_intfdata(interface, hw);
@@ -7611,6 +7941,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (ret)
goto err_set_intfdata;
+ hw->vif_data_size = sizeof(struct rtl8xxxu_vif);
+
hw->wiphy->max_scan_ssids = 1;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
if (priv->fops->max_macid_num)
@@ -7620,6 +7952,13 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
hw->queues = 4;
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+ if (priv->fops->supports_concurrent) {
+ hw->wiphy->iface_combinations = rtl8xxxu_combinations;
+ hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtl8xxxu_combinations);
+ }
+
sband = &rtl8xxxu_supported_band;
sband->ht_cap.ht_supported = true;
sband->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
@@ -7806,6 +8145,9 @@ static const struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192fu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x318b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192fu_fops},
+/* TP-Link TL-WN823N V2 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0135, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192fu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
/* Still supported by rtlwifi */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 920ee50e2115..61c0c0ec07b3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -146,6 +146,21 @@
#define GPIO_INTM_EDGE_TRIG_IRQ BIT(9)
#define REG_LEDCFG0 0x004c
+#define LEDCFG0_LED0CM GENMASK(2, 0)
+#define LEDCFG0_LED1CM GENMASK(10, 8)
+#define LED_MODE_SW_CTRL 0x0
+#define LED_MODE_TX_OR_RX_EVENTS 0x3
+#define LEDCFG0_LED0SV BIT(3)
+#define LEDCFG0_LED1SV BIT(11)
+#define LED_SW_OFF 0x0
+#define LED_SW_ON 0x1
+#define LEDCFG0_LED0_IO_MODE BIT(7)
+#define LEDCFG0_LED1_IO_MODE BIT(15)
+#define LED_IO_MODE_OUTPUT 0x0
+#define LED_IO_MODE_INPUT 0x1
+#define LEDCFG0_LED2EN BIT(21)
+#define LED_GPIO_DISABLE 0x0
+#define LED_GPIO_ENABLE 0x1
#define LEDCFG0_DPDT_SELECT BIT(23)
#define REG_LEDCFG1 0x004d
#define LEDCFG1_HW_LED_CONTROL BIT(1)
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 69e97647e3d6..2e60a6991ca1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1903,6 +1903,10 @@ void rtl_init_sw_leds(struct ieee80211_hw *hw)
EXPORT_SYMBOL(rtl_init_sw_leds);
const struct ieee80211_ops rtl_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = rtl_op_start,
.stop = rtl_op_stop,
.tx = rtl_op_tx,
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index 2e945554ed6d..c1fbc29d5ca1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -1287,18 +1287,44 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
}
EXPORT_SYMBOL_GPL(rtl_get_hwinfo);
-void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size)
+static void _rtl_fw_block_write_usb(struct ieee80211_hw *hw, u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 start = START_ADDRESS;
+ u32 n;
+
+ while (size > 0) {
+ if (size >= 64)
+ n = 64;
+ else if (size >= 8)
+ n = 8;
+ else
+ n = 1;
+
+ rtl_write_chunk(rtlpriv, start, n, buffer);
+
+ start += n;
+ buffer += n;
+ size -= n;
+ }
+}
+
+void rtl_fw_block_write(struct ieee80211_hw *hw, u8 *buffer, u32 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 *pu4byteptr = (u8 *)buffer;
u32 i;
- for (i = 0; i < size; i++)
- rtl_write_byte(rtlpriv, (START_ADDRESS + i), *(pu4byteptr + i));
+ if (rtlpriv->rtlhal.interface == INTF_PCI) {
+ for (i = 0; i < size; i++)
+ rtl_write_byte(rtlpriv, (START_ADDRESS + i),
+ *(buffer + i));
+ } else if (rtlpriv->rtlhal.interface == INTF_USB) {
+ _rtl_fw_block_write_usb(hw, buffer, size);
+ }
}
EXPORT_SYMBOL_GPL(rtl_fw_block_write);
-void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
+void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, u8 *buffer,
u32 size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
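On USB, _rtl_fw_block_write_usb() replaces the old one-byte-at-a-time loop with descending chunk sizes, so a firmware block costs far fewer control transfers. For example, size = 77 goes out as one 64-byte, one 8-byte, and five 1-byte writes. The same policy as a standalone helper (name hypothetical):

	static u32 fw_next_chunk(u32 remaining)
	{
		/* largest transfer first: 64, then 8, then single bytes */
		if (remaining >= 64)
			return 64;
		if (remaining >= 8)
			return 8;
		return 1;
	}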
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h
index 1ec59f439382..4821625ad1e5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.h
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h
@@ -91,8 +91,8 @@ void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate);
int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
int max_size, u8 *hwinfo, int *params);
void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen);
-void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
+void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, u8 *buffer,
u32 size);
-void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size);
+void rtl_fw_block_write(struct ieee80211_hw *hw, u8 *buffer, u32 size);
void rtl_efuse_ops_init(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 96ce05bcf0b3..11709b6c83f1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -378,13 +378,13 @@ static void _rtl_pci_io_handler_init(struct device *dev,
rtlpriv->io.dev = dev;
- rtlpriv->io.write8_async = pci_write8_async;
- rtlpriv->io.write16_async = pci_write16_async;
- rtlpriv->io.write32_async = pci_write32_async;
+ rtlpriv->io.write8 = pci_write8_async;
+ rtlpriv->io.write16 = pci_write16_async;
+ rtlpriv->io.write32 = pci_write32_async;
- rtlpriv->io.read8_sync = pci_read8_sync;
- rtlpriv->io.read16_sync = pci_read16_sync;
- rtlpriv->io.read32_sync = pci_read32_sync;
+ rtlpriv->io.read8 = pci_read8_sync;
+ rtlpriv->io.read16 = pci_read16_sync;
+ rtlpriv->io.read32 = pci_read32_sync;
}
static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
@@ -2374,7 +2374,6 @@ EXPORT_SYMBOL(rtl_pci_resume);
#endif /* CONFIG_PM_SLEEP */
const struct rtl_intf_ops rtl_pci_ops = {
- .read_efuse_byte = read_efuse_byte,
.adapter_start = rtl_pci_start,
.adapter_stop = rtl_pci_stop,
.check_buddy_priv = rtl_pci_check_buddy_priv,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 50e139186a93..ed151754fc6e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -350,7 +350,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool defaultadapter = true;
__le32 *pdesc = (__le32 *)pdesc8;
u16 seq_number;
__le16 fc = hdr->frame_control;
@@ -503,9 +502,6 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
set_tx_desc_hwseq_en(pdesc, 1);
set_tx_desc_pkt_id(pdesc, 8);
-
- if (!defaultadapter)
- set_tx_desc_qos(pdesc, 1);
}
set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
index 91e4427ab022..4757f93b84e4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
@@ -11,7 +11,7 @@
#define CHIP_VENDOR_UMC_B_CUT BIT(6)
#define IS_92C_1T2R(version) \
- (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
+ (((version) & CHIP_92C_1T2R) == CHIP_92C_1T2R)
#define IS_VENDOR_UMC(version) \
(((version) & CHIP_VENDOR_UMC) ? true : false)
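The old IS_92C_1T2R test accepted any overlap with the mask; the new one requires every bit of CHIP_92C_1T2R to be set. With illustrative bit values only (the real defines live above this hunk):

	/* assume CHIP_92C = BIT(0), CHIP_92C_1T2R = BIT(0) | BIT(1),
	 * and version = BIT(0), i.e. a plain 92C that is not 1T2R:
	 *   old: (v & CHIP_92C) && (v & CHIP_92C_1T2R)  -> true (wrong)
	 *   new: (v & CHIP_92C_1T2R) == CHIP_92C_1T2R   -> false
	 * only the mask-equality form demands the full bit pattern. */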
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 5ec0eb8773a5..4217c9a08d01 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -622,6 +622,9 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
u16 valuelow;
switch (queue_sel) {
+ default:
+ WARN_ON(1);
+ fallthrough;
case (TX_SELE_HQ | TX_SELE_LQ):
valuehi = QUEUE_HIGH;
valuelow = QUEUE_LOW;
@@ -634,9 +637,6 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
valuehi = QUEUE_HIGH;
valuelow = QUEUE_NORMAL;
break;
- default:
- WARN_ON(1);
- break;
}
if (!wmm_enable) {
beq = valuelow;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index 4ff0d4118193..a76f2dc8a977 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -101,7 +101,8 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
rtlphy->rf_type = RF_1T1R;
rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Chip RF Type: %s\n",
- rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
+ rtlphy->rf_type == RF_2T2R ? "RF_2T2R" :
+ rtlphy->rf_type == RF_1T2R ? "RF_1T2R" : "RF_1T1R");
if (get_rf_type(rtlphy) == RF_1T1R)
rtlpriv->dm.rfpath_rxenable[0] = true;
else
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 20b4aac69642..48be7e346efc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -40,7 +40,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.thermalvalue = 0;
/* for firmware buf */
- rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
+ rtlpriv->rtlhal.pfirmware = kmalloc(0x4000, GFP_KERNEL);
if (!rtlpriv->rtlhal.pfirmware) {
pr_err("Can't alloc buffer for fw\n");
return 1;
@@ -61,7 +61,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
if (err) {
- vfree(rtlpriv->rtlhal.pfirmware);
+ kfree(rtlpriv->rtlhal.pfirmware);
rtlpriv->rtlhal.pfirmware = NULL;
}
return err;
@@ -72,7 +72,7 @@ static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (rtlpriv->rtlhal.pfirmware) {
- vfree(rtlpriv->rtlhal.pfirmware);
+ kfree(rtlpriv->rtlhal.pfirmware);
rtlpriv->rtlhal.pfirmware = NULL;
}
}
@@ -145,7 +145,6 @@ MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
/* rx */
- .in_ep_num = RTL92C_USB_BULK_IN_NUM,
.rx_urb_num = RTL92C_NUM_RX_URBS,
.rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER,
.usb_rx_hdl = rtl8192cu_rx_hdl,
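The vzalloc-to-kmalloc switch matters because the firmware buffer is now handed to USB transfer routines: USB transfer buffers must be DMA-mappable, which rules out vmalloc memory (virtually contiguous only). A sketch of the constraint:

	/* OK: physically contiguous, usable by usb_control_msg() etc. */
	u8 *fw_buf = kmalloc(0x4000, GFP_KERNEL);

	/* not OK for USB transfers: vmalloc pages cannot be DMA-mapped
	 * directly, so vzalloc(0x4000) would need bounce buffering */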
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index 2f44c8aa6066..aa702ba7c9f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -79,68 +79,75 @@ static int configvernoutep(struct ieee80211_hw *hw)
static void twooutepmapping(struct ieee80211_hw *hw, bool is_chip8,
bool bwificfg, struct rtl_ep_map *ep_map)
{
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB Chip-B & WMM Setting.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 2;
- ep_map->ep_mapping[RTL_TXQ_BK] = 3;
- ep_map->ep_mapping[RTL_TXQ_VI] = 3;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
} else { /* typical setting */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB typical Setting.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 3;
- ep_map->ep_mapping[RTL_TXQ_BK] = 3;
- ep_map->ep_mapping[RTL_TXQ_VI] = 2;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
}
}
static void threeoutepmapping(struct ieee80211_hw *hw, bool bwificfg,
struct rtl_ep_map *ep_map)
{
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB 3EP Setting for WMM.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 5;
- ep_map->ep_mapping[RTL_TXQ_BK] = 3;
- ep_map->ep_mapping[RTL_TXQ_VI] = 3;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
} else { /* typical setting */
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB 3EP Setting for typical.....\n");
- ep_map->ep_mapping[RTL_TXQ_BE] = 5;
- ep_map->ep_mapping[RTL_TXQ_BK] = 5;
- ep_map->ep_mapping[RTL_TXQ_VI] = 3;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
}
}
static void oneoutepmapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map)
{
- ep_map->ep_mapping[RTL_TXQ_BE] = 2;
- ep_map->ep_mapping[RTL_TXQ_BK] = 2;
- ep_map->ep_mapping[RTL_TXQ_VI] = 2;
- ep_map->ep_mapping[RTL_TXQ_VO] = 2;
- ep_map->ep_mapping[RTL_TXQ_MGT] = 2;
- ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
- ep_map->ep_mapping[RTL_TXQ_HI] = 2;
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
}
static int _out_ep_mapping(struct ieee80211_hw *hw)
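All three mapping helpers now index the out_eps[] array filled during endpoint discovery instead of assuming fixed endpoint numbers. Illustratively, on a device whose two bulk-out endpoints happen to be 0x02 and 0x03, the "typical" two-EP mapping above still resolves to the old hardcoded values:

	/* BE/BK         -> out_eps[1] == 0x03
	 * VI/VO/MGT/... -> out_eps[0] == 0x02 */

while devices with a different endpoint layout now get a correct mapping too.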
@@ -475,9 +482,9 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool defaultadapter = true;
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ struct rtl_sta_info *sta_entry;
+ u8 agg_state = RTL_AGG_STOP;
+ u8 ampdu_density = 0;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 rate_flag = info->control.rates[0].flags;
@@ -486,6 +493,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
skb_get_queue_mapping(skb));
u8 *txdesc8;
__le32 *txdesc;
+ u8 tid;
seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
@@ -499,10 +507,21 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_tx_rate(txdesc, tcb_desc->hw_rate);
if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble)
set_tx_desc_data_shortgi(txdesc, 1);
- if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
- info->flags & IEEE80211_TX_CTL_AMPDU) {
+
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ tid = ieee80211_get_tid(hdr);
+ agg_state = sta_entry->tids[tid].agg.agg_state;
+ ampdu_density = sta->deflink.ht_cap.ampdu_density;
+ }
+
+ if (agg_state == RTL_AGG_OPERATIONAL &&
+ info->flags & IEEE80211_TX_CTL_AMPDU) {
set_tx_desc_agg_enable(txdesc, 1);
set_tx_desc_max_agg_num(txdesc, 0x14);
+ set_tx_desc_ampdu_density(txdesc, ampdu_density);
+ tcb_desc->rts_enable = 1;
+ tcb_desc->rts_rate = DESC_RATE24M;
} else {
set_tx_desc_agg_break(txdesc, 1);
}
@@ -537,14 +556,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_data_bw(txdesc, 0);
set_tx_desc_data_sc(txdesc, 0);
}
- rcu_read_lock();
- sta = ieee80211_find_sta(mac->vif, mac->bssid);
- if (sta) {
- u8 ampdu_density = sta->deflink.ht_cap.ampdu_density;
-
- set_tx_desc_ampdu_density(txdesc, ampdu_density);
- }
- rcu_read_unlock();
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf = info->control.hw_key;
@@ -587,8 +598,6 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
ppsc->fwctrl_lps) {
set_tx_desc_hwseq_en(txdesc, 1);
set_tx_desc_pkt_id(txdesc, 8);
- if (!defaultadapter)
- set_tx_desc_qos(txdesc, 1);
}
if (ieee80211_has_morefrags(fc))
set_tx_desc_more_frag(txdesc, 1);
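The TX path previously consulted a single per-vif mac->tids[] table and re-looked up the station by mac->bssid under RCU, which is only meaningful in station mode with one peer. Everything is now derived from the sta pointer mac80211 already passes in:

	/* old: global per-vif table + extra RCU lookup keyed on
	 *      mac->bssid (only ever the AP in station mode)
	 * new: per-station state via sta->drv_priv, valid for any peer,
	 *      with the tid taken from this frame's QoS control field */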
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
index 5f81cab205cc..09e61dc0f317 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
@@ -4,15 +4,12 @@
#ifndef __RTL92CU_TRX_H__
#define __RTL92CU_TRX_H__
-#define RTL92C_USB_BULK_IN_NUM 1
#define RTL92C_NUM_RX_URBS 8
#define RTL92C_NUM_TX_URBS 32
#define RTL92C_SIZE_MAX_RX_BUFFER 15360 /* 8192 */
#define RX_DRV_INFO_SIZE_UNIT 8
-#define RTL_AGG_ON 1
-
enum usb_rx_agg_mode {
USB_RX_AGG_DISABLE,
USB_RX_AGG_DMA,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 743ac6871bf4..4ba42f6be3f2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1669,10 +1669,8 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
u8 cutvalue[2];
u16 chipvalue;
- rtlpriv->intf_ops->read_efuse_byte(hw, EEPROME_CHIP_VERSION_H,
- &cutvalue[1]);
- rtlpriv->intf_ops->read_efuse_byte(hw, EEPROME_CHIP_VERSION_L,
- &cutvalue[0]);
+ read_efuse_byte(hw, EEPROME_CHIP_VERSION_H, &cutvalue[1]);
+ read_efuse_byte(hw, EEPROME_CHIP_VERSION_L, &cutvalue[0]);
chipvalue = (cutvalue[1] << 8) | cutvalue[0];
switch (chipvalue) {
case 0xAA55:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 02ac69c08ed3..192982ec8152 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -42,6 +42,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
bool packet_beacon)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
struct phy_sts_cck_8192d *cck_buf;
s8 rx_pwr_all, rx_pwr[4];
@@ -62,9 +63,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
u8 report, cck_highpwr;
cck_buf = (struct phy_sts_cck_8192d *)p_drvinfo;
if (ppsc->rfpwr_state == ERFON)
- cck_highpwr = (u8) rtl_get_bbreg(hw,
- RFPGA0_XA_HSSIPARAMETER2,
- BIT(9));
+ cck_highpwr = rtlphy->cck_high_power;
else
cck_highpwr = false;
if (!cck_highpwr) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index d9823ddab7be..65bfc14702f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -349,7 +349,6 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- bool b_defaultadapter = true;
/* bool b_trigger_ac = false; */
u8 *pdesc8 = (u8 *)pdesc_tx;
__le32 *pdesc = (__le32 *)pdesc8;
@@ -503,10 +502,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_hwseq_en_8723(pdesc, 1);
/* set_tx_desc_hwseq_en(pdesc, 1); */
/* set_tx_desc_pkt_id(pdesc, 8); */
-
- if (!b_defaultadapter)
- set_tx_desc_hwseq_sel_8723(pdesc, 1);
- /* set_tx_desc_qos(pdesc, 1); */
+ /* set_tx_desc_qos(pdesc, 1); */
}
set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 30bf2775a335..6e8c87a2fae4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -23,86 +23,23 @@ MODULE_DESCRIPTION("USB basic driver for rtlwifi");
#define MAX_USBCTRL_VENDORREQ_TIMES 10
-static void usbctrl_async_callback(struct urb *urb)
-{
- if (urb) {
- /* free dr */
- kfree(urb->setup_packet);
- /* free databuf */
- kfree(urb->transfer_buffer);
- }
-}
-
-static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
- u16 value, u16 index, void *pdata,
- u16 len)
-{
- int rc;
- unsigned int pipe;
- u8 reqtype;
- struct usb_ctrlrequest *dr;
- struct urb *urb;
- const u16 databuf_maxlen = REALTEK_USB_VENQT_MAX_BUF_SIZE;
- u8 *databuf;
-
- if (WARN_ON_ONCE(len > databuf_maxlen))
- len = databuf_maxlen;
-
- pipe = usb_sndctrlpipe(udev, 0); /* write_out */
- reqtype = REALTEK_USB_VENQT_WRITE;
-
- dr = kzalloc(sizeof(*dr), GFP_ATOMIC);
- if (!dr)
- return -ENOMEM;
-
- databuf = kzalloc(databuf_maxlen, GFP_ATOMIC);
- if (!databuf) {
- kfree(dr);
- return -ENOMEM;
- }
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- kfree(databuf);
- kfree(dr);
- return -ENOMEM;
- }
-
- dr->bRequestType = reqtype;
- dr->bRequest = request;
- dr->wValue = cpu_to_le16(value);
- dr->wIndex = cpu_to_le16(index);
- dr->wLength = cpu_to_le16(len);
- /* data are already in little-endian order */
- memcpy(databuf, pdata, len);
- usb_fill_control_urb(urb, udev, pipe,
- (unsigned char *)dr, databuf, len,
- usbctrl_async_callback, NULL);
- rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (rc < 0) {
- kfree(databuf);
- kfree(dr);
- }
- usb_free_urb(urb);
- return rc;
-}
-
-static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
- u16 value, u16 index, void *pdata,
- u16 len)
+static void _usbctrl_vendorreq_sync(struct usb_device *udev, u8 reqtype,
+ u16 value, void *pdata, u16 len)
{
unsigned int pipe;
int status;
- u8 reqtype;
int vendorreq_times = 0;
static int count;
- pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
- reqtype = REALTEK_USB_VENQT_READ;
+ if (reqtype == REALTEK_USB_VENQT_READ)
+ pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
+ else
+ pipe = usb_sndctrlpipe(udev, 0); /* write_out */
do {
- status = usb_control_msg(udev, pipe, request, reqtype, value,
- index, pdata, len, 1000);
+ status = usb_control_msg(udev, pipe, REALTEK_USB_VENQT_CMD_REQ,
+ reqtype, value, REALTEK_USB_VENQT_CMD_IDX,
+ pdata, len, 1000);
if (status < 0) {
/* firmware download is checksummed, don't retry */
if ((value >= FW_8192C_START_ADDRESS &&
@@ -114,18 +51,15 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
} while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES);
if (status < 0 && count++ < 4)
- pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
- value, status, *(u32 *)pdata);
- return status;
+ dev_err(&udev->dev, "reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x reqtype=0x%x\n",
+ value, status, *(u32 *)pdata, reqtype);
}
static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
{
struct device *dev = rtlpriv->io.dev;
struct usb_device *udev = to_usb_device(dev);
- u8 request;
u16 wvalue;
- u16 index;
__le32 *data;
unsigned long flags;
@@ -134,14 +68,33 @@ static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
rtlpriv->usb_data_index = 0;
data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
- request = REALTEK_USB_VENQT_CMD_REQ;
- index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
wvalue = (u16)addr;
- _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+ _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_READ, wvalue, data, len);
return le32_to_cpu(*data);
}
+
+static void _usb_write_sync(struct rtl_priv *rtlpriv, u32 addr, u32 val, u16 len)
+{
+ struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(dev);
+ unsigned long flags;
+ __le32 *data;
+ u16 wvalue;
+
+ spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags);
+ if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+ rtlpriv->usb_data_index = 0;
+ data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
+ spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
+
+ wvalue = (u16)(addr & 0x0000ffff);
+ *data = cpu_to_le32(val);
+
+ _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_WRITE, wvalue, data, len);
+}
+
static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
{
return (u8)_usb_read_sync(rtlpriv, addr, 1);
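With the async write machinery gone, reads and writes share one synchronous helper. Expanded, a 4-byte register read becomes (arguments as in the hunk above):

	/* usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
	 *                 REALTEK_USB_VENQT_CMD_REQ,     bRequest
	 *                 REALTEK_USB_VENQT_READ,        bmRequestType
	 *                 addr,                          wValue
	 *                 REALTEK_USB_VENQT_CMD_IDX,     wIndex
	 *                 data, 4, 1000);
	 * a write differs only in pipe direction and bmRequestType. */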
@@ -157,45 +110,27 @@ static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
return _usb_read_sync(rtlpriv, addr, 4);
}
-static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
- u16 len)
+static void _usb_write8_sync(struct rtl_priv *rtlpriv, u32 addr, u8 val)
{
- u8 request;
- u16 wvalue;
- u16 index;
- __le32 data;
- int ret;
-
- request = REALTEK_USB_VENQT_CMD_REQ;
- index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
- wvalue = (u16)(addr&0x0000ffff);
- data = cpu_to_le32(val);
-
- ret = _usbctrl_vendorreq_async_write(udev, request, wvalue,
- index, &data, len);
- if (ret < 0)
- dev_err(&udev->dev, "error %d writing at 0x%x\n", ret, addr);
+ _usb_write_sync(rtlpriv, addr, val, 1);
}
-static void _usb_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
+static void _usb_write16_sync(struct rtl_priv *rtlpriv, u32 addr, u16 val)
{
- struct device *dev = rtlpriv->io.dev;
-
- _usb_write_async(to_usb_device(dev), addr, val, 1);
+ _usb_write_sync(rtlpriv, addr, val, 2);
}
-static void _usb_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val)
+static void _usb_write32_sync(struct rtl_priv *rtlpriv, u32 addr, u32 val)
{
- struct device *dev = rtlpriv->io.dev;
-
- _usb_write_async(to_usb_device(dev), addr, val, 2);
+ _usb_write_sync(rtlpriv, addr, val, 4);
}
-static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
+static void _usb_write_chunk_sync(struct rtl_priv *rtlpriv, u32 addr,
+ u32 length, u8 *data)
{
- struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(rtlpriv->io.dev);
- _usb_write_async(to_usb_device(dev), addr, val, 4);
+ _usbctrl_vendorreq_sync(udev, REALTEK_USB_VENQT_WRITE, addr, data, length);
}
static void _rtl_usb_io_handler_init(struct device *dev,
@@ -205,12 +140,13 @@ static void _rtl_usb_io_handler_init(struct device *dev,
rtlpriv->io.dev = dev;
mutex_init(&rtlpriv->io.bb_mutex);
- rtlpriv->io.write8_async = _usb_write8_async;
- rtlpriv->io.write16_async = _usb_write16_async;
- rtlpriv->io.write32_async = _usb_write32_async;
- rtlpriv->io.read8_sync = _usb_read8_sync;
- rtlpriv->io.read16_sync = _usb_read16_sync;
- rtlpriv->io.read32_sync = _usb_read32_sync;
+ rtlpriv->io.write8 = _usb_write8_sync;
+ rtlpriv->io.write16 = _usb_write16_sync;
+ rtlpriv->io.write32 = _usb_write32_sync;
+ rtlpriv->io.write_chunk = _usb_write_chunk_sync;
+ rtlpriv->io.read8 = _usb_read8_sync;
+ rtlpriv->io.read16 = _usb_read16_sync;
+ rtlpriv->io.read32 = _usb_read32_sync;
}
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
@@ -280,7 +216,6 @@ static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size;
rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num;
- rtlusb->in_ep = rtlpriv->cfg->usb_interface_cfg->in_ep_num;
rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl;
rtlusb->usb_rx_segregate_hdl =
rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl;
@@ -312,20 +247,38 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc;
- if (usb_endpoint_dir_in(pep_desc))
+ if (usb_endpoint_dir_in(pep_desc)) {
+ if (usb_endpoint_xfer_bulk(pep_desc)) {
+ /* The vendor drivers assume there is only one
+ * bulk in ep and that it's the first in ep.
+ */
+ if (rtlusb->in_ep_nums == 0)
+ rtlusb->in_ep = usb_endpoint_num(pep_desc);
+ else
+ pr_warn("%s: bulk in endpoint is not the first in endpoint\n",
+ __func__);
+ }
+
rtlusb->in_ep_nums++;
- else if (usb_endpoint_dir_out(pep_desc))
+ } else if (usb_endpoint_dir_out(pep_desc)) {
+ if (rtlusb->out_ep_nums < RTL_USB_MAX_BULKOUT_NUM) {
+ if (usb_endpoint_xfer_bulk(pep_desc))
+ rtlusb->out_eps[rtlusb->out_ep_nums] =
+ usb_endpoint_num(pep_desc);
+ } else {
+ pr_warn("%s: found more bulk out endpoints than the expected %d\n",
+ __func__, RTL_USB_MAX_BULKOUT_NUM);
+ }
+
rtlusb->out_ep_nums++;
+ }
rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
pep_desc->bInterval);
}
- if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num) {
- pr_err("Too few input end points found\n");
- return -EINVAL;
- }
+
if (rtlusb->out_ep_nums == 0) {
pr_err("No output end points found\n");
return -EINVAL;
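Endpoint discovery now classifies each descriptor instead of trusting a per-chip in_ep_num. The rule, restated (the helpers are the standard USB core ones):

	/* usb_endpoint_dir_in(desc) && usb_endpoint_xfer_bulk(desc)
	 *     -> bulk-in (RX); only the first one is used
	 * usb_endpoint_dir_out(desc) && usb_endpoint_xfer_bulk(desc)
	 *     -> recorded in out_eps[], up to RTL_USB_MAX_BULKOUT_NUM */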
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
index 3bf85b23eec1..12529afc0510 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.h
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.h
@@ -19,6 +19,7 @@
#define RTL_USB_MAX_TXQ_NUM 4 /* max tx queue */
#define RTL_USB_MAX_EP_NUM 6 /* max ep number */
+#define RTL_USB_MAX_BULKOUT_NUM 4
#define RTL_USB_MAX_TX_URBS_NUM 8
enum rtl_txq {
@@ -94,6 +95,7 @@ struct rtl_usb {
/* Tx */
u8 out_ep_nums ;
+ u8 out_eps[RTL_USB_MAX_BULKOUT_NUM];
u8 out_queue_sel;
struct rtl_ep_map ep_map;
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index d87cd2252eac..9fabf597cfd6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1397,8 +1397,6 @@ struct rtl_phy {
#define RTL_AGG_PROGRESS 1
#define RTL_AGG_START 2
#define RTL_AGG_OPERATIONAL 3
-#define RTL_AGG_OFF 0
-#define RTL_AGG_ON 1
#define RTL_RX_AGG_START 1
#define RTL_RX_AGG_STOP 0
#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2
@@ -1447,13 +1445,15 @@ struct rtl_io {
/*PCI IO map */
unsigned long pci_base_addr; /*device I/O address */
- void (*write8_async)(struct rtl_priv *rtlpriv, u32 addr, u8 val);
- void (*write16_async)(struct rtl_priv *rtlpriv, u32 addr, u16 val);
- void (*write32_async)(struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*write8)(struct rtl_priv *rtlpriv, u32 addr, u8 val);
+ void (*write16)(struct rtl_priv *rtlpriv, u32 addr, u16 val);
+ void (*write32)(struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*write_chunk)(struct rtl_priv *rtlpriv, u32 addr, u32 length,
+ u8 *data);
- u8 (*read8_sync)(struct rtl_priv *rtlpriv, u32 addr);
- u16 (*read16_sync)(struct rtl_priv *rtlpriv, u32 addr);
- u32 (*read32_sync)(struct rtl_priv *rtlpriv, u32 addr);
+ u8 (*read8)(struct rtl_priv *rtlpriv, u32 addr);
+ u16 (*read16)(struct rtl_priv *rtlpriv, u32 addr);
+ u32 (*read32)(struct rtl_priv *rtlpriv, u32 addr);
};
@@ -1471,7 +1471,6 @@ struct rtl_mac {
enum nl80211_iftype opmode;
/*Probe Beacon management */
- struct rtl_tid_data tids[MAX_TID_COUNT];
enum rtl_link_state link_state;
int n_channels;
@@ -2290,7 +2289,6 @@ struct rtl_hal_ops {
struct rtl_intf_ops {
/*com */
- void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
int (*adapter_start)(struct ieee80211_hw *hw);
void (*adapter_stop)(struct ieee80211_hw *hw);
bool (*check_buddy_priv)(struct ieee80211_hw *hw,
@@ -2354,7 +2352,6 @@ struct rtl_mod_params {
struct rtl_hal_usbint_cfg {
/* data - rx */
- u32 in_ep_num;
u32 rx_urb_num;
u32 rx_max_size;
@@ -2916,25 +2913,25 @@ extern u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M];
static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
{
- return rtlpriv->io.read8_sync(rtlpriv, addr);
+ return rtlpriv->io.read8(rtlpriv, addr);
}
static inline u16 rtl_read_word(struct rtl_priv *rtlpriv, u32 addr)
{
- return rtlpriv->io.read16_sync(rtlpriv, addr);
+ return rtlpriv->io.read16(rtlpriv, addr);
}
static inline u32 rtl_read_dword(struct rtl_priv *rtlpriv, u32 addr)
{
- return rtlpriv->io.read32_sync(rtlpriv, addr);
+ return rtlpriv->io.read32(rtlpriv, addr);
}
static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
{
- rtlpriv->io.write8_async(rtlpriv, addr, val8);
+ rtlpriv->io.write8(rtlpriv, addr, val8);
if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read8_sync(rtlpriv, addr);
+ rtlpriv->io.read8(rtlpriv, addr);
}
static inline void rtl_write_byte_with_val32(struct ieee80211_hw *hw,
@@ -2947,19 +2944,25 @@ static inline void rtl_write_byte_with_val32(struct ieee80211_hw *hw,
static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
{
- rtlpriv->io.write16_async(rtlpriv, addr, val16);
+ rtlpriv->io.write16(rtlpriv, addr, val16);
if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read16_sync(rtlpriv, addr);
+ rtlpriv->io.read16(rtlpriv, addr);
}
static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
u32 addr, u32 val32)
{
- rtlpriv->io.write32_async(rtlpriv, addr, val32);
+ rtlpriv->io.write32(rtlpriv, addr, val32);
if (rtlpriv->cfg->write_readback)
- rtlpriv->io.read32_sync(rtlpriv, addr);
+ rtlpriv->io.read32(rtlpriv, addr);
+}
+
+static inline void rtl_write_chunk(struct rtl_priv *rtlpriv,
+ u32 addr, u32 length, u8 *data)
+{
+ rtlpriv->io.write_chunk(rtlpriv, addr, length, data);
}
static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index 1b2ad81838be..5b2036798159 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -316,23 +316,13 @@ static ssize_t rtw_debugfs_set_single_input(struct file *filp,
{
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
- struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- char tmp[32 + 1];
u32 input;
- int num;
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtou32_from_user(buffer, count, 0, &input);
if (ret)
return ret;
- num = kstrtoint(tmp, 0, &input);
-
- if (num) {
- rtw_warn(rtwdev, "kstrtoint failed\n");
- return num;
- }
-
debugfs_priv->cb_data = input;
return count;
@@ -485,19 +475,12 @@ static ssize_t rtw_debugfs_set_fix_rate(struct file *filp,
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u8 fix_rate;
- char tmp[32 + 1];
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtou8_from_user(buffer, count, 0, &fix_rate);
if (ret)
return ret;
- ret = kstrtou8(tmp, 0, &fix_rate);
- if (ret) {
- rtw_warn(rtwdev, "invalid args, [rate]\n");
- return ret;
- }
-
dm_info->fix_rate = fix_rate;
return count;
@@ -879,20 +862,13 @@ static ssize_t rtw_debugfs_set_coex_enable(struct file *filp,
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_coex *coex = &rtwdev->coex;
- char tmp[32 + 1];
bool enable;
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtobool_from_user(buffer, count, &enable);
if (ret)
return ret;
- ret = kstrtobool(tmp, &enable);
- if (ret) {
- rtw_warn(rtwdev, "invalid arguments\n");
- return ret;
- }
-
mutex_lock(&rtwdev->mutex);
coex->manual_control = !enable;
mutex_unlock(&rtwdev->mutex);
@@ -951,18 +927,13 @@ static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- char tmp[32 + 1];
bool input;
int ret;
- ret = rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+ ret = kstrtobool_from_user(buffer, count, &input);
if (ret)
return ret;
- ret = kstrtobool(tmp, &input);
- if (ret)
- return -EINVAL;
-
if (!input)
return -EINVAL;
@@ -1030,11 +1001,12 @@ static ssize_t rtw_debugfs_set_dm_cap(struct file *filp,
struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- int bit;
+ int ret, bit;
bool en;
- if (kstrtoint_from_user(buffer, count, 10, &bit))
- return -EINVAL;
+ ret = kstrtoint_from_user(buffer, count, 10, &bit);
+ if (ret)
+ return ret;
en = bit > 0;
bit = abs(bit);
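Each converted handler collapses the copy-to-temp-buffer plus kstrto*() pair into one kstrto*_from_user() call, which also propagates the parser's own error code instead of a blanket -EINVAL. A minimal debugfs write handler in the same pattern (function name hypothetical):

	static ssize_t sketch_set_enable(struct file *filp,
					 const char __user *buffer,
					 size_t count, loff_t *loff)
	{
		bool enable;
		int ret;

		ret = kstrtobool_from_user(buffer, count, &enable);
		if (ret)
			return ret;

		/* ... apply 'enable' ... */
		return count;
	}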
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 298663b03580..0c1c1ff31085 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -309,6 +309,13 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
+ if (pwr_on && rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
+ if (chip->id == RTW_CHIP_TYPE_8822C ||
+ chip->id == RTW_CHIP_TYPE_8822B ||
+ chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_write8_clr(rtwdev, REG_SYS_STATUS1 + 1, BIT(0));
+ }
+
if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
rtw_write32(rtwdev, REG_SDIO_HIMR, imr);
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index d8d68f16014e..7af5bf7fe5b6 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -927,6 +927,10 @@ static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw,
}
const struct ieee80211_ops rtw_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
.start = rtw_ops_start,
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 6d22628129d0..ffba6b88f392 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -2032,8 +2032,6 @@ static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev)
rtw_phy_setup_phy_cond(rtwdev, hal->pkg_type);
rtw_phy_init_tx_power(rtwdev);
- if (rfe_def->agc_btg_tbl)
- rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
rtw_load_table(rtwdev, rfe_def->phy_pg_tbl);
rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl);
rtw_phy_tx_power_by_rate_config(hal);
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 2bfc0e822b8d..9986a4cb37eb 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -1450,6 +1450,7 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
const struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
struct pci_dev *pdev = rtwpci->pdev;
const struct rtw_intf_phy_para *para;
u16 cut;
@@ -1498,6 +1499,9 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
ret);
}
+
+ if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5)
+ rtw_write32_mask(rtwdev, REG_ANAPARSW_MAC_0, BIT_CF_L_V2, 0x1);
}
static int __maybe_unused rtw_pci_suspend(struct device *dev)
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 128e75a81bf3..37ef80c9091d 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -1761,12 +1761,15 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
+ const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
const struct rtw_chip_info *chip = rtwdev->chip;
u8 rf_path;
rtw_load_table(rtwdev, chip->mac_tbl);
rtw_load_table(rtwdev, chip->bb_tbl);
rtw_load_table(rtwdev, chip->agc_tbl);
+ if (rfe_def->agc_btg_tbl)
+ rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
rtw_load_rfk_table(rtwdev);
for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 1634f03784f1..b122f226924b 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -557,6 +557,9 @@
#define REG_RFE_INV16 0x0cbe
#define BIT_RFE_BUF_EN BIT(3)
+#define REG_ANAPARSW_MAC_0 0x1010
+#define BIT_CF_L_V2 GENMASK(29, 28)
+
#define REG_ANAPAR_XTAL_0 0x1040
#define BIT_XCAP_0 GENMASK(23, 10)
#define REG_CPU_DMEM_CON 0x1080
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 429bb420b056..fe5d8e188350 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -773,9 +773,9 @@ static void rtw8821c_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->cck_fa_cnt = cck_fa_cnt;
dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = ofdm_fa_cnt;
if (cck_enable)
dm_info->total_fa_cnt += cck_fa_cnt;
- dm_info->total_fa_cnt = ofdm_fa_cnt;
crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK);
dm_info->cck_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt);
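The reorder fixes an overwrite: the unconditional assignment used to run after the conditional accumulation. With illustrative counts ofdm_fa_cnt = 7, cck_fa_cnt = 5 and CCK enabled:

	/* before: total += 5; total = 7;  -> CCK term lost, total == 7
	 * after:  total = 7;  total += 5; -> total == 12 as intended */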
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
index 7a5cbdc31ef7..e2c7d9f87683 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
@@ -9,24 +9,36 @@
#include "usb.h"
static const struct usb_device_id rtw_8821cu_id_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8731, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb820, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc80c, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc820, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82a, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* TOTOLINK A650UA v3 */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xd811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8821cu_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index e6ab1ac6d709..a0188511099a 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -33,6 +33,36 @@ static void rtw_usb_fill_tx_checksum(struct rtw_usb *rtwusb,
rtw_tx_fill_txdesc_checksum(rtwdev, &pkt_info, skb->data);
}
+static void rtw_usb_reg_sec(struct rtw_dev *rtwdev, u32 addr, __le32 *data)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ struct usb_device *udev = rtwusb->udev;
+ bool reg_on_section = false;
+ u16 t_reg = 0x4e0;
+ u8 t_len = 1;
+ int status;
+
+ /* There are three sections:
+ * 1. on (0x00~0xFF; 0x1000~0x10FF): this section is always powered on
+ * 2. off (< 0xFE00, excluding "on" section): this section could be
+ * powered off
+ * 3. local (>= 0xFE00): usb specific registers section
+ */
+ if (addr <= 0xff || (addr >= 0x1000 && addr <= 0x10ff))
+ reg_on_section = true;
+
+ if (!reg_on_section)
+ return;
+
+ status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
+ t_reg, 0, data, t_len, 500);
+
+ if (status != t_len && status != -ENODEV)
+ rtw_err(rtwdev, "%s: reg 0x%x, usb write %u fail, status: %d\n",
+ __func__, t_reg, t_len, status);
+}
+
static u32 rtw_usb_read(struct rtw_dev *rtwdev, u32 addr, u16 len)
{
struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
@@ -58,6 +88,11 @@ static u32 rtw_usb_read(struct rtw_dev *rtwdev, u32 addr, u16 len)
rtw_err(rtwdev, "read register 0x%x failed with %d\n",
addr, ret);
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_usb_reg_sec(rtwdev, addr, data);
+
return le32_to_cpu(*data);
}
@@ -102,6 +137,11 @@ static void rtw_usb_write(struct rtw_dev *rtwdev, u32 addr, u32 val, int len)
if (ret < 0 && ret != -ENODEV && count++ < 4)
rtw_err(rtwdev, "write register 0x%x failed with %d\n",
addr, ret);
+
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_usb_reg_sec(rtwdev, addr, data);
}
static void rtw_usb_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
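rtw_usb_reg_sec() issues a dummy one-byte write to 0x4e0 after accesses that land in the always-on register section, as a workaround on the three listed chips; other addresses return early. The section test as a standalone predicate (name hypothetical):

	static bool in_reg_on_section(u32 addr)
	{
		/* 0x0000-0x00ff and 0x1000-0x10ff stay powered on */
		return addr <= 0xff || (addr >= 0x1000 && addr <= 0x10ff);
	}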
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 914c94988b2f..11fbdd142162 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -777,3 +777,64 @@ void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
SET_DCTL_SEC_ENT5_V1(cmd, addr_cam->sec_ent[5]);
SET_DCTL_SEC_ENT6_V1(cmd, addr_cam->sec_ent[6]);
}
+
+void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c)
+{
+ struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+
+ h2c->c0 = le32_encode_bits(rtwsta ? rtwsta->mac_id : rtwvif->mac_id,
+ DCTLINFO_V2_C0_MACID) |
+ le32_encode_bits(1, DCTLINFO_V2_C0_OP);
+
+ h2c->w4 = le32_encode_bits(addr_cam->sec_ent_keyid[0],
+ DCTLINFO_V2_W4_SEC_ENT0_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[1],
+ DCTLINFO_V2_W4_SEC_ENT1_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[2],
+ DCTLINFO_V2_W4_SEC_ENT2_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[3],
+ DCTLINFO_V2_W4_SEC_ENT3_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[4],
+ DCTLINFO_V2_W4_SEC_ENT4_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[5],
+ DCTLINFO_V2_W4_SEC_ENT5_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[6],
+ DCTLINFO_V2_W4_SEC_ENT6_KEYID);
+ h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_SEC_ENT0_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT1_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT2_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT3_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT4_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT5_KEYID |
+ DCTLINFO_V2_W4_SEC_ENT6_KEYID);
+
+ h2c->w5 = le32_encode_bits(addr_cam->sec_cam_map[0],
+ DCTLINFO_V2_W5_SEC_ENT_VALID_V1) |
+ le32_encode_bits(addr_cam->sec_ent[0],
+ DCTLINFO_V2_W5_SEC_ENT0_V1);
+ h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_SEC_ENT_VALID_V1 |
+ DCTLINFO_V2_W5_SEC_ENT0_V1);
+
+ h2c->w6 = le32_encode_bits(addr_cam->sec_ent[1],
+ DCTLINFO_V2_W6_SEC_ENT1_V1) |
+ le32_encode_bits(addr_cam->sec_ent[2],
+ DCTLINFO_V2_W6_SEC_ENT2_V1) |
+ le32_encode_bits(addr_cam->sec_ent[3],
+ DCTLINFO_V2_W6_SEC_ENT3_V1) |
+ le32_encode_bits(addr_cam->sec_ent[4],
+ DCTLINFO_V2_W6_SEC_ENT4_V1);
+ h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_SEC_ENT1_V1 |
+ DCTLINFO_V2_W6_SEC_ENT2_V1 |
+ DCTLINFO_V2_W6_SEC_ENT3_V1 |
+ DCTLINFO_V2_W6_SEC_ENT4_V1);
+
+ h2c->w7 = le32_encode_bits(addr_cam->sec_ent[5],
+ DCTLINFO_V2_W7_SEC_ENT5_V1) |
+ le32_encode_bits(addr_cam->sec_ent[6],
+ DCTLINFO_V2_W7_SEC_ENT6_V1);
+ h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_SEC_ENT5_V1 |
+ DCTLINFO_V2_W7_SEC_ENT6_V1);
+}
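The v2 fill routine packs fields with le32_encode_bits() and mirrors every mask it used into the matching m* word, so the firmware can tell which parts of each w* word carry valid data. For instance, given DCTLINFO_V2_W4_SEC_ENT1_KEYID = GENMASK(21, 20) (defined in cam.h below):

	/* le32_encode_bits(2, DCTLINFO_V2_W4_SEC_ENT1_KEYID)
	 * places 0b10 into bits 21:20 of w4, already little-endian */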
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index 83c160a614e6..fa09d11c345c 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -352,6 +352,111 @@ static inline void FWCMD_SET_ADDR_BSSID_BSSID5(void *cmd, u32 value)
le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24));
}
+struct rtw89_h2c_dctlinfo_ud_v2 {
+ __le32 c0;
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 m0;
+ __le32 m1;
+ __le32 m2;
+ __le32 m3;
+ __le32 m4;
+ __le32 m5;
+ __le32 m6;
+ __le32 m7;
+ __le32 m8;
+ __le32 m9;
+ __le32 m10;
+ __le32 m11;
+ __le32 m12;
+ __le32 m13;
+ __le32 m14;
+ __le32 m15;
+} __packed;
+
+#define DCTLINFO_V2_C0_MACID GENMASK(6, 0)
+#define DCTLINFO_V2_C0_OP BIT(7)
+
+#define DCTLINFO_V2_W0_QOS_FIELD_H GENMASK(7, 0)
+#define DCTLINFO_V2_W0_HW_EXSEQ_MACID GENMASK(14, 8)
+#define DCTLINFO_V2_W0_QOS_DATA BIT(15)
+#define DCTLINFO_V2_W0_AES_IV_L GENMASK(31, 16)
+#define DCTLINFO_V2_W0_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W1_AES_IV_H GENMASK(31, 0)
+#define DCTLINFO_V2_W1_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W2_SEQ0 GENMASK(11, 0)
+#define DCTLINFO_V2_W2_SEQ1 GENMASK(23, 12)
+#define DCTLINFO_V2_W2_AMSDU_MAX_LEN GENMASK(26, 24)
+#define DCTLINFO_V2_W2_STA_AMSDU_EN BIT(27)
+#define DCTLINFO_V2_W2_CHKSUM_OFLD_EN BIT(28)
+#define DCTLINFO_V2_W2_WITH_LLC BIT(29)
+#define DCTLINFO_V2_W2_NAT25_EN BIT(30)
+#define DCTLINFO_V2_W2_IS_MLD BIT(31)
+#define DCTLINFO_V2_W2_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W3_SEQ2 GENMASK(11, 0)
+#define DCTLINFO_V2_W3_SEQ3 GENMASK(23, 12)
+#define DCTLINFO_V2_W3_TGT_IND GENMASK(27, 24)
+#define DCTLINFO_V2_W3_TGT_IND_EN BIT(28)
+#define DCTLINFO_V2_W3_HTC_LB GENMASK(31, 29)
+#define DCTLINFO_V2_W3_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W4_VLAN_TAG_SEL GENMASK(7, 5)
+#define DCTLINFO_V2_W4_HTC_ORDER BIT(8)
+#define DCTLINFO_V2_W4_SEC_KEY_ID GENMASK(10, 9)
+#define DCTLINFO_V2_W4_VLAN_RX_DYNAMIC_PCP_EN BIT(11)
+#define DCTLINFO_V2_W4_VLAN_RX_PKT_DROP BIT(12)
+#define DCTLINFO_V2_W4_VLAN_RX_VALID BIT(13)
+#define DCTLINFO_V2_W4_VLAN_TX_VALID BIT(14)
+#define DCTLINFO_V2_W4_WAPI BIT(15)
+#define DCTLINFO_V2_W4_SEC_ENT_MODE GENMASK(17, 16)
+#define DCTLINFO_V2_W4_SEC_ENT0_KEYID GENMASK(19, 18)
+#define DCTLINFO_V2_W4_SEC_ENT1_KEYID GENMASK(21, 20)
+#define DCTLINFO_V2_W4_SEC_ENT2_KEYID GENMASK(23, 22)
+#define DCTLINFO_V2_W4_SEC_ENT3_KEYID GENMASK(25, 24)
+#define DCTLINFO_V2_W4_SEC_ENT4_KEYID GENMASK(27, 26)
+#define DCTLINFO_V2_W4_SEC_ENT5_KEYID GENMASK(29, 28)
+#define DCTLINFO_V2_W4_SEC_ENT6_KEYID GENMASK(31, 30)
+#define DCTLINFO_V2_W4_ALL GENMASK(31, 5)
+#define DCTLINFO_V2_W5_SEC_ENT7_KEYID GENMASK(1, 0)
+#define DCTLINFO_V2_W5_SEC_ENT8_KEYID GENMASK(3, 2)
+#define DCTLINFO_V2_W5_SEC_ENT_VALID_V1 GENMASK(23, 8)
+#define DCTLINFO_V2_W5_SEC_ENT0_V1 GENMASK(31, 24)
+#define DCTLINFO_V2_W5_ALL (GENMASK(31, 8) | GENMASK(3, 0))
+#define DCTLINFO_V2_W6_SEC_ENT1_V1 GENMASK(7, 0)
+#define DCTLINFO_V2_W6_SEC_ENT2_V1 GENMASK(15, 8)
+#define DCTLINFO_V2_W6_SEC_ENT3_V1 GENMASK(23, 16)
+#define DCTLINFO_V2_W6_SEC_ENT4_V1 GENMASK(31, 24)
+#define DCTLINFO_V2_W6_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W7_SEC_ENT5_V1 GENMASK(7, 0)
+#define DCTLINFO_V2_W7_SEC_ENT6_V1 GENMASK(15, 8)
+#define DCTLINFO_V2_W7_SEC_ENT7 GENMASK(23, 16)
+#define DCTLINFO_V2_W7_SEC_ENT8 GENMASK(31, 24)
+#define DCTLINFO_V2_W7_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W8_MLD_SMA_L_V1 GENMASK(31, 0)
+#define DCTLINFO_V2_W8_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W9_MLD_SMA_H_V1 GENMASK(15, 0)
+#define DCTLINFO_V2_W9_MLD_TMA_L_V1 GENMASK(31, 16)
+#define DCTLINFO_V2_W9_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W10_MLD_TMA_H_V1 GENMASK(31, 0)
+#define DCTLINFO_V2_W10_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W11_MLD_TA_BSSID_L_V1 GENMASK(31, 0)
+#define DCTLINFO_V2_W11_ALL GENMASK(31, 0)
+#define DCTLINFO_V2_W12_MLD_TA_BSSID_H_V1 GENMASK(15, 0)
+#define DCTLINFO_V2_W12_ALL GENMASK(15, 0)
+
int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
@@ -373,6 +478,10 @@ void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta,
u8 *cmd);
+void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c);
int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, u8 *cmd);
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index cbf6821af6b8..051a3cad6101 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -212,33 +212,68 @@ void rtw89_entity_init(struct rtw89_dev *rtwdev)
rtw89_config_default_chandef(rtwdev);
}
+static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev,
+ struct rtw89_entity_weight *w)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chanctx_cfg *cfg;
+ struct rtw89_vif *rtwvif;
+ int idx;
+
+ for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) {
+ cfg = hal->sub[idx].cfg;
+ if (!cfg) {
+ /* doesn't run with chanctx ops; one channel at most */
+ w->active_chanctxs = 1;
+ break;
+ }
+
+ if (cfg->ref_count > 0)
+ w->active_chanctxs++;
+ }
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (rtwvif->chanctx_assigned)
+ w->active_roles++;
+ }
+}
+
enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
{
+ DECLARE_BITMAP(recalc_map, NUM_OF_RTW89_SUB_ENTITY) = {};
struct rtw89_hal *hal = &rtwdev->hal;
const struct cfg80211_chan_def *chandef;
+ struct rtw89_entity_weight w = {};
enum rtw89_entity_mode mode;
struct rtw89_chan chan;
- u8 weight;
- u8 last;
u8 idx;
lockdep_assert_held(&rtwdev->mutex);
- weight = bitmap_weight(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
- switch (weight) {
+ bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+
+ rtw89_entity_calculate_weight(rtwdev, &w);
+ switch (w.active_chanctxs) {
default:
- rtw89_warn(rtwdev, "unknown ent chan weight: %d\n", weight);
- bitmap_zero(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY);
+ rtw89_warn(rtwdev, "unknown ent chanctxs weight: %d\n",
+ w.active_chanctxs);
+ bitmap_zero(recalc_map, NUM_OF_RTW89_SUB_ENTITY);
fallthrough;
case 0:
rtw89_config_default_chandef(rtwdev);
+ set_bit(RTW89_SUB_ENTITY_0, recalc_map);
fallthrough;
case 1:
- last = RTW89_SUB_ENTITY_0;
mode = RTW89_ENTITY_MODE_SCC;
break;
- case 2:
- last = RTW89_SUB_ENTITY_1;
+ case 2 ... NUM_OF_RTW89_SUB_ENTITY:
+ if (w.active_roles != NUM_OF_RTW89_MCC_ROLES) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "unhandled ent: %d chanctxs %d roles\n",
+ w.active_chanctxs, w.active_roles);
+ return RTW89_ENTITY_MODE_UNHANDLED;
+ }
+
mode = rtw89_get_entity_mode(rtwdev);
if (mode == RTW89_ENTITY_MODE_MCC)
break;
@@ -247,7 +282,7 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev)
break;
}
- for (idx = 0; idx <= last; idx++) {
+ for_each_set_bit(idx, recalc_map, NUM_OF_RTW89_SUB_ENTITY) {
chandef = rtw89_chandef_get(rtwdev, idx);
rtw89_get_channel_params(chandef, &chan);
if (chan.channel == 0) {
@@ -287,6 +322,13 @@ static void rtw89_chanctx_notify(struct rtw89_dev *rtwdev,
}
}
+static bool rtw89_concurrent_via_mrc(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
+
+ return chip_gen == RTW89_CHIP_BE;
+}
+
/* This function centrally manages how MCC roles are sorted and iterated.
* And, it guarantees that ordered_idx is less than NUM_OF_RTW89_MCC_ROLES.
* So, if data needs to pass an array for ordered_idx, the array can declare
@@ -320,19 +362,12 @@ int rtw89_iterate_mcc_roles(struct rtw89_dev *rtwdev,
return 0;
}
-/* For now, IEEE80211_HW_TIMING_BEACON_ONLY can make things simple to ensure
- * correctness of MCC calculation logic below. We have noticed that once driver
- * declares WIPHY_FLAG_SUPPORTS_MLO, the use of IEEE80211_HW_TIMING_BEACON_ONLY
- * will be restricted. We will make an alternative in driver when it is ready
- * for MLO.
- */
static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *role, u64 tsf)
{
struct rtw89_vif *rtwvif = role->rtwvif;
- struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
u32 bcn_intvl_us = ieee80211_tu_to_usec(role->beacon_interval);
- u64 sync_tsf = vif->bss_conf.sync_tsf;
+ u64 sync_tsf = READ_ONCE(rtwvif->sync_bcn_tsf);
u32 remainder;
if (tsf < sync_tsf) {
@@ -346,16 +381,13 @@ static u32 rtw89_mcc_get_tbtt_ofst(struct rtw89_dev *rtwdev,
return remainder;
}
-static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
+static int __mcc_fw_req_tsf(struct rtw89_dev *rtwdev, u64 *tsf_ref, u64 *tsf_aux)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role *ref = &mcc->role_ref;
struct rtw89_mcc_role *aux = &mcc->role_aux;
struct rtw89_mac_mcc_tsf_rpt rpt = {};
struct rtw89_fw_mcc_tsf_req req = {};
- u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
- u32 tbtt_ofst_ref, tbtt_ofst_aux;
- u64 tsf_ref, tsf_aux;
int ret;
req.group = mcc->group;
@@ -365,11 +397,63 @@ static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
if (ret) {
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC h2c failed to request tsf: %d\n", ret);
- return RTW89_MCC_DFLT_BCN_OFST_TIME;
+ return ret;
+ }
+
+ *tsf_ref = (u64)rpt.tsf_x_high << 32 | rpt.tsf_x_low;
+ *tsf_aux = (u64)rpt.tsf_y_high << 32 | rpt.tsf_y_low;
+
+ return 0;
+}
+
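The report carries each TSF as two 32-bit halves, and the promotion-before-shift in the combine step matters. A standalone illustration (parameter names hypothetical):

    #include <stdint.h>

    static uint64_t tsf_from_halves(uint32_t hi, uint32_t lo)
    {
            /* Cast before shifting: a bare "hi << 32" would shift a
             * 32-bit value by its full width, which is undefined
             * behavior in C.
             */
            return (uint64_t)hi << 32 | lo;
    }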
+static int __mrc_fw_req_tsf(struct rtw89_dev *rtwdev, u64 *tsf_ref, u64 *tsf_aux)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_fw_mrc_req_tsf_arg arg = {};
+ struct rtw89_mac_mrc_tsf_rpt rpt = {};
+ int ret;
+
+ BUILD_BUG_ON(RTW89_MAC_MRC_MAX_REQ_TSF_NUM < NUM_OF_RTW89_MCC_ROLES);
+
+ arg.num = 2;
+ arg.infos[0].band = ref->rtwvif->mac_idx;
+ arg.infos[0].port = ref->rtwvif->port;
+ arg.infos[1].band = aux->rtwvif->mac_idx;
+ arg.infos[1].port = aux->rtwvif->port;
+
+ ret = rtw89_fw_h2c_mrc_req_tsf(rtwdev, &arg, &rpt);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to request tsf: %d\n", ret);
+ return ret;
}
- tsf_ref = (u64)rpt.tsf_x_high << 32 | rpt.tsf_x_low;
- tsf_aux = (u64)rpt.tsf_y_high << 32 | rpt.tsf_y_low;
+ *tsf_ref = rpt.tsfs[0];
+ *tsf_aux = rpt.tsfs[1];
+
+ return 0;
+}
+
+static u16 rtw89_mcc_get_bcn_ofst(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ u32 bcn_intvl_ref_us = ieee80211_tu_to_usec(ref->beacon_interval);
+ u32 tbtt_ofst_ref, tbtt_ofst_aux;
+ u64 tsf_ref, tsf_aux;
+ int ret;
+
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_req_tsf(rtwdev, &tsf_ref, &tsf_aux);
+ else
+ ret = __mcc_fw_req_tsf(rtwdev, &tsf_ref, &tsf_aux);
+
+ if (ret)
+ return RTW89_MCC_DFLT_BCN_OFST_TIME;
+
tbtt_ofst_ref = rtw89_mcc_get_tbtt_ofst(rtwdev, ref, tsf_ref);
tbtt_ofst_aux = rtw89_mcc_get_tbtt_ofst(rtwdev, aux, tsf_aux);
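Conceptually, rtw89_mcc_get_tbtt_ofst() computes how far a TSF sits past the last synchronized beacon, modulo the beacon interval. A simplified userspace sketch, assuming tsf >= sync_tsf (the driver additionally handles the wrap case and uses div_u64_rem() for the 64-bit division):

    #include <stdint.h>

    static uint32_t tbtt_offset_us(uint64_t tsf, uint64_t sync_tsf,
                                   uint32_t bcn_intvl_us)
    {
            /* Microseconds elapsed since the last expected TBTT. */
            return (uint32_t)((tsf - sync_tsf) % bcn_intvl_us);
    }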
@@ -392,6 +476,28 @@ void rtw89_mcc_role_fw_macid_bitmap_set_bit(struct rtw89_mcc_role *mcc_role,
mcc_role->macid_bitmap[idx] |= BIT(pos);
}
+static
+u32 rtw89_mcc_role_fw_macid_bitmap_to_u32(struct rtw89_mcc_role *mcc_role)
+{
+ unsigned int macid;
+ unsigned int i, j;
+ u32 bitmap = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mcc_role->macid_bitmap); i++) {
+ for (j = 0; j < 8; j++) {
+ macid = i * 8 + j;
+ if (macid >= 32)
+ goto out;
+
+ if (mcc_role->macid_bitmap[i] & BIT(j))
+ bitmap |= BIT(macid);
+ }
+ }
+
+out:
+ return bitmap;
+}
+
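A quick usage sketch of the conversion above (values hypothetical): byte i of macid_bitmap holds macids i*8..i*8+7, so the helper effectively flattens the first four bytes into one u32.

    struct rtw89_mcc_role role = {};

    role.macid_bitmap[0] = BIT(3);  /* macid 3 */
    role.macid_bitmap[1] = BIT(1);  /* macid 1 * 8 + 1 = 9 */

    /* rtw89_mcc_role_fw_macid_bitmap_to_u32(&role) == (BIT(3) | BIT(9)) */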
static void rtw89_mcc_role_macid_sta_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
@@ -588,6 +694,9 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev)
int ret;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (!rtwvif->chanctx_assigned)
+ continue;
+
if (sel.bind_vif[rtwvif->sub_entity_idx]) {
rtw89_warn(rtwdev,
"MCC skip extra vif <macid %d> on chanctx[%d]\n",
@@ -1150,7 +1259,11 @@ static void rtw89_mcc_sync_tbtt(struct rtw89_dev *rtwdev,
tsf_ofst_tgt = bcn_intvl_src_us - remainder;
config->sync.macid_tgt = tgt->rtwvif->mac_id;
+ config->sync.band_tgt = tgt->rtwvif->mac_idx;
+ config->sync.port_tgt = tgt->rtwvif->port;
config->sync.macid_src = src->rtwvif->mac_id;
+ config->sync.band_src = src->rtwvif->mac_idx;
+ config->sync.port_src = src->rtwvif->port;
config->sync.offset = tsf_ofst_tgt / 1024;
config->sync.enable = true;
@@ -1297,6 +1410,37 @@ static int __mcc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *ro
return 0;
}
+static
+void __mrc_fw_add_role(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *role,
+ struct rtw89_fw_mrc_add_arg *arg, u8 slot_idx)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_policy *policy = &role->policy;
+ struct rtw89_fw_mrc_add_slot_arg *slot_arg;
+ const struct rtw89_chan *chan;
+
+ slot_arg = &arg->slots[slot_idx];
+ role->slot_idx = slot_idx;
+
+ slot_arg->duration = role->duration;
+ slot_arg->role_num = 1;
+
+ chan = rtw89_chan_get(rtwdev, role->rtwvif->sub_entity_idx);
+
+ slot_arg->roles[0].role_type = RTW89_H2C_MRC_ROLE_WIFI;
+ slot_arg->roles[0].is_master = role == ref;
+ slot_arg->roles[0].band = chan->band_type;
+ slot_arg->roles[0].bw = chan->band_width;
+ slot_arg->roles[0].central_ch = chan->channel;
+ slot_arg->roles[0].primary_ch = chan->primary_channel;
+ slot_arg->roles[0].en_tx_null = !policy->dis_tx_null;
+ slot_arg->roles[0].null_early = policy->tx_null_early;
+ slot_arg->roles[0].macid = role->rtwvif->mac_id;
+ slot_arg->roles[0].macid_main_bitmap =
+ rtw89_mcc_role_fw_macid_bitmap_to_u32(role);
+}
+
static int __mcc_fw_add_bt_role(struct rtw89_dev *rtwdev)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1318,6 +1462,20 @@ static int __mcc_fw_add_bt_role(struct rtw89_dev *rtwdev)
return 0;
}
+static
+void __mrc_fw_add_bt_role(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_mrc_add_arg *arg, u8 slot_idx)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_bt_role *bt_role = &mcc->bt_role;
+ struct rtw89_fw_mrc_add_slot_arg *slot_arg = &arg->slots[slot_idx];
+
+ slot_arg->duration = bt_role->duration;
+ slot_arg->role_num = 1;
+
+ slot_arg->roles[0].role_type = RTW89_H2C_MRC_ROLE_BT;
+}
+
static int __mcc_fw_start(struct rtw89_dev *rtwdev, bool replace)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1403,6 +1561,130 @@ static int __mcc_fw_start(struct rtw89_dev *rtwdev, bool replace)
return 0;
}
+static void __mrc_fw_add_courtesy(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_mrc_add_arg *arg)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_pattern *pattern = &config->pattern;
+ struct rtw89_mcc_courtesy *courtesy = &pattern->courtesy;
+ struct rtw89_fw_mrc_add_slot_arg *slot_arg_src;
+ u8 slot_idx_tgt;
+
+ if (!courtesy->enable)
+ return;
+
+ if (courtesy->macid_src == ref->rtwvif->mac_id) {
+ slot_arg_src = &arg->slots[ref->slot_idx];
+ slot_idx_tgt = aux->slot_idx;
+ } else {
+ slot_arg_src = &arg->slots[aux->slot_idx];
+ slot_idx_tgt = ref->slot_idx;
+ }
+
+ slot_arg_src->courtesy_target = slot_idx_tgt;
+ slot_arg_src->courtesy_period = courtesy->slot_num;
+ slot_arg_src->courtesy_en = true;
+}
+
+static int __mrc_fw_start(struct rtw89_dev *rtwdev, bool replace)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_pattern *pattern = &config->pattern;
+ struct rtw89_mcc_sync *sync = &config->sync;
+ struct rtw89_fw_mrc_start_arg start_arg = {};
+ struct rtw89_fw_mrc_add_arg add_arg = {};
+ int ret;
+
+ BUILD_BUG_ON(RTW89_MAC_MRC_MAX_ADD_SLOT_NUM <
+ NUM_OF_RTW89_MCC_ROLES + 1 /* bt role */);
+
+ if (replace) {
+ start_arg.old_sch_idx = mcc->group;
+ start_arg.action = RTW89_H2C_MRC_START_ACTION_REPLACE_OLD;
+ mcc->group = RTW89_MCC_NEXT_GROUP(mcc->group);
+ }
+
+ add_arg.sch_idx = mcc->group;
+ add_arg.sch_type = RTW89_H2C_MRC_SCH_BAND0_ONLY;
+
+ switch (pattern->plan) {
+ case RTW89_MCC_PLAN_TAIL_BT:
+ __mrc_fw_add_role(rtwdev, ref, &add_arg, 0);
+ __mrc_fw_add_role(rtwdev, aux, &add_arg, 1);
+ __mrc_fw_add_bt_role(rtwdev, &add_arg, 2);
+
+ add_arg.slot_num = 3;
+ add_arg.btc_in_sch = true;
+ break;
+ case RTW89_MCC_PLAN_MID_BT:
+ __mrc_fw_add_role(rtwdev, ref, &add_arg, 0);
+ __mrc_fw_add_bt_role(rtwdev, &add_arg, 1);
+ __mrc_fw_add_role(rtwdev, aux, &add_arg, 2);
+
+ add_arg.slot_num = 3;
+ add_arg.btc_in_sch = true;
+ break;
+ case RTW89_MCC_PLAN_NO_BT:
+ __mrc_fw_add_role(rtwdev, ref, &add_arg, 0);
+ __mrc_fw_add_role(rtwdev, aux, &add_arg, 1);
+
+ add_arg.slot_num = 2;
+ add_arg.btc_in_sch = false;
+ break;
+ default:
+ rtw89_warn(rtwdev, "MCC unknown plan: %d\n", pattern->plan);
+ return -EFAULT;
+ }
+
+ __mrc_fw_add_courtesy(rtwdev, &add_arg);
+
+ ret = rtw89_fw_h2c_mrc_add(rtwdev, &add_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger add: %d\n", ret);
+ return ret;
+ }
+
+ if (sync->enable) {
+ struct rtw89_fw_mrc_sync_arg sync_arg = {
+ .offset = sync->offset,
+ .src = {
+ .band = sync->band_src,
+ .port = sync->port_src,
+ },
+ .dest = {
+ .band = sync->band_tgt,
+ .port = sync->port_tgt,
+ },
+ };
+
+ ret = rtw89_fw_h2c_mrc_sync(rtwdev, &sync_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger sync: %d\n", ret);
+ return ret;
+ }
+ }
+
+ start_arg.sch_idx = mcc->group;
+ start_arg.start_tsf = config->start_tsf;
+
+ ret = rtw89_fw_h2c_mrc_start(rtwdev, &start_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger start: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
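The BUILD_BUG_ON() at the top of __mrc_fw_start() is a compile-time capacity check: every MCC role plus the optional BT slot must fit in one MRC add request. Outside the kernel the same guard can be written with C11 (message text illustrative):

    _Static_assert(RTW89_MAC_MRC_MAX_ADD_SLOT_NUM >=
                   NUM_OF_RTW89_MCC_ROLES + 1 /* bt role */,
                   "MRC slot array too small for two wifi roles plus BT");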
static int __mcc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_changed)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1444,6 +1726,60 @@ static int __mcc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_chang
return 0;
}
+static int __mrc_fw_set_duration_no_bt(struct rtw89_dev *rtwdev, bool sync_changed)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_mcc_config *config = &mcc->config;
+ struct rtw89_mcc_sync *sync = &config->sync;
+ struct rtw89_mcc_role *ref = &mcc->role_ref;
+ struct rtw89_mcc_role *aux = &mcc->role_aux;
+ struct rtw89_fw_mrc_upd_duration_arg dur_arg = {
+ .sch_idx = mcc->group,
+ .start_tsf = config->start_tsf,
+ .slot_num = 2,
+ .slots[0] = {
+ .slot_idx = ref->slot_idx,
+ .duration = ref->duration,
+ },
+ .slots[1] = {
+ .slot_idx = aux->slot_idx,
+ .duration = aux->duration,
+ },
+ };
+ struct rtw89_fw_mrc_sync_arg sync_arg = {
+ .offset = sync->offset,
+ .src = {
+ .band = sync->band_src,
+ .port = sync->port_src,
+ },
+ .dest = {
+ .band = sync->band_tgt,
+ .port = sync->port_tgt,
+ },
+ };
+ int ret;
+
+ ret = rtw89_fw_h2c_mrc_upd_duration(rtwdev, &dur_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to update duration: %d\n", ret);
+ return ret;
+ }
+
+ if (!sync->enable || !sync_changed)
+ return 0;
+
+ ret = rtw89_fw_h2c_mrc_sync(rtwdev, &sync_arg);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger sync: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
{
struct rtw89_mcc_info *mcc = &rtwdev->mcc;
@@ -1494,7 +1830,7 @@ static void rtw89_mcc_handle_beacon_noa(struct rtw89_dev *rtwdev, bool enable)
if (!rtwvif_go->chanctx_assigned)
return;
- rtw89_fw_h2c_update_beacon(rtwdev, rtwvif_go);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_go);
}
static void rtw89_mcc_start_beacon_noa(struct rtw89_dev *rtwdev)
@@ -1562,7 +1898,11 @@ static int rtw89_mcc_start(struct rtw89_dev *rtwdev)
if (ret)
return ret;
- ret = __mcc_fw_start(rtwdev, false);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_start(rtwdev, false);
+ else
+ ret = __mcc_fw_start(rtwdev, false);
+
if (ret)
return ret;
@@ -1580,16 +1920,23 @@ static void rtw89_mcc_stop(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_CHAN, "MCC stop\n");
- ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group,
- ref->rtwvif->mac_id, true);
- if (ret)
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC h2c failed to trigger stop: %d\n", ret);
+ if (rtw89_concurrent_via_mrc(rtwdev)) {
+ ret = rtw89_fw_h2c_mrc_del(rtwdev, mcc->group);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to trigger del: %d\n", ret);
+ } else {
+ ret = rtw89_fw_h2c_stop_mcc(rtwdev, mcc->group,
+ ref->rtwvif->mac_id, true);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC h2c failed to trigger stop: %d\n", ret);
- ret = rtw89_fw_h2c_del_mcc_group(rtwdev, mcc->group, true);
- if (ret)
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC h2c failed to delete group: %d\n", ret);
+ ret = rtw89_fw_h2c_del_mcc_group(rtwdev, mcc->group, true);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC h2c failed to delete group: %d\n", ret);
+ }
rtw89_chanctx_notify(rtwdev, RTW89_CHANCTX_STATE_MCC_STOP);
@@ -1615,7 +1962,11 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
if (old_cfg.pattern.plan != RTW89_MCC_PLAN_NO_BT ||
config->pattern.plan != RTW89_MCC_PLAN_NO_BT) {
- ret = __mcc_fw_start(rtwdev, true);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_start(rtwdev, true);
+ else
+ ret = __mcc_fw_start(rtwdev, true);
+
if (ret)
return ret;
} else {
@@ -1624,7 +1975,11 @@ static int rtw89_mcc_update(struct rtw89_dev *rtwdev)
else
sync_changed = true;
- ret = __mcc_fw_set_duration_no_bt(rtwdev, sync_changed);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_set_duration_no_bt(rtwdev, sync_changed);
+ else
+ ret = __mcc_fw_set_duration_no_bt(rtwdev, sync_changed);
+
if (ret)
return ret;
}
@@ -1666,12 +2021,75 @@ static void rtw89_mcc_track(struct rtw89_dev *rtwdev)
rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_BCN_OFFSET_CHANGE);
}
+static int __mcc_fw_upd_macid_bitmap(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *upd)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ int ret;
+
+ ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
+ upd->rtwvif->mac_id,
+ upd->macid_bitmap);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC h2c failed to update macid bitmap: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __mrc_fw_upd_macid_bitmap(struct rtw89_dev *rtwdev,
+ struct rtw89_mcc_role *cur,
+ struct rtw89_mcc_role *upd)
+{
+ struct rtw89_mcc_info *mcc = &rtwdev->mcc;
+ struct rtw89_fw_mrc_upd_bitmap_arg arg = {};
+ u32 old = rtw89_mcc_role_fw_macid_bitmap_to_u32(cur);
+ u32 new = rtw89_mcc_role_fw_macid_bitmap_to_u32(upd);
+ u32 add = new & ~old;
+ u32 del = old & ~new;
+ int ret;
+ int i;
+
+ arg.sch_idx = mcc->group;
+ arg.macid = upd->rtwvif->mac_id;
+
+ for (i = 0; i < 32; i++) {
+ if (add & BIT(i)) {
+ arg.client_macid = i;
+ arg.action = RTW89_H2C_MRC_UPD_BITMAP_ACTION_ADD;
+
+ ret = rtw89_fw_h2c_mrc_upd_bitmap(rtwdev, &arg);
+ if (ret)
+ goto err;
+ }
+ }
+
+ for (i = 0; i < 32; i++) {
+ if (del & BIT(i)) {
+ arg.client_macid = i;
+ arg.action = RTW89_H2C_MRC_UPD_BITMAP_ACTION_DEL;
+
+ ret = rtw89_fw_h2c_mrc_upd_bitmap(rtwdev, &arg);
+ if (ret)
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC h2c failed to update bitmap: %d\n", ret);
+ return ret;
+}
+
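The add/del masks above are the classic set-difference-by-bitmask idiom; in isolation (values hypothetical):

    #include <stdint.h>
    #include <assert.h>

    static void delta_example(void)
    {
            uint32_t old = 0x0000000f;  /* macids 0-3 known to firmware */
            uint32_t new = 0x0000001e;  /* macids 1-4 wanted now */

            assert((new & ~old) == 0x10);  /* only macid 4 is added */
            assert((old & ~new) == 0x01);  /* only macid 0 is deleted */
    }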
static int rtw89_mcc_upd_map_iterator(struct rtw89_dev *rtwdev,
struct rtw89_mcc_role *mcc_role,
unsigned int ordered_idx,
void *data)
{
- struct rtw89_mcc_info *mcc = &rtwdev->mcc;
struct rtw89_mcc_role upd = {
.rtwvif = mcc_role->rtwvif,
};
@@ -1685,14 +2103,13 @@ static int rtw89_mcc_upd_map_iterator(struct rtw89_dev *rtwdev,
sizeof(mcc_role->macid_bitmap)) == 0)
return 0;
- ret = rtw89_fw_h2c_mcc_macid_bitmap(rtwdev, mcc->group,
- upd.rtwvif->mac_id,
- upd.macid_bitmap);
- if (ret) {
- rtw89_debug(rtwdev, RTW89_DBG_CHAN,
- "MCC h2c failed to update macid bitmap: %d\n", ret);
+ if (rtw89_concurrent_via_mrc(rtwdev))
+ ret = __mrc_fw_upd_macid_bitmap(rtwdev, mcc_role, &upd);
+ else
+ ret = __mcc_fw_upd_macid_bitmap(rtwdev, &upd);
+
+ if (ret)
return ret;
- }
memcpy(mcc_role->macid_bitmap, upd.macid_bitmap,
sizeof(mcc_role->macid_bitmap));
@@ -1900,6 +2317,41 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev)
rtw89_queue_chanctx_work(rtwdev);
}
+static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev,
+ enum rtw89_sub_entity_idx idx1,
+ enum rtw89_sub_entity_idx idx2)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_sub_entity tmp;
+ struct rtw89_vif *rtwvif;
+ u8 cur;
+
+ if (idx1 == idx2)
+ return;
+
+ hal->sub[idx1].cfg->idx = idx2;
+ hal->sub[idx2].cfg->idx = idx1;
+
+ tmp = hal->sub[idx1];
+ hal->sub[idx1] = hal->sub[idx2];
+ hal->sub[idx2] = tmp;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ if (!rtwvif->chanctx_assigned)
+ continue;
+ if (rtwvif->sub_entity_idx == idx1)
+ rtwvif->sub_entity_idx = idx2;
+ else if (rtwvif->sub_entity_idx == idx2)
+ rtwvif->sub_entity_idx = idx1;
+ }
+
+ cur = atomic_read(&hal->roc_entity_idx);
+ if (cur == idx1)
+ atomic_set(&hal->roc_entity_idx, idx2);
+ else if (cur == idx2)
+ atomic_set(&hal->roc_entity_idx, idx1);
+}
+
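Schematically, the swap keeps a two-way mapping consistent: array slots hold cfg pointers, and each cfg records its slot index. A reduced form (types hypothetical):

    struct cfg { int idx; };
    struct slot { struct cfg *cfg; };

    static void swap_slots(struct slot *s, int a, int b)
    {
            struct slot tmp;

            /* Fix the back-references (order is not critical here
             * since the cfg objects themselves do not move), then
             * exchange the array entries.
             */
            s[a].cfg->idx = b;
            s[b].cfg->idx = a;

            tmp = s[a];
            s[a] = s[b];
            s[b] = tmp;
    }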
int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
struct ieee80211_chanctx_conf *ctx)
{
@@ -1913,8 +2365,8 @@ int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev,
return -ENOENT;
rtw89_config_entity_chandef(rtwdev, idx, &ctx->def);
- rtw89_set_channel(rtwdev);
cfg->idx = idx;
+ cfg->ref_count = 0;
hal->sub[idx].cfg = cfg;
return 0;
}
@@ -1924,47 +2376,8 @@ void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev,
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
- enum rtw89_entity_mode mode;
- struct rtw89_vif *rtwvif;
- u8 drop, roll;
-
- drop = cfg->idx;
- if (drop != RTW89_SUB_ENTITY_0)
- goto out;
- roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY, drop + 1);
-
- /* Follow rtw89_config_default_chandef() when rtw89_entity_recalc(). */
- if (roll == NUM_OF_RTW89_SUB_ENTITY)
- goto out;
-
- /* RTW89_SUB_ENTITY_0 is going to release, and another exists.
- * Make another roll down to RTW89_SUB_ENTITY_0 to replace.
- */
- hal->sub[roll].cfg->idx = RTW89_SUB_ENTITY_0;
- hal->sub[RTW89_SUB_ENTITY_0] = hal->sub[roll];
-
- rtw89_for_each_rtwvif(rtwdev, rtwvif) {
- if (rtwvif->sub_entity_idx == roll)
- rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
- }
-
- atomic_cmpxchg(&hal->roc_entity_idx, roll, RTW89_SUB_ENTITY_0);
-
- drop = roll;
-
-out:
- mode = rtw89_get_entity_mode(rtwdev);
- switch (mode) {
- case RTW89_ENTITY_MODE_MCC:
- rtw89_mcc_stop(rtwdev);
- break;
- default:
- break;
- }
-
- clear_bit(drop, hal->entity_map);
- rtw89_set_channel(rtwdev);
+ clear_bit(cfg->idx, hal->entity_map);
}
void rtw89_chanctx_ops_change(struct rtw89_dev *rtwdev,
@@ -1985,16 +2398,73 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev,
struct ieee80211_chanctx_conf *ctx)
{
struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ struct rtw89_entity_weight w = {};
rtwvif->sub_entity_idx = cfg->idx;
rtwvif->chanctx_assigned = true;
- return 0;
+ cfg->ref_count++;
+
+ if (cfg->idx == RTW89_SUB_ENTITY_0)
+ goto out;
+
+ rtw89_entity_calculate_weight(rtwdev, &w);
+ if (w.active_chanctxs != 1)
+ goto out;
+
+ /* put the first active chanctx at RTW89_SUB_ENTITY_0 */
+ rtw89_swap_sub_entity(rtwdev, cfg->idx, RTW89_SUB_ENTITY_0);
+
+out:
+ return rtw89_set_channel(rtwdev);
}
void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct ieee80211_chanctx_conf *ctx)
{
+ struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_entity_weight w = {};
+ enum rtw89_sub_entity_idx roll;
+ enum rtw89_entity_mode cur;
+
rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
rtwvif->chanctx_assigned = false;
+ cfg->ref_count--;
+
+ if (cfg->ref_count != 0)
+ goto out;
+
+ if (cfg->idx != RTW89_SUB_ENTITY_0)
+ goto out;
+
+ roll = find_next_bit(hal->entity_map, NUM_OF_RTW89_SUB_ENTITY,
+ cfg->idx + 1);
+ /* Follow rtw89_config_default_chandef() during rtw89_entity_recalc(). */
+ if (roll == NUM_OF_RTW89_SUB_ENTITY)
+ goto out;
+
+ /* RTW89_SUB_ENTITY_0 is going to release, and another exists.
+ * Make another roll down to RTW89_SUB_ENTITY_0 to replace.
+ */
+ rtw89_swap_sub_entity(rtwdev, cfg->idx, roll);
+
+out:
+ rtw89_entity_calculate_weight(rtwdev, &w);
+
+ cur = rtw89_get_entity_mode(rtwdev);
+ switch (cur) {
+ case RTW89_ENTITY_MODE_MCC:
+ /* If still multi-roles, re-plan MCC for chanctx changes.
+ * Otherwise, just stop MCC.
+ */
+ rtw89_mcc_stop(rtwdev);
+ if (w.active_roles == NUM_OF_RTW89_MCC_ROLES)
+ rtw89_mcc_start(rtwdev);
+ break;
+ default:
+ break;
+ }
+
+ rtw89_set_channel(rtwdev);
}
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index 9b98d8f4ee9d..ffa412f281f3 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -38,6 +38,11 @@ enum rtw89_chanctx_pause_reasons {
RTW89_CHANCTX_PAUSE_REASON_ROC,
};
+struct rtw89_entity_weight {
+ unsigned int active_chanctxs;
+ unsigned int active_roles;
+};
+
static inline bool rtw89_get_entity_state(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index f37afb4cbb63..d9b66d43f32e 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -129,68 +129,75 @@ static const u32 cxtbl[] = {
static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
/* firmware version must be in decreasing order for each chip */
+ {RTL8922A, RTW89_FW_VER_CODE(0, 35, 8, 0),
+ .fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
+ .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
+ .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
+ .fwlrole = 2, .frptmap = 7, .fcxctrl = 7, .fcxinit = 7,
+ .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6,
+ },
{RTL8851B, RTW89_FW_VER_CODE(0, 29, 29, 0),
.fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 2, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1800, .max_role_num = 6,
+ .fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 57, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 42, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 2, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 0, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 2, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 29, 29, 0),
.fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 2, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1800, .max_role_num = 6,
+ .fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 29, 14, 0),
.fcxbtcrpt = 5, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 4,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1800, .max_role_num = 6,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 27, 0, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 1, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 1, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852A, RTW89_FW_VER_CODE(0, 13, 37, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
- .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
- .info_buf = 1280, .max_role_num = 5,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852A, RTW89_FW_VER_CODE(0, 13, 0, 0),
.fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
.fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
- .fwlrole = 0, .frptmap = 0, .fcxctrl = 0,
- .info_buf = 1024, .max_role_num = 5,
+ .fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
},
/* keep it to be the last as default entry */
@@ -198,8 +205,8 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
.fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
- .fwlrole = 0, .frptmap = 0, .fcxctrl = 0,
- .info_buf = 1024, .max_role_num = 5,
+ .fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0,
+ .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
},
};
@@ -351,17 +358,26 @@ enum btc_cx_poicy_type {
/* TDMA off + pri: WL_Rx = BT, BT_HI > WL_Tx > BT_Lo */
BTC_CXP_OFF_EQ3 = (BTC_CXP_OFF << 8) | 5,
+ /* TDMA off + pri: WL_Rx = BT, BT_HI > WL_Tx > BT_Lo */
+ BTC_CXP_OFF_EQ4 = (BTC_CXP_OFF << 8) | 6,
+
+ /* TDMA off + pri: WL_Rx = BT, BT_HI > WL_Tx > BT_Lo */
+ BTC_CXP_OFF_EQ5 = (BTC_CXP_OFF << 8) | 7,
+
/* TDMA off + pri: BT_Hi > WL > BT_Lo */
- BTC_CXP_OFF_BWB0 = (BTC_CXP_OFF << 8) | 6,
+ BTC_CXP_OFF_BWB0 = (BTC_CXP_OFF << 8) | 8,
/* TDMA off + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo */
- BTC_CXP_OFF_BWB1 = (BTC_CXP_OFF << 8) | 7,
+ BTC_CXP_OFF_BWB1 = (BTC_CXP_OFF << 8) | 9,
/* TDMA off + pri: WL_Hi-Tx > BT, BT_Hi > other-WL > BT_Lo */
- BTC_CXP_OFF_BWB2 = (BTC_CXP_OFF << 8) | 8,
+ BTC_CXP_OFF_BWB2 = (BTC_CXP_OFF << 8) | 10,
/* TDMA off + pri: WL_Hi-Tx = BT */
- BTC_CXP_OFF_BWB3 = (BTC_CXP_OFF << 8) | 9,
+ BTC_CXP_OFF_BWB3 = (BTC_CXP_OFF << 8) | 11,
+
+ /* TDMA off + pri: WL > BT, Block-BT */
+ BTC_CXP_OFF_WL2 = (BTC_CXP_OFF << 8) | 12,
/* TDMA off+Bcn-Protect + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo*/
BTC_CXP_OFFB_BWB0 = (BTC_CXP_OFFB << 8) | 0,
@@ -676,20 +692,25 @@ static void _run_coex(struct rtw89_dev *rtwdev,
static void _write_scbd(struct rtw89_dev *rtwdev, u32 val, bool state);
static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update);
-static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
- void *param, u16 len)
+static int _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
+ void *param, u16 len)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_dm *dm = &btc->dm;
int ret;
- if (!wl->status.map.init_ok) {
+ if (len > BTC_H2C_MAXLEN || len == 0) {
+ btc->fwinfo.cnt_h2c_fail++;
+ dm->error.map.h2c_buffer_over = true;
+ return -EINVAL;
+ } else if (!wl->status.map.init_ok) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): return by btc not init!!\n", __func__);
pfwinfo->cnt_h2c_fail++;
- return;
+ return -EINVAL;
} else if ((wl->status.map.rf_off_pre == BTC_LPS_RF_OFF &&
wl->status.map.rf_off == BTC_LPS_RF_OFF) ||
(wl->status.map.lps_pre == BTC_LPS_RF_OFF &&
@@ -697,20 +718,23 @@ static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): return by wl off!!\n", __func__);
pfwinfo->cnt_h2c_fail++;
- return;
+ return -EINVAL;
}
- pfwinfo->cnt_h2c++;
-
ret = rtw89_fw_h2c_raw_with_hdr(rtwdev, h2c_class, h2c_func, param, len,
false, true);
- if (ret != 0)
+ if (ret)
pfwinfo->cnt_h2c_fail++;
+ else
+ pfwinfo->cnt_h2c++;
+
+ return ret;
}
static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
@@ -728,7 +752,9 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
if (type & BTC_RESET_CTRL) {
memset(&btc->ctrl, 0, sizeof(btc->ctrl));
- btc->ctrl.trace_step = FCXDEF_STEP;
+ btc->manual_ctrl = false;
+ if (ver->fcxctrl != 7)
+ btc->ctrl.ctrl.trace_step = FCXDEF_STEP;
}
/* Init Coex variables that are not zero */
@@ -777,22 +803,27 @@ static void _get_reg_status(struct rtw89_dev *rtwdev, u8 type, u8 *val)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
- struct rtw89_btc_module *md = &btc->mdinfo;
+ union rtw89_btc_module_info *md = &btc->mdinfo;
union rtw89_btc_fbtc_mreg_val *pmreg;
u32 pre_agc_addr = R_BTC_BB_PRE_AGC_S1;
u32 reg_val;
- u8 idx;
+ u8 idx, switch_type;
- if (md->ant.btg_pos == RF_PATH_A)
+ if (ver->fcxinit == 7)
+ switch_type = md->md_v7.switch_type;
+ else
+ switch_type = md->md.switch_type;
+
+ if (btc->btg_pos == RF_PATH_A)
pre_agc_addr = R_BTC_BB_PRE_AGC_S0;
switch (type) {
case BTC_CSTATUS_TXDIV_POS:
- if (md->switch_type == BTC_SWITCH_INTERNAL)
+ if (switch_type == BTC_SWITCH_INTERNAL)
*val = BTC_ANT_DIV_MAIN;
break;
case BTC_CSTATUS_RXDIV_POS:
- if (md->switch_type == BTC_SWITCH_INTERNAL)
+ if (switch_type == BTC_SWITCH_INTERNAL)
*val = BTC_ANT_DIV_MAIN;
break;
case BTC_CSTATUS_BB_GNT_MUX:
@@ -1117,7 +1148,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
void *rpt_content = NULL, *pfinfo = NULL;
u8 rpt_type = 0;
u16 wl_slot_set = 0, wl_slot_real = 0;
- u32 trace_step = btc->ctrl.trace_step, rpt_len = 0, diff_t = 0;
+ u32 trace_step = 0, rpt_len = 0, diff_t = 0;
u32 cnt_leak_slot, bt_slot_real, bt_slot_set, cnt_rx_imr;
u8 i, val = 0;
@@ -1207,6 +1238,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
break;
case BTC_RPT_TYPE_STEP:
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
+ if (ver->fcxctrl != 7)
+ trace_step = btc->ctrl.ctrl.trace_step;
+
if (ver->fcxstep == 2) {
pfinfo = &pfwinfo->rpt_fbtc_step.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.v2.step[0]) *
@@ -1920,6 +1954,7 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *fwinfo = &btc->fwinfo;
struct rtw89_btc_btf_set_report r = {0};
u32 val, bit_map;
+ int ret;
if ((wl_smap->rf_off || wl_smap->lps != BTC_LPS_OFF) && rpt_state != 0)
return;
@@ -1938,13 +1973,13 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
if (val == fwinfo->rpt_en_map)
return;
- fwinfo->rpt_en_map = val;
-
r.fver = BTF_SET_REPORT_VER;
r.enable = cpu_to_le32(val);
r.para = cpu_to_le32(rpt_state);
- _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r, sizeof(r));
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r, sizeof(r));
+ if (!ret)
+ fwinfo->rpt_en_map = val;
}
static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev, u8 num,
@@ -2032,6 +2067,7 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
+ int ret;
dm->run_action = action;
@@ -2060,11 +2096,12 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
if (btc->lps == 1)
rtw89_set_coex_ctrl_lps(rtwdev, btc->lps);
- _send_fw_cmd(rtwdev, BTFC_SET, SET_CX_POLICY,
- btc->policy, btc->policy_len);
-
- memcpy(&dm->tdma_now, &dm->tdma, sizeof(dm->tdma_now));
- memcpy(&dm->slot_now, &dm->slot, sizeof(dm->slot_now));
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_CX_POLICY,
+ btc->policy, btc->policy_len);
+ if (!ret) {
+ memcpy(&dm->tdma_now, &dm->tdma, sizeof(dm->tdma_now));
+ memcpy(&dm->slot_now, &dm->slot, sizeof(dm->slot_now));
+ }
if (btc->update_policy_force)
btc->update_policy_force = false;
@@ -2083,20 +2120,32 @@ static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
switch (type) {
case CXDRVINFO_INIT:
- rtw89_fw_h2c_cxdrv_init(rtwdev);
+ if (ver->fcxinit == 7)
+ rtw89_fw_h2c_cxdrv_init_v7(rtwdev, type);
+ else
+ rtw89_fw_h2c_cxdrv_init(rtwdev, type);
break;
case CXDRVINFO_ROLE:
if (ver->fwlrole == 0)
- rtw89_fw_h2c_cxdrv_role(rtwdev);
+ rtw89_fw_h2c_cxdrv_role(rtwdev, type);
else if (ver->fwlrole == 1)
- rtw89_fw_h2c_cxdrv_role_v1(rtwdev);
+ rtw89_fw_h2c_cxdrv_role_v1(rtwdev, type);
else if (ver->fwlrole == 2)
- rtw89_fw_h2c_cxdrv_role_v2(rtwdev);
+ rtw89_fw_h2c_cxdrv_role_v2(rtwdev, type);
break;
case CXDRVINFO_CTRL:
- rtw89_fw_h2c_cxdrv_ctrl(rtwdev);
+ if (ver->drvinfo_type == 1)
+ type = 2;
+
+ if (ver->fcxctrl == 7)
+ rtw89_fw_h2c_cxdrv_ctrl_v7(rtwdev, type);
+ else
+ rtw89_fw_h2c_cxdrv_ctrl(rtwdev, type);
break;
case CXDRVINFO_TRX:
+ if (ver->drvinfo_type == 1)
+ type = 3;
+
dm->trx_info.tx_power = u32_get_bits(rf_para.wl_tx_power,
RTW89_BTC_WL_DEF_TX_PWR);
dm->trx_info.rx_gain = u32_get_bits(rf_para.wl_rx_gain,
@@ -2107,11 +2156,18 @@ static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
RTW89_BTC_WL_DEF_TX_PWR);
dm->trx_info.cn = wl->cn_report;
dm->trx_info.nhm = wl->nhm.pwr;
- rtw89_fw_h2c_cxdrv_trx(rtwdev);
+ rtw89_fw_h2c_cxdrv_trx(rtwdev, type);
break;
case CXDRVINFO_RFK:
- rtw89_fw_h2c_cxdrv_rfk(rtwdev);
+ if (ver->drvinfo_type == 1)
+ return;
+
+ rtw89_fw_h2c_cxdrv_rfk(rtwdev, type);
break;
+ case CXDRVINFO_TXPWR:
+ case CXDRVINFO_FDDT:
+ case CXDRVINFO_MLO:
+ case CXDRVINFO_OSI:
default:
break;
}
@@ -2261,20 +2317,25 @@ static void _set_bt_tx_power(struct rtw89_dev *rtwdev, u8 level)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ int ret;
u8 buf;
- if (bt->rf_para.tx_pwr_freerun == level)
+ if (btc->cx.cnt_bt[BTC_BCNT_INFOUPDATE] == 0)
return;
- bt->rf_para.tx_pwr_freerun = level;
- btc->dm.rf_trx_para.bt_tx_power = level;
+ if (bt->rf_para.tx_pwr_freerun == level)
+ return;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): level = %d\n",
__func__, level);
buf = (s8)(-level);
- _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_TX_PWR, &buf, 1);
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_TX_PWR, &buf, 1);
+ if (!ret) {
+ bt->rf_para.tx_pwr_freerun = level;
+ btc->dm.rf_trx_para.bt_tx_power = level;
+ }
}
#define BTC_BT_RX_NORMAL_LVL 7
@@ -2284,6 +2345,9 @@ static void _set_bt_rx_gain(struct rtw89_dev *rtwdev, u8 level)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ if (btc->cx.cnt_bt[BTC_BCNT_INFOUPDATE] == 0)
+ return;
+
if ((bt->rf_para.rx_gain_freerun == level ||
level > BTC_BT_RX_NORMAL_LVL) &&
(!rtwdev->chip->scbd || bt->lna_constrain == level))
@@ -2333,7 +2397,7 @@ static void _set_rf_trx_para(struct rtw89_dev *rtwdev)
}
/* decide trx_para_level */
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
/* fix LNA2 + TIA gain not change by GNT_BT */
if ((btc->dm.wl_btg_rx && b->profile_cnt.now != 0) ||
dm->bt_only == 1)
@@ -2435,7 +2499,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
u8 en = 0, i, ch = 0, bw = 0;
u8 mode, connect_cnt;
- if (btc->ctrl.manual || wl->status.map.scan)
+ if (btc->manual_ctrl || wl->status.map.scan)
return;
if (ver->fwlrole == 0) {
@@ -2560,8 +2624,16 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
+ union rtw89_btc_module_info *md = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ u8 isolation;
+
+ if (ver->fcxinit == 7)
+ isolation = md->md_v7.ant.isolation;
+ else
+ isolation = md->md.ant.isolation;
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
btc->dm.trx_para_level = 0;
return false;
}
@@ -2584,7 +2656,7 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
}
/* TODO get isolation by BT psd */
- if (btc->mdinfo.ant.isolation >= BTC_FREERUN_ANTISO_MIN) {
+ if (isolation >= BTC_FREERUN_ANTISO_MIN) {
btc->dm.trx_para_level = 5;
return true;
}
@@ -2712,7 +2784,7 @@ void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type)
u8 type;
u32 tbl_w1, tbl_b1, tbl_b4;
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.wl.status.map._4way)
tbl_w1 = cxtbl[1];
else
@@ -3023,12 +3095,13 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &btc->cx.wl.role_info_v1;
struct rtw89_btc_bt_hid_desc *hid = &btc->cx.bt.link_info.hid_desc;
struct rtw89_btc_bt_hfp_desc *hfp = &btc->cx.bt.link_info.hfp_desc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u8 type, null_role;
u32 tbl_w1, tbl_b1, tbl_b4;
type = FIELD_GET(BTC_CXP_MASK, policy_type);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.wl.status.map._4way)
tbl_w1 = cxtbl[1];
else if (hid->exist && hid->type == BTC_HID_218)
@@ -3048,9 +3121,16 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
tbl_b4 = cxtbl[2];
}
} else {
- tbl_w1 = cxtbl[16];
tbl_b1 = cxtbl[17];
tbl_b4 = cxtbl[17];
+
+ if (wl->bg_mode)
+ tbl_w1 = cxtbl[8];
+ else if ((wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) &&
+ hid->exist)
+ tbl_w1 = cxtbl[19];
+ else
+ tbl_w1 = cxtbl[16];
}
btc->bt_req_en = false;
@@ -3615,7 +3695,7 @@ static void _action_bt_idle(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
switch (btc->cx.state_map) {
case BTC_WBUSY_BNOSCAN: /*wl-busy + bt idle*/
case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-idle */
@@ -3654,7 +3734,7 @@ static void _action_bt_hfp(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.wl.status.map._4way) {
_set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HFP);
} else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
@@ -3664,7 +3744,12 @@ static void _action_bt_hfp(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, BTC_CXP_OFF_BWB1, BTC_ACT_BT_HFP);
}
} else {
- _set_policy(rtwdev, BTC_CXP_OFF_EQ2, BTC_ACT_BT_HFP);
+ if (wl->bg_mode)
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB1, BTC_ACT_BT_HFP);
+ else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL))
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ5, BTC_ACT_BT_HFP);
+ else
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ2, BTC_ACT_BT_HFP);
}
}
@@ -3679,7 +3764,7 @@ static void _action_bt_hid(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (wl->status.map._4way) {
policy_type = BTC_CXP_OFF_WL;
} else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
@@ -3697,7 +3782,12 @@ static void _action_bt_hid(struct rtw89_dev *rtwdev)
policy_type = BTC_CXP_OFF_BWB1;
}
} else { /* dedicated-antenna */
- policy_type = BTC_CXP_OFF_EQ3;
+ if (wl->bg_mode)
+ policy_type = BTC_CXP_OFF_BWB1;
+ else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL))
+ policy_type = BTC_CXP_OFF_EQ4;
+ else
+ policy_type = BTC_CXP_OFF_EQ3;
}
_set_policy(rtwdev, policy_type, BTC_ACT_BT_HID);
@@ -3947,7 +4037,7 @@ static void _action_wl_other(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ if (btc->ant_type == BTC_ANT_SHARED)
_set_policy(rtwdev, BTC_CXP_OFFB_BWB0, BTC_ACT_WL_OTHER);
else
_set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_OTHER);
@@ -3991,7 +4081,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
u32 is_btg;
u8 i, val;
- if (btc->ctrl.manual)
+ if (btc->manual_ctrl)
return;
if (ver->fwlrole == 0)
@@ -4063,7 +4153,7 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc_dm *dm = &btc->dm;
u8 is_preagc, val;
- if (btc->ctrl.manual)
+ if (btc->manual_ctrl)
return;
if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC)
@@ -4083,7 +4173,7 @@ static void _set_wl_preagc_ctrl(struct rtw89_dev *rtwdev)
else if (ver->fwlrole == 2 && wl_rinfo->dbcc_en &&
wl_rinfo->dbcc_2g_phy != RTW89_PHY_1)
is_preagc = BTC_PREAGC_DISABLE;
- else if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ else if (btc->ant_type == BTC_ANT_SHARED)
is_preagc = BTC_PREAGC_DISABLE;
else
is_preagc = BTC_PREAGC_ENABLE;
@@ -4187,13 +4277,12 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
struct rtw89_txtime_data data = {.rtwdev = rtwdev};
- u8 mode;
- u8 tx_retry;
+ u8 mode, igno_bt, tx_retry;
u32 tx_time;
u16 enable;
bool reenable = false;
- if (btc->ctrl.manual)
+ if (btc->manual_ctrl)
return;
if (ver->fwlrole == 0)
@@ -4205,7 +4294,12 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
else
return;
- if (btc->dm.freerun || btc->ctrl.igno_bt || b->profile_cnt.now == 0 ||
+ if (ver->fcxctrl == 7)
+ igno_bt = btc->ctrl.ctrl_v7.igno_bt;
+ else
+ igno_bt = btc->ctrl.ctrl.igno_bt;
+
+ if (btc->dm.freerun || igno_bt || b->profile_cnt.now == 0 ||
mode == BTC_WLINK_5G || mode == BTC_WLINK_NOLINK) {
enable = 0;
tx_time = BTC_MAX_TX_TIME_DEF;
@@ -4402,7 +4496,7 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ if (btc->ant_type == BTC_ANT_SHARED)
_set_policy(rtwdev, BTC_CXP_OFFE_DEF,
BTC_RSN_NTFY_SCAN_START);
else
@@ -4430,7 +4524,7 @@ static void _action_wl_25g_mcc(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
BTC_ACT_WL_25G_MCC);
@@ -4447,7 +4541,7 @@ static void _action_wl_2g_mcc(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
BTC_ACT_WL_2G_MCC);
@@ -4465,7 +4559,7 @@ static void _action_wl_2g_scc(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev,
BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_SCC);
@@ -4487,7 +4581,7 @@ static void _action_wl_2g_scc_v1(struct rtw89_dev *rtwdev)
u16 policy_type = BTC_CXP_OFF_BT;
u32 dur;
- if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED) {
+ if (btc->ant_type == BTC_ANT_DEDICATED) {
policy_type = BTC_CXP_OFF_EQ0;
} else {
/* shared-antenna */
@@ -4549,7 +4643,7 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
u16 policy_type = BTC_CXP_OFF_BT;
u32 dur;
- if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED) {
+ if (btc->ant_type == BTC_ANT_DEDICATED) {
policy_type = BTC_CXP_OFF_EQ0;
} else {
/* shared-antenna */
@@ -4607,7 +4701,7 @@ static void _action_wl_2g_ap(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
BTC_ACT_WL_2G_AP);
@@ -4624,7 +4718,7 @@ static void _action_wl_2g_go(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev,
BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_GO);
@@ -4642,7 +4736,7 @@ static void _action_wl_2g_gc(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
_action_by_bt(rtwdev);
} else {/* dedicated-antenna */
_set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_GC);
@@ -4655,7 +4749,7 @@ static void _action_wl_2g_nan(struct rtw89_dev *rtwdev)
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
- if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->ant_type == BTC_ANT_SHARED) { /* shared-antenna */
if (btc->cx.bt.link_info.profile_cnt.now == 0)
_set_policy(rtwdev,
BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_NAN);
@@ -5351,7 +5445,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
- u8 mode;
+ u8 mode, igno_bt, always_freerun;
lockdep_assert_held(&rtwdev->mutex);
@@ -5368,20 +5462,28 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
else
return;
+ if (ver->fcxctrl == 7) {
+ igno_bt = btc->ctrl.ctrl_v7.igno_bt;
+ always_freerun = btc->ctrl.ctrl_v7.always_freerun;
+ } else {
+ igno_bt = btc->ctrl.ctrl.igno_bt;
+ always_freerun = btc->ctrl.ctrl.always_freerun;
+ }
+
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): reason=%d, mode=%d\n",
__func__, reason, mode);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): wl_only=%d, bt_only=%d\n",
__func__, dm->wl_only, dm->bt_only);
/* Be careful to change the following function sequence!! */
- if (btc->ctrl.manual) {
+ if (btc->manual_ctrl) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): return for Manual CTRL!!\n",
__func__);
return;
}
- if (btc->ctrl.igno_bt &&
+ if (igno_bt &&
(reason == BTC_RSN_UPDATE_BT_INFO ||
reason == BTC_RSN_UPDATE_BT_SCBD)) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -5418,24 +5520,24 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
dm->freerun = false;
dm->cnt_dm[BTC_DCNT_RUN]++;
dm->fddt_train = BTC_FDDT_DISABLE;
- btc->ctrl.igno_bt = false;
bt->scan_rx_low_pri = false;
+ igno_bt = false;
- if (btc->ctrl.always_freerun) {
+ if (always_freerun) {
_action_freerun(rtwdev);
- btc->ctrl.igno_bt = true;
+ igno_bt = true;
goto exit;
}
if (dm->wl_only) {
_action_wl_only(rtwdev);
- btc->ctrl.igno_bt = true;
+ igno_bt = true;
goto exit;
}
if (wl->status.map.rf_off || wl->status.map.lps || dm->bt_only) {
_action_wl_off(rtwdev, mode);
- btc->ctrl.igno_bt = true;
+ igno_bt = true;
goto exit;
}
@@ -5525,6 +5627,10 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
exit:
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): exit\n", __func__);
+ if (ver->fcxctrl == 7)
+ btc->ctrl.ctrl_v7.igno_bt = igno_bt;
+ else
+ btc->ctrl.ctrl.igno_bt = igno_bt;
_action_common(rtwdev);
}
@@ -5560,16 +5666,26 @@ static void _set_init_info(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
- dm->init_info.wl_only = (u8)dm->wl_only;
- dm->init_info.bt_only = (u8)dm->bt_only;
- dm->init_info.wl_init_ok = (u8)wl->status.map.init_ok;
- dm->init_info.dbcc_en = rtwdev->dbcc_en;
- dm->init_info.cx_other = btc->cx.other.type;
- dm->init_info.wl_guard_ch = chip->afh_guard_ch;
- dm->init_info.module = btc->mdinfo;
+ if (ver->fcxinit == 7) {
+ dm->init_info.init_v7.wl_only = (u8)dm->wl_only;
+ dm->init_info.init_v7.bt_only = (u8)dm->bt_only;
+ dm->init_info.init_v7.wl_init_ok = (u8)wl->status.map.init_ok;
+ dm->init_info.init_v7.cx_other = btc->cx.other.type;
+ dm->init_info.init_v7.wl_guard_ch = chip->afh_guard_ch;
+ dm->init_info.init_v7.module = btc->mdinfo.md_v7;
+ } else {
+ dm->init_info.init.wl_only = (u8)dm->wl_only;
+ dm->init_info.init.bt_only = (u8)dm->bt_only;
+ dm->init_info.init.wl_init_ok = (u8)wl->status.map.init_ok;
+ dm->init_info.init.dbcc_en = rtwdev->dbcc_en;
+ dm->init_info.init.cx_other = btc->cx.other.type;
+ dm->init_info.init.wl_guard_ch = chip->afh_guard_ch;
+ dm->init_info.init.module = btc->mdinfo.md;
+ }
}
void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
@@ -5578,11 +5694,15 @@ void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_btc_ver *ver = btc->ver;
_reset_btc_var(rtwdev, BTC_RESET_ALL);
btc->dm.run_reason = BTC_RSN_NONE;
btc->dm.run_action = BTC_ACT_NONE;
- btc->ctrl.igno_bt = true;
+ if (ver->fcxctrl == 7)
+ btc->ctrl.ctrl_v7.igno_bt = true;
+ else
+ btc->ctrl.ctrl.igno_bt = true;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): mode=%d\n", __func__, mode);
@@ -6298,7 +6418,7 @@ static void rtw89_btc_ntfy_wl_sta_iter(void *data, struct ieee80211_sta *sta)
if (BTC_RSSI_LOW(link_info->rssi_state[i]))
rssi_map |= BIT(i);
- if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED &&
+ if (btc->ant_type == BTC_ANT_DEDICATED &&
BTC_RSSI_CHANGE(link_info->rssi_state[i]))
is_sta_change = true;
}
@@ -6489,13 +6609,16 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u32 ver_main = 0, ver_sub = 0, ver_hotfix = 0, id_branch = 0;
+ u8 cv, rfe, iso, ant_num, ant_single_pos;
if (!(dm->coex_info_map & BTC_COEX_INFO_CX))
return;
@@ -6545,11 +6668,24 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
ver_main, ver_sub, ver_hotfix, id_branch,
bt->ver_info.fw, bt->run_patch_code ? "patch" : "ROM");
+ if (ver->fcxinit == 7) {
+ cv = md->md_v7.kt_ver;
+ rfe = md->md_v7.rfe_type;
+ iso = md->md_v7.ant.isolation;
+ ant_num = md->md_v7.ant.num;
+ ant_single_pos = md->md_v7.ant.single_pos;
+ } else {
+ cv = md->md.cv;
+ rfe = md->md.rfe_type;
+ iso = md->md.ant.isolation;
+ ant_num = md->md.ant.num;
+ ant_single_pos = md->md.ant.single_pos;
+ }
+
seq_printf(m, " %-15s : cv:%x, rfe_type:0x%x, ant_iso:%d, ant_pg:%d, %s",
- "[hw_info]", btc->mdinfo.cv, btc->mdinfo.rfe_type,
- btc->mdinfo.ant.isolation, btc->mdinfo.ant.num,
- (btc->mdinfo.ant.num > 1 ? "" : (btc->mdinfo.ant.single_pos ?
- "1Ant_Pos:S1, " : "1Ant_Pos:S0, ")));
+ "[hw_info]", cv, rfe, iso, ant_num,
+ ant_num > 1 ? "" :
+ ant_single_pos ? "1Ant_Pos:S1, " : "1Ant_Pos:S0, ");
seq_printf(m, "3rd_coex:%d, dbcc:%d, tx_num:%d, rx_num:%d\n",
btc->cx.other.type, rtwdev->dbcc_en, hal->tx_nss,
@@ -6722,20 +6858,26 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_bt_info *bt = &cx->bt;
struct rtw89_btc_wl_info *wl = &cx->wl;
- struct rtw89_btc_module *module = &btc->mdinfo;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
+ union rtw89_btc_module_info *md = &btc->mdinfo;
u8 *afh = bt_linfo->afh_map;
u8 *afh_le = bt_linfo->afh_map_le;
+ u8 bt_pos;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_BT))
return;
+ if (ver->fcxinit == 7)
+ bt_pos = md->md_v7.bt_pos;
+ else
+ bt_pos = md->md.bt_pos;
+
seq_puts(m, "========== [BT Status] ==========\n");
seq_printf(m, " %-15s : enable:%s, btg:%s%s, connect:%s, ",
"[status]", bt->enable.now ? "Y" : "N",
bt->btg_type ? "Y" : "N",
- (bt->enable.now && (bt->btg_type != module->bt_pos) ?
+ (bt->enable.now && (bt->btg_type != bt_pos) ?
"(efuse-mismatch!!)" : ""),
(bt_linfo->status.map.connect ? "Y" : "N"));
@@ -6934,10 +7076,13 @@ static const char *steps_to_str(u16 step)
CASE_BTC_POLICY_STR(OFF_EQ1);
CASE_BTC_POLICY_STR(OFF_EQ2);
CASE_BTC_POLICY_STR(OFF_EQ3);
+ CASE_BTC_POLICY_STR(OFF_EQ4);
+ CASE_BTC_POLICY_STR(OFF_EQ5);
CASE_BTC_POLICY_STR(OFF_BWB0);
CASE_BTC_POLICY_STR(OFF_BWB1);
CASE_BTC_POLICY_STR(OFF_BWB2);
CASE_BTC_POLICY_STR(OFF_BWB3);
+ CASE_BTC_POLICY_STR(OFF_WL2);
CASE_BTC_POLICY_STR(OFFB_BWB0);
CASE_BTC_POLICY_STR(OFFE_DEF);
CASE_BTC_POLICY_STR(OFFE_DEF2);
@@ -7123,21 +7268,22 @@ static void _show_dm_step(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ u8 igno_bt;
if (!(dm->coex_info_map & BTC_COEX_INFO_DM))
return;
seq_printf(m, "========== [Mechanism Status %s] ==========\n",
- (btc->ctrl.manual ? "(Manual)" : "(Auto)"));
+ (btc->manual_ctrl ? "(Manual)" : "(Auto)"));
seq_printf(m,
" %-15s : type:%s, reason:%s(), action:%s(), ant_path:%s, init_mode:%s, run_cnt:%d\n",
"[status]",
- module->ant.type == BTC_ANT_SHARED ? "shared" : "dedicated",
+ btc->ant_type == BTC_ANT_SHARED ? "shared" : "dedicated",
steps_to_str(dm->run_reason),
steps_to_str(dm->run_action | BTC_ACT_EXT_BIT),
id_to_ant(FIELD_GET(GENMASK(7, 0), dm->set_ant_path)),
@@ -7146,8 +7292,13 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_dm_step(rtwdev, m);
+ if (ver->fcxctrl == 7)
+ igno_bt = btc->ctrl.ctrl_v7.igno_bt;
+ else
+ igno_bt = btc->ctrl.ctrl.igno_bt;
+
seq_printf(m, " %-15s : wl_only:%d, bt_only:%d, igno_bt:%d, free_run:%d, wl_ps_ctrl:%d, wl_mimo_ps:%d, ",
- "[dm_flag]", dm->wl_only, dm->bt_only, btc->ctrl.igno_bt,
+ "[dm_flag]", dm->wl_only, dm->bt_only, igno_bt,
dm->freerun, btc->lps, dm->wl_mimo_ps);
seq_printf(m, "leak_ap:%d, fw_offload:%s%s\n", dm->leak_ap,
@@ -7888,10 +8039,11 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_steps_v2 *pstep = NULL;
+ const struct rtw89_btc_ver *ver = btc->ver;
u8 type, val, cnt = 0, state = 0;
bool outloop = false;
u16 i, diff_t, n_start = 0, n_stop = 0;
- u16 pos_old, pos_new;
+ u16 pos_old, pos_new, trace_step;
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
if (!pcinfo->valid)
@@ -7908,11 +8060,16 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
do {
switch (state) {
case 0:
+ if (ver->fcxctrl == 7 || ver->fcxctrl == 1)
+ trace_step = 50;
+ else
+ trace_step = btc->ctrl.ctrl.trace_step;
+
n_start = pos_old;
if (pos_new >= pos_old)
n_stop = pos_new;
else
- n_stop = btc->ctrl.trace_step - 1;
+ n_stop = trace_step - 1;
state = 1;
break;
@@ -8742,7 +8899,7 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_printf(m, "WL FW / BT FW %d.%d.%d.%d / NA\n",
fw_suit->major_ver, fw_suit->minor_ver,
fw_suit->sub_ver, fw_suit->sub_idex);
- seq_printf(m, "manual %d\n", btc->ctrl.manual);
+ seq_printf(m, "manual %d\n", btc->manual_ctrl);
seq_puts(m, "=========================================\n");
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index 46e25c6f88a6..13303830684e 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -7,6 +7,8 @@
#include "core.h"
+#define BTC_H2C_MAXLEN 2020
+
enum btc_mode {
BTC_MODE_NORMAL,
BTC_MODE_WL,
@@ -23,6 +25,7 @@ enum btc_wl_rfk_type {
BTC_WRFKT_DACK = 4,
BTC_WRFKT_RXDCK = 5,
BTC_WRFKT_TSSI = 6,
+ BTC_WRFKT_CHLK = 7,
};
#define NM_EXEC false
@@ -152,6 +155,10 @@ enum btc_lps_state {
#define BTC_REG_NOTFOUND 0xff
+#define R_BTC_ZB_COEX_TBL_0 0xE328
+#define R_BTC_ZB_COEX_TBL_1 0xE32c
+#define R_BTC_ZB_BREAK_TBL 0xE350
+
enum btc_ant_div_pos {
BTC_ANT_DIV_MAIN = 0,
BTC_ANT_DIV_AUX = 1,
@@ -180,6 +187,20 @@ enum btc_btgctrl_type {
BTC_BTGCTRL_BB_GNT_NOTFOUND,
};
+enum btc_wa_type {
+ BTC_WA_5G_HI_CH_RX = BIT(0),
+ BTC_WA_NULL_AP = BIT(1),
+ BTC_WA_HFP_ZB = BIT(2), /* HFP PTA req bit4 definition issue */
+};
+
+enum btc_3cx_type {
+ BTC_3CX_NONE = 0,
+ BTC_3CX_BT2 = BIT(0),
+ BTC_3CX_ZB = BIT(1),
+ BTC_3CX_LTE = BIT(2),
+ BTC_3CX_MAX,
+};
+
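Both new enums are bit flags, so states combine with OR and are tested with AND rather than compared for equality. A hedged fragment (not taken from the patch):

u32 wa = BTC_WA_5G_HI_CH_RX | BTC_WA_HFP_ZB;	/* two workarounds active */
u32 cx3 = BTC_3CX_BT2 | BTC_3CX_ZB;		/* BT 2nd link plus ZigBee */
bool need_null_ap_wa = !!(wa & BTC_WA_NULL_AP);	/* false: BIT(1) is clear */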
void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode);
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index fd527a249996..d474b8d5df3d 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -372,7 +372,7 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
chip->ops->set_txpwr(rtwdev, chan, phy_idx);
}
-void rtw89_set_channel(struct rtw89_dev *rtwdev)
+int rtw89_set_channel(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -399,7 +399,7 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
break;
default:
WARN(1, "Invalid ent mode: %d\n", mode);
- return;
+ return -EINVAL;
}
roc_idx = atomic_read(&hal->roc_entity_idx);
@@ -426,6 +426,7 @@ void rtw89_set_channel(struct rtw89_dev *rtwdev)
}
rtw89_set_entity_state(rtwdev, true);
+ return 0;
}
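With rtw89_set_channel() returning int, the -EINVAL produced by an unknown entity mode can now propagate instead of being swallowed. A hedged sketch of a caller (illustrative, not from this patch):

int ret = rtw89_set_channel(rtwdev);

if (ret) {
	rtw89_warn(rtwdev, "set channel failed: %d\n", ret);
	return ret;
}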
void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -1176,7 +1177,8 @@ static __le32 rtw89_build_txwd_info2_v1(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, 1) |
+ bool rts_en = !desc_info->is_bmc;
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, rts_en) |
FIELD_PREP(RTW89_TXWD_INFO4_HW_RTS_EN, 1);
return cpu_to_le32(dword);
@@ -1329,7 +1331,8 @@ static __le32 rtw89_build_txwd_info2_v2(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info4_v2(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, 1) |
+ bool rts_en = !desc_info->is_bmc;
+ u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, rts_en) |
FIELD_PREP(BE_TXD_INFO4_HW_RTS_EN, 1);
return cpu_to_le32(dword);
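Rationale for both hunks above: RTS/CTS only makes sense for unicast, since a broadcast/multicast frame has no single addressed peer to answer the CTS, so RTS_EN is now derived from !is_bmc while HW_RTS_EN stays set. A standalone demo of the resulting descriptor word, with simplified bit positions (not the real TXWD layout):

#include <stdint.h>
#include <stdio.h>

#define DEMO_RTS_EN	(1u << 0)	/* illustrative positions only */
#define DEMO_HW_RTS_EN	(1u << 1)

static uint32_t demo_build_info4(int is_bmc)
{
	uint32_t dword = DEMO_HW_RTS_EN;	/* HW RTS always enabled */

	if (!is_bmc)	/* unicast: a peer exists to answer CTS */
		dword |= DEMO_RTS_EN;
	return dword;
}

int main(void)
{
	printf("unicast=0x%x bmc=0x%x\n", demo_build_info4(0), demo_build_info4(1));
	return 0;
}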
@@ -1866,6 +1869,17 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
ieee80211_queue_work(rtwdev->hw, &rtwdev->cancel_6ghz_probe_work);
}
+static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif *rtwvif,
+ struct ieee80211_hdr *hdr, size_t len)
+{
+ struct ieee80211_mgmt *mgmt = (typeof(mgmt))hdr;
+
+ if (len < offsetof(typeof(*mgmt), u.beacon.variable))
+ return;
+
+ WRITE_ONCE(rtwvif->sync_bcn_tsf, le64_to_cpu(mgmt->u.beacon.timestamp));
+}
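The offsetof() guard keeps a truncated beacon from causing an out-of-bounds read: the 8-byte timestamp sits in the fixed part of the management frame, and u.beacon.variable marks the end of those fixed fields. A standalone illustration with a simplified layout (not packed, not the real struct ieee80211_mgmt):

#include <stddef.h>
#include <stdint.h>

struct demo_beacon {
	uint8_t hdr[24];	/* fc/dur/addr1-3/seq, as in 802.11 */
	uint64_t timestamp;	/* the TSF value being synced */
	uint16_t beacon_int;
	uint16_t capab;
	uint8_t variable[];	/* start of the IEs */
};

static int demo_has_fixed_fields(size_t len)
{
	/* same bound as in rtw89_vif_sync_bcn_tsf() above */
	return len >= offsetof(struct demo_beacon, variable);
}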
+
static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
@@ -1896,8 +1910,10 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
return;
if (ieee80211_is_beacon(hdr->frame_control)) {
- if (vif->type == NL80211_IFTYPE_STATION)
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ rtw89_vif_sync_bcn_tsf(rtwvif, hdr, skb->len);
rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
+ }
pkt_stat->beacon_nr++;
}
@@ -3345,6 +3361,14 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
return ret;
}
+ ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif, rtwsta);
+ if (ret)
+ return ret;
+
+ ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif, rtwsta);
+ if (ret)
+ return ret;
+
rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE);
}
@@ -3393,7 +3417,7 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, true);
}
- ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+ ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
return ret;
@@ -3442,7 +3466,7 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
}
}
- ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+ ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
return ret;
@@ -3485,6 +3509,8 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
rtw89_warn(rtwdev, "failed to send h2c general packet\n");
return ret;
}
+
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
}
return ret;
@@ -3611,7 +3637,8 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
cpu_to_le16(867), cpu_to_le16(1733), cpu_to_le16(2600), cpu_to_le16(3467),
};
const struct rtw89_chip_info *chip = rtwdev->chip;
- const __le16 *highest = chip->support_bw160 ? highest_bw160 : highest_bw80;
+ const __le16 *highest = chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160) ?
+ highest_bw160 : highest_bw80;
struct rtw89_hal *hal = &rtwdev->hal;
u16 tx_mcs_map = 0, rx_mcs_map = 0;
u8 sts_cap = 3;
@@ -3640,34 +3667,34 @@ static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
- if (chip->support_bw160)
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
IEEE80211_VHT_CAP_SHORT_GI_160;
vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map);
vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map);
vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1];
vht_cap->vht_mcs.tx_highest = highest[hal->tx_nss - 1];
-}
-#define RTW89_SBAND_IFTYPES_NR 2
+ if (ieee80211_hw_check(rtwdev->hw, SUPPORTS_VHT_EXT_NSS_BW))
+ vht_cap->vht_mcs.tx_highest |=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
+}
static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
enum nl80211_band band,
- struct ieee80211_supported_band *sband)
+ enum nl80211_iftype iftype,
+ struct ieee80211_sband_iftype_data *iftype_data)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_hal *hal = &rtwdev->hal;
- struct ieee80211_sband_iftype_data *iftype_data;
bool no_ng16 = (chip->chip_id == RTL8852A && hal->cv == CHIP_CBV) ||
(chip->chip_id == RTL8852B && hal->cv == CHIP_CAV);
+ struct ieee80211_sta_he_cap *he_cap;
+ int nss = hal->rx_nss;
+ u8 *mac_cap_info;
+ u8 *phy_cap_info;
u16 mcs_map = 0;
int i;
- int nss = hal->rx_nss;
- int idx = 0;
-
- iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL);
- if (!iftype_data)
- return;
for (i = 0; i < 8; i++) {
if (i < nss)
@@ -3676,12 +3703,196 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
mcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
}
- for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
- struct ieee80211_sta_he_cap *he_cap;
- u8 *mac_cap_info;
- u8 *phy_cap_info;
+ he_cap = &iftype_data->he_cap;
+ mac_cap_info = he_cap->he_cap_elem.mac_cap_info;
+ phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
+
+ he_cap->has_he = true;
+ mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
+ if (iftype == NL80211_IFTYPE_STATION)
+ mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
+ mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
+ IEEE80211_HE_MAC_CAP2_BSR;
+ mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2;
+ if (iftype == NL80211_IFTYPE_AP)
+ mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL;
+ mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS |
+ IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
+ if (iftype == NL80211_IFTYPE_STATION)
+ mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
+ if (band == NL80211_BAND_2GHZ) {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ } else {
+ phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ }
+ phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
+ phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_DOPPLER_TX;
+ phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM;
+ if (iftype == NL80211_IFTYPE_STATION)
+ phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2;
+ if (iftype == NL80211_IFTYPE_AP)
+ phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
+ phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ phy_cap_info[5] = no_ng16 ? 0 :
+ IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
+ IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
+ phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE;
+ phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP7_MAX_NC_1;
+ phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ if (iftype == NL80211_IFTYPE_STATION)
+ phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
+ he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160)) {
+ he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
+ }
+
+ if (band == NL80211_BAND_6GHZ) {
+ __le16 capa;
- switch (i) {
+ capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+ iftype_data->he_6ghz_capa.capa = capa;
+ }
+}
+
+static void rtw89_init_eht_cap(struct rtw89_dev *rtwdev,
+ enum nl80211_band band,
+ enum nl80211_iftype iftype,
+ struct ieee80211_sband_iftype_data *iftype_data)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct ieee80211_eht_cap_elem_fixed *eht_cap_elem;
+ struct ieee80211_eht_mcs_nss_supp *eht_nss;
+ struct ieee80211_sta_eht_cap *eht_cap;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ bool support_320mhz = false;
+ int sts = 8;
+ u8 val;
+
+ if (chip->chip_gen == RTW89_CHIP_AX)
+ return;
+
+ if (band == NL80211_BAND_6GHZ &&
+ chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_320))
+ support_320mhz = true;
+
+ eht_cap = &iftype_data->eht_cap;
+ eht_cap_elem = &eht_cap->eht_cap_elem;
+ eht_nss = &eht_cap->eht_mcs_nss_supp;
+
+ eht_cap->has_eht = true;
+
+ eht_cap_elem->mac_cap_info[0] =
+ u8_encode_bits(IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991,
+ IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
+ eht_cap_elem->mac_cap_info[1] = 0;
+
+ eht_cap_elem->phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
+ if (support_320mhz)
+ eht_cap_elem->phy_cap_info[0] |=
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ;
+
+ eht_cap_elem->phy_cap_info[0] |=
+ u8_encode_bits(u8_get_bits(sts - 1, BIT(0)),
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
+ eht_cap_elem->phy_cap_info[1] =
+ u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)),
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK);
+ if (support_320mhz)
+ eht_cap_elem->phy_cap_info[1] |=
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] = 0;
+
+ eht_cap_elem->phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK;
+
+ eht_cap_elem->phy_cap_info[4] =
+ IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP |
+ u8_encode_bits(1, IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
+
+ eht_cap_elem->phy_cap_info[5] =
+ u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+ eht_cap_elem->phy_cap_info[6] = 0;
+ eht_cap_elem->phy_cap_info[7] = 0;
+ eht_cap_elem->phy_cap_info[8] = 0;
+
+ val = u8_encode_bits(hal->rx_nss, IEEE80211_EHT_MCS_NSS_RX) |
+ u8_encode_bits(hal->tx_nss, IEEE80211_EHT_MCS_NSS_TX);
+ eht_nss->bw._80.rx_tx_mcs9_max_nss = val;
+ eht_nss->bw._80.rx_tx_mcs11_max_nss = val;
+ eht_nss->bw._80.rx_tx_mcs13_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs9_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs11_max_nss = val;
+ eht_nss->bw._160.rx_tx_mcs13_max_nss = val;
+ if (support_320mhz) {
+ eht_nss->bw._320.rx_tx_mcs9_max_nss = val;
+ eht_nss->bw._320.rx_tx_mcs11_max_nss = val;
+ eht_nss->bw._320.rx_tx_mcs13_max_nss = val;
+ }
+}
+
+#define RTW89_SBAND_IFTYPES_NR 2
+
+static void rtw89_init_he_eht_cap(struct rtw89_dev *rtwdev,
+ enum nl80211_band band,
+ struct ieee80211_supported_band *sband)
+{
+ struct ieee80211_sband_iftype_data *iftype_data;
+ enum nl80211_iftype iftype;
+ int idx = 0;
+
+ iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL);
+ if (!iftype_data)
+ return;
+
+ for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+ switch (iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_AP:
break;
@@ -3694,92 +3905,10 @@ static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
break;
}
- iftype_data[idx].types_mask = BIT(i);
- he_cap = &iftype_data[idx].he_cap;
- mac_cap_info = he_cap->he_cap_elem.mac_cap_info;
- phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
-
- he_cap->has_he = true;
- mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
- if (i == NL80211_IFTYPE_STATION)
- mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
- mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
- IEEE80211_HE_MAC_CAP2_BSR;
- mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2;
- if (i == NL80211_IFTYPE_AP)
- mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL;
- mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS |
- IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
- if (i == NL80211_IFTYPE_STATION)
- mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
- if (band == NL80211_BAND_2GHZ) {
- phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
- } else {
- phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
- if (chip->support_bw160)
- phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
- }
- phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
- IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
- phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
- IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_DOPPLER_TX;
- phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM;
- if (i == NL80211_IFTYPE_STATION)
- phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2;
- if (i == NL80211_IFTYPE_AP)
- phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
- phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
- if (chip->support_bw160)
- phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
- phy_cap_info[5] = no_ng16 ? 0 :
- IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
- IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
- phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
- IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
- IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
- IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE;
- phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
- IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP7_MAX_NC_1;
- phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
- if (chip->support_bw160)
- phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
- phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
- IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
- u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
- if (i == NL80211_IFTYPE_STATION)
- phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
- he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
- he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
- if (chip->support_bw160) {
- he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
- he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
- }
-
- if (band == NL80211_BAND_6GHZ) {
- __le16 capa;
+ iftype_data[idx].types_mask = BIT(iftype);
- capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
- IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
- le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
- IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
- le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
- IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
- iftype_data[idx].he_6ghz_capa.capa = capa;
- }
+ rtw89_init_he_cap(rtwdev, band, iftype, &iftype_data[idx]);
+ rtw89_init_eht_cap(rtwdev, band, iftype, &iftype_data[idx]);
idx++;
}
@@ -3800,7 +3929,7 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
if (!sband_2ghz)
goto err;
rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
+ rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
}
@@ -3810,7 +3939,7 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
goto err;
rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
+ rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
}
@@ -3818,7 +3947,7 @@ static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL);
if (!sband_6ghz)
goto err;
- rtw89_init_he_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
+ rtw89_init_he_eht_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz;
}
@@ -3879,7 +4008,7 @@ void rtw89_core_update_beacon_work(struct work_struct *work)
rtwdev = rtwvif->rtwdev;
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif);
mutex_unlock(&rtwdev->mutex);
}
@@ -3944,7 +4073,6 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
- rtwdev->mac.qta_mode = RTW89_QTA_SCC;
ret = rtw89_mac_init(rtwdev);
if (ret) {
rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret);
@@ -3961,6 +4089,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
return ret;
rtw89_phy_init_bb_reg(rtwdev);
+ rtw89_chip_bb_postinit(rtwdev);
rtw89_phy_init_rf_reg(rtwdev, false);
rtw89_btc_ntfy_init(rtwdev, BTC_MODE_NORMAL);
@@ -3983,6 +4112,7 @@ int rtw89_core_start(struct rtw89_dev *rtwdev)
set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
+ rtw89_chip_rfk_init_late(rtwdev);
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable);
rtw89_fw_h2c_init_ba_cam(rtwdev);
@@ -4078,6 +4208,15 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
rtwdev->hal.rx_fltr = DEFAULT_AX_RX_FLTR;
+ rtwdev->dbcc_en = false;
+ rtwdev->mlo_dbcc_mode = MLO_DBCC_NOT_SUPPORT;
+ rtwdev->mac.qta_mode = RTW89_QTA_SCC;
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ rtwdev->dbcc_en = true;
+ rtwdev->mac.qta_mode = RTW89_QTA_DBCC;
+ rtwdev->mlo_dbcc_mode = MLO_2_PLUS_0_1RF;
+ }
INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
INIT_WORK(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
@@ -4085,6 +4224,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_WORK(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);
init_completion(&rtwdev->fw.req.completion);
+ init_completion(&rtwdev->rfk_wait.completion);
schedule_work(&rtwdev->load_firmware_work);
@@ -4290,6 +4430,7 @@ EXPORT_SYMBOL(rtw89_chip_info_setup);
static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_hw *hw = rtwdev->hw;
struct rtw89_efuse *efuse = &rtwdev->efuse;
struct rtw89_hal *hal = &rtwdev->hal;
@@ -4324,8 +4465,8 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
ieee80211_hw_set(hw, WANT_MONITOR_VIF);
- /* ref: description of rtw89_mcc_get_tbtt_ofst() in chan.c */
- ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
+ if (chip->support_bandwidths & BIT(NL80211_CHAN_WIDTH_160))
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
ieee80211_hw_set(hw, CONNECTION_MONITOR);
@@ -4362,6 +4503,8 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->wiphy->max_remain_on_channel_duration = 1000;
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
ret = rtw89_core_set_supported_band(rtwdev);
if (ret) {
@@ -4453,9 +4596,10 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &early_fw);
if (no_chanctx) {
- ops->add_chanctx = NULL;
- ops->remove_chanctx = NULL;
- ops->change_chanctx = NULL;
+ ops->add_chanctx = ieee80211_emulate_add_chanctx;
+ ops->remove_chanctx = ieee80211_emulate_remove_chanctx;
+ ops->change_chanctx = ieee80211_emulate_change_chanctx;
+ ops->switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx;
ops->assign_vif_chanctx = NULL;
ops->unassign_vif_chanctx = NULL;
ops->remain_on_channel = NULL;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index ea6df859ba15..2e854c9af709 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -17,6 +17,7 @@ struct rtw89_pci_info;
struct rtw89_mac_gen_def;
struct rtw89_phy_gen_def;
struct rtw89_efuse_block_cfg;
+struct rtw89_h2c_rf_tssi;
struct rtw89_fw_txpwr_track_cfg;
struct rtw89_phy_rfk_log_fmt;
@@ -32,6 +33,7 @@ extern const struct ieee80211_ops rtw89_ops;
#define MASKDWORD 0xffffffff
#define RFREG_MASK 0xfffff
#define INV_RF_DATA 0xffffffff
+#define BYPASS_CR_DATA 0xbabecafe
#define RTW89_TRACK_WORK_PERIOD round_jiffies_relative(HZ * 2)
#define RTW89_FORBID_BA_TIMER round_jiffies_relative(HZ * 4)
@@ -878,7 +880,7 @@ enum rtw89_ps_mode {
#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
#define RTW89_6G_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
#define RTW89_BYR_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
-#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_160 + 1)
+#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
enum rtw89_ru_bandwidth {
RTW89_RU26 = 0,
@@ -956,6 +958,9 @@ struct rtw89_port_reg {
u32 mbssid;
u32 mbssid_drop;
u32 tsf_sync;
+ u32 ptcl_dbg;
+ u32 ptcl_dbg_info;
+ u32 bcn_drop_all;
u32 hiq_win[RTW89_PORT_NUM];
};
@@ -1146,9 +1151,15 @@ struct rtw89_mac_ax_gnt {
u8 gnt_wl;
} __packed;
+struct rtw89_mac_ax_wl_act {
+ u8 wlan_act_en;
+ u8 wlan_act;
+};
+
#define RTW89_MAC_AX_COEX_GNT_NR 2
struct rtw89_mac_ax_coex_gnt {
struct rtw89_mac_ax_gnt band[RTW89_MAC_AX_COEX_GNT_NR];
+ struct rtw89_mac_ax_wl_act bt[RTW89_MAC_AX_COEX_GNT_NR];
};
enum rtw89_btc_ncnt {
@@ -1266,6 +1277,18 @@ struct rtw89_btc_ant_info {
u8 stream_cnt: 4;
};
+struct rtw89_btc_ant_info_v7 {
+ u8 type; /* shared, dedicated(non-shared) */
+ u8 num; /* antenna count */
+ u8 isolation;
+ u8 single_pos; /* wifi 1ss-1ant at 0:S0 or 1:S1 */
+
+ u8 diversity; /* only for wifi use 1-antenna */
+ u8 btg_pos; /* btg-circuit at 0:S0/1:S1/others:all */
+ u8 stream_cnt; /* spatial_stream count */
+ u8 rsvd;
+} __packed;
+
enum rtw89_tfc_dir {
RTW89_TFC_UL,
RTW89_TFC_DL,
@@ -1660,6 +1683,16 @@ struct rtw89_btc_dm_emap {
u32 wl_e2g_hang: 1;
u32 wl_ver_mismatch: 1;
u32 bt_ver_mismatch: 1;
+ u32 rfe_type0: 1;
+ u32 h2c_buffer_over: 1;
+ u32 bt_tx_hang: 1; /* for SNR too low bug, BT has no Tx req */
+ u32 wl_no_sta_ntfy: 1;
+
+ u32 h2c_bmap_mismatch: 1;
+ u32 c2h_bmap_mismatch: 1;
+ u32 h2c_struct_invalid: 1;
+ u32 c2h_struct_invalid: 1;
+ u32 h2c_c2h_buffer_mismatch: 1;
};
union rtw89_btc_dm_error_map {
@@ -1708,6 +1741,7 @@ struct rtw89_btc_wl_info {
u8 cn_report;
u8 coex_mode;
+ bool bg_mode;
bool scbd_change;
u32 scbd;
};
@@ -1725,6 +1759,25 @@ struct rtw89_btc_module {
u8 kt_ver_adie;
};
+struct rtw89_btc_module_v7 {
+ u8 rfe_type;
+ u8 kt_ver;
+ u8 bt_solo;
+ u8 bt_pos; /* wl-end view: get from efuse, must compare bt.btg_type */
+
+ u8 switch_type; /* WL/BT switch type: 0: internal, 1: external */
+ u8 wa_type; /* WA type: 0:none, 1: 51B 5G_Hi-Ch_Rx */
+ u8 kt_ver_adie;
+ u8 rsvd;
+
+ struct rtw89_btc_ant_info_v7 ant;
+} __packed;
+
+union rtw89_btc_module_info {
+ struct rtw89_btc_module md;
+ struct rtw89_btc_module_v7 md_v7;
+};
+
#define RTW89_BTC_DM_MAXSTEP 30
#define RTW89_BTC_DM_CNT_MAX (RTW89_BTC_DM_MAXSTEP * 8)
@@ -1747,6 +1800,25 @@ struct rtw89_btc_init_info {
u16 rsvd;
};
+struct rtw89_btc_init_info_v7 {
+ u8 wl_guard_ch;
+ u8 wl_only;
+ u8 wl_init_ok;
+ u8 rsvd3;
+
+ u8 cx_other;
+ u8 bt_only;
+ u8 pta_mode;
+ u8 pta_direction;
+
+ struct rtw89_btc_module_v7 module;
+} __packed;
+
+union rtw89_btc_init_info_u {
+ struct rtw89_btc_init_info init;
+ struct rtw89_btc_init_info_v7 init_v7;
+};
+
struct rtw89_btc_wl_tx_limit_para {
u16 enable;
u32 tx_time; /* unit: us */
@@ -2485,7 +2557,7 @@ struct rtw89_btc_dm {
struct rtw89_btc_fbtc_tdma tdma;
struct rtw89_btc_fbtc_tdma tdma_now;
struct rtw89_mac_ax_coex_gnt gnt;
- struct rtw89_btc_init_info init_info; /* pass to wl_fw if offload */
+ union rtw89_btc_init_info_u init_info; /* pass to wl_fw if offload */
struct rtw89_btc_rf_trx_para rf_trx_para;
struct rtw89_btc_wl_tx_limit_para wl_tx_limit;
struct rtw89_btc_dm_step dm_step;
@@ -2534,6 +2606,18 @@ struct rtw89_btc_ctrl {
u32 rsvd: 12;
};
+struct rtw89_btc_ctrl_v7 {
+ u8 manual;
+ u8 igno_bt;
+ u8 always_freerun;
+ u8 rsvd;
+} __packed;
+
+union rtw89_btc_ctrl_list {
+ struct rtw89_btc_ctrl ctrl;
+ struct rtw89_btc_ctrl_v7 ctrl_v7;
+};
+
struct rtw89_btc_dbg {
/* cmd "rb" */
bool rb_done;
@@ -2706,7 +2790,9 @@ struct rtw89_btc_ver {
u8 fwlrole;
u8 frptmap;
u8 fcxctrl;
+ u8 fcxinit;
+ u8 drvinfo_type;
u16 info_buf;
u8 max_role_num;
};
@@ -2718,8 +2804,8 @@ struct rtw89_btc {
struct rtw89_btc_cx cx;
struct rtw89_btc_dm dm;
- struct rtw89_btc_ctrl ctrl;
- struct rtw89_btc_module mdinfo;
+ union rtw89_btc_ctrl_list ctrl;
+ union rtw89_btc_module_info mdinfo;
struct rtw89_btc_btf_fwinfo fwinfo;
struct rtw89_btc_dbg dbg;
@@ -2731,11 +2817,14 @@ struct rtw89_btc {
u32 bt_req_len;
u8 policy[RTW89_BTC_POLICY_MAXLEN];
+ u8 ant_type;
+ u8 btg_pos;
u16 policy_len;
u16 policy_type;
bool bt_req_en;
bool update_policy_force;
bool lps;
+ bool manual_ctrl;
};
enum rtw89_btc_hmsg {
@@ -2875,7 +2964,7 @@ struct rtw89_ba_cam_entry {
#define RTW89_MAX_ADDR_CAM_NUM 128
#define RTW89_MAX_BSSID_CAM_NUM 20
#define RTW89_MAX_SEC_CAM_NUM 128
-#define RTW89_MAX_BA_CAM_NUM 8
+#define RTW89_MAX_BA_CAM_NUM 24
#define RTW89_SEC_CAM_IN_ADDR_CAM 7
struct rtw89_addr_cam_entry {
@@ -2932,6 +3021,7 @@ struct rtw89_sta {
struct ewma_evm evm_min[RF_PATH_MAX];
struct ewma_evm evm_max[RF_PATH_MAX];
struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
+ DECLARE_BITMAP(ampdu_map, IEEE80211_NUM_TIDS);
struct ieee80211_rx_status rx_status;
u16 rx_hw_rate;
__le32 htc_template;
@@ -3041,6 +3131,7 @@ struct rtw89_vif {
u8 bcn_hit_cond;
u8 hit_rule;
u8 last_noa_nr;
+ u64 sync_bcn_tsf;
bool offchan;
bool trigger;
bool lsig_txop;
@@ -3111,7 +3202,7 @@ struct rtw89_hci_ops {
void (*ctrl_txdma_ch)(struct rtw89_dev *rtwdev, bool enable);
void (*ctrl_txdma_fw_ch)(struct rtw89_dev *rtwdev, bool enable);
void (*ctrl_trxhci)(struct rtw89_dev *rtwdev, bool enable);
- int (*poll_txdma_ch)(struct rtw89_dev *rtwdev);
+ int (*poll_txdma_ch_idle)(struct rtw89_dev *rtwdev);
void (*clr_idx_all)(struct rtw89_dev *rtwdev);
void (*clear)(struct rtw89_dev *rtwdev, struct pci_dev *pdev);
void (*disable_intr)(struct rtw89_dev *rtwdev);
@@ -3131,6 +3222,7 @@ struct rtw89_chip_ops {
int (*enable_bb_rf)(struct rtw89_dev *rtwdev);
int (*disable_bb_rf)(struct rtw89_dev *rtwdev);
void (*bb_preinit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ void (*bb_postinit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
void (*bb_reset)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
void (*bb_sethw)(struct rtw89_dev *rtwdev);
@@ -3152,7 +3244,9 @@ struct rtw89_chip_ops {
int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
void (*fem_setup)(struct rtw89_dev *rtwdev);
void (*rfe_gpio)(struct rtw89_dev *rtwdev);
+ void (*rfk_hw_init)(struct rtw89_dev *rtwdev);
void (*rfk_init)(struct rtw89_dev *rtwdev);
+ void (*rfk_init_late)(struct rtw89_dev *rtwdev);
void (*rfk_channel)(struct rtw89_dev *rtwdev);
void (*rfk_band_changed)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
@@ -3196,6 +3290,22 @@ struct rtw89_chip_ops {
int (*h2c_dctl_sec_cam)(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta);
+ int (*h2c_default_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+ int (*h2c_assoc_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*h2c_ampdu_cmac_tbl)(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*h2c_default_dmac_tbl)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+ int (*h2c_update_beacon)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
+ int (*h2c_ba_cam)(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params);
void (*btc_set_rfe)(struct rtw89_dev *rtwdev);
void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
@@ -3225,8 +3335,62 @@ enum rtw89_dma_ch {
RTW89_DMA_CH_NUM = 13
};
+#define MLO_MODE_FOR_BB0_BB1_RF(bb0, bb1, rf) ((rf) << 12 | (bb1) << 4 | (bb0))
+
+enum rtw89_mlo_dbcc_mode {
+ MLO_DBCC_NOT_SUPPORT = 1,
+ MLO_0_PLUS_2_1RF = MLO_MODE_FOR_BB0_BB1_RF(0, 2, 1),
+ MLO_0_PLUS_2_2RF = MLO_MODE_FOR_BB0_BB1_RF(0, 2, 2),
+ MLO_1_PLUS_1_1RF = MLO_MODE_FOR_BB0_BB1_RF(1, 1, 1),
+ MLO_1_PLUS_1_2RF = MLO_MODE_FOR_BB0_BB1_RF(1, 1, 2),
+ MLO_2_PLUS_0_1RF = MLO_MODE_FOR_BB0_BB1_RF(2, 0, 1),
+ MLO_2_PLUS_0_2RF = MLO_MODE_FOR_BB0_BB1_RF(2, 0, 2),
+ MLO_2_PLUS_2_2RF = MLO_MODE_FOR_BB0_BB1_RF(2, 2, 2),
+ DBCC_LEGACY = 0xffffffff,
+};
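The macro packs the BB0 link count into the low nibble, the BB1 link count shifted by 4, and the RF count shifted by 12, which is why e.g. MLO_2_PLUS_0_1RF reads as 0x1002. A standalone compile-time check, restating the macro only to keep the demo self-contained:

#define MLO_MODE_FOR_BB0_BB1_RF(bb0, bb1, rf) ((rf) << 12 | (bb1) << 4 | (bb0))

_Static_assert(MLO_MODE_FOR_BB0_BB1_RF(2, 0, 1) == 0x1002, "2+0 links, 1 RF");
_Static_assert(MLO_MODE_FOR_BB0_BB1_RF(1, 1, 2) == 0x2011, "1+1 links, 2 RF");
_Static_assert(MLO_MODE_FOR_BB0_BB1_RF(2, 2, 2) == 0x2022, "2+2 links, 2 RF");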
+
+enum rtw89_scan_be_operation {
+ RTW89_SCAN_OP_STOP,
+ RTW89_SCAN_OP_START,
+ RTW89_SCAN_OP_SETPARM,
+ RTW89_SCAN_OP_GETRPT,
+ RTW89_SCAN_OP_NUM
+};
+
+enum rtw89_scan_be_mode {
+ RTW89_SCAN_MODE_SA,
+ RTW89_SCAN_MODE_MACC,
+ RTW89_SCAN_MODE_NUM
+};
+
+enum rtw89_scan_be_opmode {
+ RTW89_SCAN_OPMODE_NONE,
+ RTW89_SCAN_OPMODE_TBTT,
+ RTW89_SCAN_OPMODE_INTV,
+ RTW89_SCAN_OPMODE_CNT,
+ RTW89_SCAN_OPMODE_NUM,
+};
+
+struct rtw89_scan_option {
+ bool enable;
+ bool target_ch_mode;
+ u8 num_macc_role;
+ u8 num_opch;
+ u8 repeat;
+ u16 norm_pd;
+ u16 slow_pd;
+ u16 norm_cy;
+ u8 opch_end;
+ u64 prohib_chan;
+ enum rtw89_phy_idx band;
+ enum rtw89_scan_be_operation operation;
+ enum rtw89_scan_be_mode scan_mode;
+ enum rtw89_mlo_dbcc_mode mlo_mode;
+};
+
enum rtw89_qta_mode {
RTW89_QTA_SCC,
+ RTW89_QTA_DBCC,
RTW89_QTA_DLFW,
RTW89_QTA_WOW,
@@ -3713,7 +3877,7 @@ struct rtw89_chip_info {
u32 rf_base_addr[2];
u8 support_chanctx_num;
u8 support_bands;
- bool support_bw160;
+ u16 support_bandwidths;
bool support_unii4;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
@@ -3790,6 +3954,7 @@ struct rtw89_chip_info {
const u32 *c2h_regs;
struct rtw89_reg_def c2h_counter_reg;
const struct rtw89_page_regs *page_regs;
+ u32 wow_reason_reg;
bool cfo_src_fd;
bool cfo_hw_comp;
const struct rtw89_reg_def *dcfo_comp;
@@ -3838,7 +4003,7 @@ enum rtw89_host_rpr_mode {
RTW89_RPR_MODE_STF
};
-#define RTW89_COMPLETION_BUF_SIZE 24
+#define RTW89_COMPLETION_BUF_SIZE 40
#define RTW89_WAIT_COND_IDLE UINT_MAX
struct rtw89_completion_data {
@@ -3897,6 +4062,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_NO_DEEP_PS,
RTW89_FW_FEATURE_NO_LPS_PG,
RTW89_FW_FEATURE_BEACON_FILTER,
+ RTW89_FW_FEATURE_MACID_PAUSE_SLEEP,
};
struct rtw89_fw_suit {
@@ -3957,6 +4123,19 @@ struct rtw89_fw_elm_info {
struct rtw89_phy_rfk_log_fmt *rfk_log_fmt;
};
+enum rtw89_fw_mss_dev_type {
+ RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF = 0xF,
+ RTW89_FW_MSS_DEV_TYPE_FWSEC_INV = 0xFF,
+};
+
+struct rtw89_fw_secure {
+ bool secure_boot;
+ u32 sb_sel_mgn;
+ u8 mss_dev_type;
+ u8 mss_cust_idx;
+ u8 mss_key_num;
+};
+
struct rtw89_fw_info {
struct rtw89_fw_req_info req;
int fw_format;
@@ -3971,6 +4150,7 @@ struct rtw89_fw_info {
struct rtw89_fw_log log;
u32 feature_map;
struct rtw89_fw_elm_info elm_info;
+ struct rtw89_fw_secure sec;
};
#define RTW89_CHK_FW_FEATURE(_feat, _fw) \
@@ -4045,6 +4225,7 @@ struct rtw89_tas_info {
struct rtw89_chanctx_cfg {
enum rtw89_sub_entity_idx idx;
+ int ref_count;
};
enum rtw89_chanctx_changes {
@@ -4064,13 +4245,16 @@ enum rtw89_entity_mode {
RTW89_ENTITY_MODE_MCC,
NUM_OF_RTW89_ENTITY_MODE,
- RTW89_ENTITY_MODE_INVALID = NUM_OF_RTW89_ENTITY_MODE,
+ RTW89_ENTITY_MODE_INVALID = -EINVAL,
+ RTW89_ENTITY_MODE_UNHANDLED = -ESRCH,
};
struct rtw89_sub_entity {
struct cfg80211_chan_def chandef;
struct rtw89_chan chan;
struct rtw89_chan_rcd rcd;
+
+ /* only assigned when running with chanctx_ops */
struct rtw89_chanctx_cfg *cfg;
};
@@ -4123,6 +4307,7 @@ enum rtw89_flags {
RTW89_FLAG_CMAC1_FUNC,
RTW89_FLAG_FW_RDY,
RTW89_FLAG_RUNNING,
+ RTW89_FLAG_PROBE_DONE,
RTW89_FLAG_BFEE_MON,
RTW89_FLAG_BFEE_EN,
RTW89_FLAG_BFEE_TIMER_KEEP,
@@ -4179,6 +4364,21 @@ struct rtw89_phy_stat {
struct rtw89_pkt_stat last_pkt_stat;
};
+enum rtw89_rfk_report_state {
+ RTW89_RFK_STATE_START = 0x0,
+ RTW89_RFK_STATE_OK = 0x1,
+ RTW89_RFK_STATE_FAIL = 0x2,
+ RTW89_RFK_STATE_TIMEOUT = 0x3,
+ RTW89_RFK_STATE_H2C_CMD_ERR = 0x4,
+};
+
+struct rtw89_rfk_wait_info {
+ struct completion completion;
+ ktime_t start_time;
+ enum rtw89_rfk_report_state state;
+ u8 version;
+};
+
#define RTW89_DACK_PATH_NR 2
#define RTW89_DACK_IDX_NR 2
#define RTW89_DACK_MSBK_NR 16
@@ -4194,15 +4394,18 @@ struct rtw89_dack_info {
bool msbk_timeout[RTW89_DACK_PATH_NR];
};
-#define RTW89_IQK_CHS_NR 2
-#define RTW89_IQK_PATH_NR 4
+#define RTW89_RFK_CHS_NR 3
struct rtw89_rfk_mcc_info {
- u8 ch[RTW89_IQK_CHS_NR];
- u8 band[RTW89_IQK_CHS_NR];
+ u8 ch[RTW89_RFK_CHS_NR];
+ u8 band[RTW89_RFK_CHS_NR];
+ u8 bw[RTW89_RFK_CHS_NR];
u8 table_idx;
};
+#define RTW89_IQK_CHS_NR 2
+#define RTW89_IQK_PATH_NR 4
+
struct rtw89_lck_info {
u8 thermal[RF_PATH_MAX];
};
@@ -4380,6 +4583,11 @@ struct rtw89_cfo_tracking_info {
u8 lock_cnt;
};
+enum rtw89_tssi_mode {
+ RTW89_TSSI_NORMAL = 0,
+ RTW89_TSSI_SCAN = 1,
+};
+
enum rtw89_tssi_alimk_band {
TSSI_ALIMK_2G = 0,
TSSI_ALIMK_5GL,
@@ -4589,6 +4797,7 @@ struct rtw89_hw_scan_info {
struct ieee80211_vif *scanning_vif;
struct list_head pkt_list[NUM_NL80211_BANDS];
struct rtw89_chan op_chan;
+ bool abort;
u32 last_chan_idx;
};
@@ -4605,6 +4814,48 @@ enum rtw89_phy_bb_gain_band {
RTW89_BB_GAIN_BAND_NR,
};
+enum rtw89_phy_gain_band_be {
+ RTW89_BB_GAIN_BAND_2G_BE = 0,
+ RTW89_BB_GAIN_BAND_5G_L_BE = 1,
+ RTW89_BB_GAIN_BAND_5G_M_BE = 2,
+ RTW89_BB_GAIN_BAND_5G_H_BE = 3,
+ RTW89_BB_GAIN_BAND_6G_L0_BE = 4,
+ RTW89_BB_GAIN_BAND_6G_L1_BE = 5,
+ RTW89_BB_GAIN_BAND_6G_M0_BE = 6,
+ RTW89_BB_GAIN_BAND_6G_M1_BE = 7,
+ RTW89_BB_GAIN_BAND_6G_H0_BE = 8,
+ RTW89_BB_GAIN_BAND_6G_H1_BE = 9,
+ RTW89_BB_GAIN_BAND_6G_UH0_BE = 10,
+ RTW89_BB_GAIN_BAND_6G_UH1_BE = 11,
+
+ RTW89_BB_GAIN_BAND_NR_BE,
+};
+
+enum rtw89_phy_bb_bw_be {
+ RTW89_BB_BW_20_40 = 0,
+ RTW89_BB_BW_80_160_320 = 1,
+
+ RTW89_BB_BW_NR_BE,
+};
+
+enum rtw89_bw20_sc {
+ RTW89_BW20_SC_20M = 1,
+ RTW89_BW20_SC_40M = 2,
+ RTW89_BW20_SC_80M = 4,
+ RTW89_BW20_SC_160M = 8,
+ RTW89_BW20_SC_320M = 16,
+};
+
+enum rtw89_cmac_table_bw {
+ RTW89_CMAC_BW_20M = 0,
+ RTW89_CMAC_BW_40M = 1,
+ RTW89_CMAC_BW_80M = 2,
+ RTW89_CMAC_BW_160M = 3,
+ RTW89_CMAC_BW_320M = 4,
+
+ RTW89_CMAC_BW_NR,
+};
+
enum rtw89_phy_bb_rxsc_num {
RTW89_BB_RXSC_NUM_40 = 9, /* SC: 0, 1~8 */
RTW89_BB_RXSC_NUM_80 = 13, /* SC: 0, 1~8, 9~12 */
@@ -4627,6 +4878,27 @@ struct rtw89_phy_bb_gain_info {
[RTW89_BB_RXSC_NUM_160];
};
+struct rtw89_phy_bb_gain_info_be {
+ s8 lna_gain[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE][RF_PATH_MAX]
+ [LNA_GAIN_NUM];
+ s8 tia_gain[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE][RF_PATH_MAX]
+ [TIA_GAIN_NUM];
+ s8 lna_gain_bypass[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE]
+ [RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 lna_op1db[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE]
+ [RF_PATH_MAX][LNA_GAIN_NUM];
+ s8 tia_lna_op1db[RTW89_BB_GAIN_BAND_NR_BE][RTW89_BB_BW_NR_BE]
+ [RF_PATH_MAX][LNA_GAIN_NUM + 1];
+ s8 rpl_ofst_20[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_20M];
+ s8 rpl_ofst_40[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_40M];
+ s8 rpl_ofst_80[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_80M];
+ s8 rpl_ofst_160[RTW89_BB_GAIN_BAND_NR_BE][RF_PATH_MAX]
+ [RTW89_BW20_SC_160M];
+};
+
struct rtw89_phy_efuse_gain {
bool offset_valid;
bool comp_valid;
@@ -4681,6 +4953,9 @@ struct rtw89_mcc_role {
struct rtw89_mcc_policy policy;
struct rtw89_mcc_limit limit;
+ /* only valid when running with FW MRC mechanism */
+ u8 slot_idx;
+
/* byte-array in LE order for FW */
u8 macid_bitmap[BITS_TO_BYTES(RTW89_MAX_MAC_ID_NUM)];
@@ -4724,7 +4999,11 @@ struct rtw89_mcc_sync {
bool enable;
u16 offset; /* TU */
u8 macid_src;
+ u8 band_src;
+ u8 port_src;
u8 macid_tgt;
+ u8 band_tgt;
+ u8 port_tgt;
};
struct rtw89_mcc_config {
@@ -4757,6 +5036,7 @@ struct rtw89_dev {
const struct ieee80211_ops *ops;
bool dbcc_en;
+ enum rtw89_mlo_dbcc_mode mlo_dbcc_mode;
struct rtw89_hw_scan_info scan_info;
const struct rtw89_chip_info *chip;
const struct rtw89_pci_info *pci_info;
@@ -4806,6 +5086,7 @@ struct rtw89_dev {
DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM);
struct rtw89_phy_stat phystat;
+ struct rtw89_rfk_wait_info rfk_wait;
struct rtw89_dack_info dack;
struct rtw89_iqk_info iqk;
struct rtw89_dpk_info dpk;
@@ -4824,7 +5105,10 @@ struct rtw89_dev {
struct rtw89_env_monitor_info env_monitor;
struct rtw89_dig_info dig;
struct rtw89_phy_ch_info ch_info;
- struct rtw89_phy_bb_gain_info bb_gain;
+ union {
+ struct rtw89_phy_bb_gain_info ax;
+ struct rtw89_phy_bb_gain_info_be be;
+ } bb_gain;
struct rtw89_phy_efuse_gain efuse_gain;
struct rtw89_phy_ul_tb_info ul_tb_info;
struct rtw89_antdiv_info antdiv;
@@ -4969,12 +5253,12 @@ static inline void rtw89_hci_ctrl_trxhci(struct rtw89_dev *rtwdev, bool enable)
rtwdev->hci.ops->ctrl_trxhci(rtwdev, enable);
}
-static inline int rtw89_hci_poll_txdma_ch(struct rtw89_dev *rtwdev)
+static inline int rtw89_hci_poll_txdma_ch_idle(struct rtw89_dev *rtwdev)
{
int ret = 0;
- if (rtwdev->hci.ops->poll_txdma_ch)
- ret = rtwdev->hci.ops->poll_txdma_ch(rtwdev);
+ if (rtwdev->hci.ops->poll_txdma_ch_idle)
+ ret = rtwdev->hci.ops->poll_txdma_ch_idle(rtwdev);
return ret;
}
@@ -5437,6 +5721,14 @@ static inline void rtw89_chip_rfe_gpio(struct rtw89_dev *rtwdev)
chip->ops->rfe_gpio(rtwdev);
}
+static inline void rtw89_chip_rfk_hw_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_hw_init)
+ chip->ops->rfk_hw_init(rtwdev);
+}
+
static inline
void rtw89_chip_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
@@ -5446,6 +5738,20 @@ void rtw89_chip_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
chip->ops->bb_preinit(rtwdev, phy_idx);
}
+static inline
+void rtw89_chip_bb_postinit(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!chip->ops->bb_postinit)
+ return;
+
+ chip->ops->bb_postinit(rtwdev, RTW89_PHY_0);
+
+ if (rtwdev->dbcc_en)
+ chip->ops->bb_postinit(rtwdev, RTW89_PHY_1);
+}
+
static inline void rtw89_chip_bb_sethw(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -5462,6 +5768,14 @@ static inline void rtw89_chip_rfk_init(struct rtw89_dev *rtwdev)
chip->ops->rfk_init(rtwdev);
}
+static inline void rtw89_chip_rfk_init_late(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_init_late)
+ chip->ops->rfk_init_late(rtwdev);
+}
+
static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -5750,6 +6064,18 @@ out:
rcu_read_unlock();
}
+static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
+{
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_1_PLUS_1_1RF:
+ case MLO_1_PLUS_1_2RF:
+ case DBCC_LEGACY:
+ return true;
+ default:
+ return false;
+ }
+}
+
int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
@@ -5815,7 +6141,7 @@ void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
struct rtw89_chan *chan);
-void rtw89_set_channel(struct rtw89_dev *rtwdev);
+int rtw89_set_channel(struct rtw89_dev *rtwdev);
void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_chan *chan);
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 44829a148185..affffc4092ba 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -3427,14 +3427,17 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_btc *btc = &rtwdev->btc;
- bool btc_manual;
+ const struct rtw89_btc_ver *ver = btc->ver;
int ret;
- ret = kstrtobool_from_user(user_buf, count, &btc_manual);
+ ret = kstrtobool_from_user(user_buf, count, &btc->manual_ctrl);
if (ret)
return ret;
- btc->ctrl.manual = btc_manual;
+ if (ver->fcxctrl == 7)
+ btc->ctrl.ctrl_v7.manual = btc->manual_ctrl;
+ else
+ btc->ctrl.ctrl.manual = btc->manual_ctrl;
return count;
}
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.h b/drivers/net/wireless/realtek/rtw89/efuse.h
index 5c6787179bad..72416f56a071 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse.h
+++ b/drivers/net/wireless/realtek/rtw89/efuse.h
@@ -23,5 +23,6 @@ int rtw89_parse_efuse_map_be(struct rtw89_dev *rtwdev);
int rtw89_parse_phycap_map_be(struct rtw89_dev *rtwdev);
int rtw89_cnv_efuse_state_be(struct rtw89_dev *rtwdev, bool idle);
int rtw89_read_efuse_ver(struct rtw89_dev *rtwdev, u8 *efv);
+int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/efuse_be.c b/drivers/net/wireless/realtek/rtw89/efuse_be.c
index 8e8b7cd315f7..0be26d5fdf7c 100644
--- a/drivers/net/wireless/realtek/rtw89/efuse_be.c
+++ b/drivers/net/wireless/realtek/rtw89/efuse_be.c
@@ -7,6 +7,31 @@
#include "mac.h"
#include "reg.h"
+#define EFUSE_EXTERNALPN_ADDR_BE 0x1580
+#define EFUSE_B1_MSSDEVTYPE_MASK GENMASK(3, 0)
+#define EFUSE_B1_MSSCUSTIDX0_MASK GENMASK(7, 4)
+#define EFUSE_SERIALNUM_ADDR_BE 0x1581
+#define EFUSE_B2_MSSKEYNUM_MASK GENMASK(3, 0)
+#define EFUSE_B2_MSSCUSTIDX1_MASK BIT(6)
+#define EFUSE_SB_CRYP_SEL_ADDR 0x1582
+#define EFUSE_SB_CRYP_SEL_SIZE 2
+#define EFUSE_SB_CRYP_SEL_DEFAULT 0xFFFF
+#define SB_SEL_MGN_MAX_SIZE 2
+#define EFUSE_SEC_BE_START 0x1580
+#define EFUSE_SEC_BE_SIZE 4
+
+enum rtw89_efuse_mss_dev_type {
+ MSS_DEV_TYPE_FWSEC_DEF = 0xF,
+ MSS_DEV_TYPE_FWSEC_WINLIN_INBOX = 0xC,
+ MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB = 0xA,
+ MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_COB = 0x9,
+ MSS_DEV_TYPE_FWSEC_NONWIN_INBOX = 0x6,
+};
+
+static const u32 sb_sel_mgn[SB_SEL_MGN_MAX_SIZE] = {
+ 0x8000100, 0xC000180
+};
+
static void rtw89_enable_efuse_pwr_cut_ddv_be(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -418,3 +443,120 @@ out_free:
return ret;
}
+
+static u16 get_sb_cryp_sel_idx(u16 sb_cryp_sel)
+{
+ u8 low_bit, high_bit, cnt_zero = 0;
+ u8 idx, sel_form_v, sel_idx_v;
+ u16 sb_cryp_sel_v = 0x0;
+
+ sel_form_v = u16_get_bits(sb_cryp_sel, MASKBYTE0);
+ sel_idx_v = u16_get_bits(sb_cryp_sel, MASKBYTE1);
+
+ for (idx = 0; idx < 4; idx++) {
+ low_bit = !!(sel_form_v & BIT(idx));
+ high_bit = !!(sel_form_v & BIT(7 - idx));
+ if (low_bit != high_bit)
+ return U16_MAX;
+ if (low_bit)
+ continue;
+
+ cnt_zero++;
+ if (cnt_zero == 1)
+ sb_cryp_sel_v = idx * 16;
+ else if (cnt_zero > 1)
+ return U16_MAX;
+ }
+
+ low_bit = u8_get_bits(sel_idx_v, 0x0F);
+ high_bit = u8_get_bits(sel_idx_v, 0xF0);
+
+ if ((low_bit ^ high_bit) != 0xF)
+ return U16_MAX;
+
+ return sb_cryp_sel_v + low_bit;
+}
+
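The 16-bit selector is redundancy-coded against efuse bit flips: the low byte must be mirror-symmetric with at most one zero pair (choosing a group of 16), and the two nibbles of the high byte must be 4-bit complements (choosing the entry within the group). A hedged userspace re-implementation plus a worked value:

#include <stdint.h>

static uint16_t demo_sb_cryp_sel_idx(uint16_t sel)
{
	uint8_t form = sel & 0xff, idxb = sel >> 8;
	uint16_t base = 0;
	int i, zeros = 0;

	for (i = 0; i < 4; i++) {
		int lo = !!(form & (1u << i));
		int hi = !!(form & (1u << (7 - i)));

		if (lo != hi)
			return UINT16_MAX;	/* mirror violated */
		if (!lo && ++zeros > 1)
			return UINT16_MAX;	/* more than one zero pair */
		if (!lo)
			base = i * 16;
	}
	if (((idxb & 0xf) ^ (idxb >> 4)) != 0xf)
		return UINT16_MAX;		/* nibbles not complements */

	return base + (idxb & 0xf);
}

/* demo_sb_cryp_sel_idx(0xE17E) == 1: form 0x7E zeroes pair 0 (base 0)
 * and index byte 0xE1 decodes to 1, selecting sb_sel_mgn[1].
 */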
+static u8 get_mss_dev_type_idx(struct rtw89_dev *rtwdev, u8 mss_dev_type)
+{
+ switch (mss_dev_type) {
+ case MSS_DEV_TYPE_FWSEC_WINLIN_INBOX:
+ mss_dev_type = 0x0;
+ break;
+ case MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB:
+ mss_dev_type = 0x1;
+ break;
+ case MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_COB:
+ mss_dev_type = 0x2;
+ break;
+ case MSS_DEV_TYPE_FWSEC_NONWIN_INBOX:
+ mss_dev_type = 0x3;
+ break;
+ case MSS_DEV_TYPE_FWSEC_DEF:
+ mss_dev_type = RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF;
+ break;
+ default:
+ rtw89_warn(rtwdev, "unknown mss_dev_type %d", mss_dev_type);
+ mss_dev_type = RTW89_FW_MSS_DEV_TYPE_FWSEC_INV;
+ break;
+ }
+
+ return mss_dev_type;
+}
+
+int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u32 sec_addr = EFUSE_SEC_BE_START;
+ u32 sec_size = EFUSE_SEC_BE_SIZE;
+ u16 sb_cryp_sel, sb_cryp_sel_idx;
+ u8 sec_map[EFUSE_SEC_BE_SIZE];
+ u8 mss_dev_type;
+ u8 b1, b2;
+ int ret;
+
+ ret = rtw89_dump_physical_efuse_map_be(rtwdev, sec_map,
+ sec_addr, sec_size, false);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump secsel map\n");
+ return ret;
+ }
+
+ sb_cryp_sel = sec_map[EFUSE_SB_CRYP_SEL_ADDR - sec_addr] |
+ sec_map[EFUSE_SB_CRYP_SEL_ADDR - sec_addr + 1] << 8;
+ if (sb_cryp_sel == EFUSE_SB_CRYP_SEL_DEFAULT)
+ goto out;
+
+ sb_cryp_sel_idx = get_sb_cryp_sel_idx(sb_cryp_sel);
+ if (sb_cryp_sel_idx >= SB_SEL_MGN_MAX_SIZE) {
+ rtw89_warn(rtwdev, "invalid SB cryp sel idx %d\n", sb_cryp_sel_idx);
+ goto out;
+ }
+
+ sec->sb_sel_mgn = sb_sel_mgn[sb_cryp_sel_idx];
+
+ b1 = sec_map[EFUSE_EXTERNALPN_ADDR_BE - sec_addr];
+ b2 = sec_map[EFUSE_SERIALNUM_ADDR_BE - sec_addr];
+
+ mss_dev_type = u8_get_bits(b1, EFUSE_B1_MSSDEVTYPE_MASK);
+ sec->mss_cust_idx = 0x1F - (u8_get_bits(b1, EFUSE_B1_MSSCUSTIDX0_MASK) |
+ u8_get_bits(b2, EFUSE_B2_MSSCUSTIDX1_MASK) << 4);
+ sec->mss_key_num = 0xF - u8_get_bits(b2, EFUSE_B2_MSSKEYNUM_MASK);
+
+ sec->mss_dev_type = get_mss_dev_type_idx(rtwdev, mss_dev_type);
+ if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_INV) {
+ rtw89_warn(rtwdev, "invalid mss_dev_type %d\n", mss_dev_type);
+ goto out;
+ }
+
+ sec->secure_boot = true;
+
+out:
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "MSS secure_boot=%d dev_type=%d cust_idx=%d key_num=%d\n",
+ sec->secure_boot, sec->mss_dev_type, sec->mss_cust_idx,
+ sec->mss_key_num);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_efuse_read_fw_secure_be);
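A note on the complements in rtw89_efuse_read_fw_secure_be(): a blank efuse byte reads back 0xFF, so the customer index and key number are stored inverted, and subtracting from the all-ones field values (0x1F and 0xF) makes an unprogrammed part decode to index 0 / key 0. A hedged demo using the same masks as the EFUSE_B1_/EFUSE_B2_ definitions above:

#include <stdint.h>

int main(void)
{
	uint8_t b1 = 0xff, b2 = 0xff;	/* a blank (unprogrammed) part */
	uint8_t cust = 0x1f - (((b1 & 0xf0) >> 4) | (((b2 & 0x40) >> 6) << 4));
	uint8_t keys = 0xf - (b2 & 0x0f);

	return (cust == 0 && keys == 0) ? 0 : 1;	/* decodes to 0 / 0 */
}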
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 09684cea9731..185cd339c085 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -13,6 +13,8 @@
#include "reg.h"
#include "util.h"
+static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
+
union rtw89_fw_element_arg {
size_t offset;
enum rtw89_rf_path rf_path;
@@ -163,6 +165,161 @@ static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
return 0;
}
+static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mss_pool_hdr *mss_hdr,
+ u32 rmp_tbl_size, u32 *key_idx)
+{
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u32 sel_byte_idx;
+ u32 mss_sel_idx;
+ u8 sel_bit_idx;
+ int i;
+
+ if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
+ if (!mss_hdr->defen)
+ return -ENOENT;
+
+ mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
+ sec->mss_key_num;
+ } else {
+ if (mss_hdr->defen)
+ mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
+ else
+ mss_sel_idx = 0;
+ mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
+ le16_to_cpu(mss_hdr->msscust_max) +
+ sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
+ sec->mss_key_num;
+ }
+
+ sel_byte_idx = mss_sel_idx >> 3;
+ sel_bit_idx = mss_sel_idx & 0x7;
+
+ if (sel_byte_idx >= rmp_tbl_size)
+ return -EFAULT;
+
+ if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
+ return -ENOENT;
+
+ *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));
+
+ for (i = 0; i < sel_byte_idx; i++)
+ *key_idx += hweight8(mss_hdr->rmp_tbl[i]);
+
+ return 0;
+}
+
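The selected key's position in the pool is the rank of its bit in the remap bitmap: the popcount of the lower bits of the selected byte plus the popcounts of all earlier bytes. A hedged standalone version (hweight8() replaced by __builtin_popcount):

#include <stdint.h>

static uint32_t demo_key_rank(const uint8_t *rmp, uint32_t sel_bit)
{
	uint32_t byte = sel_bit >> 3, bit = sel_bit & 0x7, idx, i;

	if (!(rmp[byte] & (1u << bit)))
		return UINT32_MAX;	/* key absent (driver: -ENOENT) */

	idx = __builtin_popcount(rmp[byte] & ((1u << bit) - 1));
	for (i = 0; i < byte; i++)
		idx += __builtin_popcount(rmp[i]);

	return idx;	/* number of present keys before the selected one */
}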
+static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_section_info *section_info,
+ const struct rtw89_fw_hdr_section_v1 *section,
+ const void *content,
+ u32 *mssc_len)
+{
+ const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
+ const union rtw89_fw_section_mssc_content *section_content = content;
+ struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
+ u32 rmp_tbl_size;
+ u32 key_sign_len;
+ u32 real_key_idx;
+ u32 sb_sel_ver;
+ int ret;
+
+ if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
+ rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
+ return -ENOENT;
+ }
+
+ if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
+ rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
+ le16_to_cpu(mss_hdr->msscust_max) *
+ mss_hdr->mssdev_max) >> 3;
+ if (mss_hdr->defen)
+ rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
+ } else {
+ rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
+ mss_hdr->rmpfmt);
+ return -EINVAL;
+ }
+
+ if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
+ rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
+ rmp_tbl_size, (int)sizeof(*mss_hdr),
+ le32_to_cpu(mss_hdr->key_raw_offset));
+ return -EINVAL;
+ }
+
+ key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
+ if (!key_sign_len)
+ key_sign_len = 512;
+
+ if (info->dsp_checksum)
+ key_sign_len += FWDL_SECURITY_CHKSUM_LEN;
+
+ *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
+ le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;
+
+ if (!sec->secure_boot)
+ goto out;
+
+ sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v);
+ if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
+ goto ignore;
+
+ ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
+ if (ret)
+ goto ignore;
+
+ section_info->key_addr = content + section_info->len +
+ le32_to_cpu(mss_hdr->key_raw_offset) +
+ key_sign_len * real_key_idx;
+ section_info->key_len = key_sign_len;
+ section_info->key_idx = real_key_idx;
+
+out:
+ if (info->secure_section_exist) {
+ section_info->ignore = true;
+ return 0;
+ }
+
+ info->secure_section_exist = true;
+
+ return 0;
+
+ignore:
+ section_info->ignore = true;
+
+ return 0;
+}
+
+static int __parse_security_section(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_section_info *section_info,
+ const struct rtw89_fw_hdr_section_v1 *section,
+ const void *content,
+ u32 *mssc_len)
+{
+ int ret;
+
+ section_info->mssc =
+ le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
+
+ if (section_info->mssc == FORMATTED_MSSC) {
+ ret = __parse_formatted_mssc(rtwdev, info, section_info,
+ section, content, mssc_len);
+ if (ret)
+ return -EINVAL;
+ } else {
+ *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
+ if (info->dsp_checksum)
+ *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;
+
+ info->secure_section_exist = true;
+ }
+
+ return 0;
+}
+
static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
struct rtw89_fw_bin_info *info)
{
@@ -173,10 +330,12 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
const u8 *fw_end = fw + len;
const u8 *bin;
u32 base_hdr_len;
- u32 mssc_len = 0;
+ u32 mssc_len;
+ int ret;
u32 i;
info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
+ info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
@@ -199,16 +358,9 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
section_info = info->section_info;
for (i = 0; i < info->section_num; i++) {
section = &fw_hdr->sections[i];
+
section_info->type =
le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
- if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
- section_info->mssc =
- le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
- mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
- } else {
- section_info->mssc = 0;
- }
-
section_info->len =
le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
@@ -217,15 +369,40 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le
section_info->dladdr =
le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
section_info->addr = bin;
- bin += section_info->len;
+
+ if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
+ ret = __parse_security_section(rtwdev, info, section_info,
+ section, bin, &mssc_len);
+ if (ret)
+ return ret;
+ } else {
+ section_info->mssc = 0;
+ mssc_len = 0;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
+ i, section_info->type, section_info->len,
+ section_info->mssc, mssc_len, bin - fw);
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
+ section_info->ignore, section_info->key_addr,
+ section_info->key_addr ?
+ section_info->key_addr - section_info->addr : 0,
+ section_info->key_len, section_info->key_idx);
+
+ bin += section_info->len + mssc_len;
section_info++;
}
- if (fw_end != bin + mssc_len) {
+ if (fw_end != bin) {
rtw89_err(rtwdev, "[ERR]fw bin size\n");
return -EINVAL;
}
+ if (!info->secure_section_exist)
+ rtw89_warn(rtwdev, "no firmware secure section\n");
+
return 0;
}
@@ -458,6 +635,8 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -919,9 +1098,56 @@ static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
len + H2C_HEADER_LEN));
}
-static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
+static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr *fw_hdr)
+{
+ le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
+ FW_HDR_W7_PART_SIZE);
+
+ return 0;
+}
+
+static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_bin_info *info,
+ struct rtw89_fw_hdr_v1 *fw_hdr)
{
+ struct rtw89_fw_hdr_section_info *section_info;
+ struct rtw89_fw_hdr_section_v1 *section;
+ u8 dst_sec_idx = 0;
+ u8 sec_idx;
+
+ le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
+ FW_HDR_V1_W7_PART_SIZE);
+
+ for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
+ section_info = &info->section_info[sec_idx];
+ section = &fw_hdr->sections[sec_idx];
+
+ if (section_info->ignore)
+ continue;
+
+ if (dst_sec_idx != sec_idx)
+ fw_hdr->sections[dst_sec_idx] = *section;
+
+ dst_sec_idx++;
+ }
+
+ le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);
+
+ return (info->section_num - dst_sec_idx) * sizeof(*section);
+}
+
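The v1 header tweak compacts fw_hdr->sections[] in place: descriptors marked ignore are skipped, survivors slide down, and the function returns the byte count of dropped descriptors so the caller can trim the skb. The same two-index pattern in isolation (generic sketch, not driver code):

/* In-place compaction mirroring the dst_sec_idx loop above. */
#include <stdbool.h>
#include <stddef.h>

static size_t compact(int *arr, const bool *ignore, size_t n)
{
	size_t dst = 0;

	for (size_t src = 0; src < n; src++) {
		if (ignore[src])
			continue;
		if (dst != src)
			arr[dst] = arr[src];	/* slide kept entry down */
		dst++;
	}
	return n - dst;	/* entries dropped off the tail */
}
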
+static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_suit *fw_suit,
+ struct rtw89_fw_bin_info *info)
+{
+ u32 len = info->hdr_len - info->dynamic_hdr_len;
+ struct rtw89_fw_hdr_v1 *fw_hdr_v1;
+ const u8 *fw = fw_suit->data;
+ struct rtw89_fw_hdr *fw_hdr;
struct sk_buff *skb;
+ u32 truncated;
u32 ret = 0;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
@@ -931,7 +1157,26 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
}
skb_put_data(skb, fw, len);
- SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
+
+ switch (fw_suit->hdr_ver) {
+ case 0:
+ fw_hdr = (struct rtw89_fw_hdr *)skb->data;
+ truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
+ break;
+ case 1:
+ fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
+ truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto fail;
+ }
+
+ if (truncated) {
+ len -= truncated;
+ skb_trim(skb, len);
+ }
+
rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FWDL,
H2C_FUNC_MAC_FWHDR_DL, len);
@@ -950,12 +1195,14 @@ fail:
return ret;
}
-static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
+static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_suit *fw_suit,
+ struct rtw89_fw_bin_info *info)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
int ret;
- ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
+ ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
if (ret) {
rtw89_err(rtwdev, "[ERR]FW header download\n");
return ret;
@@ -979,9 +1226,21 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
struct sk_buff *skb;
const u8 *section = info->addr;
u32 residue_len = info->len;
+ bool copy_key = false;
u32 pkt_len;
int ret;
+ if (info->ignore)
+ return 0;
+
+ if (info->key_addr && info->key_len) {
+ if (info->len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len)
+ rtw89_warn(rtwdev, "skip copying key data due to len %d, %d, %d\n",
+ info->len, FWDL_SECTION_PER_PKT_LEN, info->key_len);
+ else
+ copy_key = true;
+ }
+
while (residue_len) {
if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
pkt_len = FWDL_SECTION_PER_PKT_LEN;
@@ -995,6 +1254,10 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
}
skb_put_data(skb, section, pkt_len);
+ if (copy_key)
+ memcpy(skb->data + pkt_len - info->key_len,
+ info->key_addr, info->key_len);
+
ret = rtw89_h2c_tx(rtwdev, skb, true);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
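
The copy_key path only runs when the whole section fits in a single packet and is at least key_len bytes (checked before the loop), so the selected key can simply overwrite the packet tail. The offset arithmetic under those same preconditions (sketch):

/* Tail overwrite as in the memcpy above; lengths validated by the caller. */
#include <string.h>
#include <stdint.h>

static void copy_key_tail(uint8_t *pkt, size_t pkt_len,
			  const uint8_t *key, size_t key_len)
{
	memcpy(pkt + pkt_len - key_len, key, key_len);
}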
@@ -1101,7 +1364,7 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
struct rtw89_fw_suit *fw_suit)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
- struct rtw89_fw_bin_info info;
+ struct rtw89_fw_bin_info info = {};
int ret;
ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
@@ -1120,8 +1383,7 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len -
- info.dynamic_hdr_len);
+ ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
if (ret)
return ret;
@@ -1485,13 +1747,108 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
-#define H2C_BA_CAM_LEN 8
+int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
+
+ rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif, rtwsta, h2c);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);
+
+int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) |
+ le32_encode_bits(1, DCTLINFO_V2_C0_OP);
+
+ h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL);
+ h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL);
+ h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL);
+ h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL);
+ h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL);
+ h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL);
+ h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL);
+ h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL);
+ h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL);
+ h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL);
+ h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL);
+ h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL);
+ h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);
+
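The dctlinfo layout pairs every value word with a mask word (m0..m12 above); setting a mask to its *_ALL constant asks firmware to replace the whole stored word. The firmware-side effect is presumably a masked merge, sketched below (an inference from the mask naming, not a documented contract):

/* Masked merge: only bits set in mask take the new value. */
#include <stdint.h>

static uint32_t apply_masked(uint32_t stored, uint32_t val, uint32_t mask)
{
	return (stored & ~mask) | (val & mask);
}
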
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_h2c_ba_cam *h2c;
u8 macid = rtwsta->mac_id;
+ u32 len = sizeof(*h2c);
struct sk_buff *skb;
u8 entry_idx;
int ret;
@@ -1509,32 +1866,34 @@ int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
return 0;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
return -ENOMEM;
}
- skb_put(skb, H2C_BA_CAM_LEN);
- SET_BA_CAM_MACID(skb->data, macid);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam *)skb->data;
+
+ h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID);
if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
- SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
+ h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1);
else
- SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
+ h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX);
if (!valid)
goto end;
- SET_BA_CAM_VALID(skb->data, valid);
- SET_BA_CAM_TID(skb->data, params->tid);
+ h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) |
+ le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID);
if (params->buf_size > 64)
- SET_BA_CAM_BMAP_SIZE(skb->data, 4);
+ h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
else
- SET_BA_CAM_BMAP_SIZE(skb->data, 0);
+ h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
/* If init req is set, hw will set the ssn */
- SET_BA_CAM_INIT_REQ(skb->data, 1);
- SET_BA_CAM_SSN(skb->data, params->ssn);
+ h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) |
+ le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN);
if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
- SET_BA_CAM_STD_EN(skb->data, 1);
- SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
+ h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BA_CAM_W1_BAND);
}
end:
@@ -1542,7 +1901,7 @@ end:
H2C_CAT_MAC,
H2C_CL_BA_CAM,
H2C_FUNC_MAC_BA_CAM, 0, 1,
- H2C_BA_CAM_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -1556,31 +1915,35 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam);
static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
u8 entry_idx, u8 uid)
{
+ struct rtw89_h2c_ba_cam *h2c;
+ u32 len = sizeof(*h2c);
struct sk_buff *skb;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
return -ENOMEM;
}
- skb_put(skb, H2C_BA_CAM_LEN);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam *)skb->data;
- SET_BA_CAM_VALID(skb->data, 1);
- SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
- SET_BA_CAM_UID(skb->data, uid);
- SET_BA_CAM_BAND(skb->data, 0);
- SET_BA_CAM_STD_EN(skb->data, 0);
+ h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID);
+ h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) |
+ le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) |
+ le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) |
+ le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
H2C_CL_BA_CAM,
H2C_FUNC_MAC_BA_CAM, 0, 1,
- H2C_BA_CAM_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -1609,14 +1972,132 @@ void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
}
}
+int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_h2c_ba_cam_v1 *h2c;
+ u8 macid = rtwsta->mac_id;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 entry_idx;
+ u8 bmap_size;
+ int ret;
+
+ ret = valid ?
+ rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
+ rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
+ if (ret) {
+ /* it still works even if we don't have static BA CAM, because
+ * hardware can create dynamic BA CAM automatically.
+ */
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "failed to %s entry tid=%d for h2c ba cam\n",
+ valid ? "alloc" : "free", params->tid);
+ return 0;
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data;
+
+ if (params->buf_size > 512)
+ bmap_size = 10;
+ else if (params->buf_size > 256)
+ bmap_size = 8;
+ else if (params->buf_size > 64)
+ bmap_size = 4;
+ else
+ bmap_size = 0;
+
+ h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) |
+ le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) |
+ le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) |
+ le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) |
+ le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) |
+ le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK);
+
+ entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */
+ h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) |
+ le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) |
+ le32_encode_bits(!!rtwvif->mac_idx, RTW89_H2C_BA_CAM_V1_W1_BAND_SEL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_BA_CAM,
+ H2C_FUNC_MAC_BA_CAM_V1, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1);
+
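The v1 command extends the bitmap-size ladder up to the 802.11be reorder-window sizes. The same thresholds as the if/else chain above, as a standalone helper:

#include <stdint.h>

static uint8_t ba_bmap_size_v1(uint16_t buf_size)
{
	if (buf_size > 512)
		return 10;
	if (buf_size > 256)
		return 8;
	if (buf_size > 64)
		return 4;
	return 0;
}
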
+int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
+ u8 offset, u8 mac_idx)
+{
+ struct rtw89_h2c_ba_cam_init *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_ba_cam_init *)skb->data;
+
+ h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) |
+ le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) |
+ le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_BA_CAM,
+ H2C_FUNC_MAC_BA_CAM_INIT, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_LOG_CFG_LEN 12
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
struct sk_buff *skb;
- u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
- BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
+ u32 comp = 0;
int ret;
+ if (enable)
+ comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
+ BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) |
+ BIT(RTW89_FW_LOG_COMP_SCAN);
+
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
@@ -1815,6 +2296,50 @@ fail:
return ret;
}
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ rtwvif->sub_entity_idx);
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_h2c_lps_ch_info *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ if (chip->chip_gen != RTW89_CHIP_BE)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;
+
+ h2c->info[0].central_ch = chan->channel;
+ h2c->info[0].pri_ch = chan->primary_channel;
+ h2c->info[0].band = chan->band_type;
+ h2c->info[0].bw = chan->band_width;
+ h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
+ H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_p2p_noa_desc *desc,
@@ -1892,11 +2417,12 @@ static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
#define H2C_CMC_TBL_LEN 68
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 macid = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
struct sk_buff *skb;
- u8 macid = rtwvif->mac_id;
int ret;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
@@ -1937,6 +2463,91 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl);
+
+int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE);
+ h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL);
+
+ h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) |
+ le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) |
+ le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
+ h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL);
+
+ h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL);
+
+ h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL);
+
+ h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL);
+
+ h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
+ le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+ h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL);
+
+ h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE);
+ h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL);
+
+ h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) |
+ le32_encode_bits(1, CCTLINFO_G7_W7_NR) |
+ le32_encode_bits(1, CCTLINFO_G7_W7_CB) |
+ le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) |
+ le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE);
+ h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL);
+
+ h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL);
+
+ h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) |
+ le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) |
+ le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L);
+ h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL);
+
+ h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) |
+ le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) |
+ le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE);
+ h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7);
static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta, u8 *pads)
@@ -1950,9 +2561,6 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
u16 ppe;
int i;
- if (!sta->deflink.he_cap.has_he)
- return;
-
ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
if (!ppe_th) {
@@ -2011,7 +2619,7 @@ int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
int ret;
memset(pads, 0, sizeof(pads));
- if (sta)
+ if (sta && sta->deflink.he_cap.has_he)
__get_sta_he_pkt_padding(rtwdev, sta, pads);
if (vif->p2p)
@@ -2073,6 +2681,246 @@ fail:
return ret;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl);
+
+static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta, u8 *pads)
+{
+ u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
+ u16 ppe_thres_hdr;
+ u8 ppe16, ppe8;
+ u8 n, idx, sh;
+ u8 ru_bitmap;
+ bool ppe_th;
+ u16 ppe;
+ int i;
+
+ ppe_th = !!u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT);
+ if (!ppe_th) {
+ u8 pad;
+
+ pad = u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+ for (i = 0; i < RTW89_PPE_BW_NUM; i++)
+ pads[i] = pad;
+
+ return;
+ }
+
+ ppe_thres_hdr = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres);
+ ru_bitmap = u16_get_bits(ppe_thres_hdr,
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n = hweight8(ru_bitmap);
+ n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE +
+ (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
+
+ for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
+ if (!(ru_bitmap & BIT(i))) {
+ pads[i] = 1;
+ continue;
+ }
+
+ idx = n >> 3;
+ sh = n & 7;
+ n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;
+
+ ppe = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres + idx);
+ ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+ sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
+ ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+
+ if (ppe16 != 7 && ppe8 == 7)
+ pads[i] = 2;
+ else if (ppe8 != 7)
+ pads[i] = 1;
+ else
+ pads[i] = 0;
+ }
+}
+
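__get_sta_eht_pkt_padding treats eht_ppe_thres as a bit stream: n is a running bit offset, idx/sh split it into a byte index plus shift, and a 16-bit load is wide enough to cover both 3-bit PPET fields even when they straddle a byte boundary. The extraction step in isolation (sketch; idx + 1 assumed in bounds):

#include <stdint.h>

/* Pull the 3-bit PPET16/PPET8 pair that starts at bit offset n. */
static void ppe_pair(const uint8_t *thres, unsigned int n,
		     uint8_t *ppe16, uint8_t *ppe8)
{
	unsigned int idx = n >> 3;	/* byte holding bit n */
	unsigned int sh = n & 7;	/* bit position inside it */
	uint16_t ppe = (uint16_t)(thres[idx] | (thres[idx + 1] << 8));

	*ppe16 = (ppe >> sh) & 0x7;
	*ppe8 = (ppe >> (sh + 3)) & 0x7;
}
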
+int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u8 pads[RTW89_PPE_BW_NUM];
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u16 lowest_rate;
+ int ret;
+
+ memset(pads, 0, sizeof(pads));
+ if (sta) {
+ if (sta->deflink.eht_cap.has_eht)
+ __get_sta_eht_pkt_padding(rtwdev, sta, pads);
+ else if (sta->deflink.he_cap.has_he)
+ __get_sta_he_pkt_padding(rtwdev, sta, pads);
+ }
+
+ if (vif->p2p)
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ lowest_rate = RTW89_HW_RATE_CCK1;
+ else
+ lowest_rate = RTW89_HW_RATE_OFDM6;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) |
+ le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB);
+ h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB |
+ CCTLINFO_G7_W0_DISDATAFB);
+
+ h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
+ h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
+
+ h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
+ h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
+
+ h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
+ h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
+
+ h2c->w4 = le32_encode_bits(rtwvif->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
+ h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID);
+
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
+ h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM);
+ h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM);
+ }
+
+ if (vif->bss_conf.eht_support) {
+ u16 punct = vif->bss_conf.chanreq.oper.punctured;
+
+ h2c->w4 |= le32_encode_bits(~punct,
+ CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
+ }
+
+ h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
+ le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+ h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
+ CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
+
+ h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
+ CCTLINFO_G7_W6_ULDL);
+ h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL);
+
+ if (sta) {
+ h2c->w8 = le32_encode_bits(sta->deflink.he_cap.has_he,
+ CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
+ h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);
+
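For EHT, mac80211's punctured bitmap sets a bit per disabled 20 MHz subchannel, while ACT_SUBCH_CBW carries the active ones, hence the bitwise NOT before encoding (the field mask trims the unused high bits). Sketch:

#include <stdint.h>

static uint16_t active_subch(uint16_t punctured)
{
	return (uint16_t)~punctured;	/* 1 = usable 20 MHz subchannel */
}
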
+int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u16 agg_num = 0;
+ u8 ba_bmap = 0;
+ int ret;
+ u8 tid;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
+
+ for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
+ if (agg_num == 0)
+ agg_num = rtwsta->ampdu_params[tid].agg_num;
+ else
+ agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
+ }
+
+ if (agg_num <= 0x20)
+ ba_bmap = 3;
+ else if (agg_num > 0x20 && agg_num <= 0x40)
+ ba_bmap = 0;
+ else if (agg_num > 0x40 && agg_num <= 0x80)
+ ba_bmap = 1;
+ else if (agg_num > 0x80 && agg_num <= 0x100)
+ ba_bmap = 2;
+ else if (agg_num > 0x100 && agg_num <= 0x200)
+ ba_bmap = 4;
+ else if (agg_num > 0x200 && agg_num <= 0x400)
+ ba_bmap = 5;
+
+ h2c->c0 = le32_encode_bits(rtwsta->mac_id, CCTLINFO_G7_C0_MACID) |
+ le32_encode_bits(1, CCTLINFO_G7_C0_OP);
+
+ h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP);
+ h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);
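
Note the BA bitmap code derived from the minimum per-TID agg_num is not monotonic: the smallest window maps to 3 while mid-range windows map to 0..2 before the large EHT sizes at 4 and 5. The same thresholds as a standalone helper:

#include <stdint.h>

static uint8_t ampdu_ba_bmap(uint16_t agg_num)
{
	if (agg_num <= 0x20)
		return 3;
	if (agg_num <= 0x40)
		return 0;
	if (agg_num <= 0x80)
		return 1;
	if (agg_num <= 0x100)
		return 2;
	if (agg_num <= 0x200)
		return 4;
	if (agg_num <= 0x400)
		return 5;
	return 0;	/* >0x400 keeps the default, as above */
}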
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta)
@@ -2155,18 +3003,20 @@ fail:
return ret;
}
-#define H2C_BCN_BASE_LEN 12
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
- struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
rtwvif->sub_entity_idx);
- struct sk_buff *skb;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_h2c_bcn_upd *h2c;
struct sk_buff *skb_beacon;
- u16 tim_offset;
+ struct ieee80211_hdr *hdr;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
int bcn_total_len;
u16 beacon_rate;
+ u16 tim_offset;
void *noa_data;
u8 noa_len;
int ret;
@@ -2192,23 +3042,27 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
skb_put_data(skb_beacon, noa_data, noa_len);
}
- bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
+ hdr = (struct ieee80211_hdr *)skb_beacon->data;
+ tim_offset -= ieee80211_hdrlen(hdr->frame_control);
+
+ bcn_total_len = len + skb_beacon->len;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
dev_kfree_skb_any(skb_beacon);
return -ENOMEM;
}
- skb_put(skb, H2C_BCN_BASE_LEN);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_bcn_upd *)skb->data;
- SET_BCN_UPD_PORT(skb->data, rtwvif->port);
- SET_BCN_UPD_MBSSID(skb->data, 0);
- SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
- SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
- SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
- SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
- SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
- SET_BCN_UPD_RATE(skb->data, beacon_rate);
+ h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_W0_PORT) |
+ le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
+ le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
+ h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
+ le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
+ le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
+ le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);
skb_put_data(skb, skb_beacon->data, skb_beacon->len);
dev_kfree_skb_any(skb_beacon);
@@ -2227,6 +3081,90 @@ int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
return 0;
}
+EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
+
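ieee80211_beacon_get_tim() reports tim_offset from the start of the frame, but GRP_IE_OFST wants it relative to the end of the 802.11 header, hence the ieee80211_hdrlen() subtraction; the OR with BIT(7) presumably marks the offset as present (an assumption, the field is not documented in this patch). Rebasing sketch:

#include <stdint.h>

/* tim_off: from frame start (mac80211); hdrlen: ieee80211_hdrlen() result.
 * Treating BIT(7) as an "offset valid" flag is an assumption.
 */
static uint8_t grp_ie_ofst(uint16_t tim_off, uint16_t hdrlen)
{
	return (uint8_t)(tim_off - hdrlen) | 0x80;
}
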
+int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_h2c_bcn_upd_be *h2c;
+ struct sk_buff *skb_beacon;
+ struct ieee80211_hdr *hdr;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int bcn_total_len;
+ u16 beacon_rate;
+ u16 tim_offset;
+ void *noa_data;
+ u8 noa_len;
+ int ret;
+
+ if (vif->p2p)
+ beacon_rate = RTW89_HW_RATE_OFDM6;
+ else if (chan->band_type == RTW89_BAND_2G)
+ beacon_rate = RTW89_HW_RATE_CCK1;
+ else
+ beacon_rate = RTW89_HW_RATE_OFDM6;
+
+ skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
+ NULL, 0);
+ if (!skb_beacon) {
+ rtw89_err(rtwdev, "failed to get beacon skb\n");
+ return -ENOMEM;
+ }
+
+ noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
+ if (noa_len &&
+ (noa_len <= skb_tailroom(skb_beacon) ||
+ pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
+ skb_put_data(skb_beacon, noa_data, noa_len);
+ }
+
+ hdr = (struct ieee80211_hdr *)skb_beacon->data;
+ tim_offset -= ieee80211_hdrlen(hdr->frame_control);
+
+ bcn_total_len = len + skb_beacon->len;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
+ dev_kfree_skb_any(skb_beacon);
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;
+
+ h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
+ le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
+ le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
+ h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
+ le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
+ le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
+ le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);
+
+ skb_put_data(skb, skb_beacon->data, skb_beacon->len);
+ dev_kfree_skb_any(skb_beacon);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
+ bcn_total_len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
@@ -2277,45 +3215,93 @@ fail:
return ret;
}
-#define H2C_JOIN_INFO_LEN 4
+static enum rtw89_fw_sta_type
+rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta);
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+
+ if (!sta)
+ goto by_vif;
+
+ if (sta->deflink.eht_cap.has_eht)
+ return RTW89_FW_BE_STA;
+ else if (sta->deflink.he_cap.has_he)
+ return RTW89_FW_AX_STA;
+ else
+ return RTW89_FW_N_AC_STA;
+
+by_vif:
+ if (vif->bss_conf.eht_support)
+ return RTW89_FW_BE_STA;
+ else if (vif->bss_conf.he_support)
+ return RTW89_FW_AX_STA;
+ else
+ return RTW89_FW_N_AC_STA;
+}
+
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, bool dis_conn)
{
struct sk_buff *skb;
u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
u8 self_role = rtwvif->self_role;
+ enum rtw89_fw_sta_type sta_type;
u8 net_type = rtwvif->net_type;
+ struct rtw89_h2c_join_v1 *h2c_v1;
+ struct rtw89_h2c_join *h2c;
+ u32 len = sizeof(*h2c);
+ bool format_v1 = false;
int ret;
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ len = sizeof(*h2c_v1);
+ format_v1 = true;
+ }
+
if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
self_role = RTW89_SELF_ROLE_AP_CLIENT;
net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
}
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
}
- skb_put(skb, H2C_JOIN_INFO_LEN);
- SET_JOININFO_MACID(skb->data, mac_id);
- SET_JOININFO_OP(skb->data, dis_conn);
- SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
- SET_JOININFO_WMM(skb->data, rtwvif->wmm);
- SET_JOININFO_TGR(skb->data, rtwvif->trigger);
- SET_JOININFO_ISHESTA(skb->data, 0);
- SET_JOININFO_DLBW(skb->data, 0);
- SET_JOININFO_TF_MAC_PAD(skb->data, 0);
- SET_JOININFO_DL_T_PE(skb->data, 0);
- SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
- SET_JOININFO_NET_TYPE(skb->data, net_type);
- SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
- SET_JOININFO_SELF_ROLE(skb->data, self_role);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_join *)skb->data;
+
+ h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) |
+ le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) |
+ le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
+ le32_encode_bits(rtwvif->wmm, RTW89_H2C_JOININFO_W0_WMM) |
+ le32_encode_bits(rtwvif->trigger, RTW89_H2C_JOININFO_W0_TGR) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) |
+ le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) |
+ le32_encode_bits(rtwvif->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
+ le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) |
+ le32_encode_bits(rtwvif->wifi_role, RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
+ le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE);
+ if (!format_v1)
+ goto done;
+
+ h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;
+
+ sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif, rtwsta);
+
+ h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE);
+ h2c_v1->w2 = 0;
+
+done:
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
H2C_FUNC_MAC_JOININFO, 0, 1,
- H2C_JOIN_INFO_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -2368,24 +3354,49 @@ fail:
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
bool pause)
{
- struct rtw89_fw_macid_pause_grp h2c = {{0}};
- u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
+ struct rtw89_fw_macid_pause_sleep_grp *h2c_new;
+ struct rtw89_fw_macid_pause_grp *h2c;
+ __le32 set = cpu_to_le32(BIT(sh));
+ u8 h2c_macid_pause_id;
struct sk_buff *skb;
+ u32 len;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
+ if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) {
+ h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP;
+ len = sizeof(*h2c_new);
+ } else {
+ h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE;
+ len = sizeof(*h2c);
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
+ rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
return -ENOMEM;
}
- h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
- if (pause)
- h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
- skb_put_data(skb, &h2c, len);
+ skb_put(skb, len);
+
+ if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) {
+ h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data;
+
+ h2c_new->n[0].pause_mask_grp[grp] = set;
+ h2c_new->n[0].sleep_mask_grp[grp] = set;
+ if (pause) {
+ h2c_new->n[0].pause_grp[grp] = set;
+ h2c_new->n[0].sleep_grp[grp] = set;
+ }
+ } else {
+ h2c = (struct rtw89_fw_macid_pause_grp *)skb->data;
+
+ h2c->mask_grp[grp] = set;
+ if (pause)
+ h2c->pause_grp[grp] = set;
+ }
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
- H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
+ h2c_macid_pause_id, 1, 0,
len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
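
Both layouts address a macid as bit sh of 32-bit group word grp; the mask word tells firmware which bit to look at and the pause/sleep word carries the new state. Addressing sketch (the group count here is illustrative, not the driver's constant):

#include <stdint.h>

struct grp_words { uint32_t pause[4], mask[4]; };	/* 4 groups = 128 macids */

static void set_macid_pause(struct grp_words *g, uint8_t macid, int pause)
{
	uint8_t grp = macid / 32, sh = macid % 32;

	g->mask[grp] |= 1u << sh;	/* "update this macid" */
	if (pause)
		g->pause[grp] |= 1u << sh;
}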
@@ -2516,6 +3527,8 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
{
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct ieee80211_bss_conf *bss_conf = vif ? &vif->bss_conf : NULL;
+ s32 thold = RTW89_DEFAULT_CQM_THOLD;
+ u32 hyst = RTW89_DEFAULT_CQM_HYST;
struct rtw89_h2c_bcnfltr *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
@@ -2536,14 +3549,19 @@ int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
skb_put(skb, len);
h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
+ if (bss_conf->cqm_rssi_hyst)
+ hyst = bss_conf->cqm_rssi_hyst;
+ if (bss_conf->cqm_rssi_thold)
+ thold = bss_conf->cqm_rssi_thold;
+
h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
RTW89_H2C_BCNFLTR_W0_MODE) |
le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
- le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
- le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI,
+ le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
+ le32_encode_bits(thold + MAX_RSSI,
RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
@@ -2735,11 +3753,11 @@ fail:
return ret;
}
-int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_init_info *init_info = &dm->init_info;
+ struct rtw89_btc_init_info *init_info = &dm->init_info.init;
struct rtw89_btc_module *module = &init_info->module;
struct rtw89_btc_ant_info *ant = &module->ant;
struct rtw89_h2c_cxinit *h2c;
@@ -2755,7 +3773,7 @@ int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
skb_put(skb, len);
h2c = (struct rtw89_h2c_cxinit *)skb->data;
- h2c->hdr.type = CXDRVINFO_INIT;
+ h2c->hdr.type = type;
h2c->hdr.len = len - H2C_LEN_CXDRVHDR;
h2c->ant_type = ant->type;
@@ -2802,12 +3820,53 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7;
+ struct rtw89_h2c_cxinit_v7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fcxinit;
+ h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
+ h2c->init = *init_info;
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define PORT_DATA_OFFSET 4
#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
#define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
(4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -2832,7 +3891,7 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
skb_put(skb, len);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
@@ -2888,7 +3947,7 @@ fail:
#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
(4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -2912,7 +3971,7 @@ int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
skb_put(skb, len);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
@@ -2978,7 +4037,7 @@ fail:
#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \
(4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
@@ -3002,7 +4061,7 @@ int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev)
skb_put(skb, len);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
@@ -3062,11 +4121,11 @@ fail:
}
#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
const struct rtw89_btc_ver *ver = btc->ver;
- struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
+ struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl;
struct sk_buff *skb;
u8 *cmd;
int ret;
@@ -3079,7 +4138,7 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
@@ -3106,8 +4165,47 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7;
+ struct rtw89_h2c_cxctrl_v7 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.ver = btc->ver->fcxctrl;
+ h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7;
+ h2c->ctrl = *ctrl;
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
@@ -3123,7 +4221,7 @@ int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev)
skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
@@ -3163,7 +4261,7 @@ fail:
}
#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
-int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
+int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
@@ -3180,7 +4278,7 @@ int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
cmd = skb->data;
- RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
@@ -3296,62 +4394,163 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
return 0;
}
-#define H2C_LEN_SCAN_LIST_OFFLOAD 4
-int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list)
{
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_chinfo_elem *elem;
struct rtw89_mac_chinfo *ch_info;
+ struct rtw89_h2c_chinfo *h2c;
struct sk_buff *skb;
- int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
unsigned int cond;
- u8 *cmd;
+ int skb_len;
int ret;
+ static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
+
+ skb_len = struct_size(h2c, elem, ch_num);
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
return -ENOMEM;
}
- skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
- cmd = skb->data;
+ skb_put(skb, sizeof(*h2c));
+ h2c = (struct rtw89_h2c_chinfo *)skb->data;
+
+ h2c->ch_num = ch_num;
+ h2c->elem_size = sizeof(*elem) / 4; /* in units of 4 bytes */
+
+ list_for_each_entry(ch_info, chan_list, list) {
+ elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem));
+
+ elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) |
+ le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) |
+ le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) |
+ le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH);
+
+ elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) |
+ le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) |
+ le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) |
+ le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) |
+ le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) |
+ le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) |
+ le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) |
+ le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) |
+ le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) |
+ le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM);
+
+ elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) |
+ le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) |
+ le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) |
+ le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3);
+
+ elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) |
+ le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) |
+ le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) |
+ le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
+
+ cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
+ return ret;
+ }
+
+ return 0;
+}
+
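struct_size(h2c, elem, ch_num) sizes the fixed header plus a flexible array of ch_num elements with overflow checking, and the static_assert pins the element layout to the RTW89_MAC_CHINFO_SIZE the firmware expects. Plain-C equivalent of the sizing, minus the kernel helper's overflow guard (field names are stand-ins):

#include <stddef.h>
#include <stdint.h>

struct chinfo {
	uint8_t ch_num, elem_size, arg, rsvd;
	uint32_t elem[];	/* stand-in for the per-channel words */
};

static size_t chinfo_bytes(size_t ch_num, size_t elem_bytes)
{
	return sizeof(struct chinfo) + ch_num * elem_bytes;
}
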
+int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
+ struct list_head *chan_list)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_chinfo_elem_be *elem;
+ struct rtw89_mac_chinfo_be *ch_info;
+ struct rtw89_h2c_chinfo *h2c;
+ struct sk_buff *skb;
+ unsigned int cond;
+ int skb_len;
+ int ret;
+
+ static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
+
+ skb_len = struct_size(h2c, elem, ch_num);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, sizeof(*h2c));
+ h2c = (struct rtw89_h2c_chinfo *)skb->data;
- RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
- /* in unit of 4 bytes */
- RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
+ h2c->ch_num = ch_num;
+ h2c->elem_size = sizeof(*elem) / 4; /* in units of 4 bytes */
+ h2c->arg = u8_encode_bits(RTW89_PHY_0, RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK);
list_for_each_entry(ch_info, chan_list, list) {
- cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
-
- RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
- RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
- RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
- RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
- RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
- RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
- RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
- RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
- RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
- RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
- RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
- RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
- RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
- RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
- RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
- RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
- RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
- RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
- RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
- RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
- RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
- RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
+ elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));
+
+ elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) |
+ le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
+ le32_encode_bits(ch_info->central_ch,
+ RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
+ le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);
+
+ elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) |
+ le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) |
+ le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) |
+ le32_encode_bits(ch_info->pause_data,
+ RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) |
+ le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) |
+ le32_encode_bits(ch_info->rand_seq_num,
+ RTW89_H2C_CHINFO_BE_W1_RANDOM) |
+ le32_encode_bits(ch_info->notify_action,
+ RTW89_H2C_CHINFO_BE_W1_NOTIFY) |
+ le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0,
+ RTW89_H2C_CHINFO_BE_W1_PROBE) |
+ le32_encode_bits(ch_info->leave_crit,
+ RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) |
+ le32_encode_bits(ch_info->chkpt_timer,
+ RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER);
+
+ elem->w2 = le32_encode_bits(ch_info->leave_time,
+ RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) |
+ le32_encode_bits(ch_info->leave_th,
+ RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) |
+ le32_encode_bits(ch_info->tx_pkt_ctrl,
+ RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL);
+
+ elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) |
+ le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) |
+ le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) |
+ le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3);
+
+ elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) |
+ le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) |
+ le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) |
+ le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7);
+
+ elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) |
+ le32_encode_bits(ch_info->fw_probe0_ssids,
+ RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS);
+
+ elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids,
+ RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
+ le32_encode_bits(ch_info->fw_probe0_bssids,
+ RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
}
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
- cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH);
+ cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
if (ret) {
@@ -3410,7 +4609,10 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
H2C_FUNC_SCANOFLD, 1, 1,
len);
- cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD);
+ if (option->enable)
+ cond = RTW89_SCANOFLD_WAIT_COND_START;
+ else
+ cond = RTW89_SCANOFLD_WAIT_COND_STOP;
ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
if (ret) {
@@ -3421,6 +4623,169 @@ int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
return 0;
}
+static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+ u8 i, idx;
+
+ sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+ if (chan->flags & IEEE80211_CHAN_DISABLED) {
+ idx = (chan->hw_value - 1) / 4;
+ option->prohib_chan |= BIT(idx);
+ }
+ }
+}
+
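6 GHz channel numbers step by four (1, 5, 9, ...), so (hw_value - 1) / 4 maps a channel number to a contiguous bit index, letting the whole band's prohibit mask fit in the 64 bits split across W6/W7 below. Index sketch:

#include <stdint.h>

static uint64_t prohibit_bit(uint8_t hw_value)	/* 6 GHz channel number */
{
	return 1ull << ((hw_value - 1) / 4);	/* ch 1 -> bit 0, ch 233 -> bit 58 */
}
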
+int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_scanofld_be_macc_role *macc_role;
+ struct rtw89_chan *op = &scan_info->op_chan;
+ struct rtw89_h2c_scanofld_be_opch *opch;
+ struct rtw89_h2c_scanofld_be *h2c;
+ struct sk_buff *skb;
+ u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
+ u8 opch_size = sizeof(*opch) * option->num_opch;
+ u8 probe_id[NUM_NL80211_BANDS];
+ unsigned int cond;
+ void *ptr;
+ int ret;
+ u32 len;
+ u8 i;
+
+ rtw89_scan_get_6g_disabled_chan(rtwdev, option);
+
+ /* probe_id is read for each band below; start from "no probe packet" */
+ memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
+
+ len = sizeof(*h2c) + macc_role_size + opch_size;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_scanofld_be *)skb->data;
+ ptr = skb->data;
+
+ h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) |
+ le32_encode_bits(option->scan_mode,
+ RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) |
+ le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) |
+ le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) |
+ le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) |
+ le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) |
+ le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) |
+ le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND);
+
+ h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) |
+ le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) |
+ le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD);
+
+ h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) |
+ le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) |
+ le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END);
+
+ h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) |
+ le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID);
+
+ h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ],
+ RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
+ le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
+ RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
+
+ h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);
+
+ h2c->w6 = le32_encode_bits(option->prohib_chan,
+ RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
+ h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
+ RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
+ ptr += sizeof(*h2c);
+
+ for (i = 0; i < option->num_macc_role; i++) {
+ macc_role = (struct rtw89_h2c_scanofld_be_macc_role *)&h2c->role[i];
+ macc_role->w0 =
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) |
+ le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END);
+ ptr += sizeof(*macc_role);
+ }
+
+ for (i = 0; i < option->num_opch; i++) {
+ opch = ptr;
+ opch->w0 = le32_encode_bits(rtwvif->mac_id,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) |
+ le32_encode_bits(option->band,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) |
+ le32_encode_bits(rtwvif->port,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
+ le32_encode_bits(RTW89_SCAN_OPMODE_INTV,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
+ le32_encode_bits(true,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) |
+ le32_encode_bits(RTW89_OFF_CHAN_TIME / 10,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
+
+ opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) |
+ le32_encode_bits(op->band_type,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
+ le32_encode_bits(op->band_width,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
+ le32_encode_bits(0x3,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) |
+ le32_encode_bits(op->primary_channel,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) |
+ le32_encode_bits(op->channel,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH);
+
+ opch->w2 = le32_encode_bits(0,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) |
+ le32_encode_bits(0,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
+ le32_encode_bits(2,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS);
+
+ opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) |
+ le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) |
+ le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
+ le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
+ RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);
+ ptr += sizeof(*opch);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_SCANOFLD_BE, 1, 1,
+ len);
+
+ if (option->enable)
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
+ else
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n");
+ return ret;
+ }
+
+ return 0;
+}
+
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page)
@@ -3497,6 +4862,328 @@ fail:
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
+int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ struct rtw89_fw_h2c_rfk_pre_info *h2c;
+ u8 tbl_sel = rfk_mcc->table_idx;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 tbl, path;
+ u32 val32;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
+
+ h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
+
+ BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
+
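+ /* replicate the RFK MCC channel/band table to both RF paths */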
+ for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
+ for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
+ h2c->dbcc.ch[path][tbl] = cpu_to_le32(rfk_mcc->ch[tbl]);
+ h2c->dbcc.band[path][tbl] = cpu_to_le32(rfk_mcc->band[tbl]);
+ }
+ }
+
+ for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
+ h2c->tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
+ h2c->tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]);
+ }
+
+ h2c->phy_idx = cpu_to_le32(phy_idx);
+ h2c->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]);
+ h2c->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]);
+ h2c->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]);
+
+ val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
+ h2c->ktbl_sel0 = cpu_to_le32(val32);
+ val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
+ h2c->ktbl_sel1 = cpu_to_le32(val32);
+ val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
+ h2c->rfmod0 = cpu_to_le32(val32);
+ val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
+ h2c->rfmod1 = cpu_to_le32(val32);
+
+ if (rtw89_is_mlo_1_1(rtwdev))
+ h2c->mlo_1_1 = cpu_to_le32(1);
+
+ h2c->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_PRE_NOTIFY, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_h2c_rf_tssi *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_tssi *)skb->data;
+
+ h2c->len = cpu_to_le16(len);
+ h2c->phy = phy_idx;
+ h2c->ch = chan->channel;
+ h2c->bw = chan->band_width;
+ h2c->band = chan->band_type;
+ h2c->hwtx_en = true;
+ h2c->cv = hal->cv;
+ h2c->tssi_mode = tssi_mode;
+
+ rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c);
+ rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_h2c_rf_iqk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_iqk *)skb->data;
+
+ h2c->phy_idx = cpu_to_le32(phy_idx);
+ h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_h2c_rf_dpk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_dpk *)skb->data;
+
+ h2c->len = len;
+ h2c->phy = phy_idx;
+ h2c->dpk_enable = true;
+ h2c->kpath = RF_AB;
+ h2c->cur_band = chan->band_type;
+ h2c->cur_bw = chan->band_width;
+ h2c->cur_ch = chan->channel;
+ h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_h2c_rf_txgapk *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_txgapk *)skb->data;
+
+ h2c->len = len;
+ h2c->ktype = 2;
+ h2c->phy = phy_idx;
+ h2c->kpath = RF_AB;
+ h2c->band = chan->band_type;
+ h2c->bw = chan->band_width;
+ h2c->ch = chan->channel;
+ h2c->cv = hal->cv;
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_h2c_rf_dack *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_dack *)skb->data;
+
+ h2c->len = cpu_to_le32(len);
+ h2c->phy = cpu_to_le32(phy_idx);
+ h2c->type = cpu_to_le32(0);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
+ RTW89_SUB_ENTITY_0);
+ struct rtw89_h2c_rf_rxdck *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_rf_rxdck *)skb->data;
+
+ h2c->len = len;
+ h2c->phy = phy_idx;
+ h2c->is_afe = false;
+ h2c->kpath = RF_AB;
+ h2c->cur_band = chan->band_type;
+ h2c->cur_bw = chan->band_width;
+ h2c->cur_ch = chan->channel;
+ h2c->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
+ H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack)
@@ -3600,7 +5287,7 @@ static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
default:
return false;
case RTW89_C2H_CAT_MAC:
- return rtw89_mac_c2h_chk_atomic(rtwdev, class, func);
+ return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func);
case RTW89_C2H_CAT_OUTSRC:
return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
}
@@ -4050,8 +5737,66 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
}
}
-static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, bool connected)
+static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
+ int ssid_num,
+ struct rtw89_mac_chinfo_be *ch_info)
+{
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw89_pktofld_info *info;
+ u8 band, probe_count = 0, i;
+
+ ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
+ ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
+ ch_info->bw = RTW89_SCAN_WIDTH;
+ ch_info->tx_null = false;
+ ch_info->pause_data = false;
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+
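+ /* attach pre-downloaded probe packet ids matching this channel's band */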
+ if (ssid_num) {
+ band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
+
+ list_for_each_entry(info, &scan_info->pkt_list[band], list) {
+ if (info->channel_6ghz &&
+ ch_info->pri_ch != info->channel_6ghz)
+ continue;
+ ch_info->pkt_id[probe_count++] = info->id;
+ if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
+ break;
+ }
+ }
+
+ if (ch_info->ch_band == RTW89_BAND_6G) {
+ if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
+ !ch_info->is_psc) {
+ ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
+ if (!req->duration_mandatory)
+ ch_info->period -= RTW89_DWELL_TIME_6G;
+ }
+ }
+
+ for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
+ ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
+
+ switch (chan_type) {
+ case RTW89_CHAN_DFS:
+ if (ch_info->ch_band != RTW89_BAND_6G)
+ ch_info->period =
+ max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
+ ch_info->dwell_time = RTW89_DWELL_TIME;
+ break;
+ case RTW89_CHAN_ACTIVE:
+ break;
+ default:
+ rtw89_warn(rtwdev, "Channel type out of bound\n");
+ break;
+ }
+}
+
+int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected)
{
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_mac_chinfo *ch_info, *tmp;
@@ -4074,7 +5819,7 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
goto out;
}
- if (req->duration_mandatory)
+ if (req->duration)
ch_info->period = req->duration;
else if (channel->band == NL80211_BAND_6GHZ)
ch_info->period = RTW89_CHANNEL_TIME_6G +
@@ -4127,9 +5872,69 @@ out:
return ret;
}
+int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected)
+{
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
+ struct rtw89_mac_chinfo_be *ch_info, *tmp;
+ struct ieee80211_channel *channel;
+ struct list_head chan_list;
+ enum rtw89_chan_type type;
+ int list_len, ret;
+ bool random_seq;
+ u32 idx;
+
+ random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN);
+ INIT_LIST_HEAD(&chan_list);
+
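+ /* resume from last_chan_idx; longer requests are downloaded in batches of RTW89_SCAN_LIST_LIMIT */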
+ for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
+ idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
+ idx++, list_len++) {
+ channel = req->channels[idx];
+ ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
+ if (!ch_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (req->duration)
+ ch_info->period = req->duration;
+ else if (channel->band == NL80211_BAND_6GHZ)
+ ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
+ else
+ ch_info->period = RTW89_CHANNEL_TIME;
+
+ ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
+ ch_info->central_ch = channel->hw_value;
+ ch_info->pri_ch = channel->hw_value;
+ ch_info->rand_seq_num = random_seq;
+ ch_info->is_psc = cfg80211_channel_is_psc(channel);
+
+ if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
+ type = RTW89_CHAN_DFS;
+ else
+ type = RTW89_CHAN_ACTIVE;
+ rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info);
+
+ list_add_tail(&ch_info->list, &chan_list);
+ }
+
+ rtwdev->scan_info.last_chan_idx = idx;
+ ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list);
+
+out:
+ list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
+ list_del(&ch_info->list);
+ kfree(ch_info);
+ }
+
+ return ret;
+}
+
static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool connected)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
int ret;
ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
@@ -4137,7 +5942,7 @@ static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
rtw89_err(rtwdev, "Update probe request failed\n");
goto out;
}
- ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected);
+ ret = mac->add_chan_list(rtwdev, rtwvif, connected);
out:
return ret;
}
@@ -4154,9 +5959,11 @@ void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan);
rtwdev->scan_info.scanning_vif = vif;
rtwdev->scan_info.last_chan_idx = 0;
+ rtwdev->scan_info.abort = false;
rtwvif->scan_ies = &scan_req->ies;
rtwvif->scan_req = req;
ieee80211_stop_queues(rtwdev->hw);
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, false);
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
get_random_mask_addr(mac_addr, req->mac_addr,
@@ -4181,10 +5988,10 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct cfg80211_scan_info info = {
.aborted = aborted,
};
- struct rtw89_vif *rtwvif;
if (!vif)
return;
@@ -4197,22 +6004,29 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
rtw89_core_scan_complete(rtwdev, vif, true);
ieee80211_scan_completed(rtwdev->hw, &info);
ieee80211_wake_queues(rtwdev->hw);
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, true);
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
rtw89_release_pkt_list(rtwdev);
- rtwvif = (struct rtw89_vif *)vif->drv_priv;
rtwvif->scan_req = NULL;
rtwvif->scan_ies = NULL;
scan_info->last_chan_idx = 0;
scan_info->scanning_vif = NULL;
+ scan_info->abort = false;
rtw89_chanctx_proceed(rtwdev);
}
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
- rtw89_hw_scan_offload(rtwdev, vif, false);
- rtw89_hw_scan_complete(rtwdev, vif, true);
+ struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
+ int ret;
+
+ scan_info->abort = true;
+
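+ /* if the stop command is accepted, the firmware scan-done event completes the scan */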
+ ret = rtw89_hw_scan_offload(rtwdev, vif, false);
+ if (ret)
+ rtw89_hw_scan_complete(rtwdev, vif, true);
}
static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
@@ -4231,6 +6045,7 @@ static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_scan_option opt = {0};
struct rtw89_vif *rtwvif;
bool connected;
@@ -4248,7 +6063,18 @@ int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
if (ret)
goto out;
}
- ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
+
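+ /* BE firmware takes explicit scan options; other chips keep the zeroed defaults */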
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
+ opt.scan_mode = RTW89_SCAN_MODE_SA;
+ opt.band = RTW89_PHY_0;
+ opt.num_macc_role = 0;
+ opt.mlo_mode = rtwdev->mlo_dbcc_mode;
+ opt.num_opch = connected ? 1 : 0;
+ opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
+ }
+
+ ret = mac->scan_offload(rtwdev, &opt, rtwvif);
out:
return ret;
}
@@ -4922,6 +6748,372 @@ int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}
+static
+u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_add_slot_arg *slot_arg,
+ struct rtw89_h2c_mrc_add_slot *slot_h2c)
+{
+ bool fill_h2c = !!slot_h2c;
+ unsigned int i;
+
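+ /* called with a NULL slot_h2c to compute the slot size only */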
+ if (!fill_h2c)
+ goto calc_len;
+
+ slot_h2c->w0 = le32_encode_bits(slot_arg->duration,
+ RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) |
+ le32_encode_bits(slot_arg->courtesy_en,
+ RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) |
+ le32_encode_bits(slot_arg->role_num,
+ RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM);
+ slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period,
+ RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) |
+ le32_encode_bits(slot_arg->courtesy_target,
+ RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET);
+
+ for (i = 0; i < slot_arg->role_num; i++) {
+ slot_h2c->roles[i].w0 =
+ le32_encode_bits(slot_arg->roles[i].macid,
+ RTW89_H2C_MRC_ADD_ROLE_W0_MACID) |
+ le32_encode_bits(slot_arg->roles[i].role_type,
+ RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) |
+ le32_encode_bits(slot_arg->roles[i].is_master,
+ RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) |
+ le32_encode_bits(slot_arg->roles[i].en_tx_null,
+ RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN);
+ slot_h2c->roles[i].w1 =
+ le32_encode_bits(slot_arg->roles[i].central_ch,
+ RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) |
+ le32_encode_bits(slot_arg->roles[i].primary_ch,
+ RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) |
+ le32_encode_bits(slot_arg->roles[i].bw,
+ RTW89_H2C_MRC_ADD_ROLE_W1_BW) |
+ le32_encode_bits(slot_arg->roles[i].band,
+ RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) |
+ le32_encode_bits(slot_arg->roles[i].null_early,
+ RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) |
+ le32_encode_bits(true,
+ RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC);
+ slot_h2c->roles[i].macid_main_bitmap =
+ cpu_to_le32(slot_arg->roles[i].macid_main_bitmap);
+ slot_h2c->roles[i].macid_paired_bitmap =
+ cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap);
+ }
+
+calc_len:
+ return struct_size(slot_h2c, roles, slot_arg->role_num);
+}
+
+int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_add_arg *arg)
+{
+ struct rtw89_h2c_mrc_add *h2c_head;
+ struct sk_buff *skb;
+ unsigned int i;
+ void *tmp;
+ u32 len;
+ int ret;
+
+ len = sizeof(*h2c_head);
+ for (i = 0; i < arg->slot_num; i++)
+ len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL);
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc add\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ tmp = skb->data;
+
+ h2c_head = tmp;
+ h2c_head->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_ADD_W0_SCH_IDX) |
+ le32_encode_bits(arg->sch_type,
+ RTW89_H2C_MRC_ADD_W0_SCH_TYPE) |
+ le32_encode_bits(arg->slot_num,
+ RTW89_H2C_MRC_ADD_W0_SLOT_NUM) |
+ le32_encode_bits(arg->btc_in_sch,
+ RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH);
+
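+ /* fill the header, then append each variable-sized slot via the sizing helper */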
+ tmp += sizeof(*h2c_head);
+ for (i = 0; i < arg->slot_num; i++)
+ tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_ADD_MRC, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_start_arg *arg)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_h2c_mrc_start *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc start\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_start *)skb->data;
+
+ h2c->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_START_W0_SCH_IDX) |
+ le32_encode_bits(arg->old_sch_idx,
+ RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) |
+ le32_encode_bits(arg->action,
+ RTW89_H2C_MRC_START_W0_ACTION);
+
+ h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
+ h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_START_MRC, 0, 0,
+ len);
+
+ cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_h2c_mrc_del *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc del\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_del *)skb->data;
+
+ h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_DEL_MRC, 0, 0,
+ len);
+
+ cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
+int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_req_tsf_arg *arg,
+ struct rtw89_mac_mrc_tsf_rpt *rpt)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ struct rtw89_h2c_mrc_req_tsf *h2c;
+ struct rtw89_mac_mrc_tsf_rpt *tmp;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len;
+ int ret;
+
+ len = struct_size(h2c, infos, arg->num);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data;
+
+ h2c->req_tsf_num = arg->num;
+ for (i = 0; i < arg->num; i++)
+ h2c->infos[i] =
+ u8_encode_bits(arg->infos[i].band,
+ RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
+ u8_encode_bits(arg->infos[i].port,
+ RTW89_H2C_MRC_REQ_TSF_INFO_PORT);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_REQ_TSF, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF);
+ if (ret)
+ return ret;
+
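+ /* the C2H ack handler has stored the TSF report in the wait buffer */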
+ tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf;
+ *rpt = *tmp;
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_bitmap_arg *arg)
+{
+ struct rtw89_h2c_mrc_upd_bitmap *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data;
+
+ h2c->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) |
+ le32_encode_bits(arg->action,
+ RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) |
+ le32_encode_bits(arg->macid,
+ RTW89_H2C_MRC_UPD_BITMAP_W0_MACID);
+ h2c->w1 = le32_encode_bits(arg->client_macid,
+ RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_UPD_BITMAP, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_sync_arg *arg)
+{
+ struct rtw89_h2c_mrc_sync *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_sync *)skb->data;
+
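+ /* sync the destination port TSF to the source port plus the given offset */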
+ h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
+ le32_encode_bits(arg->src.port,
+ RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
+ le32_encode_bits(arg->src.band,
+ RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
+ le32_encode_bits(arg->dest.port,
+ RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
+ le32_encode_bits(arg->dest.band,
+ RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
+ h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_SYNC, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_duration_arg *arg)
+{
+ struct rtw89_h2c_mrc_upd_duration *h2c;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len;
+ int ret;
+
+ len = struct_size(h2c, slots, arg->slot_num);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;
+
+ h2c->w0 = le32_encode_bits(arg->sch_idx,
+ RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
+ le32_encode_bits(arg->slot_num,
+ RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
+ le32_encode_bits(false,
+ RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);
+
+ h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
+ h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
+
+ for (i = 0; i < arg->slot_num; i++) {
+ h2c->slots[i] =
+ le32_encode_bits(arg->slots[i].slot_idx,
+ RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
+ le32_encode_bits(arg->slots[i].duration,
+ RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MRC,
+ H2C_FUNC_MRC_UPD_DURATION, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
static const u8 zeros[U8_MAX] = {};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 01016588b1fc..44311f65b4fa 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -64,6 +64,8 @@ struct rtw89_h2creg_sch_tx_en {
#define RTW89_H2CREG_SCH_TX_EN_W1_MASK GENMASK(15, 0)
#define RTW89_H2CREG_SCH_TX_EN_W1_BAND BIT(16)
+#define RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN GENMASK(23, 16)
+
#define RTW89_H2CREG_MAX 4
#define RTW89_C2HREG_MAX 4
#define RTW89_C2HREG_HDR_LEN 2
@@ -95,7 +97,9 @@ enum rtw89_mac_h2c_type {
RTW89_FWCMD_H2CREG_FUNC_FWERR,
RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE,
RTW89_FWCMD_H2CREG_FUNC_GETPKT_INFORM,
- RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN
+ RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN,
+ RTW89_FWCMD_H2CREG_FUNC_WOW_TRX_STOP = 0x6,
+ RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL = 0xA,
};
enum rtw89_mac_c2h_type {
@@ -104,7 +108,8 @@ enum rtw89_mac_c2h_type {
RTW89_FWCMD_C2HREG_FUNC_ERR_MSG,
RTW89_FWCMD_C2HREG_FUNC_PHY_CAP,
RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT,
- RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF
+ RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK = 0xA,
+ RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF,
};
enum rtw89_fw_c2h_category {
@@ -149,6 +154,7 @@ enum rtw89_fw_log_comp {
RTW89_FW_LOG_COMP_TWT,
RTW89_FW_LOG_COMP_RF,
RTW89_FW_LOG_COMP_MCC = 20,
+ RTW89_FW_LOG_COMP_SCAN = 28,
};
enum rtw89_pkt_offload_op {
@@ -169,6 +175,16 @@ enum rtw89_scanofld_notify_reason {
RTW89_SCAN_ENTER_CH_NOTIFY,
RTW89_SCAN_LEAVE_CH_NOTIFY,
RTW89_SCAN_END_SCAN_NOTIFY,
+ RTW89_SCAN_REPORT_NOTIFY,
+ RTW89_SCAN_CHKPT_NOTIFY,
+ RTW89_SCAN_ENTER_OP_NOTIFY,
+ RTW89_SCAN_LEAVE_OP_NOTIFY,
+};
+
+enum rtw89_scanofld_status {
+ RTW89_SCAN_STATUS_NOTIFY,
+ RTW89_SCAN_STATUS_SUCCESS,
+ RTW89_SCAN_STATUS_FAIL,
};
enum rtw89_chan_type {
@@ -184,6 +200,9 @@ enum rtw89_p2pps_action {
RTW89_P2P_ACT_TERMINATE = 3,
};
+#define RTW89_DEFAULT_CQM_HYST 4
+#define RTW89_DEFAULT_CQM_THOLD -70
+
enum rtw89_bcn_fltr_offload_mode {
RTW89_BCN_FLTR_OFFLOAD_MODE_0 = 0,
RTW89_BCN_FLTR_OFFLOAD_MODE_1,
@@ -216,6 +235,10 @@ struct rtw89_fw_hdr_section_info {
u32 dladdr;
u32 mssc;
u8 type;
+ bool ignore;
+ const u8 *key_addr;
+ u32 key_len;
+ u32 key_idx;
};
struct rtw89_fw_bin_info {
@@ -223,6 +246,8 @@ struct rtw89_fw_bin_info {
u32 hdr_len;
bool dynamic_hdr_en;
u32 dynamic_hdr_len;
+ bool dsp_checksum;
+ bool secure_section_exist;
struct rtw89_fw_hdr_section_info section_info[FWDL_SECTION_MAX_NUM];
};
@@ -231,6 +256,15 @@ struct rtw89_fw_macid_pause_grp {
__le32 mask_grp[4];
} __packed;
+struct rtw89_fw_macid_pause_sleep_grp {
+ struct {
+ __le32 pause_grp[4];
+ __le32 pause_mask_grp[4];
+ __le32 sleep_grp[4];
+ __le32 sleep_mask_grp[4];
+ } __packed n[4];
+} __packed;
+
#define RTW89_H2C_MAX_SIZE 2048
#define RTW89_CHANNEL_TIME 45
#define RTW89_CHANNEL_TIME_6G 20
@@ -243,6 +277,7 @@ struct rtw89_fw_macid_pause_grp {
#define RTW89_SCANOFLD_MAX_IE_LEN 512
#define RTW89_SCANOFLD_PKT_NONE 0xFF
#define RTW89_SCANOFLD_DEBUG_MASK 0x1F
+#define RTW89_CHAN_INVALID 0xFF
#define RTW89_MAC_CHINFO_SIZE 28
#define RTW89_SCAN_LIST_GUARD 4
#define RTW89_SCAN_LIST_LIMIT \
@@ -274,9 +309,32 @@ struct rtw89_mac_chinfo {
bool is_psc;
};
-struct rtw89_scan_option {
- bool enable;
- bool target_ch_mode;
+struct rtw89_mac_chinfo_be {
+ u8 period;
+ u8 dwell_time;
+ u8 central_ch;
+ u8 pri_ch;
+ u8 bw:3;
+ u8 ch_band:2;
+ u8 dfs_ch:1;
+ u8 pause_data:1;
+ u8 tx_null:1;
+ u8 rand_seq_num:1;
+ u8 notify_action:5;
+ u8 probe_id;
+ u8 leave_crit;
+ u8 chkpt_timer;
+ u8 leave_time;
+ u8 leave_th;
+ u16 tx_pkt_ctrl;
+ u8 pkt_id[RTW89_SCANOFLD_MAX_SSID];
+ u8 sw_def;
+ u16 fw_probe0_ssids;
+ u16 fw_probe0_shortssids;
+ u16 fw_probe0_bssids;
+
+ struct list_head list;
+ bool is_psc;
};
struct rtw89_pktofld_info {
@@ -419,6 +477,7 @@ static inline void RTW89_SET_EDCA_PARAM(void *cmd, u32 val)
#define FWDL_SECURITY_SECTION_TYPE 9
#define FWDL_SECURITY_SIGLEN 512
+#define FWDL_SECURITY_CHKSUM_LEN 8
struct rtw89_fw_dynhdr_sec {
__le32 w0;
@@ -472,6 +531,7 @@ struct rtw89_fw_hdr {
#define FW_HDR_W4_MIN GENMASK(31, 24)
#define FW_HDR_W5_YEAR GENMASK(31, 0)
#define FW_HDR_W6_SEC_NUM GENMASK(15, 8)
+#define FW_HDR_W7_PART_SIZE GENMASK(15, 0)
#define FW_HDR_W7_DYN_HDR BIT(16)
#define FW_HDR_W7_CMD_VERSERION GENMASK(31, 24)
@@ -489,6 +549,7 @@ struct rtw89_fw_hdr_section_v1 {
#define FWSECTION_HDR_V1_W1_CHECKSUM BIT(28)
#define FWSECTION_HDR_V1_W1_REDL BIT(29)
#define FWSECTION_HDR_V1_W2_MSSC GENMASK(7, 0)
+#define FORMATTED_MSSC 0xFF
#define FWSECTION_HDR_V1_W2_BBMCU_IDX GENMASK(27, 24)
struct rtw89_fw_hdr_v1 {
@@ -521,12 +582,42 @@ struct rtw89_fw_hdr_v1 {
#define FW_HDR_V1_W5_YEAR GENMASK(15, 0)
#define FW_HDR_V1_W5_HDR_SIZE GENMASK(31, 16)
#define FW_HDR_V1_W6_SEC_NUM GENMASK(15, 8)
+#define FW_HDR_V1_W6_DSP_CHKSUM BIT(24)
+#define FW_HDR_V1_W7_PART_SIZE GENMASK(15, 0)
#define FW_HDR_V1_W7_DYN_HDR BIT(16)
-static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val)
-{
- le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0));
-}
+enum rtw89_fw_mss_pool_rmp_tbl_type {
+ MSS_POOL_RMP_TBL_BITMASK = 0x0,
+ MSS_POOL_RMP_TBL_RECORD = 0x1,
+};
+
+#define FWDL_MSS_POOL_DEFKEYSETS_SIZE 8
+
+struct rtw89_fw_mss_pool_hdr {
+ u8 signature[8]; /* equal to mss_signature[] */
+ __le32 rmp_tbl_offset;
+ __le32 key_raw_offset;
+ u8 defen;
+ u8 rsvd[3];
+ u8 rmpfmt; /* enum rtw89_fw_mss_pool_rmp_tbl_type */
+ u8 mssdev_max;
+ __le16 keypair_num;
+ __le16 msscust_max;
+ __le16 msskey_num_max;
+ __le32 rsvd3;
+ u8 rmp_tbl[];
+} __packed;
+
+union rtw89_fw_section_mssc_content {
+ struct {
+ u8 pad[58];
+ __le32 v;
+ } __packed sb_sel_ver;
+ struct {
+ u8 pad[60];
+ __le16 v;
+ } __packed key_sign_len;
+} __packed;
static inline void SET_CTRL_INFO_MACID(void *table, u32 val)
{
@@ -1198,6 +1289,149 @@ static inline void SET_CMC_TBL_CSI_BW(void *table, u32 val)
GENMASK(31, 30));
}
+struct rtw89_h2c_cctlinfo_ud_g7 {
+ __le32 c0;
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 m0;
+ __le32 m1;
+ __le32 m2;
+ __le32 m3;
+ __le32 m4;
+ __le32 m5;
+ __le32 m6;
+ __le32 m7;
+ __le32 m8;
+ __le32 m9;
+ __le32 m10;
+ __le32 m11;
+ __le32 m12;
+ __le32 m13;
+ __le32 m14;
+ __le32 m15;
+} __packed;
+
+#define CCTLINFO_G7_C0_MACID GENMASK(6, 0)
+#define CCTLINFO_G7_C0_OP BIT(7)
+
+#define CCTLINFO_G7_W0_DATARATE GENMASK(11, 0)
+#define CCTLINFO_G7_W0_DATA_GI_LTF GENMASK(14, 12)
+#define CCTLINFO_G7_W0_TRYRATE BIT(15)
+#define CCTLINFO_G7_W0_ARFR_CTRL GENMASK(17, 16)
+#define CCTLINFO_G7_W0_DIS_HE1SS_STBC BIT(18)
+#define CCTLINFO_G7_W0_ACQ_RPT_EN BIT(20)
+#define CCTLINFO_G7_W0_MGQ_RPT_EN BIT(21)
+#define CCTLINFO_G7_W0_ULQ_RPT_EN BIT(22)
+#define CCTLINFO_G7_W0_TWTQ_RPT_EN BIT(23)
+#define CCTLINFO_G7_W0_FORCE_TXOP BIT(24)
+#define CCTLINFO_G7_W0_DISRTSFB BIT(25)
+#define CCTLINFO_G7_W0_DISDATAFB BIT(26)
+#define CCTLINFO_G7_W0_NSTR_EN BIT(27)
+#define CCTLINFO_G7_W0_AMPDU_DENSITY GENMASK(31, 28)
+#define CCTLINFO_G7_W0_ALL (GENMASK(31, 20) | GENMASK(18, 0))
+#define CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE GENMASK(11, 0)
+#define CCTLINFO_G7_W1_RTS_TXCNT_LMT GENMASK(15, 12)
+#define CCTLINFO_G7_W1_RTSRATE GENMASK(27, 16)
+#define CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE GENMASK(31, 28)
+#define CCTLINFO_G7_W1_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W2_DATA_TX_CNT_LMT GENMASK(5, 0)
+#define CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL BIT(6)
+#define CCTLINFO_G7_W2_MAX_AGG_NUM_SEL BIT(7)
+#define CCTLINFO_G7_W2_RTS_EN BIT(8)
+#define CCTLINFO_G7_W2_CTS2SELF_EN BIT(9)
+#define CCTLINFO_G7_W2_CCA_RTS GENMASK(11, 10)
+#define CCTLINFO_G7_W2_HW_RTS_EN BIT(12)
+#define CCTLINFO_G7_W2_RTS_DROP_DATA_MODE GENMASK(14, 13)
+#define CCTLINFO_G7_W2_PRELD_EN BIT(15)
+#define CCTLINFO_G7_W2_AMPDU_MAX_LEN GENMASK(26, 16)
+#define CCTLINFO_G7_W2_UL_MU_DIS BIT(27)
+#define CCTLINFO_G7_W2_AMPDU_MAX_TIME GENMASK(31, 28)
+#define CCTLINFO_G7_W2_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W3_MAX_AGG_NUM GENMASK(7, 0)
+#define CCTLINFO_G7_W3_DATA_BW GENMASK(10, 8)
+#define CCTLINFO_G7_W3_DATA_BW_ER BIT(11)
+#define CCTLINFO_G7_W3_BA_BMAP GENMASK(14, 12)
+#define CCTLINFO_G7_W3_VCS_STBC BIT(15)
+#define CCTLINFO_G7_W3_VO_LFTIME_SEL GENMASK(18, 16)
+#define CCTLINFO_G7_W3_VI_LFTIME_SEL GENMASK(21, 19)
+#define CCTLINFO_G7_W3_BE_LFTIME_SEL GENMASK(24, 22)
+#define CCTLINFO_G7_W3_BK_LFTIME_SEL GENMASK(27, 25)
+#define CCTLINFO_G7_W3_AMPDU_TIME_SEL BIT(28)
+#define CCTLINFO_G7_W3_AMPDU_LEN_SEL BIT(29)
+#define CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL BIT(30)
+#define CCTLINFO_G7_W3_LSIG_TXOP_EN BIT(31)
+#define CCTLINFO_G7_W3_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W4_MULTI_PORT_ID GENMASK(2, 0)
+#define CCTLINFO_G7_W4_BYPASS_PUNC BIT(3)
+#define CCTLINFO_G7_W4_MBSSID GENMASK(7, 4)
+#define CCTLINFO_G7_W4_DATA_DCM BIT(8)
+#define CCTLINFO_G7_W4_DATA_ER BIT(9)
+#define CCTLINFO_G7_W4_DATA_LDPC BIT(10)
+#define CCTLINFO_G7_W4_DATA_STBC BIT(11)
+#define CCTLINFO_G7_W4_A_CTRL_BQR BIT(12)
+#define CCTLINFO_G7_W4_A_CTRL_BSR BIT(14)
+#define CCTLINFO_G7_W4_A_CTRL_CAS BIT(15)
+#define CCTLINFO_G7_W4_ACT_SUBCH_CBW GENMASK(31, 16)
+#define CCTLINFO_G7_W4_ALL (GENMASK(31, 14) | GENMASK(12, 0))
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 GENMASK(1, 0)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 GENMASK(3, 2)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 GENMASK(5, 4)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 GENMASK(7, 6)
+#define CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4 GENMASK(9, 8)
+#define CCTLINFO_G7_W5_SR_RATE GENMASK(14, 10)
+#define CCTLINFO_G7_W5_TID_DISABLE GENMASK(23, 16)
+#define CCTLINFO_G7_W5_ADDR_CAM_INDEX GENMASK(31, 24)
+#define CCTLINFO_G7_W5_ALL (GENMASK(31, 16) | GENMASK(14, 0))
+#define CCTLINFO_G7_W6_AID12_PAID GENMASK(11, 0)
+#define CCTLINFO_G7_W6_RESP_REF_RATE GENMASK(23, 12)
+#define CCTLINFO_G7_W6_ULDL BIT(31)
+#define CCTLINFO_G7_W6_ALL (BIT(31) | GENMASK(23, 0))
+#define CCTLINFO_G7_W7_NC GENMASK(2, 0)
+#define CCTLINFO_G7_W7_NR GENMASK(5, 3)
+#define CCTLINFO_G7_W7_NG GENMASK(7, 6)
+#define CCTLINFO_G7_W7_CB GENMASK(9, 8)
+#define CCTLINFO_G7_W7_CS GENMASK(11, 10)
+#define CCTLINFO_G7_W7_CSI_STBC_EN BIT(13)
+#define CCTLINFO_G7_W7_CSI_LDPC_EN BIT(14)
+#define CCTLINFO_G7_W7_CSI_PARA_EN BIT(15)
+#define CCTLINFO_G7_W7_CSI_FIX_RATE GENMASK(27, 16)
+#define CCTLINFO_G7_W7_CSI_BW GENMASK(31, 29)
+#define CCTLINFO_G7_W7_ALL (GENMASK(31, 29) | GENMASK(27, 13) | GENMASK(11, 0))
+#define CCTLINFO_G7_W8_ALL_ACK_SUPPORT BIT(0)
+#define CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT BIT(1)
+#define CCTLINFO_G7_W8_BSR_OM_UPD_EN BIT(2)
+#define CCTLINFO_G7_W8_MACID_FWD_IDC BIT(3)
+#define CCTLINFO_G7_W8_AZ_SEC_EN BIT(4)
+#define CCTLINFO_G7_W8_CSI_SEC_EN BIT(5)
+#define CCTLINFO_G7_W8_FIX_UL_ADDRCAM_IDX BIT(6)
+#define CCTLINFO_G7_W8_CTRL_CNT_VLD BIT(7)
+#define CCTLINFO_G7_W8_CTRL_CNT GENMASK(11, 8)
+#define CCTLINFO_G7_W8_RESP_SEC_TYPE GENMASK(15, 12)
+#define CCTLINFO_G7_W8_ALL GENMASK(15, 0)
+/* W9~13 are reserved */
+#define CCTLINFO_G7_W14_VO_CURR_RATE GENMASK(11, 0)
+#define CCTLINFO_G7_W14_VI_CURR_RATE GENMASK(23, 12)
+#define CCTLINFO_G7_W14_BE_CURR_RATE_L GENMASK(31, 24)
+#define CCTLINFO_G7_W14_ALL GENMASK(31, 0)
+#define CCTLINFO_G7_W15_BE_CURR_RATE_H GENMASK(3, 0)
+#define CCTLINFO_G7_W15_BK_CURR_RATE GENMASK(15, 4)
+#define CCTLINFO_G7_W15_MGNT_CURR_RATE GENMASK(27, 16)
+#define CCTLINFO_G7_W15_ALL GENMASK(27, 0)
+
static inline void SET_DCTL_MACID_V1(void *table, u32 val)
{
le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
@@ -1500,105 +1734,98 @@ static inline void SET_DCTL_SEC_ENT6_V1(void *table, u32 val)
GENMASK(31, 24));
}
-static inline void SET_BCN_UPD_PORT(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
-}
-
-static inline void SET_BCN_UPD_MBSSID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
-}
-
-static inline void SET_BCN_UPD_BAND(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
-}
-
-static inline void SET_BCN_UPD_GRP_IE_OFST(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, (val - 24) | BIT(7), GENMASK(31, 24));
-}
-
-static inline void SET_BCN_UPD_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0));
-}
-
-static inline void SET_BCN_UPD_SSN_SEL(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(9, 8));
-}
-
-static inline void SET_BCN_UPD_SSN_MODE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(11, 10));
-}
-
-static inline void SET_BCN_UPD_RATE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(20, 12));
-}
-
-static inline void SET_BCN_UPD_TXPWR(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(23, 21));
-}
-
-static inline void SET_BCN_UPD_TXINFO_CTRL_EN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(0));
-}
-
-static inline void SET_BCN_UPD_NTX_PATH_EN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(4, 1));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_A(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(6, 5));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_B(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(8, 7));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_C(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(10, 9));
-}
-
-static inline void SET_BCN_UPD_PATH_MAP_D(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(12, 11));
-}
-
-static inline void SET_BCN_UPD_PATH_ANTSEL_A(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(13));
-}
-
-static inline void SET_BCN_UPD_PATH_ANTSEL_B(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(14));
-}
-
-static inline void SET_BCN_UPD_PATH_ANTSEL_C(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(15));
-}
+struct rtw89_h2c_bcn_upd {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+} __packed;
-static inline void SET_BCN_UPD_PATH_ANTSEL_D(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(16));
-}
+#define RTW89_H2C_BCN_UPD_W0_PORT GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_W0_MBSSID GENMASK(15, 8)
+#define RTW89_H2C_BCN_UPD_W0_BAND GENMASK(23, 16)
+#define RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST GENMASK(31, 24)
+#define RTW89_H2C_BCN_UPD_W1_MACID GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_W1_SSN_SEL GENMASK(9, 8)
+#define RTW89_H2C_BCN_UPD_W1_SSN_MODE GENMASK(11, 10)
+#define RTW89_H2C_BCN_UPD_W1_RATE GENMASK(20, 12)
+#define RTW89_H2C_BCN_UPD_W1_TXPWR GENMASK(23, 21)
+#define RTW89_H2C_BCN_UPD_W2_TXINFO_CTRL_EN BIT(0)
+#define RTW89_H2C_BCN_UPD_W2_NTX_PATH_EN GENMASK(4, 1)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_A GENMASK(6, 5)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_B GENMASK(8, 7)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_C GENMASK(10, 9)
+#define RTW89_H2C_BCN_UPD_W2_PATH_MAP_D GENMASK(12, 11)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_A BIT(13)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_B BIT(14)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_C BIT(15)
+#define RTW89_H2C_BCN_UPD_W2_PATH_ANTSEL_D BIT(16)
+#define RTW89_H2C_BCN_UPD_W2_CSA_OFST GENMASK(31, 17)
+
+struct rtw89_h2c_bcn_upd_be {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 w8;
+ __le32 w9;
+ __le32 w10;
+ __le32 w11;
+ __le32 w12;
+ __le32 w13;
+ __le32 w14;
+ __le32 w15;
+ __le32 w16;
+ __le32 w17;
+ __le32 w18;
+ __le32 w19;
+ __le32 w20;
+ __le32 w21;
+ __le32 w22;
+ __le32 w23;
+ __le32 w24;
+ __le32 w25;
+ __le32 w26;
+ __le32 w27;
+ __le32 w28;
+ __le32 w29;
+} __packed;
-static inline void SET_BCN_UPD_CSA_OFST(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 17));
-}
+#define RTW89_H2C_BCN_UPD_BE_W0_PORT GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_BE_W0_MBSSID GENMASK(15, 8)
+#define RTW89_H2C_BCN_UPD_BE_W0_BAND GENMASK(23, 16)
+#define RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST GENMASK(31, 24)
+#define RTW89_H2C_BCN_UPD_BE_W1_MACID GENMASK(7, 0)
+#define RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL GENMASK(9, 8)
+#define RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE GENMASK(11, 10)
+#define RTW89_H2C_BCN_UPD_BE_W1_RATE GENMASK(20, 12)
+#define RTW89_H2C_BCN_UPD_BE_W1_TXPWR GENMASK(23, 21)
+#define RTW89_H2C_BCN_UPD_BE_W1_MACID_EXT GENMASK(31, 24)
+#define RTW89_H2C_BCN_UPD_BE_W2_TXINFO_CTRL_EN BIT(0)
+#define RTW89_H2C_BCN_UPD_BE_W2_NTX_PATH_EN GENMASK(4, 1)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_A GENMASK(6, 5)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_B GENMASK(8, 7)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_C GENMASK(10, 9)
+#define RTW89_H2C_BCN_UPD_BE_W2_PATH_MAP_D GENMASK(12, 11)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_A BIT(13)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_B BIT(14)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_C BIT(15)
+#define RTW89_H2C_BCN_UPD_BE_W2_ANTSEL_D BIT(16)
+#define RTW89_H2C_BCN_UPD_BE_W2_CSA_OFST GENMASK(31, 17)
+#define RTW89_H2C_BCN_UPD_BE_W3_MLIE_CSA_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W3_CRITICAL_UPD_FLAG_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W4_VAP1_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W4_VAP2_DTIM_CNT_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W5_VAP3_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W5_VAP4_DTIM_CNT_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W6_VAP5_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W6_VAP6_DTIM_CNT_OFST GENMASK(31, 16)
+#define RTW89_H2C_BCN_UPD_BE_W7_VAP7_DTIM_CNT_OFST GENMASK(15, 0)
+#define RTW89_H2C_BCN_UPD_BE_W7_ECSA_OFST GENMASK(30, 16)
+#define RTW89_H2C_BCN_UPD_BE_W7_PROTECTION_KEY_ID BIT(31)
static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val)
{
@@ -1620,70 +1847,46 @@ static inline void SET_FWROLE_MAINTAIN_WIFI_ROLE(void *h2c, u32 val)
le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13));
}
-static inline void SET_JOININFO_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
-}
-
-static inline void SET_JOININFO_OP(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(8));
-}
-
-static inline void SET_JOININFO_BAND(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(9));
-}
-
-static inline void SET_JOININFO_WMM(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(11, 10));
-}
-
-static inline void SET_JOININFO_TGR(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(12));
-}
-
-static inline void SET_JOININFO_ISHESTA(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(13));
-}
-
-static inline void SET_JOININFO_DLBW(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 14));
-}
-
-static inline void SET_JOININFO_TF_MAC_PAD(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(17, 16));
-}
-
-static inline void SET_JOININFO_DL_T_PE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(20, 18));
-}
-
-static inline void SET_JOININFO_PORT_ID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 21));
-}
+enum rtw89_fw_sta_type { /* value of RTW89_H2C_JOININFO_W1_STA_TYPE */
+ RTW89_FW_N_AC_STA = 0,
+ RTW89_FW_AX_STA = 1,
+ RTW89_FW_BE_STA = 2,
+};
-static inline void SET_JOININFO_NET_TYPE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(25, 24));
-}
+struct rtw89_h2c_join {
+ __le32 w0;
+} __packed;
-static inline void SET_JOININFO_WIFI_ROLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(29, 26));
-}
+struct rtw89_h2c_join_v1 {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+} __packed;
-static inline void SET_JOININFO_SELF_ROLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 30));
-}
+#define RTW89_H2C_JOININFO_W0_MACID GENMASK(7, 0)
+#define RTW89_H2C_JOININFO_W0_OP BIT(8)
+#define RTW89_H2C_JOININFO_W0_BAND BIT(9)
+#define RTW89_H2C_JOININFO_W0_WMM GENMASK(11, 10)
+#define RTW89_H2C_JOININFO_W0_TGR BIT(12)
+#define RTW89_H2C_JOININFO_W0_ISHESTA BIT(13)
+#define RTW89_H2C_JOININFO_W0_DLBW GENMASK(15, 14)
+#define RTW89_H2C_JOININFO_W0_TF_MAC_PAD GENMASK(17, 16)
+#define RTW89_H2C_JOININFO_W0_DL_T_PE GENMASK(20, 18)
+#define RTW89_H2C_JOININFO_W0_PORT_ID GENMASK(23, 21)
+#define RTW89_H2C_JOININFO_W0_NET_TYPE GENMASK(25, 24)
+#define RTW89_H2C_JOININFO_W0_WIFI_ROLE GENMASK(29, 26)
+#define RTW89_H2C_JOININFO_W0_SELF_ROLE GENMASK(31, 30)
+#define RTW89_H2C_JOININFO_W1_STA_TYPE GENMASK(2, 0)
+#define RTW89_H2C_JOININFO_W1_IS_MLD BIT(3)
+#define RTW89_H2C_JOININFO_W1_MAIN_MACID GENMASK(11, 4)
+#define RTW89_H2C_JOININFO_W1_MLO_MODE BIT(12)
+#define RTW89_H2C_JOININFO_W1_EMLSR_CAB BIT(13)
+#define RTW89_H2C_JOININFO_W1_NSTR_EN BIT(14)
+#define RTW89_H2C_JOININFO_W1_INIT_PWR_STATE BIT(15)
+#define RTW89_H2C_JOININFO_W1_EMLSR_PADDING GENMASK(18, 16)
+#define RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY GENMASK(21, 19)
+#define RTW89_H2C_JOININFO_W2_MACID_EXT GENMASK(7, 0)
+#define RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT GENMASK(15, 8)
struct rtw89_h2c_notify_dbcc {
__le32 w0;
@@ -1741,60 +1944,47 @@ static inline void SET_LOG_CFG_COMP_EXT(void *h2c, u32 val)
le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 0));
}
-static inline void SET_BA_CAM_VALID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(0));
-}
-
-static inline void SET_BA_CAM_INIT_REQ(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(1));
-}
-
-static inline void SET_BA_CAM_ENTRY_IDX(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(3, 2));
-}
-
-static inline void SET_BA_CAM_TID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 4));
-}
-
-static inline void SET_BA_CAM_MACID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
-}
-
-static inline void SET_BA_CAM_BMAP_SIZE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16));
-}
-
-static inline void SET_BA_CAM_SSN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20));
-}
-
-static inline void SET_BA_CAM_UID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(7, 0));
-}
+struct rtw89_h2c_ba_cam {
+ __le32 w0;
+ __le32 w1;
+} __packed;
-static inline void SET_BA_CAM_STD_EN(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, BIT(8));
-}
+#define RTW89_H2C_BA_CAM_W0_VALID BIT(0)
+#define RTW89_H2C_BA_CAM_W0_INIT_REQ BIT(1)
+#define RTW89_H2C_BA_CAM_W0_ENTRY_IDX GENMASK(3, 2)
+#define RTW89_H2C_BA_CAM_W0_TID GENMASK(7, 4)
+#define RTW89_H2C_BA_CAM_W0_MACID GENMASK(15, 8)
+#define RTW89_H2C_BA_CAM_W0_BMAP_SIZE GENMASK(19, 16)
+#define RTW89_H2C_BA_CAM_W0_SSN GENMASK(31, 20)
+#define RTW89_H2C_BA_CAM_W1_UID GENMASK(7, 0)
+#define RTW89_H2C_BA_CAM_W1_STD_EN BIT(8)
+#define RTW89_H2C_BA_CAM_W1_BAND BIT(9)
+#define RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1 GENMASK(31, 28)
+
+struct rtw89_h2c_ba_cam_v1 {
+ __le32 w0;
+ __le32 w1;
+} __packed;
-static inline void SET_BA_CAM_BAND(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, BIT(9));
-}
+#define RTW89_H2C_BA_CAM_V1_W0_VALID BIT(0)
+#define RTW89_H2C_BA_CAM_V1_W0_INIT_REQ BIT(1)
+#define RTW89_H2C_BA_CAM_V1_W0_TID_MASK GENMASK(7, 4)
+#define RTW89_H2C_BA_CAM_V1_W0_MACID_MASK GENMASK(15, 8)
+#define RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK GENMASK(19, 16)
+#define RTW89_H2C_BA_CAM_V1_W0_SSN_MASK GENMASK(31, 20)
+#define RTW89_H2C_BA_CAM_V1_W1_UID_VALUE_MASK GENMASK(7, 0)
+#define RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN BIT(8)
+#define RTW89_H2C_BA_CAM_V1_W1_BAND_SEL BIT(9)
+#define RTW89_H2C_BA_CAM_V1_W1_MLD_EN BIT(10)
+#define RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK GENMASK(31, 24)
+
+struct rtw89_h2c_ba_cam_init {
+ __le32 w0;
+} __packed;
-static inline void SET_BA_CAM_ENTRY_IDX_V1(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(31, 28));
-}
+#define RTW89_H2C_BA_CAM_INIT_USERS_MASK GENMASK(7, 0)
+#define RTW89_H2C_BA_CAM_INIT_OFFSET_MASK GENMASK(19, 12)
+#define RTW89_H2C_BA_CAM_INIT_BAND_SEL BIT(24)
static inline void SET_LPS_PARM_MACID(void *h2c, u32 val)
{
@@ -1846,6 +2036,17 @@ static inline void SET_LPS_PARM_LASTRPWM(void *h2c, u32 val)
le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8));
}
+struct rtw89_h2c_lps_ch_info {
+ struct {
+ u8 pri_ch;
+ u8 central_ch;
+ u8 bw;
+ u8 band;
+ } __packed info[2];
+
+ __le32 mlo_dbcc_mode_lps;
+} __packed;
+
static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(31, 0));
@@ -2128,9 +2329,15 @@ enum rtw89_btc_btf_set {
SET_BT_IGNORE_WLAN_ACT,
SET_BT_TX_PWR,
SET_BT_LNA_CONSTRAIN,
- SET_BT_GOLDEN_RX_RANGE,
+ SET_BT_QUERY_DEV_LIST,
+ SET_BT_QUERY_DEV_INFO,
SET_BT_PSD_REPORT,
SET_H2C_TEST,
+ SET_IOFLD_RF,
+ SET_IOFLD_BB,
+ SET_IOFLD_MAC,
+ SET_IOFLD_SCBD,
+ SET_H2C_MACRO,
SET_MAX1,
};
@@ -2144,6 +2351,10 @@ enum rtw89_btc_cxdrvinfo {
CXDRVINFO_CTRL,
CXDRVINFO_SCAN,
CXDRVINFO_TRX, /* WL traffic to WL fw */
+ CXDRVINFO_TXPWR,
+ CXDRVINFO_FDDT,
+ CXDRVINFO_MLO,
+ CXDRVINFO_OSI,
CXDRVINFO_MAX,
};
@@ -2170,7 +2381,19 @@ struct rtw89_h2c_cxhdr {
u8 len;
} __packed;
+struct rtw89_h2c_cxhdr_v7 {
+ u8 type;
+ u8 ver;
+ u8 len;
+} __packed;
+
+struct rtw89_h2c_cxctrl_v7 {
+ struct rtw89_h2c_cxhdr_v7 hdr;
+ struct rtw89_btc_ctrl_v7 ctrl;
+} __packed;
+
#define H2C_LEN_CXDRVHDR sizeof(struct rtw89_h2c_cxhdr)
+#define H2C_LEN_CXDRVHDR_V7 sizeof(struct rtw89_h2c_cxhdr_v7)
struct rtw89_h2c_cxinit {
struct rtw89_h2c_cxhdr hdr;
@@ -2204,6 +2427,11 @@ struct rtw89_h2c_cxinit {
#define RTW89_H2C_CXINIT_INFO_CX_OTHER BIT(3)
#define RTW89_H2C_CXINIT_INFO_BT_ONLY BIT(4)
+struct rtw89_h2c_cxinit_v7 {
+ struct rtw89_h2c_cxhdr_v7 hdr;
+ struct rtw89_btc_init_info_v7 init;
+} __packed;
+
static inline void RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(void *cmd, u8 val)
{
u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0));
@@ -2569,135 +2797,91 @@ static inline void RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(void *cmd, u32 val)
le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 16));
}
-static inline void RTW89_SET_FWCMD_SCANOFLD_CH_NUM(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PERIOD(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_DWELL(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_CENTER_CH(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PRI_CH(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd)), val, GENMASK(31, 24));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_BW(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(2, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_ACTION(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(7, 3));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_NUM_PKT(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(11, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_TX(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(12));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(13));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_BAND(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(15, 14));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT_ID(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_DFS(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(24));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_TX_NULL(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(25));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_RANDOM(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(26));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_CFG_TX(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 4), val, BIT(27));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT0(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT1(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT2(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT3(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 8), val, GENMASK(31, 24));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT4(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(7, 0));
-}
-
-static inline void RTW89_SET_FWCMD_CHINFO_PKT5(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(15, 8));
-}
+struct rtw89_h2c_chinfo_elem {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+} __packed;
-static inline void RTW89_SET_FWCMD_CHINFO_PKT6(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(23, 16));
-}
+#define RTW89_H2C_CHINFO_W0_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_W0_DWELL GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_W0_CENTER_CH GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W0_PRI_CH GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_W1_BW GENMASK(2, 0)
+#define RTW89_H2C_CHINFO_W1_ACTION GENMASK(7, 3)
+#define RTW89_H2C_CHINFO_W1_NUM_PKT GENMASK(11, 8)
+#define RTW89_H2C_CHINFO_W1_TX BIT(12)
+#define RTW89_H2C_CHINFO_W1_PAUSE_DATA BIT(13)
+#define RTW89_H2C_CHINFO_W1_BAND GENMASK(15, 14)
+#define RTW89_H2C_CHINFO_W1_PKT_ID GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W1_DFS BIT(24)
+#define RTW89_H2C_CHINFO_W1_TX_NULL BIT(25)
+#define RTW89_H2C_CHINFO_W1_RANDOM BIT(26)
+#define RTW89_H2C_CHINFO_W1_CFG_TX BIT(27)
+#define RTW89_H2C_CHINFO_W2_PKT0 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_W2_PKT1 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_W2_PKT2 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W2_PKT3 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_W3_PKT4 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_W3_PKT5 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_W3_PKT6 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_W3_PKT7 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_W4_POWER_IDX GENMASK(15, 0)
+
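The per-field setter functions removed above are replaced by this fixed word layout plus field masks. A minimal sketch (not part of the patch; fill_chinfo_w0() is a hypothetical helper) of packing the first word with le32_encode_bits() from <linux/bitfield.h>:

/* Hedged sketch only: encode w0 of one channel-info element. */
static void fill_chinfo_w0(struct rtw89_h2c_chinfo_elem *elem,
			   u8 period, u8 dwell, u8 center_ch, u8 pri_ch)
{
	elem->w0 = le32_encode_bits(period, RTW89_H2C_CHINFO_W0_PERIOD) |
		   le32_encode_bits(dwell, RTW89_H2C_CHINFO_W0_DWELL) |
		   le32_encode_bits(center_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) |
		   le32_encode_bits(pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH);
}
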
+struct rtw89_h2c_chinfo_elem_be {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+} __packed;
-static inline void RTW89_SET_FWCMD_CHINFO_PKT7(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 12), val, GENMASK(31, 24));
-}
+#define RTW89_H2C_CHINFO_BE_W0_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W0_DWELL GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W0_CENTER_CH GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_BE_W0_PRI_CH GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W1_BW GENMASK(2, 0)
+#define RTW89_H2C_CHINFO_BE_W1_CH_BAND GENMASK(4, 3)
+#define RTW89_H2C_CHINFO_BE_W1_DFS BIT(5)
+#define RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA BIT(6)
+#define RTW89_H2C_CHINFO_BE_W1_TX_NULL BIT(7)
+#define RTW89_H2C_CHINFO_BE_W1_RANDOM BIT(8)
+#define RTW89_H2C_CHINFO_BE_W1_NOTIFY GENMASK(13, 9)
+#define RTW89_H2C_CHINFO_BE_W1_PROBE BIT(14)
+#define RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT GENMASK(17, 15)
+#define RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL GENMASK(31, 16)
+#define RTW89_H2C_CHINFO_BE_W3_PKT0 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W3_PKT1 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W3_PKT2 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_BE_W3_PKT3 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W4_PKT4 GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W4_PKT5 GENMASK(15, 8)
+#define RTW89_H2C_CHINFO_BE_W4_PKT6 GENMASK(23, 16)
+#define RTW89_H2C_CHINFO_BE_W4_PKT7 GENMASK(31, 24)
+#define RTW89_H2C_CHINFO_BE_W5_SW_DEF GENMASK(7, 0)
+#define RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS GENMASK(31, 16)
+#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS GENMASK(15, 0)
+#define RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS GENMASK(31, 16)
+
+struct rtw89_h2c_chinfo {
+ u8 ch_num;
+ u8 elem_size;
+ u8 arg;
+ u8 rsvd0;
+ struct rtw89_h2c_chinfo_elem elem[] __counted_by(ch_num);
+} __packed;
-static inline void RTW89_SET_FWCMD_CHINFO_POWER_IDX(void *cmd, u32 val)
-{
- le32p_replace_bits((__le32 *)((u8 *)(cmd) + 16), val, GENMASK(15, 0));
-}
+#define RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK BIT(0)
+#define RTW89_H2C_CHINFO_ARG_APPEND_MASK BIT(1)
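Since elem[] is annotated with __counted_by(ch_num), the counter must be valid before the array is indexed, and the command buffer is sized from it. A hedged sketch, assuming struct_size() from <linux/overflow.h> (the surrounding skb handling is illustrative only, with error checks elided):

/* Sketch: allocate and size a chinfo H2C for ch_num elements. */
struct rtw89_h2c_chinfo *h2c;
u32 len = struct_size(h2c, elem, ch_num);	/* header + ch_num elems */
struct sk_buff *skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);

h2c = (struct rtw89_h2c_chinfo *)skb_put(skb, len);
h2c->ch_num = ch_num;	/* set the counter before touching elem[] */
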
struct rtw89_h2c_scanofld {
__le32 w0;
@@ -2726,6 +2910,79 @@ struct rtw89_h2c_scanofld {
#define RTW89_H2C_SCANOFLD_W2_NORM_PD GENMASK(15, 0)
#define RTW89_H2C_SCANOFLD_W2_SLOW_PD GENMASK(23, 16)
+struct rtw89_h2c_scanofld_be_macc_role {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND GENMASK(1, 0)
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT GENMASK(4, 2)
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID GENMASK(23, 8)
+#define RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END GENMASK(31, 24)
+
+struct rtw89_h2c_scanofld_be_opch {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+} __packed;
+
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID GENMASK(15, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND GENMASK(17, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT GENMASK(20, 18)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY GENMASK(22, 21)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL BIT(23)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND GENMASK(9, 8)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW GENMASK(12, 10)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY GENMASK(14, 13)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS GENMASK(18, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0 GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1 GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2 GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3 GENMASK(31, 24)
+
+struct rtw89_h2c_scanofld_be {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ struct rtw89_h2c_scanofld_be_macc_role role[];
+} __packed;
+
+#define RTW89_H2C_SCANOFLD_BE_W0_OP GENMASK(1, 0)
+#define RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE GENMASK(3, 2)
+#define RTW89_H2C_SCANOFLD_BE_W0_REPEAT GENMASK(5, 4)
+#define RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END BIT(6)
+#define RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH BIT(7)
+#define RTW89_H2C_SCANOFLD_BE_W0_MACID GENMASK(23, 8)
+#define RTW89_H2C_SCANOFLD_BE_W0_PORT GENMASK(26, 24)
+#define RTW89_H2C_SCANOFLD_BE_W0_BAND GENMASK(28, 27)
+#define RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W1_NUM_OP GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W1_NORM_PD GENMASK(31, 16)
+#define RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD GENMASK(15, 0)
+#define RTW89_H2C_SCANOFLD_BE_W2_NORM_CY GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_W2_OPCH_END GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID GENMASK(23, 16)
+#define RTW89_H2C_SCANOFLD_BE_W3_PROBEID GENMASK(31, 24)
+#define RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W4_DELAY_START GENMASK(31, 16)
+#define RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH GENMASK(31, 0)
+
static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
{
le32p_replace_bits((__le32 *)cmd, val, GENMASK(7, 0));
@@ -3160,6 +3417,225 @@ inline void RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(void *cmd, u32 val)
le32p_replace_bits((__le32 *)cmd + 4, val, GENMASK(31, 0));
}
+enum rtw89_h2c_mrc_sch_types {
+ RTW89_H2C_MRC_SCH_BAND0_ONLY = 0,
+ RTW89_H2C_MRC_SCH_BAND1_ONLY = 1,
+ RTW89_H2C_MRC_SCH_DUAL_BAND = 2,
+};
+
+enum rtw89_h2c_mrc_role_types {
+ RTW89_H2C_MRC_ROLE_WIFI = 0,
+ RTW89_H2C_MRC_ROLE_BT = 1,
+ RTW89_H2C_MRC_ROLE_EMPTY = 2,
+};
+
+#define RTW89_MAC_MRC_MAX_ADD_SLOT_NUM 3
+#define RTW89_MAC_MRC_MAX_ADD_ROLE_NUM_PER_SLOT 1 /* before MLO */
+
+struct rtw89_fw_mrc_add_slot_arg {
+ u16 duration; /* unit: TU */
+ bool courtesy_en;
+ u8 courtesy_period;
+ u8 courtesy_target; /* slot idx */
+
+ unsigned int role_num;
+ struct {
+ enum rtw89_h2c_mrc_role_types role_type;
+ bool is_master;
+ bool en_tx_null;
+ enum rtw89_band band;
+ enum rtw89_bandwidth bw;
+ u8 macid;
+ u8 central_ch;
+ u8 primary_ch;
+ u8 null_early; /* unit: TU */
+
+		/* for MLD, macid is in [0, chip::support_mld_num);
+		 * otherwise, macid is in [0, 32)
+		 */
+ u32 macid_main_bitmap;
+ /* for MLD, bit X maps to macid: X + chip::support_mld_num */
+ u32 macid_paired_bitmap;
+ } roles[RTW89_MAC_MRC_MAX_ADD_ROLE_NUM_PER_SLOT];
+};
+
+struct rtw89_fw_mrc_add_arg {
+ u8 sch_idx;
+ enum rtw89_h2c_mrc_sch_types sch_type;
+ bool btc_in_sch;
+
+ unsigned int slot_num;
+ struct rtw89_fw_mrc_add_slot_arg slots[RTW89_MAC_MRC_MAX_ADD_SLOT_NUM];
+};
+
+struct rtw89_h2c_mrc_add_role {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 macid_main_bitmap;
+ __le32 macid_paired_bitmap;
+} __packed;
+
+#define RTW89_H2C_MRC_ADD_ROLE_W0_MACID GENMASK(15, 0)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE GENMASK(23, 16)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER BIT(24)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE BIT(25)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN BIT(26)
+#define RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN BIT(27)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG GENMASK(7, 0)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH GENMASK(15, 8)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_BW GENMASK(19, 16)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE GENMASK(21, 20)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS BIT(22)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC BIT(23)
+#define RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY GENMASK(31, 24)
+#define RTW89_H2C_MRC_ADD_ROLE_W2_ALT_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_MRC_ADD_ROLE_W2_ALT_ROLE_TYPE GENMASK(15, 8)
+#define RTW89_H2C_MRC_ADD_ROLE_W2_ALT_ROLE_MACID GENMASK(23, 16)
+
+struct rtw89_h2c_mrc_add_slot {
+ __le32 w0;
+ __le32 w1;
+ struct rtw89_h2c_mrc_add_role roles[];
+} __packed;
+
+#define RTW89_H2C_MRC_ADD_SLOT_W0_DURATION GENMASK(15, 0)
+#define RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN BIT(17)
+#define RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM GENMASK(31, 24)
+#define RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD GENMASK(7, 0)
+#define RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET GENMASK(15, 8)
+
+struct rtw89_h2c_mrc_add {
+ __le32 w0;
+	/* Logically, a flexible array of struct rtw89_h2c_mrc_add_slot
+	 * follows, but each slot itself ends in another flexible array,
+	 * so it cannot be accessed correctly through this struct. To
+	 * avoid misuse, it is not declared here.
+	 */
+} __packed;
+
+#define RTW89_H2C_MRC_ADD_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_ADD_W0_SCH_TYPE GENMASK(7, 4)
+#define RTW89_H2C_MRC_ADD_W0_SLOT_NUM GENMASK(15, 8)
+#define RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH BIT(16)
+
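Because the slot array cannot be declared in the struct, the total command length has to be computed by walking the slots. A sketch (not the patch's code), assuming struct_size_t() from <linux/overflow.h> and a hypothetical arg of type struct rtw89_fw_mrc_add_arg:

/* Sketch: size an MRC add H2C whose slots each carry roles[]. */
u32 len = sizeof(struct rtw89_h2c_mrc_add);
unsigned int i;

for (i = 0; i < arg->slot_num; i++)
	len += struct_size_t(struct rtw89_h2c_mrc_add_slot, roles,
			     arg->slots[i].role_num);
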
+enum rtw89_h2c_mrc_start_actions {
+ RTW89_H2C_MRC_START_ACTION_START_NEW = 0,
+ RTW89_H2C_MRC_START_ACTION_REPLACE_OLD = 1,
+};
+
+struct rtw89_fw_mrc_start_arg {
+ u8 sch_idx;
+ u8 old_sch_idx;
+ u64 start_tsf;
+ enum rtw89_h2c_mrc_start_actions action;
+};
+
+struct rtw89_h2c_mrc_start {
+ __le32 w0;
+ __le32 start_tsf_low;
+ __le32 start_tsf_high;
+} __packed;
+
+#define RTW89_H2C_MRC_START_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_START_W0_OLD_SCH_IDX GENMASK(7, 4)
+#define RTW89_H2C_MRC_START_W0_ACTION GENMASK(15, 8)
+
+struct rtw89_h2c_mrc_del {
+ __le32 w0;
+} __packed;
+
+#define RTW89_H2C_MRC_DEL_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_DEL_W0_DEL_ALL BIT(4)
+#define RTW89_H2C_MRC_DEL_W0_STOP_ONLY BIT(5)
+#define RTW89_H2C_MRC_DEL_W0_SPECIFIC_ROLE_EN BIT(6)
+#define RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX GENMASK(15, 8)
+#define RTW89_H2C_MRC_DEL_W0_SPECIFIC_ROLE_MACID GENMASK(31, 16)
+
+#define RTW89_MAC_MRC_MAX_REQ_TSF_NUM 2
+
+struct rtw89_fw_mrc_req_tsf_arg {
+ unsigned int num;
+ struct {
+ u8 band;
+ u8 port;
+ } infos[RTW89_MAC_MRC_MAX_REQ_TSF_NUM];
+};
+
+struct rtw89_h2c_mrc_req_tsf {
+ u8 req_tsf_num;
+ u8 infos[] __counted_by(req_tsf_num);
+} __packed;
+
+#define RTW89_H2C_MRC_REQ_TSF_INFO_BAND GENMASK(3, 0)
+#define RTW89_H2C_MRC_REQ_TSF_INFO_PORT GENMASK(7, 4)
+
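Each entry of infos[] is a single byte packed from the two masks above; a minimal sketch using u8_encode_bits() from <linux/bitfield.h> (h2c and arg are hypothetical locals):

/* Sketch: pack one band/port TSF request byte. */
h2c->infos[i] = u8_encode_bits(arg->infos[i].band,
			       RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
		u8_encode_bits(arg->infos[i].port,
			       RTW89_H2C_MRC_REQ_TSF_INFO_PORT);
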
+enum rtw89_h2c_mrc_upd_bitmap_actions {
+ RTW89_H2C_MRC_UPD_BITMAP_ACTION_DEL = 0,
+ RTW89_H2C_MRC_UPD_BITMAP_ACTION_ADD = 1,
+};
+
+struct rtw89_fw_mrc_upd_bitmap_arg {
+ u8 sch_idx;
+ u8 macid;
+ u8 client_macid;
+ enum rtw89_h2c_mrc_upd_bitmap_actions action;
+};
+
+struct rtw89_h2c_mrc_upd_bitmap {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION BIT(4)
+#define RTW89_H2C_MRC_UPD_BITMAP_W0_MACID GENMASK(31, 16)
+#define RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID GENMASK(15, 0)
+
+struct rtw89_fw_mrc_sync_arg {
+ u8 offset; /* unit: TU */
+ struct {
+ u8 band;
+ u8 port;
+ } src, dest;
+};
+
+struct rtw89_h2c_mrc_sync {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_MRC_SYNC_W0_SYNC_EN BIT(0)
+#define RTW89_H2C_MRC_SYNC_W0_SRC_PORT GENMASK(11, 8)
+#define RTW89_H2C_MRC_SYNC_W0_SRC_BAND GENMASK(15, 12)
+#define RTW89_H2C_MRC_SYNC_W0_DEST_PORT GENMASK(19, 16)
+#define RTW89_H2C_MRC_SYNC_W0_DEST_BAND GENMASK(23, 20)
+#define RTW89_H2C_MRC_SYNC_W1_OFFSET GENMASK(15, 0)
+
+struct rtw89_fw_mrc_upd_duration_arg {
+ u8 sch_idx;
+ u64 start_tsf;
+
+ unsigned int slot_num;
+ struct {
+ u8 slot_idx;
+ u16 duration; /* unit: TU */
+ } slots[RTW89_MAC_MRC_MAX_ADD_SLOT_NUM];
+};
+
+struct rtw89_h2c_mrc_upd_duration {
+ __le32 w0;
+ __le32 start_tsf_low;
+ __le32 start_tsf_high;
+ __le32 slots[];
+} __packed;
+
+#define RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX GENMASK(3, 0)
+#define RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM GENMASK(15, 8)
+#define RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH BIT(16)
+#define RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX GENMASK(7, 0)
+#define RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION GENMASK(31, 16)
+
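Each slots[] word carries a slot index and its new duration; a minimal sketch of the encoding (h2c and arg are hypothetical locals):

/* Sketch: encode one duration-update slot word. */
h2c->slots[i] = le32_encode_bits(arg->slots[i].slot_idx,
				 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
		le32_encode_bits(arg->slots[i].duration,
				 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
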
#define RTW89_C2H_HEADER_LEN 8
struct rtw89_c2h_hdr {
@@ -3275,20 +3751,29 @@ struct rtw89_c2h_ra_rpt {
#define RTW89_GET_MAC_C2H_PKTOFLD_LEN(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 16))
-#define RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
-#define RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(19, 16))
-#define RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(23, 20))
-#define RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 24))
-#define RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(3, 0))
-#define RTW89_GET_MAC_C2H_SCANOFLD_AIR_DENSITY(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(7, 4))
-#define RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(25, 24))
+struct rtw89_c2h_scanofld {
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+} __packed;
+
+#define RTW89_C2H_SCANOFLD_W2_PRI_CH GENMASK(7, 0)
+#define RTW89_C2H_SCANOFLD_W2_RSN GENMASK(19, 16)
+#define RTW89_C2H_SCANOFLD_W2_STATUS GENMASK(23, 20)
+#define RTW89_C2H_SCANOFLD_W2_PERIOD GENMASK(31, 24)
+#define RTW89_C2H_SCANOFLD_W5_TX_FAIL GENMASK(3, 0)
+#define RTW89_C2H_SCANOFLD_W5_AIR_DENSITY GENMASK(7, 4)
+#define RTW89_C2H_SCANOFLD_W5_BAND GENMASK(25, 24)
+#define RTW89_C2H_SCANOFLD_W5_MAC_IDX BIT(26)
+#define RTW89_C2H_SCANOFLD_W6_SW_DEF GENMASK(7, 0)
+#define RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD GENMASK(15, 8)
+#define RTW89_C2H_SCANOFLD_W6_FW_DEF GENMASK(23, 16)
+#define RTW89_C2H_SCANOFLD_W7_REPORT_TSF GENMASK(31, 0)
#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
@@ -3339,6 +3824,36 @@ static_assert(sizeof(struct rtw89_mac_mcc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE)
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h) \
le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
+struct rtw89_mac_mrc_tsf_rpt {
+ unsigned int num;
+ u64 tsfs[RTW89_MAC_MRC_MAX_REQ_TSF_NUM];
+};
+
+static_assert(sizeof(struct rtw89_mac_mrc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE);
+
+struct rtw89_c2h_mrc_tsf_rpt_info {
+ __le32 tsf_low;
+ __le32 tsf_high;
+} __packed;
+
+struct rtw89_c2h_mrc_tsf_rpt {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+ struct rtw89_c2h_mrc_tsf_rpt_info infos[];
+} __packed;
+
+#define RTW89_C2H_MRC_TSF_RPT_W2_REQ_TSF_NUM GENMASK(7, 0)
+
+struct rtw89_c2h_mrc_status_rpt {
+ struct rtw89_c2h_hdr hdr;
+ __le32 w2;
+ __le32 tsf_low;
+ __le32 tsf_high;
+} __packed;
+
+#define RTW89_C2H_MRC_STATUS_RPT_W2_STATUS GENMASK(5, 0)
+#define RTW89_C2H_MRC_STATUS_RPT_W2_SCH_IDX GENMASK(7, 6)
+
struct rtw89_c2h_pkt_ofld_rsp {
__le32 w0;
__le32 w1;
@@ -3647,6 +4162,9 @@ struct rtw89_fw_h2c_rf_reg_info {
#define H2C_FUNC_MAC_BCN_UPD 0x5
#define H2C_FUNC_MAC_DCTLINFO_UD_V1 0x9
#define H2C_FUNC_MAC_CCTLINFO_UD_V1 0xa
+#define H2C_FUNC_MAC_DCTLINFO_UD_V2 0xc
+#define H2C_FUNC_MAC_BCN_UPD_BE 0xd
+#define H2C_FUNC_MAC_CCTLINFO_UD_G7 0x11
/* CLASS 6 - Address CAM */
#define H2C_CL_MAC_ADDR_CAM_UPDATE 0x6
@@ -3672,6 +4190,8 @@ enum rtw89_fw_ofld_h2c_func {
H2C_FUNC_CFG_BCNFLTR = 0x1e,
H2C_FUNC_OFLD_RSSI = 0x1f,
H2C_FUNC_OFLD_TP = 0x20,
+ H2C_FUNC_MAC_MACID_PAUSE_SLEEP = 0x28,
+ H2C_FUNC_SCANOFLD_BE = 0x2c,
NUM_OF_RTW89_FW_OFLD_H2C_FUNC,
};
@@ -3683,6 +4203,14 @@ enum rtw89_fw_ofld_h2c_func {
RTW89_FW_OFLD_WAIT_COND(RTW89_PKT_OFLD_WAIT_TAG(pkt_id, pkt_op), \
H2C_FUNC_PACKET_OFLD)
+#define RTW89_SCANOFLD_WAIT_COND_ADD_CH RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH)
+
+#define RTW89_SCANOFLD_WAIT_COND_START RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD)
+#define RTW89_SCANOFLD_WAIT_COND_STOP RTW89_FW_OFLD_WAIT_COND(1, H2C_FUNC_SCANOFLD)
+#define RTW89_SCANOFLD_BE_WAIT_COND_START RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD_BE)
+#define RTW89_SCANOFLD_BE_WAIT_COND_STOP RTW89_FW_OFLD_WAIT_COND(1, H2C_FUNC_SCANOFLD_BE)
+
/* CLASS 10 - Security CAM */
#define H2C_CL_MAC_SEC_CAM 0xa
#define H2C_FUNC_MAC_SEC_UPD 0x1
@@ -3690,6 +4218,8 @@ enum rtw89_fw_ofld_h2c_func {
/* CLASS 12 - BA CAM */
#define H2C_CL_BA_CAM 0xc
#define H2C_FUNC_MAC_BA_CAM 0x0
+#define H2C_FUNC_MAC_BA_CAM_V1 0x1
+#define H2C_FUNC_MAC_BA_CAM_INIT 0x2
/* CLASS 14 - MCC */
#define H2C_CL_MCC 0xe
@@ -3710,15 +4240,50 @@ enum rtw89_mcc_h2c_func {
#define RTW89_MCC_WAIT_COND(group, func) \
((group) * NUM_OF_RTW89_MCC_H2C_FUNC + (func))
+/* CLASS 24 - MRC */
+#define H2C_CL_MRC 0x18
+enum rtw89_mrc_h2c_func {
+ H2C_FUNC_MRC_REQ_TSF = 0x0,
+ H2C_FUNC_ADD_MRC = 0x1,
+ H2C_FUNC_START_MRC = 0x2,
+ H2C_FUNC_DEL_MRC = 0x3,
+ H2C_FUNC_MRC_SYNC = 0x4,
+ H2C_FUNC_MRC_UPD_DURATION = 0x5,
+ H2C_FUNC_MRC_UPD_BITMAP = 0x6,
+
+ NUM_OF_RTW89_MRC_H2C_FUNC,
+};
+
+/* MRC's sch_idx can be treated as MCC's group */
+#define RTW89_MRC_WAIT_COND(sch_idx, func) \
+ ((sch_idx) * NUM_OF_RTW89_MRC_H2C_FUNC + (func))
+
+#define RTW89_MRC_WAIT_COND_REQ_TSF \
+ RTW89_MRC_WAIT_COND(0 /* don't care */, H2C_FUNC_MRC_REQ_TSF)
+
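With NUM_OF_RTW89_MRC_H2C_FUNC equal to 7, the wait-condition space is partitioned per schedule: for example, RTW89_MRC_WAIT_COND(1, H2C_FUNC_START_MRC) yields 1 * 7 + 2 = 9, so completions for different schedules never collide.
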
#define H2C_CAT_OUTSRC 0x2
#define H2C_CL_OUTSRC_RA 0x1
#define H2C_FUNC_OUTSRC_RA_MACIDCFG 0x0
+#define H2C_CL_OUTSRC_DM 0x2
+#define H2C_FUNC_FW_LPS_CH_INFO 0xb
+
#define H2C_CL_OUTSRC_RF_REG_A 0x8
#define H2C_CL_OUTSRC_RF_REG_B 0x9
#define H2C_CL_OUTSRC_RF_FW_NOTIFY 0xa
#define H2C_FUNC_OUTSRC_RF_GET_MCCCH 0x2
+#define H2C_CL_OUTSRC_RF_FW_RFK 0xb
+
+enum rtw89_rfk_offload_h2c_func {
+ H2C_FUNC_RFK_TSSI_OFFLOAD = 0x0,
+ H2C_FUNC_RFK_IQK_OFFLOAD = 0x1,
+ H2C_FUNC_RFK_DPK_OFFLOAD = 0x3,
+ H2C_FUNC_RFK_TXGAPK_OFFLOAD = 0x4,
+ H2C_FUNC_RFK_DACK_OFFLOAD = 0x5,
+ H2C_FUNC_RFK_RXDCK_OFFLOAD = 0x6,
+ H2C_FUNC_RFK_PRE_NOTIFY = 0x8,
+};
struct rtw89_fw_h2c_rf_get_mccch {
__le32 ch_0;
@@ -3729,6 +4294,114 @@ struct rtw89_fw_h2c_rf_get_mccch {
__le32 current_band_type;
} __packed;
+#define NUM_OF_RTW89_FW_RFK_PATH 2
+#define NUM_OF_RTW89_FW_RFK_TBL 3
+
+struct rtw89_fw_h2c_rfk_pre_info {
+ struct {
+ __le32 ch[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
+ __le32 band[NUM_OF_RTW89_FW_RFK_PATH][NUM_OF_RTW89_FW_RFK_TBL];
+ } __packed dbcc;
+
+ __le32 mlo_mode;
+ struct {
+ __le32 cur_ch[NUM_OF_RTW89_FW_RFK_PATH];
+ __le32 cur_band[NUM_OF_RTW89_FW_RFK_PATH];
+ } __packed tbl;
+
+ __le32 phy_idx;
+ __le32 cur_band;
+ __le32 cur_bw;
+ __le32 cur_center_ch;
+
+ __le32 ktbl_sel0;
+ __le32 ktbl_sel1;
+ __le32 rfmod0;
+ __le32 rfmod1;
+
+ __le32 mlo_1_1;
+ __le32 rfe_type;
+ __le32 drv_mode;
+
+ struct {
+ __le32 ch[NUM_OF_RTW89_FW_RFK_PATH];
+ __le32 band[NUM_OF_RTW89_FW_RFK_PATH];
+ } __packed mlo;
+} __packed;
+
+struct rtw89_h2c_rf_tssi {
+ __le16 len;
+ u8 phy;
+ u8 ch;
+ u8 bw;
+ u8 band;
+ u8 hwtx_en;
+ u8 cv;
+ s8 curr_tssi_cck_de[2];
+ s8 curr_tssi_cck_de_20m[2];
+ s8 curr_tssi_cck_de_40m[2];
+ s8 curr_tssi_efuse_cck_de[2];
+ s8 curr_tssi_ofdm_de[2];
+ s8 curr_tssi_ofdm_de_20m[2];
+ s8 curr_tssi_ofdm_de_40m[2];
+ s8 curr_tssi_ofdm_de_80m[2];
+ s8 curr_tssi_ofdm_de_160m[2];
+ s8 curr_tssi_ofdm_de_320m[2];
+ s8 curr_tssi_efuse_ofdm_de[2];
+ s8 curr_tssi_ofdm_de_diff_20m[2];
+ s8 curr_tssi_ofdm_de_diff_80m[2];
+ s8 curr_tssi_ofdm_de_diff_160m[2];
+ s8 curr_tssi_ofdm_de_diff_320m[2];
+ s8 curr_tssi_trim_de[2];
+ u8 pg_thermal[2];
+ u8 ftable[2][128];
+ u8 tssi_mode;
+} __packed;
+
+struct rtw89_h2c_rf_iqk {
+ __le32 phy_idx;
+ __le32 dbcc;
+} __packed;
+
+struct rtw89_h2c_rf_dpk {
+ u8 len;
+ u8 phy;
+ u8 dpk_enable;
+ u8 kpath;
+ u8 cur_band;
+ u8 cur_bw;
+ u8 cur_ch;
+ u8 dpk_dbg_en;
+} __packed;
+
+struct rtw89_h2c_rf_txgapk {
+ u8 len;
+ u8 ktype;
+ u8 phy;
+ u8 kpath;
+ u8 band;
+ u8 bw;
+ u8 ch;
+ u8 cv;
+} __packed;
+
+struct rtw89_h2c_rf_dack {
+ __le32 len;
+ __le32 phy;
+ __le32 type;
+} __packed;
+
+struct rtw89_h2c_rf_rxdck {
+ u8 len;
+ u8 phy;
+ u8 is_afe;
+ u8 kpath;
+ u8 cur_band;
+ u8 cur_bw;
+ u8 cur_ch;
+ u8 rxdck_dbg_en;
+} __packed;
+
enum rtw89_rf_log_type {
RTW89_RF_RUN_LOG = 0,
RTW89_RF_RPT_LOG = 1,
@@ -3800,6 +4473,12 @@ struct rtw89_c2h_rf_txgapk_rpt_log {
u8 rsv1;
} __packed;
+struct rtw89_c2h_rfk_report {
+ struct rtw89_c2h_hdr hdr;
+ u8 state; /* enum rtw89_rfk_report_state */
+ u8 version;
+} __packed;
+
#define RTW89_FW_RSVD_PLE_SIZE 0x800
#define RTW89_FW_BACKTRACE_INFO_SIZE 8
@@ -3830,21 +4509,39 @@ void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u8 type, u8 cat, u8 class, u8 func,
bool rack, bool dack, u32 len);
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif);
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif);
+int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
struct rtw89_sta *rtwsta, const u8 *scan_mac_addr);
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
void rtw89_fw_c2h_work(struct work_struct *work);
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
@@ -3866,25 +4563,41 @@ int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu);
int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi);
-int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id);
int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
struct sk_buff *skb_ofld);
-int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
+int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
struct list_head *chan_list);
+int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
+ struct list_head *chan_list);
int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
struct rtw89_scan_option *opt,
struct rtw89_vif *vif);
+int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *opt,
+ struct rtw89_vif *vif);
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
struct rtw89_fw_h2c_rf_reg_info *info,
u16 len, u8 page);
int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode);
+int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
bool rack, bool dack);
@@ -3898,10 +4611,16 @@ void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw);
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params);
+int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params);
void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
+ u8 offset, u8 mac_idx);
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
+int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len);
struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len);
int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
@@ -3916,6 +4635,10 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
bool enable);
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
+int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
+int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
const struct rtw89_pkt_drop_params *params);
@@ -3956,6 +4679,20 @@ int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
u8 target, u8 offset);
int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mcc_duration *p);
+int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_add_arg *arg);
+int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_start_arg *arg);
+int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx);
+int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_req_tsf_arg *arg,
+ struct rtw89_mac_mrc_tsf_rpt *rpt);
+int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_bitmap_arg *arg);
+int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_sync_arg *arg);
+int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
+ const struct rtw89_fw_mrc_upd_duration_arg *arg);
static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
{
@@ -3965,6 +4702,65 @@ static inline void rtw89_fw_h2c_init_ba_cam(struct rtw89_dev *rtwdev)
rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(rtwdev);
}
+static inline int rtw89_chip_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_default_cmac_tbl(rtwdev, rtwvif, rtwsta);
+}
+
+static inline int rtw89_chip_h2c_default_dmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->h2c_default_dmac_tbl)
+ return chip->ops->h2c_default_dmac_tbl(rtwdev, rtwvif, rtwsta);
+
+ return 0;
+}
+
+static inline int rtw89_chip_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_update_beacon(rtwdev, rtwvif);
+}
+
+static inline int rtw89_chip_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+}
+
+static inline int rtw89_chip_h2c_ampdu_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->h2c_ampdu_cmac_tbl)
+ return chip->ops->h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
+
+ return 0;
+}
+
+static inline
+int rtw89_chip_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ return chip->ops->h2c_ba_cam(rtwdev, rtwsta, valid, params);
+}
+
 /* must consider compatibility; don't insert new fields in the middle */
struct rtw89_fw_txpwr_byrate_entry {
u8 band;
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index c485ef2cc3d3..aa5b396b5d2b 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1625,7 +1625,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_size19 = {RTW89_WDE_PG_64, 3328, 0,},
/* PCIE */
.ple_size0 = {RTW89_PLE_PG_128, 1520, 16,},
- .ple_size0_v1 = {RTW89_PLE_PG_128, 2672, 256, 212992,},
+ .ple_size0_v1 = {RTW89_PLE_PG_128, 2688, 240, 212992,},
.ple_size3_v1 = {RTW89_PLE_PG_128, 2928, 0, 212992,},
/* DLFW */
.ple_size4 = {RTW89_PLE_PG_128, 64, 1472,},
@@ -1650,8 +1650,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.wde_qt17 = {0, 0, 0, 0,},
/* 8852C PCIE SCC */
.wde_qt18 = {3228, 60, 0, 40,},
- .ple_qt0 = {320, 0, 32, 16, 13, 13, 292, 0, 32, 18, 1, 4, 0,},
- .ple_qt1 = {320, 0, 32, 16, 1944, 1944, 2223, 0, 1963, 1949, 1, 1935, 0,},
+ .ple_qt0 = {320, 320, 32, 16, 13, 13, 292, 292, 64, 18, 1, 4, 0,},
+ .ple_qt1 = {320, 320, 32, 16, 1316, 1316, 1595, 1595, 1367, 1321, 1, 1307, 0,},
/* PCIE SCC */
.ple_qt4 = {264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,},
/* PCIE SCC */
@@ -1677,7 +1677,7 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
.ple_qt_52b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
/* 8851B PCIE WOW */
.ple_qt_51b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,},
- .ple_rsvd_qt0 = {2, 112, 56, 6, 6, 6, 6, 0, 0, 62,},
+ .ple_rsvd_qt0 = {2, 107, 107, 6, 6, 6, 6, 0, 0, 0,},
.ple_rsvd_qt1 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
.rsvd0_size0 = {212992, 0,},
.rsvd1_size0 = {587776, 2048,},
@@ -2025,6 +2025,9 @@ void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool enable)
{
u32 msk32 = B_AX_UC_MGNT_DEC | B_AX_BMC_MGNT_DEC;
+ if (rtwdev->chip->chip_gen != RTW89_CHIP_AX)
+ return;
+
if (enable)
rtw89_write32_set(rtwdev, R_AX_SEC_ENG_CTRL, msk32);
else
@@ -2537,6 +2540,9 @@ static int spatial_reuse_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_SR_CTRL, mac_idx);
rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BSSID_SRC_CTRL, mac_idx);
+ rtw89_write8_set(rtwdev, reg, B_AX_PLCP_SRC_EN);
+
return 0;
}
@@ -3192,13 +3198,11 @@ static int set_cpuio_ax(struct rtw89_dev *rtwdev,
return 0;
}
-int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
+int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
+ bool band1_en)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_dle_mem *cfg;
- struct rtw89_cpuio_ctrl ctrl_para = {0};
- u16 pkt_id;
- int ret;
cfg = get_dle_mem_cfg(rtwdev, mode);
if (!cfg) {
@@ -3213,6 +3217,16 @@ int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mod
dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU);
+ return mac->dle_quota_change(rtwdev, band1_en);
+}
+
+static int dle_quota_change_ax(struct rtw89_dev *rtwdev, bool band1_en)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ struct rtw89_cpuio_ctrl ctrl_para = {0};
+ u16 pkt_id;
+ int ret;
+
ret = mac->dle_buf_req(rtwdev, 0x20, true, &pkt_id);
if (ret) {
rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n");
@@ -3301,7 +3315,7 @@ static int band1_enable_ax(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -3676,6 +3690,28 @@ static int trx_init_ax(struct rtw89_dev *rtwdev)
return 0;
}
+static int rtw89_mac_feat_init(struct rtw89_dev *rtwdev)
+{
+#define BACAM_1024BMP_OCC_ENTRY 4
+#define BACAM_MAX_RU_SUPPORT_B0_STA 1
+#define BACAM_MAX_RU_SUPPORT_B1_STA 1
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 users, offset;
+
+ if (chip->bacam_ver != RTW89_BACAM_V1)
+ return 0;
+
+ offset = 0;
+ users = BACAM_MAX_RU_SUPPORT_B0_STA;
+ rtw89_fw_h2c_init_ba_cam_users(rtwdev, users, offset, RTW89_MAC_0);
+
+ offset += users * BACAM_1024BMP_OCC_ENTRY;
+ users = BACAM_MAX_RU_SUPPORT_B1_STA;
+ rtw89_fw_h2c_init_ba_cam_users(rtwdev, users, offset, RTW89_MAC_1);
+
+ return 0;
+}
+
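With one RU-capable station per band and each 1024-bit bitmap occupying four BA CAM entries, band 0's user starts at entry offset 0 and band 1's at offset 1 * 4 = 4, matching the offset bookkeeping above.
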
static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
@@ -3910,6 +3946,10 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev)
if (ret)
goto fail;
+ ret = rtw89_mac_feat_init(rtwdev);
+ if (ret)
+ goto fail;
+
if (rtwdev->hci.ops->mac_post_init) {
ret = rtwdev->hci.ops->mac_post_init(rtwdev);
if (ret)
@@ -4000,6 +4040,9 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
.mbssid = R_AX_MBSSID_CTRL,
.mbssid_drop = R_AX_MBSSID_DROP_0,
.tsf_sync = R_AX_PORT0_TSF_SYNC,
+ .ptcl_dbg = R_AX_PTCL_DBG,
+ .ptcl_dbg_info = R_AX_PTCL_DBG_INFO,
+ .bcn_drop_all = R_AX_BCN_DROP_ALL0,
.hiq_win = {R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG,
R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2,
R_AX_PORT_HGQ_WINDOW_CFG + 3},
@@ -4008,13 +4051,15 @@ static const struct rtw89_port_reg rtw89_port_base_ax = {
static void rtw89_mac_check_packet_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, u8 type)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ const struct rtw89_port_reg *p = mac->port_base;
u8 mask = B_AX_PTCL_DBG_INFO_MASK_BY_PORT(rtwvif->port);
u32 reg_info, reg_ctrl;
u32 val;
int ret;
- reg_info = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_DBG_INFO, rtwvif->mac_idx);
- reg_ctrl = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_DBG, rtwvif->mac_idx);
+ reg_info = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg_info, rtwvif->mac_idx);
+ reg_ctrl = rtw89_mac_reg_by_idx(rtwdev, p->ptcl_dbg, rtwvif->mac_idx);
rtw89_write32_mask(rtwdev, reg_ctrl, B_AX_PTCL_DBG_SEL_MASK, type);
rtw89_write32_set(rtwdev, reg_ctrl, B_AX_PTCL_DBG_EN);
@@ -4031,7 +4076,7 @@ static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvi
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_port_reg *p = mac->port_base;
- rtw89_write32_set(rtwdev, R_AX_BCN_DROP_ALL0, BIT(rtwvif->port));
+ rtw89_write32_set(rtwdev, p->bcn_drop_all, BIT(rtwvif->port));
rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK, 1);
rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, B_AX_BCN_MSK_AREA_MASK, 0);
rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 0);
@@ -4044,9 +4089,9 @@ static void rtw89_mac_bcn_drop(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvi
if (rtwvif->port == RTW89_PORT_0)
rtw89_mac_check_packet_ctrl(rtwdev, rtwvif, AX_PTCL_DBG_BCNQ_NUM1);
- rtw89_write32_clr(rtwdev, R_AX_BCN_DROP_ALL0, BIT(rtwvif->port));
+ rtw89_write32_clr(rtwdev, p->bcn_drop_all, BIT(rtwvif->port));
rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TBTT_PROHIB_EN);
- fsleep(2);
+ fsleep(2000);
}
#define BCN_INTERVAL 100
@@ -4159,13 +4204,11 @@ static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev,
rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bit);
}
-static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool en)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
const struct rtw89_port_reg *p = mac->port_base;
- bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
- rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
if (en)
rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
@@ -4173,6 +4216,15 @@ static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
}
+static void rtw89_mac_port_cfg_rx_sync_by_nettype(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
+ rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
+
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, en);
+}
+
static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool en)
{
@@ -4471,7 +4523,11 @@ int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
if (ret)
return ret;
- ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif);
+ ret = rtw89_chip_h2c_default_cmac_tbl(rtwdev, rtwvif, NULL);
+ if (ret)
+ return ret;
+
+ ret = rtw89_chip_h2c_default_dmac_tbl(rtwdev, rtwvif, NULL);
if (ret)
return ret;
@@ -4508,7 +4564,7 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
rtw89_mac_port_cfg_net_type(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif);
rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif);
- rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_rx_sync_by_nettype(rtwdev, rtwvif);
rtw89_mac_port_cfg_tx_sw_by_nettype(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif);
rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif);
@@ -4571,6 +4627,7 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif)
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct ieee80211_hw *hw = rtwdev->hw;
bool tolerated = true;
u32 reg;
@@ -4578,18 +4635,19 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
if (!vif->bss_conf.he_support || vif->type != NL80211_IFTYPE_STATION)
return;
- if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ if (!(vif->bss_conf.chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR))
return;
- cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chanreq.oper,
rtw89_mac_check_he_obss_narrow_bw_ru_iter,
&tolerated);
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx);
+ reg = rtw89_mac_reg_by_idx(rtwdev, mac->narrow_bw_ru_dis.addr,
+ rtwvif->mac_idx);
if (tolerated)
- rtw89_write32_clr(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+ rtw89_write32_clr(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
else
- rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_RU26_DIS);
+ rtw89_write32_set(rtwdev, reg, mac->narrow_bw_ru_dis.mask);
}
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
@@ -4641,35 +4699,52 @@ static bool rtw89_is_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel)
}
static void
-rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len)
{
+ const struct rtw89_c2h_scanofld *c2h =
+ (const struct rtw89_c2h_scanofld *)skb->data;
struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
struct rtw89_chan new;
- u8 reason, status, tx_fail, band, actual_period;
- u32 last_chan = rtwdev->scan_info.last_chan_idx;
+ u8 reason, status, tx_fail, band, actual_period, expect_period;
+ u32 last_chan = rtwdev->scan_info.last_chan_idx, report_tsf;
+ u8 mac_idx, sw_def, fw_def;
u16 chan;
int ret;
if (!rtwvif)
return;
- tx_fail = RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h->data);
- status = RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h->data);
- chan = RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h->data);
- reason = RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h->data);
- band = RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h->data);
- actual_period = RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h->data);
+ tx_fail = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_TX_FAIL);
+ status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
+ chan = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PRI_CH);
+ reason = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_RSN);
+ band = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_BAND);
+ actual_period = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_PERIOD);
+ mac_idx = le32_get_bits(c2h->w5, RTW89_C2H_SCANOFLD_W5_MAC_IDX);
+
if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ)))
band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
- "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
- band, chan, reason, status, tx_fail, actual_period);
+ "mac_idx[%d] band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n",
+ mac_idx, band, chan, reason, status, tx_fail, actual_period);
+
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
+ sw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_SW_DEF);
+ expect_period = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_EXPECT_PERIOD);
+ fw_def = le32_get_bits(c2h->w6, RTW89_C2H_SCANOFLD_W6_FW_DEF);
+ report_tsf = le32_get_bits(c2h->w7, RTW89_C2H_SCANOFLD_W7_REPORT_TSF);
+
+ rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
+ "sw_def: %d, fw_def: %d, tsf: %x, expect: %d\n",
+ sw_def, fw_def, report_tsf, expect_period);
+ }
switch (reason) {
+ case RTW89_SCAN_LEAVE_OP_NOTIFY:
case RTW89_SCAN_LEAVE_CH_NOTIFY:
if (rtw89_is_op_chan(rtwdev, band, chan)) {
rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, false);
@@ -4685,9 +4760,10 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
rtw89_warn(rtwdev, "HW scan failed: %d\n", ret);
}
} else {
- rtw89_hw_scan_complete(rtwdev, vif, false);
+ rtw89_hw_scan_complete(rtwdev, vif, rtwdev->scan_info.abort);
}
break;
+ case RTW89_SCAN_ENTER_OP_NOTIFY:
case RTW89_SCAN_ENTER_CH_NOTIFY:
if (rtw89_is_op_chan(rtwdev, band, chan)) {
rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx,
@@ -4807,8 +4883,13 @@ rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 le
default:
return;
case H2C_FUNC_ADD_SCANOFLD_CH:
+ cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
+ break;
case H2C_FUNC_SCANOFLD:
- cond = RTW89_FW_OFLD_WAIT_COND(0, h2c_func);
+ cond = RTW89_SCANOFLD_WAIT_COND_START;
+ break;
+ case H2C_FUNC_SCANOFLD_BE:
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
break;
}
@@ -5021,6 +5102,84 @@ rtw89_mac_c2h_mcc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data);
}
+static void
+rtw89_mac_c2h_mrc_tsf_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ const struct rtw89_c2h_mrc_tsf_rpt *c2h_rpt;
+ struct rtw89_completion_data data = {};
+ struct rtw89_mac_mrc_tsf_rpt *rpt;
+ unsigned int i;
+
+ c2h_rpt = (const struct rtw89_c2h_mrc_tsf_rpt *)c2h->data;
+ rpt = (struct rtw89_mac_mrc_tsf_rpt *)data.buf;
+ rpt->num = min_t(u8, RTW89_MAC_MRC_MAX_REQ_TSF_NUM,
+ le32_get_bits(c2h_rpt->w2,
+ RTW89_C2H_MRC_TSF_RPT_W2_REQ_TSF_NUM));
+
+ for (i = 0; i < rpt->num; i++) {
+ u32 tsf_high = le32_to_cpu(c2h_rpt->infos[i].tsf_high);
+ u32 tsf_low = le32_to_cpu(c2h_rpt->infos[i].tsf_low);
+
+ rpt->tsfs[i] = (u64)tsf_high << 32 | tsf_low;
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H TSF RPT: index %u> %llu\n",
+ i, rpt->tsfs[i]);
+ }
+
+ rtw89_complete_cond(wait, RTW89_MRC_WAIT_COND_REQ_TSF, &data);
+}
+
+static void
+rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
+ const struct rtw89_c2h_mrc_status_rpt *c2h_rpt;
+ struct rtw89_completion_data data = {};
+ enum rtw89_mac_mrc_status status;
+ unsigned int cond;
+ bool next = false;
+ u32 tsf_high;
+ u32 tsf_low;
+ u8 sch_idx;
+ u8 func;
+
+ c2h_rpt = (const struct rtw89_c2h_mrc_status_rpt *)c2h->data;
+ sch_idx = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MRC_STATUS_RPT_W2_SCH_IDX);
+ status = le32_get_bits(c2h_rpt->w2, RTW89_C2H_MRC_STATUS_RPT_W2_STATUS);
+ tsf_high = le32_to_cpu(c2h_rpt->tsf_high);
+ tsf_low = le32_to_cpu(c2h_rpt->tsf_low);
+
+ switch (status) {
+ case RTW89_MAC_MRC_START_SCH_OK:
+ func = H2C_FUNC_START_MRC;
+ break;
+ case RTW89_MAC_MRC_STOP_SCH_OK:
+ /* H2C_FUNC_DEL_MRC without STOP_ONLY, so wait for DEL_SCH_OK */
+ func = H2C_FUNC_DEL_MRC;
+ next = true;
+ break;
+ case RTW89_MAC_MRC_DEL_SCH_OK:
+ func = H2C_FUNC_DEL_MRC;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "invalid MRC C2H STS RPT: status %d\n", status);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: sch_idx %d, status %d, tsf %llu\n",
+ sch_idx, status, (u64)tsf_high << 32 | tsf_low);
+
+ if (next)
+ return;
+
+ cond = RTW89_MRC_WAIT_COND(sch_idx, func);
+ rtw89_complete_cond(wait, cond, &data);
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
@@ -5052,7 +5211,39 @@ void (* const rtw89_mac_c2h_mcc_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_MCC_STATUS_RPT] = rtw89_mac_c2h_mcc_status_rpt,
};
-bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
+static
+void (* const rtw89_mac_c2h_mrc_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_MRC_TSF_RPT] = rtw89_mac_c2h_mrc_tsf_rpt,
+ [RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT] = rtw89_mac_c2h_mrc_status_rpt,
+};
+
+static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
+{
+ const struct rtw89_c2h_scanofld *c2h =
+ (const struct rtw89_c2h_scanofld *)skb->data;
+ struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_completion_data data = {};
+ unsigned int cond;
+ u8 status, reason;
+
+ status = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_STATUS);
+ reason = le32_get_bits(c2h->w2, RTW89_C2H_SCANOFLD_W2_RSN);
+ data.err = status != RTW89_SCAN_STATUS_SUCCESS;
+
+ if (reason == RTW89_SCAN_END_SCAN_NOTIFY) {
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
+ cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;
+ else
+ cond = RTW89_SCANOFLD_WAIT_COND_STOP;
+
+ rtw89_complete_cond(fw_ofld_wait, cond, &data);
+ }
+}
+
+bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u8 class, u8 func)
{
switch (class) {
default:
@@ -5069,11 +5260,16 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
switch (func) {
default:
return false;
+ case RTW89_MAC_C2H_FUNC_SCANOFLD_RSP:
+ rtw89_mac_c2h_scanofld_rsp_atomic(rtwdev, c2h);
+ return false;
case RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP:
return true;
}
case RTW89_MAC_C2H_CLASS_MCC:
return true;
+ case RTW89_MAC_C2H_CLASS_MRC:
+ return true;
}
}
@@ -5096,6 +5292,10 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MCC)
handler = rtw89_mac_c2h_mcc_handler[func];
break;
+ case RTW89_MAC_C2H_CLASS_MRC:
+ if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MRC)
+ handler = rtw89_mac_c2h_mrc_handler[func];
+ break;
case RTW89_MAC_C2H_CLASS_FWDBG:
return;
default:
@@ -5115,8 +5315,7 @@ bool rtw89_mac_get_txpwr_cr_ax(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr)
{
- const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem;
- enum rtw89_qta_mode mode = dle_mem->mode;
+ enum rtw89_qta_mode mode = rtwdev->mac.qta_mode;
u32 addr = rtw89_mac_reg_by_idx(rtwdev, reg_base, phy_idx);
if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR_AX) {
@@ -5143,7 +5342,8 @@ error:
return false;
}
-int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+static
+int rtw89_mac_cfg_ppdu_status_ax(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PPDU_STAT, mac_idx);
int ret;
@@ -5166,7 +5366,6 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
return 0;
}
-EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status);
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx)
{
@@ -5419,7 +5618,8 @@ int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v1);
-int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
+static
+int rtw89_mac_cfg_plt_ax(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
{
u32 reg;
u16 val;
@@ -5515,7 +5715,7 @@ bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
return !!val;
}
-u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band)
+static u16 rtw89_mac_get_plt_cnt_ax(struct rtw89_dev *rtwdev, u8 band)
{
u32 reg;
u16 cnt;
@@ -6069,6 +6269,41 @@ int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
return ret;
}
+static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ int ret;
+
+ if (enable_wow) {
+ ret = rtw89_mac_resize_ple_rx_quota(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
+ return ret;
+ }
+
+ rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
+ rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0);
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0);
+ rtw89_write32(rtwdev, R_AX_TF_FWD, 0);
+ rtw89_write32(rtwdev, R_AX_HW_RPT_FWD, 0);
+ } else {
+ ret = rtw89_mac_resize_ple_rx_quota(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
+ return ret;
+ }
+
+ rtw89_write32_clr(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
+ rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD);
+ }
+
+ return 0;
+}
+
static u8 rtw89_fw_get_rdy_ax(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
{
u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);
@@ -6096,6 +6331,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.rx_fltr = R_AX_RX_FLTR_OPT,
.port_base = &rtw89_port_base_ax,
.agg_len_ht = R_AX_AGG_LEN_HT_0,
+ .ps_status = R_AX_PPWRBIT_SETTING,
.muedca_ctrl = {
.addr = R_AX_MUEDCA_EN,
@@ -6106,6 +6342,11 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN |
B_AX_BFMEE_HE_NDPA_EN,
},
+ .narrow_bw_ru_dis = {
+ .addr = R_AX_RXTRIG_TEST_USER_2,
+ .mask = B_AX_RXTRIG_RU26_DIS,
+ },
+ .wow_ctrl = {.addr = R_AX_WOW_CTRL, .mask = B_AX_WOW_WOWEN,},
.check_mac_en = rtw89_mac_check_mac_en_ax,
.sys_init = sys_init_ax,
@@ -6117,6 +6358,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.bf_assoc = rtw89_mac_bf_assoc_ax,
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_ax,
+ .cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_ax,
.dle_mix_cfg = dle_mix_cfg_ax,
.chk_dle_rdy = chk_dle_rdy_ax,
@@ -6128,6 +6370,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.wde_quota_cfg = wde_quota_cfg_ax,
.ple_quota_cfg = ple_quota_cfg_ax,
.set_cpuio = set_cpuio_ax,
+ .dle_quota_change = dle_quota_change_ax,
.disable_cpu = rtw89_mac_disable_cpu_ax,
.fwdl_enable_wcpu = rtw89_mac_enable_cpu_ax,
@@ -6137,6 +6380,9 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.parse_phycap_map = rtw89_parse_phycap_map_ax,
.cnv_efuse_state = rtw89_cnv_efuse_state_ax,
+ .cfg_plt = rtw89_mac_cfg_plt_ax,
+ .get_plt_cnt = rtw89_mac_get_plt_cnt_ax,
+
.get_txpwr_cr = rtw89_mac_get_txpwr_cr_ax,
.write_xtal_si = rtw89_mac_write_xtal_si_ax,
@@ -6146,5 +6392,10 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
.dump_err_status = rtw89_mac_dump_err_status_ax,
.is_txq_empty = mac_is_txq_empty_ax,
+
+ .add_chan_list = rtw89_hw_scan_add_chan_list,
+ .scan_offload = rtw89_fw_h2c_scan_offload,
+
+ .wow_config_mac = rtw89_wow_config_mac_ax,
};
EXPORT_SYMBOL(rtw89_mac_gen_ax);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index ed98b49809a4..6fb457153a11 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -169,6 +169,12 @@ enum rtw89_mac_ax_l0_to_l1_event {
MAC_AX_L0_TO_L1_EVENT_MAX = 15,
};
+enum rtw89_mac_wow_fw_status {
+ WOWLAN_NOT_READY = 0x00,
+ WOWLAN_SLEEP_READY = 0x01,
+ WOWLAN_RESUME_READY = 0x02,
+};
+
#define RTW89_PORT_OFFSET_TU_TO_32US(shift_tu) ((shift_tu) * 1024 / 32)
enum rtw89_mac_dbg_port_sel {
@@ -406,13 +412,21 @@ enum rtw89_mac_c2h_mcc_func {
NUM_OF_RTW89_MAC_C2H_FUNC_MCC,
};
+enum rtw89_mac_c2h_mrc_func {
+ RTW89_MAC_C2H_FUNC_MRC_TSF_RPT = 0,
+ RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT = 1,
+
+ NUM_OF_RTW89_MAC_C2H_FUNC_MRC,
+};
+
enum rtw89_mac_c2h_class {
- RTW89_MAC_C2H_CLASS_INFO,
- RTW89_MAC_C2H_CLASS_OFLD,
- RTW89_MAC_C2H_CLASS_TWT,
- RTW89_MAC_C2H_CLASS_WOW,
- RTW89_MAC_C2H_CLASS_MCC,
- RTW89_MAC_C2H_CLASS_FWDBG,
+ RTW89_MAC_C2H_CLASS_INFO = 0x0,
+ RTW89_MAC_C2H_CLASS_OFLD = 0x1,
+ RTW89_MAC_C2H_CLASS_TWT = 0x2,
+ RTW89_MAC_C2H_CLASS_WOW = 0x3,
+ RTW89_MAC_C2H_CLASS_MCC = 0x4,
+ RTW89_MAC_C2H_CLASS_FWDBG = 0x5,
+ RTW89_MAC_C2H_CLASS_MRC = 0xe,
RTW89_MAC_C2H_CLASS_MAX,
};
@@ -441,6 +455,12 @@ enum rtw89_mac_mcc_status {
RTW89_MAC_MCC_TXNULL1_FAIL = 27,
};
+enum rtw89_mac_mrc_status {
+ RTW89_MAC_MRC_START_SCH_OK = 0,
+ RTW89_MAC_MRC_STOP_SCH_OK = 1,
+ RTW89_MAC_MRC_DEL_SCH_OK = 2,
+};
+
struct rtw89_mac_ax_coex {
#define RTW89_MAC_AX_COEX_RTK_MODE 0
#define RTW89_MAC_AX_COEX_CSR_MODE 1
@@ -894,9 +914,12 @@ struct rtw89_mac_gen_def {
u32 rx_fltr;
const struct rtw89_port_reg *port_base;
u32 agg_len_ht;
+ u32 ps_status;
struct rtw89_reg_def muedca_ctrl;
struct rtw89_reg_def bfee_ctrl;
+ struct rtw89_reg_def narrow_bw_ru_dis;
+ struct rtw89_reg_def wow_ctrl;
int (*check_mac_en)(struct rtw89_dev *rtwdev, u8 band,
enum rtw89_mac_hwmod_sel sel);
@@ -913,6 +936,7 @@ struct rtw89_mac_gen_def {
enum rtw89_machdr_frame_type type,
enum rtw89_mac_fwd_target fwd_target,
u8 mac_idx);
+ int (*cfg_ppdu_status)(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable);
int (*dle_mix_cfg)(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg);
int (*chk_dle_rdy)(struct rtw89_dev *rtwdev, bool wde_or_ple);
@@ -930,6 +954,7 @@ struct rtw89_mac_gen_def {
const struct rtw89_ple_quota *max_cfg);
int (*set_cpuio)(struct rtw89_dev *rtwdev,
struct rtw89_cpuio_ctrl *ctrl_para, bool wd);
+ int (*dle_quota_change)(struct rtw89_dev *rtwdev, bool band1_en);
void (*disable_cpu)(struct rtw89_dev *rtwdev);
int (*fwdl_enable_wcpu)(struct rtw89_dev *rtwdev, u8 boot_reason,
@@ -940,6 +965,9 @@ struct rtw89_mac_gen_def {
int (*parse_phycap_map)(struct rtw89_dev *rtwdev);
int (*cnv_efuse_state)(struct rtw89_dev *rtwdev, bool idle);
+ int (*cfg_plt)(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
+ u16 (*get_plt_cnt)(struct rtw89_dev *rtwdev, u8 band);
+
bool (*get_txpwr_cr)(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr);
@@ -952,6 +980,14 @@ struct rtw89_mac_gen_def {
enum mac_ax_err_info err);
bool (*is_txq_empty)(struct rtw89_dev *rtwdev);
+
+ int (*add_chan_list)(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool connected);
+ int (*scan_offload)(struct rtw89_dev *rtwdev,
+ struct rtw89_scan_option *option,
+ struct rtw89_vif *rtwvif);
+
+ int (*wow_config_mac)(struct rtw89_dev *rtwdev, bool enable_wow);
};
extern const struct rtw89_mac_gen_def rtw89_mac_gen_ax;
@@ -1086,6 +1122,8 @@ void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
u16 offset_tu);
int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
u64 *tsf);
+void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool en);
void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif);
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
@@ -1127,7 +1165,8 @@ static inline int rtw89_chip_reset_bb_rf(struct rtw89_dev *rtwdev)
u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev);
int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err);
-bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func);
+bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
+ u8 class, u8 func);
void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev);
@@ -1135,9 +1174,20 @@ int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
u32 *tx_en, enum rtw89_sch_tx_sel sel);
int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx,
u32 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_stop_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel);
int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
-int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_ids, bool enable);
+int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en);
+
+static inline
+int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->cfg_ppdu_status(rtwdev, mac_idx, enable);
+}
+
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx);
void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop);
int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex);
@@ -1147,13 +1197,31 @@ int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
-int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
-u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band);
+int rtw89_mac_cfg_gnt_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+
+static inline
+int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->cfg_plt(rtwdev, plt);
+}
+
+static inline
+u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band)
+{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+
+ return mac->get_plt_cnt(rtwdev, band);
+}
+
void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val);
u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev);
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev);
int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl);
int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl);
+int rtw89_mac_cfg_ctrl_path_v2(struct rtw89_dev *rtwdev, bool wl);
void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter);
void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev);
@@ -1306,6 +1374,7 @@ enum rtw89_mac_xtal_si_offset {
#define XTAL_SI_BIG_PWR_CUT BIT(1)
XTAL_SI_XTAL_DRV = 0x15,
#define XTAL_SI_DRV_LATCH BIT(4)
+ XTAL_SI_XTAL_PLL = 0x16,
XTAL_SI_XTAL_XMD_2 = 0x24,
#define XTAL_SI_LDO_LPS GENMASK(6, 4)
XTAL_SI_XTAL_XMD_4 = 0x26,
@@ -1339,6 +1408,7 @@ enum rtw89_mac_xtal_si_offset {
XTAL_SI_SRAM_CTRL = 0xA1,
#define XTAL_SI_SRAM_DIS BIT(1)
#define FULL_BIT_MASK GENMASK(7, 0)
+ XTAL_SI_APBT = 0xD1,
XTAL_SI_PLL = 0xE0,
XTAL_SI_PLL_1 = 0xE1,
};
@@ -1364,7 +1434,8 @@ int rtw89_mac_resize_ple_rx_quota(struct rtw89_dev *rtwdev, bool wow);
int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
enum rtw89_mac_idx band);
void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool wow);
-int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode);
+int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
+ bool band1_en);
int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev,
enum rtw89_mac_dle_rsvd_qt_type type,
struct rtw89_mac_dle_rsvd_qt_cfg *cfg);
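
The cfg_ppdu_status/cfg_plt/get_plt_cnt conversions above all share one shape: a header-level static inline forwards through the per-generation ops table, so call sites stay unchanged while AX and BE chips bind different implementations at probe time. A minimal standalone sketch of that pattern, with invented names (gen_ops, cfg_thing) that are illustrative only:

#include <stdio.h>

/* Illustrative stand-ins for rtw89_mac_gen_def and its users. */
struct gen_ops {
	int (*cfg_thing)(int mac_idx, int enable);
};

static int cfg_thing_ax(int mac_idx, int enable)
{
	printf("ax impl: mac_idx=%d enable=%d\n", mac_idx, enable);
	return 0;
}

static const struct gen_ops gen_ax = { .cfg_thing = cfg_thing_ax };

struct dev {
	const struct gen_ops *ops;	/* chosen once, per chip generation */
};

/* Header-level wrapper: callers never see which generation is bound. */
static inline int dev_cfg_thing(struct dev *d, int mac_idx, int enable)
{
	return d->ops->cfg_thing(mac_idx, enable);
}

int main(void)
{
	struct dev d = { .ops = &gen_ax };	/* probe-time binding */

	return dev_cfg_thing(&d, 0, 1);
}
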
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 93889d2fface..31d1ffb16e83 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -441,7 +441,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
* when disconnected by peer
*/
if (rtwdev->scanning)
- rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
}
}
@@ -449,10 +449,11 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
ether_addr_copy(rtwvif->bssid, conf->bssid);
rtw89_cam_bssid_changed(rtwdev, rtwvif);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+ WRITE_ONCE(rtwvif->sync_bcn_tsf, 0);
}
if (changed & BSS_CHANGED_BEACON)
- rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+ rtw89_chip_h2c_update_beacon(rtwdev, rtwvif);
if (changed & BSS_CHANGED_ERP_SLOT)
rtw89_conf_tx(rtwdev, rtwvif);
@@ -497,7 +498,7 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw,
ether_addr_copy(rtwvif->bssid, vif->bss_conf.bssid);
rtw89_cam_bssid_changed(rtwdev, rtwvif);
rtw89_mac_port_update(rtwdev, rtwvif);
- rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_TYPE_CHANGE);
rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
@@ -518,7 +519,7 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mutex_lock(&rtwdev->mutex);
rtw89_mac_stop_ap(rtwdev, rtwvif);
- rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
mutex_unlock(&rtwdev->mutex);
}
@@ -660,6 +661,8 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mutex_lock(&rtwdev->mutex);
clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
+ clear_bit(tid, rtwsta->ampdu_map);
+ rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
mutex_unlock(&rtwdev->mutex);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -668,17 +671,19 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
set_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
rtwsta->ampdu_params[tid].agg_num = params->buf_size;
rtwsta->ampdu_params[tid].amsdu = params->amsdu;
+ set_bit(tid, rtwsta->ampdu_map);
rtw89_leave_ps_mode(rtwdev);
+ rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, vif, sta);
mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_START:
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, true, params);
+ rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, true, params);
mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_STOP:
mutex_lock(&rtwdev->mutex);
- rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, false, params);
+ rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, false, params);
mutex_unlock(&rtwdev->mutex);
break;
default:
@@ -990,7 +995,7 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw,
}
if (rtwdev->scanning)
- rtw89_hw_scan_abort(rtwdev, vif);
+ rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif);
if (type == IEEE80211_ROC_TYPE_MGMT_TX)
roc->state = RTW89_ROC_MGMT;
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index be30c9346293..f16467377eab 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -52,6 +52,9 @@ static const struct rtw89_port_reg rtw89_port_base_be = {
.mbssid = R_BE_MBSSID_CTRL,
.mbssid_drop = R_BE_MBSSID_DROP_0,
.tsf_sync = R_BE_PORT_0_TSF_SYNC,
+ .ptcl_dbg = R_BE_PTCL_DBG,
+ .ptcl_dbg_info = R_BE_PTCL_DBG_INFO,
+ .bcn_drop_all = R_BE_BCN_DROP_ALL0,
.hiq_win = {R_BE_P0MB_HGQ_WINDOW_CFG_0, R_BE_PORT_HGQ_WINDOW_CFG,
R_BE_PORT_HGQ_WINDOW_CFG + 1, R_BE_PORT_HGQ_WINDOW_CFG + 2,
R_BE_PORT_HGQ_WINDOW_CFG + 3},
@@ -988,6 +991,9 @@ static int spatial_reuse_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_RX_SR_CTRL, mac_idx);
rtw89_write8_clr(rtwdev, reg, B_BE_SR_EN | B_BE_SR_CTRL_PLCP_EN);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_BSSID_SRC_CTRL, mac_idx);
+ rtw89_write8_set(rtwdev, reg, B_BE_PLCP_SRC_EN);
+
return 0;
}
@@ -995,7 +1001,8 @@ static int tmac_init_be(struct rtw89_dev *rtwdev, u8 mac_idx)
{
u32 reg;
- rtw89_write32_clr(rtwdev, R_BE_TB_PPDU_CTRL, B_BE_QOSNULL_UPD_MUEDCA_EN);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_TB_PPDU_CTRL, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_BE_QOSNULL_UPD_MUEDCA_EN);
reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMTX_TCR_BE_4, mac_idx);
rtw89_write32_mask(rtwdev, reg, B_BE_EHT_HE_PPDU_4XLTF_ZLD_USTIMER_MASK, 0x12);
@@ -1449,6 +1456,71 @@ static int set_cpuio_be(struct rtw89_dev *rtwdev,
return 0;
}
+static int dle_upd_qta_aval_page_be(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_dle_ctrl_type type,
+ enum rtw89_mac_dle_ple_quota_id quota_id)
+{
+ u32 val;
+
+ if (type == DLE_CTRL_TYPE_WDE) {
+ rtw89_write32_mask(rtwdev, R_BE_WDE_BUFMGN_CTL,
+ B_BE_WDE_AVAL_UPD_QTAID_MASK, quota_id);
+ rtw89_write32_set(rtwdev, R_BE_WDE_BUFMGN_CTL, B_BE_WDE_AVAL_UPD_REQ);
+
+ return read_poll_timeout(rtw89_read32, val,
+ !(val & B_BE_WDE_AVAL_UPD_REQ),
+ 1, 2000, false, rtwdev, R_BE_WDE_BUFMGN_CTL);
+ } else if (type == DLE_CTRL_TYPE_PLE) {
+ rtw89_write32_mask(rtwdev, R_BE_PLE_BUFMGN_CTL,
+ B_BE_PLE_AVAL_UPD_QTAID_MASK, quota_id);
+ rtw89_write32_set(rtwdev, R_BE_PLE_BUFMGN_CTL, B_BE_PLE_AVAL_UPD_REQ);
+
+ return read_poll_timeout(rtw89_read32, val,
+ !(val & B_BE_PLE_AVAL_UPD_REQ),
+ 1, 2000, false, rtwdev, R_BE_PLE_BUFMGN_CTL);
+ }
+
+ rtw89_warn(rtwdev, "%s wrong type %d\n", __func__, type);
+ return -EINVAL;
+}
+
+static int dle_quota_change_be(struct rtw89_dev *rtwdev, bool band1_en)
+{
+ int ret;
+
+ if (band1_en) {
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_B0_TXPL);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE B0 TX avail page fail %d\n", ret);
+ return ret;
+ }
+
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_CMAC0_RX);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE CMAC0 RX avail page fail %d\n", ret);
+ return ret;
+ }
+ } else {
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_B1_TXPL);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE B1 TX avail page fail %d\n", ret);
+ return ret;
+ }
+
+ ret = dle_upd_qta_aval_page_be(rtwdev, DLE_CTRL_TYPE_PLE,
+ PLE_QTAID_CMAC1_RX);
+ if (ret) {
+ rtw89_err(rtwdev, "update PLE CMAC1 RX avail page fail %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int preload_init_be(struct rtw89_dev *rtwdev, u8 mac_idx,
enum rtw89_qta_mode mode)
{
@@ -1480,6 +1552,13 @@ static int preload_init_be(struct rtw89_dev *rtwdev, u8 mac_idx,
static int dbcc_bb_ctrl_be(struct rtw89_dev *rtwdev, bool bb1_en)
{
+ u32 set = B_BE_FEN_BB1PLAT_RSTB | B_BE_FEN_BB1_IP_RSTN;
+
+ if (bb1_en)
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, set);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_FEN_RST_ENABLE, set);
+
return 0;
}
@@ -1538,7 +1617,7 @@ static int band1_enable_be(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, true);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -1593,7 +1672,7 @@ static int band1_disable_be(struct rtw89_dev *rtwdev)
return ret;
}
- ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ ret = rtw89_mac_dle_quota_change(rtwdev, rtwdev->mac.qta_mode, false);
if (ret) {
rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
return ret;
@@ -1616,7 +1695,7 @@ static int dbcc_enable_be(struct rtw89_dev *rtwdev, bool enable)
if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) {
ret = rtw89_fw_h2c_notify_dbcc(rtwdev, true);
if (ret) {
- rtw89_err(rtwdev, "%s:[ERR]notfify dbcc1 fail %d\n",
+ rtw89_err(rtwdev, "%s:[ERR] notify dbcc1 fail %d\n",
__func__, ret);
return ret;
}
@@ -1625,7 +1704,7 @@ static int dbcc_enable_be(struct rtw89_dev *rtwdev, bool enable)
if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) {
ret = rtw89_fw_h2c_notify_dbcc(rtwdev, false);
if (ret) {
- rtw89_err(rtwdev, "%s:[ERR]notfify dbcc1 fail %d\n",
+ rtw89_err(rtwdev, "%s:[ERR] notify dbcc1 fail %d\n",
__func__, ret);
return ret;
}
@@ -1718,12 +1797,220 @@ static int trx_init_be(struct rtw89_dev *rtwdev)
return 0;
}
+int rtw89_mac_cfg_gnt_v2(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ u32 val = 0;
+
+ if (gnt_cfg->band[0].gnt_bt)
+ val |= B_BE_GNT_BT_BB0_VAL | B_BE_GNT_BT_RX_BB0_VAL |
+ B_BE_GNT_BT_TX_BB0_VAL;
+
+ if (gnt_cfg->band[0].gnt_bt_sw_en)
+ val |= B_BE_GNT_BT_BB0_SWCTRL | B_BE_GNT_BT_RX_BB0_SWCTRL |
+ B_BE_GNT_BT_TX_BB0_SWCTRL;
+
+ if (gnt_cfg->band[0].gnt_wl)
+ val |= B_BE_GNT_WL_BB0_VAL | B_BE_GNT_WL_RX_VAL |
+ B_BE_GNT_WL_TX_VAL | B_BE_GNT_WL_BB_PWR_VAL;
+
+ if (gnt_cfg->band[0].gnt_wl_sw_en)
+ val |= B_BE_GNT_WL_BB0_SWCTRL | B_BE_GNT_WL_RX_SWCTRL |
+ B_BE_GNT_WL_TX_SWCTRL | B_BE_GNT_WL_BB_PWR_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_bt)
+ val |= B_BE_GNT_BT_BB1_VAL | B_BE_GNT_BT_RX_BB1_VAL |
+ B_BE_GNT_BT_TX_BB1_VAL;
+
+ if (gnt_cfg->band[1].gnt_bt_sw_en)
+ val |= B_BE_GNT_BT_BB1_SWCTRL | B_BE_GNT_BT_RX_BB1_SWCTRL |
+ B_BE_GNT_BT_TX_BB1_SWCTRL;
+
+ if (gnt_cfg->band[1].gnt_wl)
+ val |= B_BE_GNT_WL_BB1_VAL | B_BE_GNT_WL_RX_VAL |
+ B_BE_GNT_WL_TX_VAL | B_BE_GNT_WL_BB_PWR_VAL;
+
+ if (gnt_cfg->band[1].gnt_wl_sw_en)
+ val |= B_BE_GNT_WL_BB1_SWCTRL | B_BE_GNT_WL_RX_SWCTRL |
+ B_BE_GNT_WL_TX_SWCTRL | B_BE_GNT_WL_BB_PWR_SWCTRL;
+
+ if (gnt_cfg->bt[0].wlan_act_en)
+ val |= B_BE_WL_ACT_SWCTRL;
+ if (gnt_cfg->bt[0].wlan_act)
+ val |= B_BE_WL_ACT_VAL;
+ if (gnt_cfg->bt[1].wlan_act_en)
+ val |= B_BE_WL_ACT2_SWCTRL;
+ if (gnt_cfg->bt[1].wlan_act)
+ val |= B_BE_WL_ACT2_VAL;
+
+ rtw89_write32(rtwdev, R_BE_GNT_SW_CTRL, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v2);
+
+int rtw89_mac_cfg_ctrl_path_v2(struct rtw89_dev *rtwdev, bool wl)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_mac_ax_gnt *g = dm->gnt.band;
+ struct rtw89_mac_ax_wl_act *gbt = dm->gnt.bt;
+ int i;
+
+ if (wl)
+ return 0;
+
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 1;
+ g[i].gnt_wl_sw_en = 1;
+ g[i].gnt_wl = 0;
+ gbt[i].wlan_act = 1;
+ gbt[i].wlan_act_en = 0;
+ }
+
+ return rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
+}
+EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v2);
+
+static
+int rtw89_mac_cfg_plt_be(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
+{
+ u32 reg;
+ u16 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_BT_PLT, plt->band);
+ val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_BE_TX_PLT_GNT_LTE_RX : 0) |
+ (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_BE_TX_PLT_GNT_BT_TX : 0) |
+ (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_BE_TX_PLT_GNT_BT_RX : 0) |
+ (plt->tx & RTW89_MAC_AX_PLT_GNT_WL ? B_BE_TX_PLT_GNT_WL : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_BE_RX_PLT_GNT_LTE_RX : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_BE_RX_PLT_GNT_BT_TX : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_BE_RX_PLT_GNT_BT_RX : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_BE_RX_PLT_GNT_WL : 0) |
+ B_BE_PLT_EN;
+ rtw89_write16(rtwdev, reg, val);
+
+ return 0;
+}
+
+static u16 rtw89_mac_get_plt_cnt_be(struct rtw89_dev *rtwdev, u8 band)
+{
+ u32 reg;
+ u16 cnt;
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_BT_PLT, band);
+ cnt = rtw89_read32_mask(rtwdev, reg, B_BE_BT_PLT_PKT_CNT_MASK);
+ rtw89_write16_set(rtwdev, reg, B_BE_BT_PLT_RST);
+
+ return cnt;
+}
+
+static int rtw89_set_hw_sch_tx_en_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 tx_en, u32 tx_en_mask)
+{
+ u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CTN_DRV_TXEN, mac_idx);
+ u32 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ val = rtw89_read32(rtwdev, reg);
+ val = (val & ~tx_en_mask) | (tx_en & tx_en_mask);
+ rtw89_write32(rtwdev, reg, val);
+
+ return 0;
+}
+
+int rtw89_mac_stop_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u32 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+ int ret;
+
+ *tx_en = rtw89_read32(rtwdev,
+ rtw89_mac_reg_by_idx(rtwdev, R_BE_CTN_DRV_TXEN, mac_idx));
+
+ switch (sel) {
+ case RTW89_SCH_TX_SEL_ALL:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx, 0,
+ B_BE_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_HIQ:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx,
+ 0, B_BE_CTN_TXEN_HGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MG0:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx,
+ 0, B_BE_CTN_TXEN_MGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MACID:
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx, 0,
+ B_BE_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_stop_sch_tx_v2);
+
+int rtw89_mac_resume_sch_tx_v2(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en)
+{
+ int ret;
+
+ ret = rtw89_set_hw_sch_tx_en_v2(rtwdev, mac_idx, tx_en,
+ B_BE_CTN_TXEN_ALL_MASK);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v2);
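
rtw89_mac_stop_sch_tx_v2() and rtw89_mac_resume_sch_tx_v2() form a save-then-restore pair: the stop path snapshots the live enable mask into *tx_en before clearing bits, and resume writes the snapshot back under B_BE_CTN_TXEN_ALL_MASK. A minimal model of that contract (register and mask values below are stand-ins, not the hardware's):

#include <stdint.h>
#include <stdio.h>

static uint32_t txen_reg = 0xFFu;	/* stand-in for R_BE_CTN_DRV_TXEN */
#define TXEN_ALL_MASK 0xFFu

static void stop_sch_tx(uint32_t *saved)
{
	*saved = txen_reg;		/* snapshot for the caller */
	txen_reg &= ~TXEN_ALL_MASK;	/* gate all scheduled TX */
}

static void resume_sch_tx(uint32_t saved)
{
	/* restore only the bits covered by the mask */
	txen_reg = (txen_reg & ~TXEN_ALL_MASK) | (saved & TXEN_ALL_MASK);
}

int main(void)
{
	uint32_t saved;

	stop_sch_tx(&saved);
	printf("stopped: 0x%x\n", txen_reg);
	resume_sch_tx(saved);
	printf("resumed: 0x%x\n", txen_reg);
	return 0;
}
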
+
+static
+int rtw89_mac_cfg_ppdu_status_be(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PPDU_STAT, mac_idx);
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ if (!enable) {
+ rtw89_write32_clr(rtwdev, reg, B_BE_PPDU_STAT_RPT_EN);
+ return 0;
+ }
+
+ rtw89_write32_mask(rtwdev, R_BE_HW_PPDU_STATUS, B_BE_FWD_PPDU_STAT_MASK, 3);
+ rtw89_write32(rtwdev, reg, B_BE_PPDU_STAT_RPT_EN | B_BE_PPDU_MAC_INFO |
+ B_BE_APP_RX_CNT_RPT | B_BE_APP_PLCP_HDR_RPT |
+ B_BE_PPDU_STAT_RPT_CRC32 | B_BE_PPDU_STAT_RPT_DMA);
+
+ return 0;
+}
+
static bool rtw89_mac_get_txpwr_cr_be(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
u32 reg_base, u32 *cr)
{
- const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem;
- enum rtw89_qta_mode mode = dle_mem->mode;
+ enum rtw89_qta_mode mode = rtwdev->mac.qta_mode;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, (enum rtw89_mac_idx)phy_idx,
@@ -2020,6 +2307,52 @@ static void rtw89_mac_dump_qta_lost_be(struct rtw89_dev *rtwdev)
dump_err_status_dispatcher_be(rtwdev);
}
+static int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable)
+{
+ struct rtw89_mac_h2c_info h2c_info = {};
+ struct rtw89_mac_c2h_info c2h_info = {};
+ int ret;
+
+ h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL;
+ h2c_info.content_len = sizeof(h2c_info.u.hdr);
+ h2c_info.u.hdr.w0 = u32_encode_bits(wow_enable, RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN);
+
+ ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
+ if (ret)
+ return ret;
+
+ if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int rtw89_wow_config_mac_be(struct rtw89_dev *rtwdev, bool enable_wow)
+{
+ if (enable_wow) {
+ rtw89_write32_set(rtwdev, R_BE_RX_STOP, B_BE_HOST_RX_STOP);
+ rtw89_write32_clr(rtwdev, R_BE_RX_FLTR_OPT, B_BE_SNIFFER_MODE);
+ rtw89_mac_cpu_io_rx(rtwdev, enable_wow);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw89_write32(rtwdev, R_BE_FWD_ERR, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_ACTN0, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_ACTN1, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_ACTN2, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_TF0, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_TF1, 0);
+ rtw89_write32(rtwdev, R_BE_FWD_ERR, 0);
+ rtw89_write32(rtwdev, R_BE_HW_PPDU_STATUS, 0);
+ rtw89_write8(rtwdev, R_BE_DBG_WOW_READY, WOWLAN_NOT_READY);
+ } else {
+ rtw89_mac_cpu_io_rx(rtwdev, enable_wow);
+ rtw89_write32_clr(rtwdev, R_BE_RX_STOP, B_BE_HOST_RX_STOP);
+ rtw89_write32_set(rtwdev, R_BE_RX_FLTR_OPT, B_BE_SNIFFER_MODE);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ }
+
+ return 0;
+}
+
static void rtw89_mac_dump_cmac_err_status_be(struct rtw89_dev *rtwdev,
u8 band)
{
@@ -2218,6 +2551,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.rx_fltr = R_BE_RX_FLTR_OPT,
.port_base = &rtw89_port_base_be,
.agg_len_ht = R_BE_AGG_LEN_HT_0,
+ .ps_status = R_BE_WMTX_POWER_BE_BIT_CTL,
.muedca_ctrl = {
.addr = R_BE_MUEDCA_EN,
@@ -2228,6 +2562,11 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.mask = B_BE_BFMEE_HT_NDPA_EN | B_BE_BFMEE_VHT_NDPA_EN |
B_BE_BFMEE_HE_NDPA_EN | B_BE_BFMEE_EHT_NDPA_EN,
},
+ .narrow_bw_ru_dis = {
+ .addr = R_BE_RXTRIG_TEST_USER_2,
+ .mask = B_BE_RXTRIG_RU26_DIS,
+ },
+ .wow_ctrl = {.addr = R_BE_WOW_CTRL, .mask = B_BE_WOW_WOWEN,},
.check_mac_en = rtw89_mac_check_mac_en_be,
.sys_init = sys_init_be,
@@ -2239,6 +2578,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.bf_assoc = rtw89_mac_bf_assoc_be,
.typ_fltr_opt = rtw89_mac_typ_fltr_opt_be,
+ .cfg_ppdu_status = rtw89_mac_cfg_ppdu_status_be,
.dle_mix_cfg = dle_mix_cfg_be,
.chk_dle_rdy = chk_dle_rdy_be,
@@ -2250,6 +2590,7 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.wde_quota_cfg = wde_quota_cfg_be,
.ple_quota_cfg = ple_quota_cfg_be,
.set_cpuio = set_cpuio_be,
+ .dle_quota_change = dle_quota_change_be,
.disable_cpu = rtw89_mac_disable_cpu_be,
.fwdl_enable_wcpu = rtw89_mac_fwdl_enable_wcpu_be,
@@ -2259,6 +2600,9 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.parse_phycap_map = rtw89_parse_phycap_map_be,
.cnv_efuse_state = rtw89_cnv_efuse_state_be,
+ .cfg_plt = rtw89_mac_cfg_plt_be,
+ .get_plt_cnt = rtw89_mac_get_plt_cnt_be,
+
.get_txpwr_cr = rtw89_mac_get_txpwr_cr_be,
.write_xtal_si = rtw89_mac_write_xtal_si_be,
@@ -2268,5 +2612,10 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = {
.dump_err_status = rtw89_mac_dump_err_status_be,
.is_txq_empty = mac_is_txq_empty_be,
+
+ .add_chan_list = rtw89_hw_scan_add_chan_list_be,
+ .scan_offload = rtw89_fw_h2c_scan_offload_be,
+
+ .wow_config_mac = rtw89_wow_config_mac_be,
};
EXPORT_SYMBOL(rtw89_mac_gen_be);
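
dle_upd_qta_aval_page_be() above follows the driver's usual request-then-poll shape: write the quota id, set the AVAL_UPD_REQ bit, then spin via read_poll_timeout() until hardware clears it. A self-contained model of that shape (the fake register stands in for R_BE_WDE/PLE_BUFMGN_CTL; a real read_poll_timeout() also bounds elapsed microseconds, not just iterations):

#include <stdint.h>
#include <stdio.h>

#define UPD_REQ (1u << 16)		/* stand-in for B_BE_*_AVAL_UPD_REQ */

static uint32_t fake_reg;		/* stand-in for the BUFMGN_CTL register */

static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

static void hw_tick(void)
{
	fake_reg &= ~UPD_REQ;		/* model: hardware acks the request */
}

static int request_and_poll(int budget)
{
	reg_write(reg_read() | UPD_REQ);	/* kick the update */

	while (budget--) {
		hw_tick();
		if (!(reg_read() & UPD_REQ))
			return 0;		/* hardware finished */
	}

	return -1;				/* -ETIMEDOUT in the kernel */
}

int main(void)
{
	printf("poll result: %d\n", request_and_poll(2000));
	return 0;
}
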
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 769f1ce62ebc..19001130ad94 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -155,8 +155,8 @@ static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
DMA_FROM_DEVICE);
}
-static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
- struct sk_buff *skb)
+static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
{
struct rtw89_pci_rxbd_info *rxbd_info;
struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
@@ -166,11 +166,59 @@ static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
+}
+
+static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 target_rx_tag;
+
+ if (!info->check_rx_tag)
+ return 0;
+
+ /* valid range is 1 ~ 0x1FFF */
+ if (rx_ring->target_rx_tag == 0)
+ target_rx_tag = 1;
+ else
+ target_rx_tag = rx_ring->target_rx_tag;
+
+ if (rx_info->tag != target_rx_tag) {
+ rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
+ rx_info->tag, target_rx_tag);
+ return -EAGAIN;
+ }
return 0;
}
-static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
+static
+int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ int rx_tag_retry = 100;
+ int ret;
+
+ do {
+ rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
+ rtw89_pci_rxbd_info_update(rtwdev, skb);
+
+ ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
+ if (ret != -EAGAIN)
+ break;
+ } while (rx_tag_retry--);
+
+ /* update target rx_tag for next RX */
+ rx_ring->target_rx_tag = rx_info->tag + 1;
+
+ return ret;
+}
+
+static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
@@ -187,7 +235,7 @@ static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
}
}
-static void rtw89_pci_ctrl_txdma_fw_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
+static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
@@ -259,9 +307,8 @@ static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
skb = rx_ring->buf[skb_idx];
- rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
- ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
+ ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
if (ret) {
rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
bd_ring->wp, ret);
@@ -549,9 +596,8 @@ static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
skb = rx_ring->buf[skb_idx];
- rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
- ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
+ ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
if (ret) {
rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
bd_ring->wp, ret);
@@ -705,7 +751,7 @@ void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
- isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR);
+ isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
if (isrs->halt_c2h_isrs)
rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
@@ -1550,6 +1596,7 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
bd_ring->rp = 0;
rx_ring->diliver_skb = NULL;
rx_ring->diliver_desc.ready = false;
+ rx_ring->target_rx_tag = 0;
rtw89_write16(rtwdev, addr_num, bd_ring->len);
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
@@ -1907,22 +1954,87 @@ static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u
return 0;
}
+static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
+{
+ u16 addr_2lsb = addr & B_AX_DBI_2LSB;
+ u16 write_addr;
+ u8 flag;
+ int ret;
+
+ write_addr = addr & B_AX_DBI_ADDR_MSK;
+ write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK);
+ rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data);
+ rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
+ rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
+
+ ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
+ 10 * RTW89_PCI_WR_RETRY_CNT, false,
+ rtwdev, R_AX_DBI_FLAG + 2);
+ if (ret)
+ rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n",
+ addr);
+
+ return ret;
+}
+
+static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
+{
+ u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
+ u8 flag;
+ int ret;
+
+ rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
+ rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
+
+ ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
+ 10 * RTW89_PCI_WR_RETRY_CNT, false,
+ rtwdev, R_AX_DBI_FLAG + 2);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n",
+ addr);
+ return ret;
+ }
+
+ read_addr = R_AX_DBI_RDATA + (addr & 3);
+ *value = rtw89_read8(rtwdev, read_addr);
+
+ return 0;
+}
+
static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
u8 data)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
struct pci_dev *pdev = rtwpci->pdev;
+ int ret;
- return pci_write_config_byte(pdev, addr, data);
+ ret = pci_write_config_byte(pdev, addr, data);
+ if (!ret)
+ return 0;
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
+ ret = rtw89_dbi_write8(rtwdev, addr, data);
+
+ return ret;
}
static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
u8 *value)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
struct pci_dev *pdev = rtwpci->pdev;
+ int ret;
- return pci_read_config_byte(pdev, addr, value);
+ ret = pci_read_config_byte(pdev, addr, value);
+ if (!ret)
+ return 0;
+
+ if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
+ ret = rtw89_dbi_read8(rtwdev, addr, value);
+
+ return ret;
}
static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
@@ -2412,7 +2524,7 @@ static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev)
B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
}
-static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
+static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 ret, check, dma_busy;
@@ -2439,7 +2551,7 @@ static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
return 0;
}
-static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
+static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
u32 ret, check, dma_busy;
@@ -2459,13 +2571,13 @@ static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
{
u32 ret;
- ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev);
+ ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev);
if (ret) {
rtw89_err(rtwdev, "txdma ch busy\n");
return ret;
}
- ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev);
+ ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev);
if (ret) {
rtw89_err(rtwdev, "rxdma ch busy\n");
return ret;
@@ -2644,8 +2756,8 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
}
/* disable all channels except to FW CMD channel to download firmware */
- rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false);
- rtw89_pci_ctrl_txdma_fw_ch_pcie(rtwdev, true);
+ rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false);
+ rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true);
/* start DMA activities */
rtw89_pci_ctrl_dma_all(rtwdev, true);
@@ -2758,7 +2870,7 @@ static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev)
}
/* enable DMA for all queues */
- rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true);
+ rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true);
/* Release PCI IO */
rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
@@ -3148,6 +3260,7 @@ static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
rx_ring->buf_sz = buf_sz;
rx_ring->diliver_skb = NULL;
rx_ring->diliver_desc.ready = false;
+ rx_ring->target_rx_tag = 0;
for (i = 0; i < len; i++) {
skb = dev_alloc_skb(buf_sz);
@@ -3387,8 +3500,7 @@ static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev)
rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
rtwpci->intrs[0] = 0;
- rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
- B_BE_PCIE_RX_RPQ0_IMR0_V1;
+ rtwpci->intrs[1] = 0;
}
static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev)
@@ -3540,12 +3652,20 @@ static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- int ret;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
if (rtw89_pci_disable_clkreq)
return;
+ gen_def->clkreq_set(rtwdev, enable);
+}
+
+static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ int ret;
+
ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
PCIE_CLKDLY_HW_30US);
if (ret)
@@ -3577,24 +3697,31 @@ static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- u8 value = 0;
- int ret;
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
if (rtw89_pci_disable_aspm_l1)
return;
+ gen_def->aspm_set(rtwdev, enable);
+}
+
+static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u8 value = 0;
+ int ret;
+
ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
if (ret)
- rtw89_err(rtwdev, "failed to read ASPM Delay\n");
+ rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
- value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK);
- value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
- FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);
+ u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
+ u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK);
ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
if (ret)
- rtw89_err(rtwdev, "failed to read ASPM Delay\n");
+ rtw89_warn(rtwdev, "failed to write ASPM Delay\n");
if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
if (enable)
@@ -3681,6 +3808,17 @@ static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ if (rtw89_pci_disable_l1ss)
+ return;
+
+ gen_def->l1ss_set(rtwdev, enable);
+}
+
+static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
+{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
@@ -3954,6 +4092,14 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,
+
+ .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax,
+ .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax,
+ .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax,
+
+ .aspm_set = rtw89_pci_aspm_set_ax,
+ .clkreq_set = rtw89_pci_clkreq_set_ax,
+ .l1ss_set = rtw89_pci_l1ss_set_ax,
};
EXPORT_SYMBOL(rtw89_pci_gen_ax);
@@ -3988,10 +4134,11 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
.recovery_start = rtw89_pci_ops_recovery_start,
.recovery_complete = rtw89_pci_ops_recovery_complete,
- .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_pcie,
- .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_pcie,
+ .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch,
+ .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch,
.ctrl_trxhci = rtw89_pci_ctrl_dma_trx,
- .poll_txdma_ch = rtw89_poll_txdma_ch_idle_pcie,
+ .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle,
+
.clr_idx_all = rtw89_pci_clr_idx_all,
.clear = rtw89_pci_clear_resource,
.disable_intr = rtw89_pci_disable_intr_lock,
@@ -4068,6 +4215,8 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_irq;
}
+ set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);
+
return 0;
err_free_irq:
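
The tag check added above depends on the RX tag being a 13-bit sequence number with 0 reserved ("valid range is 1 ~ 0x1FFF"): target_rx_tag is stored in a u32 :13 bitfield, so tag + 1 truncates naturally, and the explicit "target == 0, use 1" branch supplies the wrap fixup. A small check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Successor of a 13-bit RX tag, skipping the reserved value 0; this
 * mirrors "target_rx_tag = rx_info->tag + 1" stored in a :13 bitfield
 * plus the zero fixup in rtw89_pci_validate_rx_tag(). */
static uint16_t next_tag(uint16_t tag)
{
	uint16_t next = (tag + 1) & 0x1FFF;	/* bitfield truncation */

	return next ? next : 1;
}

int main(void)
{
	printf("after 1: 0x%x\n", next_tag(1));			/* 0x2 */
	printf("after 0x1FFF: 0x%x\n", next_tag(0x1FFF));	/* 0x1, not 0 */
	return 0;
}
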
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index ca5de77fee90..a63b6b7c9bfa 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -42,6 +42,7 @@
#define B_AX_DBI_WFLAG BIT(16)
#define B_AX_DBI_WREN_MSK GENMASK(15, 12)
#define B_AX_DBI_ADDR_MSK GENMASK(11, 2)
+#define B_AX_DBI_2LSB GENMASK(1, 0)
#define R_AX_DBI_WDATA 0x1094
#define R_AX_DBI_RDATA 0x1098
@@ -281,6 +282,21 @@
#define B_BE_PCIE_EN_SWENT_L23 BIT(1)
#define B_BE_SEL_REQ_EXIT_L1 BIT(0)
+#define R_BE_PCIE_MIX_CFG 0x300C
+#define B_BE_L1SS_TIMEOUT_CTRL BIT(18)
+#define B_BE_ASPM_CTRL_L1 BIT(17)
+#define B_BE_ASPM_CTRL_L0 BIT(16)
+#define B_BE_XFER_PENDING_FW BIT(11)
+#define B_BE_XFER_PENDING BIT(10)
+#define B_BE_REQ_EXIT_L1 BIT(9)
+#define B_BE_REQ_ENTR_L1 BIT(8)
+#define B_BE_L1SUB_ENABLE BIT(0)
+
+#define R_BE_L1_CLK_CTRL 0x3010
+#define B_BE_RAS_SD_HOLD_LTSSM BIT(12)
+#define B_BE_CLK_REQ_N BIT(1)
+#define B_BE_CLK_PM_EN BIT(0)
+
#define R_BE_PCIE_LAT_CTRL 0x3044
#define B_BE_ELBI_PHY_REMAP_MASK GENMASK(29, 24)
#define B_BE_SYS_SUS_L12_EN BIT(17)
@@ -289,6 +305,8 @@
#define B_BE_RTK_LDO_POWER_LATENCY_MASK GENMASK(11, 10)
#define B_BE_RTK_LDO_BIAS_LATENCY_MASK GENMASK(9, 8)
#define B_BE_CLK_REQ_LAT_MASK GENMASK(7, 4)
+#define B_BE_RTK_PM_SEL_OPT BIT(1)
+#define B_BE_CLK_REQ_SEL BIT(0)
#define R_BE_PCIE_HIMR0 0x30B0
#define B_BE_PCIE_HB1_IND_INTA_IMR BIT(31)
@@ -924,6 +942,8 @@
#define B_BE_SER_L1SUB_IMR BIT(1)
#define B_BE_SER_PMU_IMR BIT(0)
+#define R_BE_REG_PL1_ISR 0x34B4
+
#define R_BE_RX_APPEND_MODE 0x8920
#define B_BE_APPEND_OFFSET_MASK GENMASK(23, 16)
#define B_BE_APPEND_LEN_MASK GENMASK(15, 0)
@@ -996,7 +1016,7 @@
#define RTW89_PCI_TXWD_NUM_MAX 512
#define RTW89_PCI_TXWD_PAGE_SIZE 128
#define RTW89_PCI_ADDRINFO_MAX 4
-#define RTW89_PCI_RX_BUF_SIZE 11460
+#define RTW89_PCI_RX_BUF_SIZE (11454 + 40) /* +40 for rtw89_rxdesc_long_v2 */
#define RTW89_PCI_POLL_BDRAM_RST_CNT 100
#define RTW89_PCI_MULTITAG 8
@@ -1065,6 +1085,15 @@ enum rtw89_pcie_clkdly_hw {
PCIE_CLKDLY_HW_200US = 0x5,
};
+enum rtw89_pcie_clkdly_hw_v1 {
+ PCIE_CLKDLY_HW_V1_0 = 0,
+ PCIE_CLKDLY_HW_V1_16US = 0x1,
+ PCIE_CLKDLY_HW_V1_32US = 0x2,
+ PCIE_CLKDLY_HW_V1_64US = 0x3,
+ PCIE_CLKDLY_HW_V1_80US = 0x4,
+ PCIE_CLKDLY_HW_V1_96US = 0x5,
+};
+
enum mac_ax_bd_trunc_mode {
MAC_AX_BD_NORM,
MAC_AX_BD_TRUNC,
@@ -1215,6 +1244,14 @@ struct rtw89_pci_gen_def {
int (*lv1rst_stop_dma)(struct rtw89_dev *rtwdev);
int (*lv1rst_start_dma)(struct rtw89_dev *rtwdev);
+
+ void (*ctrl_txdma_ch)(struct rtw89_dev *rtwdev, bool enable);
+ void (*ctrl_txdma_fw_ch)(struct rtw89_dev *rtwdev, bool enable);
+ int (*poll_txdma_ch_idle)(struct rtw89_dev *rtwdev);
+
+ void (*aspm_set)(struct rtw89_dev *rtwdev, bool enable);
+ void (*clkreq_set)(struct rtw89_dev *rtwdev, bool enable);
+ void (*l1ss_set)(struct rtw89_dev *rtwdev, bool enable);
};
struct rtw89_pci_info {
@@ -1234,6 +1271,7 @@ struct rtw89_pci_info {
enum mac_ax_pcie_func_ctrl io_rcy_en;
enum mac_ax_io_rcy_tmr io_rcy_tmr;
bool rx_ring_eq_is_full;
+ bool check_rx_tag;
u32 init_cfg_reg;
u32 txhci_en_bit;
@@ -1276,7 +1314,7 @@ struct rtw89_pci_tx_data {
struct rtw89_pci_rx_info {
dma_addr_t dma;
- u32 fs:1, ls:1, tag:11, len:14;
+ u32 fs:1, ls:1, tag:13, len:14;
};
#define RTW89_PCI_TXBD_OPTION_LS BIT(14)
@@ -1405,6 +1443,7 @@ struct rtw89_pci_rx_ring {
u32 buf_sz;
struct sk_buff *diliver_skb;
struct rtw89_rx_desc_info diliver_desc;
+ u32 target_rx_tag:13;
};
struct rtw89_pci_isrs {
@@ -1521,6 +1560,7 @@ static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
}
extern const struct dev_pm_ops rtw89_pm_ops;
+extern const struct dev_pm_ops rtw89_pm_ops_be;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be;
@@ -1676,4 +1716,27 @@ static inline int rtw89_pci_reset_bdram(struct rtw89_dev *rtwdev)
return gen_def->rst_bdram(rtwdev);
}
+static inline void rtw89_pci_ctrl_txdma_ch(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ return gen_def->ctrl_txdma_ch(rtwdev, enable);
+}
+
+static inline void rtw89_pci_ctrl_txdma_fw_ch(struct rtw89_dev *rtwdev, bool enable)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ return gen_def->ctrl_txdma_fw_ch(rtwdev, enable);
+}
+
+static inline int rtw89_pci_poll_txdma_ch_idle(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_gen_def *gen_def = info->gen_def;
+
+ return gen_def->poll_txdma_ch_idle(rtwdev);
+}
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c
index 629ffa4bee91..7cc328222965 100644
--- a/drivers/net/wireless/realtek/rtw89/pci_be.c
+++ b/drivers/net/wireless/realtek/rtw89/pci_be.c
@@ -19,6 +19,54 @@ enum pcie_rxbd_mode {
#define PL0_TMR_MAC_1MS 0x27100
#define PL0_TMR_AUX_1MS 0x1E848
+static void rtw89_pci_aspm_set_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u8 value = 0;
+ int ret;
+
+ ret = pci_read_config_byte(pdev, RTW89_PCIE_ASPM_CTRL, &value);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
+
+ u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
+
+ ret = pci_write_config_byte(pdev, RTW89_PCIE_ASPM_CTRL, value);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to write ASPM Delay\n");
+
+ if (enable)
+ rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_BE_ASPM_CTRL_L1);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
+ B_BE_ASPM_CTRL_L1);
+}
+
+static void rtw89_pci_l1ss_set_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ if (enable)
+ rtw89_write32_set(rtwdev, R_BE_PCIE_MIX_CFG,
+ B_BE_L1SUB_ENABLE);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_PCIE_MIX_CFG,
+ B_BE_L1SUB_ENABLE);
+}
+
+static void rtw89_pci_clkreq_set_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ rtw89_write32_mask(rtwdev, R_BE_PCIE_LAT_CTRL, B_BE_CLK_REQ_LAT_MASK,
+ PCIE_CLKDLY_HW_V1_0);
+
+ if (enable)
+ rtw89_write32_set(rtwdev, R_BE_L1_CLK_CTRL,
+ B_BE_CLK_PM_EN);
+ else
+ rtw89_write32_clr(rtwdev, R_BE_L1_CLK_CTRL,
+ B_BE_CLK_PM_EN);
+}
+
static void _patch_pcie_power_wake_be(struct rtw89_dev *rtwdev, bool power_up)
{
if (power_up)
@@ -105,6 +153,10 @@ static void rtw89_pci_ctrl_trxdma_pcie_be(struct rtw89_dev *rtwdev,
val |= B_BE_STOP_AXI_MST;
rtw89_write32(rtwdev, R_BE_HAXI_INIT_CFG1, val);
+
+ if (io_en == MAC_AX_PCIE_ENABLE)
+ rtw89_write32_mask(rtwdev, R_BE_HAXI_MST_WDT_TIMEOUT_SEL_V1,
+ B_BE_HAXI_MST_WDT_TIMEOUT_SEL_MASK, 4);
}
static void rtw89_pci_clr_idx_all_be(struct rtw89_dev *rtwdev)
@@ -257,6 +309,7 @@ static void rtw89_pci_ser_setting_be(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_BE_PL1_DBG_INFO, 0x0);
rtw89_write32_set(rtwdev, R_BE_FWS1IMR, B_BE_PCIE_SER_TIMEOUT_INDIC_EN);
rtw89_write32_set(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
+ rtw89_write32_mask(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_TIMER_UNIT_MASK, 1);
val32 = rtw89_read32(rtwdev, R_BE_REG_PL1_MASK);
val32 |= B_BE_SER_PMU_IMR | B_BE_SER_L1SUB_IMR | B_BE_SER_PM_MASTER_IMR |
@@ -264,8 +317,7 @@ static void rtw89_pci_ser_setting_be(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, R_BE_REG_PL1_MASK, val32);
}
-static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool all_en,
- bool h2c_en)
+static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool enable)
{
u32 mask_all;
u32 val;
@@ -278,12 +330,19 @@ static void rtw89_pci_ctrl_txdma_ch_be(struct rtw89_dev *rtwdev, bool all_en,
val = rtw89_read32(rtwdev, R_BE_HAXI_DMA_STOP1);
val |= B_BE_STOP_CH13 | B_BE_STOP_CH14;
- if (all_en)
+ if (enable)
val &= ~mask_all;
else
val |= mask_all;
- if (h2c_en)
+ rtw89_write32(rtwdev, R_BE_HAXI_DMA_STOP1, val);
+}
+
+static void rtw89_pci_ctrl_txdma_fw_ch_be(struct rtw89_dev *rtwdev, bool enable)
+{
+ u32 val = rtw89_read32(rtwdev, R_BE_HAXI_DMA_STOP1);
+
+ if (enable)
val &= ~B_BE_STOP_CH12;
else
val |= B_BE_STOP_CH12;
@@ -322,7 +381,8 @@ static int rtw89_pci_ops_mac_pre_init_be(struct rtw89_dev *rtwdev)
rtw89_pci_pcie_setting_be(rtwdev);
rtw89_pci_ser_setting_be(rtwdev);
- rtw89_pci_ctrl_txdma_ch_be(rtwdev, false, true);
+ rtw89_pci_ctrl_txdma_ch_be(rtwdev, false);
+ rtw89_pci_ctrl_txdma_fw_ch_be(rtwdev, true);
rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_ENABLE,
MAC_AX_PCIE_ENABLE, MAC_AX_PCIE_ENABLE);
@@ -432,7 +492,8 @@ static int rtw89_pci_ops_mac_post_init_be(struct rtw89_dev *rtwdev)
rtw89_pci_ctrl_trxdma_pcie_be(rtwdev, MAC_AX_PCIE_IGNORE,
MAC_AX_PCIE_IGNORE, MAC_AX_PCIE_ENABLE);
rtw89_pci_ctrl_wpdma_pcie_be(rtwdev, true);
- rtw89_pci_ctrl_txdma_ch_be(rtwdev, true, true);
+ rtw89_pci_ctrl_txdma_ch_be(rtwdev, true);
+ rtw89_pci_ctrl_txdma_fw_ch_be(rtwdev, true);
rtw89_pci_configure_mit_be(rtwdev);
return 0;
@@ -489,6 +550,46 @@ static int rtw89_pci_lv1rst_start_dma_be(struct rtw89_dev *rtwdev)
return 0;
}
+static int __maybe_unused rtw89_pci_suspend_be(struct device *dev)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
+ rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_R_DIS_PRST);
+ rtw89_write32_clr(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
+ rtw89_write32_set(rtwdev, R_BE_PCIE_FRZ_CLK, B_BE_PCIE_FRZ_REG_RST);
+ rtw89_write32_clr(rtwdev, R_BE_REG_PL1_MASK, B_BE_SER_PM_MASTER_IMR);
+ return 0;
+}
+
+static int __maybe_unused rtw89_pci_resume_be(struct device *dev)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
+ struct rtw89_dev *rtwdev = hw->priv;
+ u32 polling;
+ int ret;
+
+ rtw89_write32_set(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
+ rtw89_write32_clr(rtwdev, R_BE_RSV_CTRL, B_BE_R_DIS_PRST);
+ rtw89_write32_clr(rtwdev, R_BE_RSV_CTRL, B_BE_WLOCK_1C_BIT6);
+ rtw89_write32_clr(rtwdev, R_BE_PCIE_FRZ_CLK, B_BE_PCIE_FRZ_REG_RST);
+ rtw89_write32_clr(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
+
+ ret = read_poll_timeout_atomic(rtw89_read32, polling, !polling, 1, 1000,
+ false, rtwdev, R_BE_REG_PL1_ISR);
+ if (ret)
+ rtw89_warn(rtwdev, "[ERR] PCIE SER clear polling fail\n");
+
+ rtw89_write32_set(rtwdev, R_BE_SER_PL1_CTRL, B_BE_PL1_SER_PL1_EN);
+ rtw89_write32_set(rtwdev, R_BE_REG_PL1_MASK, B_BE_SER_PM_MASTER_IMR);
+
+ return 0;
+}
+
+SIMPLE_DEV_PM_OPS(rtw89_pm_ops_be, rtw89_pci_suspend_be, rtw89_pci_resume_be);
+EXPORT_SYMBOL(rtw89_pm_ops_be);
+
const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.isr_rdu = B_BE_RDU_CH1_INT | B_BE_RDU_CH0_INT,
.isr_halt_c2h = B_BE_HALT_C2H_INT,
@@ -505,5 +606,13 @@ const struct rtw89_pci_gen_def rtw89_pci_gen_be = {
.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_be,
.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_be,
+
+ .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_be,
+ .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_be,
+ .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_be,
+
+ .aspm_set = rtw89_pci_aspm_set_be,
+ .clkreq_set = rtw89_pci_clkreq_set_be,
+ .l1ss_set = rtw89_pci_l1ss_set_be,
};
EXPORT_SYMBOL(rtw89_pci_gen_be);
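
rtw89_pm_ops_be is exported so that BE-generation chip modules can hang it off their PCI driver. The wiring below is an assumed sketch; the driver name, id table, and file are placeholders, not taken from this patch:

/* Assumed consumer, e.g. in an rtw8922ae-style module: */
static struct pci_driver rtw89_8922ae_driver = {
	.name		= "rtw89_8922ae",
	.id_table	= rtw89_8922ae_id_table,	/* placeholder */
	.probe		= rtw89_pci_probe,
	.remove		= rtw89_pci_remove,
	.driver.pm	= &rtw89_pm_ops_be,		/* the new PM ops */
};
module_pci_driver(rtw89_8922ae_driver);
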
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index bafc7b1cc104..12da63d64307 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -13,6 +13,13 @@
#include "txrx.h"
#include "util.h"
+static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ return phy->phy0_phy1_offset(rtwdev, addr);
+}
+
static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
const struct rtw89_ra_report *report)
{
@@ -718,6 +725,53 @@ u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);
+u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_bandwidth dbw)
+{
+ enum rtw89_bandwidth cbw = chan->band_width;
+ u8 pri_ch = chan->primary_channel;
+ u8 central_ch = chan->channel;
+ u8 txsb_idx = 0;
+
+ if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
+ return txsb_idx;
+
+ switch (cbw) {
+ case RTW89_CHANNEL_WIDTH_40:
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ if (dbw == RTW89_CHANNEL_WIDTH_20)
+ txsb_idx = (pri_ch - central_ch + 6) / 4;
+ else
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ if (dbw == RTW89_CHANNEL_WIDTH_20)
+ txsb_idx = (pri_ch - central_ch + 14) / 4;
+ else if (dbw == RTW89_CHANNEL_WIDTH_40)
+ txsb_idx = (pri_ch - central_ch + 12) / 8;
+ else
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ case RTW89_CHANNEL_WIDTH_320:
+ if (dbw == RTW89_CHANNEL_WIDTH_20)
+ txsb_idx = (pri_ch - central_ch + 30) / 4;
+ else if (dbw == RTW89_CHANNEL_WIDTH_40)
+ txsb_idx = (pri_ch - central_ch + 28) / 8;
+ else if (dbw == RTW89_CHANNEL_WIDTH_80)
+ txsb_idx = (pri_ch - central_ch + 24) / 16;
+ else
+ txsb_idx = pri_ch > central_ch ? 1 : 0;
+ break;
+ default:
+ break;
+ }
+
+ return txsb_idx;
+}
+EXPORT_SYMBOL(rtw89_phy_get_txsb);
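
As a concrete check of the 80 MHz case above: with central channel 42 and a 20 MHz data bandwidth, the four primaries 36/40/44/48 give (pri_ch - central_ch + 6) / 4 = 0, 1, 2, 3, i.e. the sub-band index counts 20 MHz slices up from the low edge of the channel:

#include <stdio.h>

int main(void)
{
	int central_ch = 42;			/* 80 MHz channel center */
	int pri[] = { 36, 40, 44, 48 };		/* 20 MHz primaries */

	for (int i = 0; i < 4; i++)
		printf("pri %d -> txsb %d\n", pri[i],
		       (pri[i] - central_ch + 6) / 4);

	return 0;
}
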
+
static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
{
return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
@@ -796,6 +850,71 @@ u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
+static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr)
+{
+ static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24};
+ static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC};
+ bool busy, done;
+ int ret;
+ u32 val;
+
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1);
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
+ 1, 3800, false,
+ rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY);
+ if (ret) {
+ rtw89_warn(rtwdev, "poll HWSI is busy\n");
+ return INV_RF_DATA;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr);
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1);
+ udelay(2);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
+ 1, 3800, false,
+ rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE);
+ if (ret) {
+ rtw89_warn(rtwdev, "read HWSI is busy\n");
+ val = INV_RF_DATA;
+ goto out;
+ }
+
+ val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK);
+out:
+ rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0);
+
+ return val;
+}
+
+static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path, u32 addr, u32 mask)
+{
+ u32 val;
+
+ val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
+
+ return (val & mask) >> __ffs(mask);
+}
+
+u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
+ else
+ return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask);
+}
+EXPORT_SYMBOL(rtw89_phy_read_rf_v2);
+
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data)
{
@@ -875,6 +994,66 @@ bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
+static
+bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 data)
+{
+ static const u32 addr_is_idle[2] = {0x2C24, 0x2D24};
+ static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0};
+ bool busy;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
+ 1, 3800, false,
+ rtwdev, addr_is_idle[rf_path], BIT(29));
+ if (ret) {
+ rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
+ return false;
+ }
+
+ val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) |
+ u32_encode_bits(data, B_HWSI_DATA_VAL);
+
+ rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val);
+
+ return true;
+}
+
+static
+bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ u32 val;
+
+ if (mask == RFREG_MASK) {
+ val = data;
+ } else {
+ val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
+ val &= ~mask;
+ val |= (data << __ffs(mask)) & mask;
+ }
+
+ return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, val);
+}
+
+bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return false;
+ }
+
+ if (ad_sel)
+ return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
+ else
+ return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data);
+}
+EXPORT_SYMBOL(rtw89_phy_write_rf_v2);
+
static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
@@ -893,20 +1072,30 @@ static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
enum rtw89_rf_path rf_path,
void *extra_data)
{
- if (reg->addr == 0xfe)
+ u32 addr;
+
+ if (reg->addr == 0xfe) {
mdelay(50);
- else if (reg->addr == 0xfd)
+ } else if (reg->addr == 0xfd) {
mdelay(5);
- else if (reg->addr == 0xfc)
+ } else if (reg->addr == 0xfc) {
mdelay(1);
- else if (reg->addr == 0xfb)
+ } else if (reg->addr == 0xfb) {
udelay(50);
- else if (reg->addr == 0xfa)
+ } else if (reg->addr == 0xfa) {
udelay(5);
- else if (reg->addr == 0xf9)
+ } else if (reg->addr == 0xf9) {
udelay(1);
- else
- rtw89_phy_write32(rtwdev, reg->addr, reg->data);
+ } else if (reg->data == BYPASS_CR_DATA) {
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr);
+ } else {
+ addr = reg->addr;
+
+ if ((uintptr_t)extra_data == RTW89_PHY_1)
+ addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr);
+
+ rtw89_phy_write32(rtwdev, addr, reg->data);
+ }
}
union rtw89_phy_bb_gain_arg {
@@ -929,7 +1118,7 @@ static void
rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 type = arg.type;
u8 path = arg.path;
u8 gband = arg.gain_band;
@@ -968,7 +1157,7 @@ static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 rxsc_start = arg.rxsc_start;
u8 bw = arg.bw;
u8 path = arg.path;
@@ -1050,7 +1239,7 @@ static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 type = arg.type;
u8 path = arg.path;
u8 gband = arg.gain_band;
@@ -1077,7 +1266,7 @@ static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
union rtw89_phy_bb_gain_arg arg, u32 data)
{
- struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 type = arg.type;
u8 path = arg.path;
u8 gband = arg.gain_band;
@@ -1108,10 +1297,10 @@ rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
}
}
-static void rtw89_phy_config_bb_gain(struct rtw89_dev *rtwdev,
- const struct rtw89_reg2_def *reg,
- enum rtw89_rf_path rf_path,
- void *extra_data)
+static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
@@ -1420,12 +1609,15 @@ void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
+ if (rtwdev->dbcc_en)
+ rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg,
+ (void *)RTW89_PHY_1);
rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0);
bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
if (bb_gain_table)
rtw89_phy_init_reg(rtwdev, bb_gain_table,
- rtw89_phy_config_bb_gain, NULL);
+ chip->phy_def->config_bb_gain, NULL);
rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
}
@@ -1467,11 +1659,9 @@ void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
kfree(rf_reg_info);
}
-static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
+static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
{
- struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
const struct rtw89_chip_info *chip = rtwdev->chip;
- const struct rtw89_phy_table *nctl_table;
u32 val;
int ret;
@@ -1491,6 +1681,15 @@ static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
1000, false, rtwdev);
if (ret)
rtw89_err(rtwdev, "failed to poll nctl block\n");
+}
+
+static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_phy_table *nctl_table;
+
+ rtw89_phy_preinit_rf_nctl(rtwdev);
nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);
@@ -1499,14 +1698,11 @@ static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
}
-static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
+static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr)
{
u32 phy_page = addr >> 8;
u32 ofst = 0;
- if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
- return addr < 0x10000 ? 0x20000 : 0;
-
switch (phy_page) {
case 0x6:
case 0x7:
@@ -1561,6 +1757,7 @@ void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}
+EXPORT_SYMBOL(rtw89_phy_set_phy_regs);
void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
const struct rtw89_phy_reg3_tbl *tbl)
@@ -2699,9 +2896,63 @@ void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
};
+static
+void rtw89_phy_rfk_report_prep(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
+
+ wait->state = RTW89_RFK_STATE_START;
+ wait->start_time = ktime_get();
+ reinit_completion(&wait->completion);
+}
+
+static
+int rtw89_phy_rfk_report_wait(struct rtw89_dev *rtwdev, const char *rfk_name,
+ unsigned int ms)
+{
+ struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
+ unsigned long time_left;
+
+ /* Since we can't receive C2H event during SER, use a fixed delay. */
+ if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
+ fsleep(1000 * ms / 2);
+ goto out;
+ }
+
+ time_left = wait_for_completion_timeout(&wait->completion,
+ msecs_to_jiffies(ms));
+ if (time_left == 0) {
+ rtw89_warn(rtwdev, "failed to wait RF %s\n", rfk_name);
+ return -ETIMEDOUT;
+ } else if (wait->state != RTW89_RFK_STATE_OK) {
+ rtw89_warn(rtwdev, "failed to do RF %s result from state %d\n",
+ rfk_name, wait->state);
+ return -EFAULT;
+ }
+
+out:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %lld ms to complete\n",
+ rfk_name, ktime_ms_delta(ktime_get(), wait->start_time));
+
+ return 0;
+}
+
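The prep/wait pair above implements a one-shot firmware handshake: reinit_completion() arms the waiter before the H2C trigger goes out, so even an immediate C2H report cannot be lost, and the C2H handler below completes it. A stripped-down sketch of the same pattern (hypothetical names, kernel-style):

    /* Minimal arm-trigger-wait sketch, assuming a `struct completion done`
     * shared with the interrupt-side report handler; send_fw_trigger() is
     * a stand-in for the H2C send.
     */
    static int fw_op_and_wait(struct completion *done, unsigned int ms)
    {
            reinit_completion(done);        /* arm before triggering */

            if (send_fw_trigger())
                    return -EIO;

            if (!wait_for_completion_timeout(done, msecs_to_jiffies(ms)))
                    return -ETIMEDOUT;

            return 0;
    }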
static void
rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
+ const struct rtw89_c2h_rfk_report *report =
+ (const struct rtw89_c2h_rfk_report *)c2h->data;
+ struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
+
+ wait->state = report->state;
+ wait->version = report->version;
+
+ complete(&wait->completion);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "RFK report state %d with version %d (%*ph)\n",
+ wait->state, wait->version,
+ (int)(len - sizeof(report->hdr)), &report->state);
}
static
@@ -2772,6 +3023,726 @@ void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
handler(rtwdev, skb, len);
}
+int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
+
+int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, tssi_mode);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "TSSI", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);
+
+int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "IQK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);
+
+int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "DPK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);
+
+int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "TXGAPK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);
+
+int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "DACK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);
+
+int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms)
+{
+ int ret;
+
+ rtw89_phy_rfk_report_prep(rtwdev);
+
+ ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx);
+ if (ret)
+ return ret;
+
+ return rtw89_phy_rfk_report_wait(rtwdev, "RX_DCK", ms);
+}
+EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait);
+
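A caller chains these wrappers per calibration step; a hypothetical sequence (ordering and timeout values illustrative only, the real per-chip sequence lives in the chip's rfk callbacks):

    static void demo_rfk_sequence(struct rtw89_dev *rtwdev)
    {
            rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5);
            rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, 58);
            rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
            rtw89_phy_rfk_iqk_and_wait(rtwdev, RTW89_PHY_0, 30);
            rtw89_phy_rfk_dpk_and_wait(rtwdev, RTW89_PHY_0, 180);
            rtw89_phy_rfk_txgapk_and_wait(rtwdev, RTW89_PHY_0, 30);
    }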
+static u32 phy_tssi_get_cck_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 13:
+ return 4;
+ case 14:
+ return 5;
+ }
+
+ return 0;
+}
+
+#define PHY_TSSI_EXTRA_GROUP_BIT BIT(31)
+#define PHY_TSSI_EXTRA_GROUP(idx) (PHY_TSSI_EXTRA_GROUP_BIT | (idx))
+#define PHY_IS_TSSI_EXTRA_GROUP(group) ((group) & PHY_TSSI_EXTRA_GROUP_BIT)
+#define PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) \
+ ((group) & ~PHY_TSSI_EXTRA_GROUP_BIT)
+#define PHY_TSSI_EXTRA_GET_GROUP_IDX2(group) \
+ (PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
+
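The bit-31 flag marks channels that fall between two calibration groups; decoding such a value yields the two neighboring indices whose DE entries get averaged. A sketch with illustrative values:

    s8 tssi_mcs[32] = { /* hypothetical per-group DE table */ };
    u32 gidx = PHY_TSSI_EXTRA_GROUP(5);     /* e.g. 5G OFDM ch 41..43 below */
    s8 de;

    if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
            u32 g1 = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);   /* 5 */
            u32 g2 = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);   /* 6 */

            de = (tssi_mcs[g1] + tssi_mcs[g2]) / 2;
    } else {
            de = tssi_mcs[gidx];
    }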
+static u32 phy_tssi_get_ofdm_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 14:
+ return 4;
+ case 36 ... 40:
+ return 5;
+ case 41 ... 43:
+ return PHY_TSSI_EXTRA_GROUP(5);
+ case 44 ... 48:
+ return 6;
+ case 49 ... 51:
+ return PHY_TSSI_EXTRA_GROUP(6);
+ case 52 ... 56:
+ return 7;
+ case 57 ... 59:
+ return PHY_TSSI_EXTRA_GROUP(7);
+ case 60 ... 64:
+ return 8;
+ case 100 ... 104:
+ return 9;
+ case 105 ... 107:
+ return PHY_TSSI_EXTRA_GROUP(9);
+ case 108 ... 112:
+ return 10;
+ case 113 ... 115:
+ return PHY_TSSI_EXTRA_GROUP(10);
+ case 116 ... 120:
+ return 11;
+ case 121 ... 123:
+ return PHY_TSSI_EXTRA_GROUP(11);
+ case 124 ... 128:
+ return 12;
+ case 129 ... 131:
+ return PHY_TSSI_EXTRA_GROUP(12);
+ case 132 ... 136:
+ return 13;
+ case 137 ... 139:
+ return PHY_TSSI_EXTRA_GROUP(13);
+ case 140 ... 144:
+ return 14;
+ case 149 ... 153:
+ return 15;
+ case 154 ... 156:
+ return PHY_TSSI_EXTRA_GROUP(15);
+ case 157 ... 161:
+ return 16;
+ case 162 ... 164:
+ return PHY_TSSI_EXTRA_GROUP(16);
+ case 165 ... 169:
+ return 17;
+ case 170 ... 172:
+ return PHY_TSSI_EXTRA_GROUP(17);
+ case 173 ... 177:
+ return 18;
+ }
+
+ return 0;
+}
+
+static u32 phy_tssi_get_6g_ofdm_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 5:
+ return 0;
+ case 6 ... 8:
+ return PHY_TSSI_EXTRA_GROUP(0);
+ case 9 ... 13:
+ return 1;
+ case 14 ... 16:
+ return PHY_TSSI_EXTRA_GROUP(1);
+ case 17 ... 21:
+ return 2;
+ case 22 ... 24:
+ return PHY_TSSI_EXTRA_GROUP(2);
+ case 25 ... 29:
+ return 3;
+ case 33 ... 37:
+ return 4;
+ case 38 ... 40:
+ return PHY_TSSI_EXTRA_GROUP(4);
+ case 41 ... 45:
+ return 5;
+ case 46 ... 48:
+ return PHY_TSSI_EXTRA_GROUP(5);
+ case 49 ... 53:
+ return 6;
+ case 54 ... 56:
+ return PHY_TSSI_EXTRA_GROUP(6);
+ case 57 ... 61:
+ return 7;
+ case 65 ... 69:
+ return 8;
+ case 70 ... 72:
+ return PHY_TSSI_EXTRA_GROUP(8);
+ case 73 ... 77:
+ return 9;
+ case 78 ... 80:
+ return PHY_TSSI_EXTRA_GROUP(9);
+ case 81 ... 85:
+ return 10;
+ case 86 ... 88:
+ return PHY_TSSI_EXTRA_GROUP(10);
+ case 89 ... 93:
+ return 11;
+ case 97 ... 101:
+ return 12;
+ case 102 ... 104:
+ return PHY_TSSI_EXTRA_GROUP(12);
+ case 105 ... 109:
+ return 13;
+ case 110 ... 112:
+ return PHY_TSSI_EXTRA_GROUP(13);
+ case 113 ... 117:
+ return 14;
+ case 118 ... 120:
+ return PHY_TSSI_EXTRA_GROUP(14);
+ case 121 ... 125:
+ return 15;
+ case 129 ... 133:
+ return 16;
+ case 134 ... 136:
+ return PHY_TSSI_EXTRA_GROUP(16);
+ case 137 ... 141:
+ return 17;
+ case 142 ... 144:
+ return PHY_TSSI_EXTRA_GROUP(17);
+ case 145 ... 149:
+ return 18;
+ case 150 ... 152:
+ return PHY_TSSI_EXTRA_GROUP(18);
+ case 153 ... 157:
+ return 19;
+ case 161 ... 165:
+ return 20;
+ case 166 ... 168:
+ return PHY_TSSI_EXTRA_GROUP(20);
+ case 169 ... 173:
+ return 21;
+ case 174 ... 176:
+ return PHY_TSSI_EXTRA_GROUP(21);
+ case 177 ... 181:
+ return 22;
+ case 182 ... 184:
+ return PHY_TSSI_EXTRA_GROUP(22);
+ case 185 ... 189:
+ return 23;
+ case 193 ... 197:
+ return 24;
+ case 198 ... 200:
+ return PHY_TSSI_EXTRA_GROUP(24);
+ case 201 ... 205:
+ return 25;
+ case 206 ... 208:
+ return PHY_TSSI_EXTRA_GROUP(25);
+ case 209 ... 213:
+ return 26;
+ case 214 ... 216:
+ return PHY_TSSI_EXTRA_GROUP(26);
+ case 217 ... 221:
+ return 27;
+ case 225 ... 229:
+ return 28;
+ case 230 ... 232:
+ return PHY_TSSI_EXTRA_GROUP(28);
+ case 233 ... 237:
+ return 29;
+ case 238 ... 240:
+ return PHY_TSSI_EXTRA_GROUP(29);
+ case 241 ... 245:
+ return 30;
+ case 246 ... 248:
+ return PHY_TSSI_EXTRA_GROUP(30);
+ case 249 ... 253:
+ return 31;
+ }
+
+ return 0;
+}
+
+static u32 phy_tssi_get_trim_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 8:
+ return 0;
+ case 9 ... 14:
+ return 1;
+ case 36 ... 48:
+ return 2;
+ case 49 ... 51:
+ return PHY_TSSI_EXTRA_GROUP(2);
+ case 52 ... 64:
+ return 3;
+ case 100 ... 112:
+ return 4;
+ case 113 ... 115:
+ return PHY_TSSI_EXTRA_GROUP(4);
+ case 116 ... 128:
+ return 5;
+ case 132 ... 144:
+ return 6;
+ case 149 ... 177:
+ return 7;
+ }
+
+ return 0;
+}
+
+static u32 phy_tssi_get_6g_trim_group(u8 ch)
+{
+ switch (ch) {
+ case 1 ... 13:
+ return 0;
+ case 14 ... 16:
+ return PHY_TSSI_EXTRA_GROUP(0);
+ case 17 ... 29:
+ return 1;
+ case 33 ... 45:
+ return 2;
+ case 46 ... 48:
+ return PHY_TSSI_EXTRA_GROUP(2);
+ case 49 ... 61:
+ return 3;
+ case 65 ... 77:
+ return 4;
+ case 78 ... 80:
+ return PHY_TSSI_EXTRA_GROUP(4);
+ case 81 ... 93:
+ return 5;
+ case 97 ... 109:
+ return 6;
+ case 110 ... 112:
+ return PHY_TSSI_EXTRA_GROUP(6);
+ case 113 ... 125:
+ return 7;
+ case 129 ... 141:
+ return 8;
+ case 142 ... 144:
+ return PHY_TSSI_EXTRA_GROUP(8);
+ case 145 ... 157:
+ return 9;
+ case 161 ... 173:
+ return 10;
+ case 174 ... 176:
+ return PHY_TSSI_EXTRA_GROUP(10);
+ case 177 ... 189:
+ return 11;
+ case 193 ... 205:
+ return 12;
+ case 206 ... 208:
+ return PHY_TSSI_EXTRA_GROUP(12);
+ case 209 ... 221:
+ return 13;
+ case 225 ... 237:
+ return 14;
+ case 238 ... 240:
+ return PHY_TSSI_EXTRA_GROUP(14);
+ case 241 ... 253:
+ return 15;
+ }
+
+ return 0;
+}
+
+static s8 phy_tssi_get_ofdm_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
+ u32 gidx_1st;
+ u32 gidx_2nd;
+ s8 de_1st;
+ s8 de_2nd;
+ u32 gidx;
+ s8 val;
+
+ if (band == RTW89_BAND_6G)
+ goto calc_6g;
+
+ gidx = phy_tssi_get_ofdm_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+
+ return val;
+
+calc_6g:
+ gidx = phy_tssi_get_6g_ofdm_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_6g_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+
+ return val;
+}
+
+static s8 phy_tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ enum rtw89_band band = chan->band_type;
+ u8 ch = chan->channel;
+ u32 tgidx_1st;
+ u32 tgidx_2nd;
+ s8 tde_1st;
+ s8 tde_2nd;
+ u32 tgidx;
+ s8 val;
+
+ if (band == RTW89_BAND_6G)
+ goto calc_6g;
+
+ tgidx = phy_tssi_get_trim_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+
+ return val;
+
+calc_6g:
+ tgidx = phy_tssi_get_6g_trim_group(ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim_6g[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+
+ return val;
+}
+
+void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 ch = chan->channel;
+ s8 trim_de;
+ s8 ofdm_de;
+ s8 cck_de;
+ u8 gidx;
+ s8 val;
+ int i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
+ phy, ch);
+
+ for (i = RF_PATH_A; i <= RF_PATH_B; i++) {
+ trim_de = phy_tssi_get_ofdm_trim_de(rtwdev, phy, chan, i);
+ h2c->curr_tssi_trim_de[i] = trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d trim_de=0x%x\n", i, trim_de);
+
+ gidx = phy_tssi_get_cck_group(ch);
+ cck_de = tssi_info->tssi_cck[i][gidx];
+ val = u32_get_bits(cck_de + trim_de, 0xff);
+
+ h2c->curr_tssi_cck_de[i] = 0x0;
+ h2c->curr_tssi_cck_de_20m[i] = val;
+ h2c->curr_tssi_cck_de_40m[i] = val;
+ h2c->curr_tssi_efuse_cck_de[i] = cck_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d cck_de=0x%x\n", i, cck_de);
+
+ ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i);
+ val = u32_get_bits(ofdm_de + trim_de, 0xff);
+
+ h2c->curr_tssi_ofdm_de[i] = 0x0;
+ h2c->curr_tssi_ofdm_de_20m[i] = val;
+ h2c->curr_tssi_ofdm_de_40m[i] = val;
+ h2c->curr_tssi_ofdm_de_80m[i] = val;
+ h2c->curr_tssi_ofdm_de_160m[i] = val;
+ h2c->curr_tssi_ofdm_de_320m[i] = val;
+ h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d ofdm_de=0x%x\n", i, ofdm_de);
+ }
+}
+
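One subtlety above: cck_de/ofdm_de and trim_de are s8, and u32_get_bits(..., 0xff) keeps only the low byte of their sum, so the firmware field receives the two's-complement byte. A tiny sketch with illustrative values:

    /* s8 sum truncated to the byte-wide firmware field. */
    s8 cck_de = -3, trim_de = -2;
    u8 fw_field = u32_get_bits(cck_de + trim_de, 0xff);     /* -5 -> 0xfb */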
+void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c)
+{
+ struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const s8 *thm_up[RF_PATH_B + 1] = {};
+ const s8 *thm_down[RF_PATH_B + 1] = {};
+ u8 subband = chan->subband_type;
+ s8 thm_ofst[128] = {0};
+ u8 thermal;
+ u8 path;
+ u8 i, j;
+
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
+ break;
+ case RTW89_CH_5G_BAND_1:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
+ break;
+ case RTW89_CH_5G_BAND_3:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
+ break;
+ case RTW89_CH_5G_BAND_4:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
+ break;
+ case RTW89_CH_6G_BAND_IDX0:
+ case RTW89_CH_6G_BAND_IDX1:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0];
+ break;
+ case RTW89_CH_6G_BAND_IDX2:
+ case RTW89_CH_6G_BAND_IDX3:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1];
+ break;
+ case RTW89_CH_6G_BAND_IDX4:
+ case RTW89_CH_6G_BAND_IDX5:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2];
+ break;
+ case RTW89_CH_6G_BAND_IDX6:
+ case RTW89_CH_6G_BAND_IDX7:
+ thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3];
+ thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3];
+ thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3];
+ thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3];
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] tmeter tbl on subband: %u\n", subband);
+
+ for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
+ thermal = tssi_info->thermal[path];
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "path: %u, pg thermal: 0x%x\n", path, thermal);
+
+ if (thermal == 0xff) {
+ h2c->pg_thermal[path] = 0x38;
+ memset(h2c->ftable[path], 0, sizeof(h2c->ftable[path]));
+ continue;
+ }
+
+ h2c->pg_thermal[path] = thermal;
+
+ i = 0;
+ for (j = 0; j < 64; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up[path][i++] :
+ thm_up[path][DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 127; j >= 64; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down[path][i++] :
+ -thm_down[path][DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 128; i += 4) {
+ h2c->ftable[path][i + 0] = thm_ofst[i + 3];
+ h2c->ftable[path][i + 1] = thm_ofst[i + 2];
+ h2c->ftable[path][i + 2] = thm_ofst[i + 1];
+ h2c->ftable[path][i + 3] = thm_ofst[i + 0];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "thm ofst [%x]: %02x %02x %02x %02x\n",
+ i, thm_ofst[i], thm_ofst[i + 1],
+ thm_ofst[i + 2], thm_ofst[i + 3]);
+ }
+ }
+}
+
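The loops above fold the firmware thermal table as follows: the first 64 slots carry the up-direction deltas, the last 64 carry the negated down-direction deltas walked backwards, and each 4-byte group is stored byte-reversed. Schematically (a comment sketch, not literal data):

    /* Per-path ftable layout (schematic):
     *
     *   thm_ofst[0..63]   =  thm_up[0], thm_up[1], ... (last delta repeated
     *                        once DELTA_SWINGIDX_SIZE is exhausted)
     *   thm_ofst[127..64] = -thm_down[1], -thm_down[2], ... (same clamping)
     *
     *   ftable bytes per 32-bit word:
     *   { ofst[i+3], ofst[i+2], ofst[i+1], ofst[i] }
     */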
static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
{
const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
@@ -4551,6 +5522,9 @@ static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
const struct rtw89_agc_gaincode_set set)
{
+ if (!rtwdev->hal.support_igi)
+ return;
+
rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);
@@ -4606,7 +5580,8 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
s8 cck_cca_th;
u32 pd_val = 0;
- under_region += PD_TH_SB_FLTR_CMP_VAL;
+ if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
+ under_region += PD_TH_SB_FLTR_CMP_VAL;
switch (cbw) {
case RTW89_CHANNEL_WIDTH_40:
@@ -4953,12 +5928,15 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
rtw89_physts_parsing_init(rtwdev);
rtw89_phy_dig_init(rtwdev);
rtw89_phy_cfo_init(rtwdev);
+ rtw89_phy_bb_wrap_init(rtwdev);
rtw89_phy_edcca_init(rtwdev);
+ rtw89_phy_ch_info_init(rtwdev);
rtw89_phy_ul_tb_info_init(rtwdev);
rtw89_phy_antdiv_init(rtwdev);
rtw89_chip_rfe_gpio(rtwdev);
rtw89_phy_antdiv_set_ant(rtwdev);
+ rtw89_chip_rfk_hw_init(rtwdev);
rtw89_phy_init_rf_nctl(rtwdev);
rtw89_chip_rfk_init(rtwdev);
rtw89_chip_set_txpwr_ctrl(rtwdev);
@@ -5400,6 +6378,78 @@ void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
rtw89_phy_edcca_log(rtwdev);
}
+enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
+ rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_1_PLUS_1_1RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_A;
+ else
+ return RF_B;
+ case MLO_1_PLUS_1_2RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_A;
+ else
+ return RF_D;
+ case MLO_0_PLUS_2_1RF:
+ case MLO_2_PLUS_0_1RF:
+ return RF_AB;
+ case MLO_0_PLUS_2_2RF:
+ case MLO_2_PLUS_0_2RF:
+ case MLO_2_PLUS_2_2RF:
+ default:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_AB;
+ else
+ return RF_CD;
+ }
+}
+EXPORT_SYMBOL(rtw89_phy_get_kpath);
+
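For quick reference, the mapping encoded by this switch is (a summary mirroring the code above):

    /* mlo_dbcc_mode                     PHY0    PHY1
     * MLO_1_PLUS_1_1RF                  RF_A    RF_B
     * MLO_1_PLUS_1_2RF                  RF_A    RF_D
     * MLO_0_PLUS_2_1RF / MLO_2_PLUS_0_1RF  RF_AB   RF_AB
     * 2RF modes and default             RF_AB   RF_CD
     */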
+enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
+ rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_1_PLUS_1_1RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_B;
+ case MLO_1_PLUS_1_2RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_D;
+ case MLO_0_PLUS_2_1RF:
+ case MLO_2_PLUS_0_1RF:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_B;
+ case MLO_0_PLUS_2_2RF:
+ case MLO_2_PLUS_0_2RF:
+ case MLO_2_PLUS_2_2RF:
+ default:
+ if (phy_idx == RTW89_PHY_0)
+ return RF_PATH_A;
+ else
+ return RF_PATH_C;
+ }
+}
+EXPORT_SYMBOL(rtw89_phy_get_syn_sel);
+
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
.setting_addr = R_CCX,
.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
@@ -5476,6 +6526,11 @@ const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
.ccx = &rtw89_ccx_regs_ax,
.physts = &rtw89_physts_regs_ax,
.cfo = &rtw89_cfo_regs_ax,
+ .phy0_phy1_offset = rtw89_phy0_phy1_offset_ax,
+ .config_bb_gain = rtw89_phy_config_bb_gain_ax,
+ .preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax,
+ .bb_wrap_init = NULL,
+ .ch_info_init = NULL,
.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax,
.set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax,
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 3e379077c6ca..082231ebbee5 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -7,6 +7,7 @@
#include "core.h"
+#define RTW89_BBMCU_ADDR_OFFSET 0x30000
#define RTW89_RF_ADDR_ADSEL_MASK BIT(16)
#define get_phy_headline(addr) FIELD_GET(GENMASK(31, 28), addr)
@@ -509,6 +510,14 @@ struct rtw89_phy_gen_def {
const struct rtw89_ccx_regs *ccx;
const struct rtw89_physts_regs *physts;
const struct rtw89_cfo_regs *cfo;
+ u32 (*phy0_phy1_offset)(struct rtw89_dev *rtwdev, u32 addr);
+ void (*config_bb_gain)(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data);
+ void (*preinit_rf_nctl)(struct rtw89_dev *rtwdev);
+ void (*bb_wrap_init)(struct rtw89_dev *rtwdev);
+ void (*ch_info_init)(struct rtw89_dev *rtwdev);
void (*set_txpwr_byrate)(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
@@ -604,6 +613,15 @@ static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev,
return rtw89_read32_mask(rtwdev, addr + phy->cr_base, mask);
}
+static inline void rtw89_bbmcu_write32(struct rtw89_dev *rtwdev,
+ u32 addr, u32 data, enum rtw89_phy_idx phy_idx)
+{
+ if (phy_idx && addr < 0x10000)
+ addr += 0x20000;
+
+ rtw89_write32(rtwdev, addr + RTW89_BBMCU_ADDR_OFFSET, data);
+}
+
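Two address windows compose here: PHY1 copies of registers below 0x10000 sit 0x20000 higher, and the whole access is then moved into the BBMCU aperture at RTW89_BBMCU_ADDR_OFFSET. For example (illustrative CR address):

    /* 0x0c00 on PHY1 -> 0x0c00 + 0x20000 + 0x30000 = 0x50c00 on the bus. */
    rtw89_bbmcu_write32(rtwdev, 0x0c00, 0xdeadbeef, RTW89_PHY_1);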
static inline
enum rtw89_gain_offset rtw89_subband_to_gain_offset_band_of_ofdm(enum rtw89_subband subband)
{
@@ -664,6 +682,38 @@ enum rtw89_phy_bb_gain_band rtw89_subband_to_bb_gain_band(enum rtw89_subband sub
}
}
+static inline
+enum rtw89_phy_gain_band_be rtw89_subband_to_gain_band_be(enum rtw89_subband subband)
+{
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ return RTW89_BB_GAIN_BAND_2G_BE;
+ case RTW89_CH_5G_BAND_1:
+ return RTW89_BB_GAIN_BAND_5G_L_BE;
+ case RTW89_CH_5G_BAND_3:
+ return RTW89_BB_GAIN_BAND_5G_M_BE;
+ case RTW89_CH_5G_BAND_4:
+ return RTW89_BB_GAIN_BAND_5G_H_BE;
+ case RTW89_CH_6G_BAND_IDX0:
+ return RTW89_BB_GAIN_BAND_6G_L0_BE;
+ case RTW89_CH_6G_BAND_IDX1:
+ return RTW89_BB_GAIN_BAND_6G_L1_BE;
+ case RTW89_CH_6G_BAND_IDX2:
+ return RTW89_BB_GAIN_BAND_6G_M0_BE;
+ case RTW89_CH_6G_BAND_IDX3:
+ return RTW89_BB_GAIN_BAND_6G_M1_BE;
+ case RTW89_CH_6G_BAND_IDX4:
+ return RTW89_BB_GAIN_BAND_6G_H0_BE;
+ case RTW89_CH_6G_BAND_IDX5:
+ return RTW89_BB_GAIN_BAND_6G_H1_BE;
+ case RTW89_CH_6G_BAND_IDX6:
+ return RTW89_BB_GAIN_BAND_6G_UH0_BE;
+ case RTW89_CH_6G_BAND_IDX7:
+ return RTW89_BB_GAIN_BAND_6G_UH1_BE;
+ }
+}
+
enum rtw89_rfk_flag {
RTW89_RFK_F_WRF = 0,
RTW89_RFK_F_WM = 1,
@@ -728,14 +778,20 @@ void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_bandwidth dbw);
+u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_bandwidth dbw);
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask);
+u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask);
bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask, u32 data);
+bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio);
void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
@@ -759,6 +815,29 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 ru, u8 ntx, u8 ch);
+static inline void rtw89_phy_preinit_rf_nctl(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ phy->preinit_rf_nctl(rtwdev);
+}
+
+static inline void rtw89_phy_bb_wrap_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ if (phy->bb_wrap_init)
+ phy->bb_wrap_init(rtwdev);
+}
+
+static inline void rtw89_phy_ch_info_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
+
+ if (phy->ch_info_init)
+ phy->ch_info_init(rtwdev);
+}
+
static inline
void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
@@ -809,6 +888,36 @@ void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func);
void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u32 len, u8 class, u8 func);
+int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_tssi_mode tssi_mode,
+ unsigned int ms);
+int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ unsigned int ms);
+void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c);
+void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ const struct rtw89_chan *chan,
+ struct rtw89_h2c_rf_tssi *h2c);
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev);
void rtw89_phy_cfo_track_work(struct work_struct *work);
void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
@@ -836,5 +945,9 @@ void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan);
void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev);
void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev);
+enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
+enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c
index 63eeeea72b68..be0148f2b96f 100644
--- a/drivers/net/wireless/realtek/rtw89/phy_be.c
+++ b/drivers/net/wireless/realtek/rtw89/phy_be.c
@@ -78,6 +78,332 @@ static const struct rtw89_cfo_regs rtw89_cfo_regs_be = {
.valid_0_mask = B_DCFO_OPT_EN_V1,
};
+static u32 rtw89_phy0_phy1_offset_be(struct rtw89_dev *rtwdev, u32 addr)
+{
+ u32 phy_page = addr >> 8;
+
+ if ((phy_page >= 0x4 && phy_page <= 0xf) ||
+ (phy_page >= 0x20 && phy_page <= 0x2b) ||
+ (phy_page >= 0x40 && phy_page <= 0x4f) ||
+ (phy_page >= 0x60 && phy_page <= 0x6f) ||
+ (phy_page >= 0xe4 && phy_page <= 0xe5) ||
+ (phy_page >= 0xe8 && phy_page <= 0xed))
+ return 0x1000;
+
+ return 0x0;
+}
+
+union rtw89_phy_bb_gain_arg_be {
+ u32 addr;
+ struct {
+ u8 type;
+#define BB_GAIN_TYPE_SUB0_BE GENMASK(3, 0)
+#define BB_GAIN_TYPE_SUB1_BE GENMASK(7, 4)
+ u8 path_bw;
+#define BB_GAIN_PATH_BE GENMASK(3, 0)
+#define BB_GAIN_BW_BE GENMASK(7, 4)
+ u8 gain_band;
+ u8 cfg_type;
+ } __packed;
+} __packed;
+
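The packed union lets one u32 table address carry four selector bytes (type, path/bw, gain band, cfg type). Decoding on a little-endian host (the layout the __packed struct assumes) looks like this; the example address value is made up:

    union rtw89_phy_bb_gain_arg_be arg = { .addr = 0x03020101 };
    u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);    /* low nibble: 1 */
    u8 bw = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);        /* high nibble: 0 */
    /* arg.gain_band == 0x02, arg.cfg_type == 0x03 */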
+static void
+rtw89_phy_cfg_bb_gain_error_be(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg_be arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+ u8 gband = arg.gain_band;
+ u8 type = arg.type;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_gain[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_gain[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 2:
+ for (i = 0; i < 2; i++, data >>= 8)
+ gain->tia_gain[gband][bw_type][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain error {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+static void
+rtw89_phy_cfg_bb_rpl_ofst_be(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg_be arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 type_sub0 = u8_get_bits(arg.type, BB_GAIN_TYPE_SUB0_BE);
+ u8 type_sub1 = u8_get_bits(arg.type, BB_GAIN_TYPE_SUB1_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+ u8 gband = arg.gain_band;
+ u8 ofst = 0;
+ int i;
+
+ switch (type_sub1) {
+ case RTW89_CMAC_BW_20M:
+ gain->rpl_ofst_20[gband][path][0] = (s8)data;
+ break;
+ case RTW89_CMAC_BW_40M:
+ for (i = 0; i < RTW89_BW20_SC_40M; i++, data >>= 8)
+ gain->rpl_ofst_40[gband][path][i] = data & 0xff;
+ break;
+ case RTW89_CMAC_BW_80M:
+ for (i = 0; i < RTW89_BW20_SC_80M; i++, data >>= 8)
+ gain->rpl_ofst_80[gband][path][i] = data & 0xff;
+ break;
+ case RTW89_CMAC_BW_160M:
+ if (type_sub0 == 0)
+ ofst = 0;
+ else
+ ofst = RTW89_BW20_SC_80M;
+
+ for (i = 0; i < RTW89_BW20_SC_80M; i++, data >>= 8)
+ gain->rpl_ofst_160[gband][path][i + ofst] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb rpl ofst {0x%x:0x%x} with unknown type_sub1: %d\n",
+ arg.addr, data, type_sub1);
+ break;
+ }
+}
+
+static void
+rtw89_phy_cfg_bb_gain_op1db_be(struct rtw89_dev *rtwdev,
+ union rtw89_phy_bb_gain_arg_be arg, u32 data)
+{
+ struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+ u8 gband = arg.gain_band;
+ u8 type = arg.type;
+ int i;
+
+ switch (type) {
+ case 0:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 1:
+ for (i = 4; i < 7; i++, data >>= 8)
+ gain->lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 2:
+ for (i = 0; i < 4; i++, data >>= 8)
+ gain->tia_lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ case 3:
+ for (i = 4; i < 8; i++, data >>= 8)
+ gain->tia_lna_op1db[gband][bw_type][path][i] = data & 0xff;
+ break;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
+ arg.addr, data, type);
+ break;
+ }
+}
+
+static void rtw89_phy_config_bb_gain_be(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ union rtw89_phy_bb_gain_arg_be arg = { .addr = reg->addr };
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ u8 bw_type = u8_get_bits(arg.path_bw, BB_GAIN_BW_BE);
+ u8 path = u8_get_bits(arg.path_bw, BB_GAIN_PATH_BE);
+
+ if (bw_type >= RTW89_BB_BW_NR_BE)
+ return;
+
+ if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR_BE)
+ return;
+
+ if (path >= chip->rf_path_num)
+ return;
+
+ if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
+ rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
+ return;
+ }
+
+ switch (arg.cfg_type) {
+ case 0:
+ rtw89_phy_cfg_bb_gain_error_be(rtwdev, arg, reg->data);
+ break;
+ case 1:
+ rtw89_phy_cfg_bb_rpl_ofst_be(rtwdev, arg, reg->data);
+ break;
+ case 2:
+ /* ignore BB gain bypass */
+ break;
+ case 3:
+ rtw89_phy_cfg_bb_gain_op1db_be(rtwdev, arg, reg->data);
+ break;
+ case 4:
+ /* This cfg_type is only used by rfe_type >= 50 with eFEM */
+ if (efuse->rfe_type < 50)
+ break;
+ fallthrough;
+ default:
+ rtw89_warn(rtwdev,
+ "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
+ arg.addr, reg->data, arg.cfg_type);
+ break;
+ }
+}
+
+static void rtw89_phy_preinit_rf_nctl_be(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C0, B_GOTX_IQKDPK, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_GOTX_IQKDPK_C1, B_GOTX_IQKDPK, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IQKDPK_HC, B_IQKDPK_HC, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CLK_GCK, B_CLK_GCK, 0x00fffff);
+ rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST, B_IQK_DPK_PRST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_PRST_C1, B_IQK_DPK_PRST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TXRFC, B_TXRFC_RST, 0x1);
+
+ if (rtwdev->dbcc_en) {
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST_C1, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TXRFC_C1, B_TXRFC_RST, 0x1);
+ }
+}
+
+static
+void rtw89_phy_bb_wrap_pwr_by_macid_init(struct rtw89_dev *rtwdev)
+{
+ u32 macid_idx, cr, base_macid_lmt, max_macid = 32;
+
+ base_macid_lmt = R_BE_PWR_MACID_LMT_BASE;
+
+ for (macid_idx = 0; macid_idx < 4 * max_macid; macid_idx += 4) {
+ cr = base_macid_lmt + macid_idx;
+ rtw89_write32(rtwdev, cr, 0x03007F7F);
+ }
+}
+
+static
+void rtw89_phy_bb_wrap_tx_path_by_macid_init(struct rtw89_dev *rtwdev)
+{
+ int i, max_macid = 32;
+ u32 cr = R_BE_PWR_MACID_PATH_BASE;
+
+ for (i = 0; i < max_macid; i++, cr += 4)
+ rtw89_write32(rtwdev, cr, 0x03C86000);
+}
+
+static void rtw89_phy_bb_wrap_tpu_set_all(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ u32 addr;
+
+ for (addr = R_BE_PWR_BY_RATE; addr <= R_BE_PWR_BY_RATE_END; addr += 4)
+ rtw89_write32(rtwdev, addr, 0);
+ for (addr = R_BE_PWR_RULMT_START; addr <= R_BE_PWR_RULMT_END; addr += 4)
+ rtw89_write32(rtwdev, addr, 0);
+ for (addr = R_BE_PWR_RATE_OFST_CTRL; addr <= R_BE_PWR_RATE_OFST_END; addr += 4)
+ rtw89_write32(rtwdev, addr, 0);
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_REF_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMT_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_LMTBF, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_LMTBF_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_BYRATE_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_RULMT, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_RULMT_DB, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_SW, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_OFST_SW_DB, 0);
+}
+
+static
+void rtw89_phy_bb_wrap_listen_path_en_init(struct rtw89_dev *rtwdev)
+{
+ u32 addr;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
+ if (ret)
+ return;
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_LISTEN_PATH, RTW89_MAC_1);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_LISTEN_PATH_EN, 0x2);
+}
+
+static void rtw89_phy_bb_wrap_force_cr_init(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ u32 addr;
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_LMT, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_LMT_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_BOOST, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RATE_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_OFST_RULMT, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ENON, 0);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_RU_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FORCE_MACID, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_MACID_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_COEX_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_PWR_FORCE_COEX_ON, 0);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RATE_CTRL, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, B_BE_FORCE_PWR_BY_RATE_EN, 0);
+}
+
+static void rtw89_phy_bb_wrap_ftm_init(struct rtw89_dev *rtwdev,
+ enum rtw89_mac_idx mac_idx)
+{
+ u32 addr;
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM, mac_idx);
+ rtw89_write32(rtwdev, addr, 0xE4E431);
+
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_FTM_SS, mac_idx);
+ rtw89_write32_mask(rtwdev, addr, 0x7, 0);
+}
+
+static void rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_mac_idx mac_idx = RTW89_MAC_0;
+
+ rtw89_phy_bb_wrap_pwr_by_macid_init(rtwdev);
+ rtw89_phy_bb_wrap_tx_path_by_macid_init(rtwdev);
+ rtw89_phy_bb_wrap_listen_path_en_init(rtwdev);
+ rtw89_phy_bb_wrap_force_cr_init(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_ftm_init(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_tpu_set_all(rtwdev, mac_idx);
+}
+
+static void rtw89_phy_ch_info_init_be(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG, B_CHINFO_SEG_LEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_SEG, B_CHINFO_SEG, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_CHINFO_DATA, B_CHINFO_DATA_BITMAP, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ELM_SRC, B_CHINFO_ELM_BITMAP, 0x40303);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_ELM_SRC, B_CHINFO_SRC, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_TYPE, 0x3);
+ rtw89_phy_set_phy_regs(rtwdev, R_CHINFO_TYPE_SCAL, B_CHINFO_SCAL, 0x0);
+}
+
struct rtw89_byr_spec_ent_be {
struct rtw89_rate_desc init;
u8 num_of_idx;
@@ -644,6 +970,11 @@ const struct rtw89_phy_gen_def rtw89_phy_gen_be = {
.ccx = &rtw89_ccx_regs_be,
.physts = &rtw89_physts_regs_be,
.cfo = &rtw89_cfo_regs_be,
+ .phy0_phy1_offset = rtw89_phy0_phy1_offset_be,
+ .config_bb_gain = rtw89_phy_config_bb_gain_be,
+ .preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_be,
+ .bb_wrap_init = rtw89_phy_bb_wrap_init_be,
+ .ch_info_init = rtw89_phy_ch_info_init_be,
.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_be,
.set_txpwr_offset = rtw89_phy_set_txpwr_offset_be,
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 917c01e5e9ed..31290d8cb7f7 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -14,6 +14,7 @@
static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u32 pwr_en_bit = 0xE;
u32 chk_msk = pwr_en_bit << (4 * macid);
u32 polling;
@@ -21,7 +22,7 @@ static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid)
ret = read_poll_timeout_atomic(rtw89_read32_mask, polling, !polling,
1000, 50000, false, rtwdev,
- R_AX_PPWRBIT_SETTING, chk_msk);
+ mac->ps_status, chk_msk);
if (ret) {
rtw89_info(rtwdev, "rtw89: failed to leave lps state\n");
return -EBUSY;
@@ -83,16 +84,17 @@ void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
rtw89_ps_power_mode_change(rtwdev, false);
}
-static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
struct rtw89_lps_parm lps_param = {
- .macid = mac_id,
+ .macid = rtwvif->mac_id,
.psmode = RTW89_MAC_AX_PS_MODE_LEGACY,
.lastrpwm = RTW89_LAST_RPWM_PS,
};
rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
+ rtw89_fw_h2c_lps_ch_info(rtwdev, rtwvif);
}
static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
@@ -123,7 +125,7 @@ void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
return;
- __rtw89_enter_lps(rtwdev, rtwvif->mac_id);
+ __rtw89_enter_lps(rtwdev, rtwvif);
if (ps_mode)
__rtw89_enter_ps_mode(rtwdev, rtwvif);
}
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 8456e2b0c14f..72e448e91b6f 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -3246,6 +3246,13 @@
#define R_AX_RX_SR_CTRL_C1 0xEE4A
#define B_AX_SR_EN BIT(0)
+#define R_AX_BSSID_SRC_CTRL 0xCE4B
+#define R_AX_BSSID_SRC_CTRL_C1 0xEE4B
+#define B_AX_BSSID_MATCH BIT(3)
+#define B_AX_PARTIAL_AID_MATCH BIT(2)
+#define B_AX_BSSCOLOR_MATCH BIT(1)
+#define B_AX_PLCP_SRC_EN BIT(0)
+
#define R_AX_CSIRPT_OPTION 0xCE64
#define R_AX_CSIRPT_OPTION_C1 0xEE64
#define B_AX_CSIPRT_HESU_AID_EN BIT(25)
@@ -3503,8 +3510,13 @@
#define B_AX_PTA_EDCCA_EN BIT(0)
#define R_BTC_COEX_WL_REQ 0xDA24
+#define R_BTC_COEX_WL_REQ_BE 0xE324
+#define B_BTC_TX_NULL_HI BIT(23)
#define B_BTC_TX_BCN_HI BIT(22)
+#define B_BTC_TX_TRI_HI BIT(17)
#define B_BTC_RSP_ACK_HI BIT(10)
+#define B_BTC_PRI_MASK_TX_TIME GENMASK(4, 3)
+#define B_BTC_PRI_MASK_RX_TIME_V1 GENMASK(2, 1)
#define R_BTC_BREAK_TABLE 0xDA2C
#define BTC_BREAK_PARAM 0xf0ffffff
@@ -3752,6 +3764,19 @@
#define B_BE_SYM_PADPDN_WL_RFC1_1P3 BIT(6)
#define B_BE_SYM_PADPDN_WL_RFC0_1P3 BIT(5)
+#define R_BE_RSV_CTRL 0x001C
+#define B_BE_HR_BE_DBG GENMASK(23, 12)
+#define B_BE_R_SYM_DIS_PCIE_FLR BIT(9)
+#define B_BE_R_EN_HRST_PWRON BIT(8)
+#define B_BE_LOCK_ALL_EN BIT(7)
+#define B_BE_R_DIS_PRST BIT(6)
+#define B_BE_WLOCK_1C_BIT6 BIT(5)
+#define B_BE_WLOCK_40 BIT(4)
+#define B_BE_WLOCK_08 BIT(3)
+#define B_BE_WLOCK_04 BIT(2)
+#define B_BE_WLOCK_00 BIT(1)
+#define B_BE_WLOCK_ALL BIT(0)
+
#define R_BE_AFE_LDO_CTRL 0x0020
#define B_BE_FORCE_MACBBBT_PWR_ON BIT(31)
#define B_BE_R_SYM_WLPOFF_P4_PC_EN BIT(28)
@@ -4033,6 +4058,30 @@
#define B_BE_SYSON_DIS_PMCR_BE_WRMSK BIT(2)
#define B_BE_SYSON_R_BE_ARB_MASK GENMASK(1, 0)
+#define R_BE_MEM_PWR_CTRL 0x00D0
+#define B_BE_DMEM5_WLMCU_DS BIT(31)
+#define B_BE_DMEM4_WLMCU_DS BIT(30)
+#define B_BE_DMEM3_WLMCU_DS BIT(29)
+#define B_BE_DMEM2_WLMCU_DS BIT(28)
+#define B_BE_DMEM1_WLMCU_DS BIT(27)
+#define B_BE_DMEM0_WLMCU_DS BIT(26)
+#define B_BE_IMEM5_WLMCU_DS BIT(25)
+#define B_BE_IMEM4_WLMCU_DS BIT(24)
+#define B_BE_IMEM3_WLMCU_DS BIT(23)
+#define B_BE_IMEM2_WLMCU_DS BIT(22)
+#define B_BE_IMEM1_WLMCU_DS BIT(21)
+#define B_BE_IMEM0_WLMCU_DS BIT(20)
+#define B_BE_MEM_BBMCU1_DS BIT(19)
+#define B_BE_MEM_BBMCU0_DS_V1 BIT(17)
+#define B_BE_MEM_BT_DS BIT(10)
+#define B_BE_MEM_SDIO_LS BIT(9)
+#define B_BE_MEM_SDIO_DS BIT(8)
+#define B_BE_MEM_USB_LS BIT(7)
+#define B_BE_MEM_USB_DS BIT(6)
+#define B_BE_MEM_PCI_LS BIT(5)
+#define B_BE_MEM_PCI_DS BIT(4)
+#define B_BE_MEM_WLMAC_LS BIT(3)
+
#define R_BE_PCIE_MIO_INTF 0x00E4
#define B_BE_AON_MIO_EPHY_1K_SEL_MASK GENMASK(29, 24)
#define B_BE_PCIE_MIO_ADDR_PAGE_V1_MASK GENMASK(20, 16)
@@ -4401,12 +4450,28 @@
#define R_BE_LTR_LATENCY_IDX2_V1 0x361C
#define R_BE_LTR_LATENCY_IDX3_V1 0x3620
+#define R_BE_H2CREG_DATA0 0x7140
+#define R_BE_H2CREG_DATA1 0x7144
+#define R_BE_H2CREG_DATA2 0x7148
+#define R_BE_H2CREG_DATA3 0x714C
+#define R_BE_C2HREG_DATA0 0x7150
+#define R_BE_C2HREG_DATA1 0x7154
+#define R_BE_C2HREG_DATA2 0x7158
+#define R_BE_C2HREG_DATA3 0x715C
+#define R_BE_H2CREG_CTRL 0x7160
+#define B_BE_H2CREG_TRIGGER BIT(0)
+#define R_BE_C2HREG_CTRL 0x7164
+#define B_BE_C2HREG_TRIGGER BIT(0)
+
#define R_BE_HCI_FUNC_EN 0x7880
#define B_BE_HCI_CR_PROTECT BIT(31)
#define B_BE_HCI_TRXBUF_EN BIT(2)
#define B_BE_HCI_RXDMA_EN BIT(1)
#define B_BE_HCI_TXDMA_EN BIT(0)
+#define R_BE_DBG_WOW_READY 0x815E
+#define B_BE_DBG_WOW_READY GENMASK(7, 0)
+
#define R_BE_DMAC_FUNC_EN 0x8400
#define B_BE_DMAC_CRPRT BIT(31)
#define B_BE_MAC_FUNC_EN BIT(30)
@@ -4488,6 +4553,42 @@
#define B_BE_RMAC_PPDU_HANG_CNT_MASK GENMASK(23, 16)
#define B_BE_SER_L0_COUNTER_MASK GENMASK(8, 0)
+#define R_BE_DMAC_SYS_CR32B 0x842C
+#define B_BE_DMAC_BB_PHY1_MASK GENMASK(31, 16)
+#define B_BE_DMAC_BB_PHY0_MASK GENMASK(15, 0)
+#define B_BE_DMAC_BB_CTRL_39 BIT(31)
+#define B_BE_DMAC_BB_CTRL_38 BIT(30)
+#define B_BE_DMAC_BB_CTRL_37 BIT(29)
+#define B_BE_DMAC_BB_CTRL_36 BIT(28)
+#define B_BE_DMAC_BB_CTRL_35 BIT(27)
+#define B_BE_DMAC_BB_CTRL_34 BIT(26)
+#define B_BE_DMAC_BB_CTRL_33 BIT(25)
+#define B_BE_DMAC_BB_CTRL_32 BIT(24)
+#define B_BE_DMAC_BB_CTRL_31 BIT(23)
+#define B_BE_DMAC_BB_CTRL_30 BIT(22)
+#define B_BE_DMAC_BB_CTRL_29 BIT(21)
+#define B_BE_DMAC_BB_CTRL_28 BIT(20)
+#define B_BE_DMAC_BB_CTRL_27 BIT(19)
+#define B_BE_DMAC_BB_CTRL_26 BIT(18)
+#define B_BE_DMAC_BB_CTRL_25 BIT(17)
+#define B_BE_DMAC_BB_CTRL_24 BIT(16)
+#define B_BE_DMAC_BB_CTRL_23 BIT(15)
+#define B_BE_DMAC_BB_CTRL_22 BIT(14)
+#define B_BE_DMAC_BB_CTRL_21 BIT(13)
+#define B_BE_DMAC_BB_CTRL_20 BIT(12)
+#define B_BE_DMAC_BB_CTRL_19 BIT(11)
+#define B_BE_DMAC_BB_CTRL_18 BIT(10)
+#define B_BE_DMAC_BB_CTRL_17 BIT(9)
+#define B_BE_DMAC_BB_CTRL_16 BIT(8)
+#define B_BE_DMAC_BB_CTRL_15 BIT(7)
+#define B_BE_DMAC_BB_CTRL_14 BIT(6)
+#define B_BE_DMAC_BB_CTRL_13 BIT(5)
+#define B_BE_DMAC_BB_CTRL_12 BIT(4)
+#define B_BE_DMAC_BB_CTRL_11 BIT(3)
+#define B_BE_DMAC_BB_CTRL_10 BIT(2)
+#define B_BE_DMAC_BB_CTRL_9 BIT(1)
+#define B_BE_DMAC_BB_CTRL_8 BIT(0)
+
#define R_BE_DLE_EMPTY0 0x8430
#define B_BE_PLE_EMPTY_QTA_DMAC_H2D BIT(27)
#define B_BE_PLE_EMPTY_QTA_DMAC_CPUIO BIT(26)
@@ -4924,6 +5025,12 @@
B_BE_CR_WRFF_OVERFLOW_ERR_INT_EN | \
B_BE_CR_WRFF_UNDERFLOW_ERR_INT_EN)
+#define R_BE_RX_STOP 0x8914
+#define B_BE_CPU_RX_STOP BIT(17)
+#define B_BE_HOST_RX_STOP BIT(16)
+#define B_BE_CPU_RX_CH_STOP_MSK GENMASK(15, 8)
+#define B_BE_HOST_RX_CH_STOP_MSK GENMASK(5, 0)
+
#define R_BE_DISP_FWD_WLAN_0 0x8938
#define B_BE_FWD_WLAN_CPU_TYPE_13_MASK GENMASK(31, 30)
#define B_BE_FWD_WLAN_CPU_TYPE_12_MASK GENMASK(29, 28)
@@ -4947,6 +5054,11 @@
#define B_BE_WDE_START_BOUND_MASK GENMASK(14, 8)
#define B_BE_WDE_PAGE_SEL_MASK GENMASK(1, 0)
+#define R_BE_WDE_BUFMGN_CTL 0x8C10
+#define B_BE_WDE_AVAL_UPD_REQ BIT(29)
+#define B_BE_WDE_AVAL_UPD_QTAID_MASK GENMASK(27, 24)
+#define B_BE_WDE_BUFMGN_FRZTMR_MODE BIT(0)
+
#define R_BE_WDE_ERR_IMR 0x8C38
#define B_BE_WDE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
#define B_BE_WDE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
@@ -5063,6 +5175,11 @@
#define B_BE_PLE_START_BOUND_MASK GENMASK(14, 8)
#define B_BE_PLE_PAGE_SEL_MASK GENMASK(1, 0)
+#define R_BE_PLE_BUFMGN_CTL 0x9010
+#define B_BE_PLE_AVAL_UPD_REQ BIT(29)
+#define B_BE_PLE_AVAL_UPD_QTAID_MASK GENMASK(27, 24)
+#define B_BE_PLE_BUFMGN_FRZTMR_MODE BIT(0)
+
#define R_BE_PLE_ERR_IMR 0x9038
#define B_BE_PLE_DATCHN_CAMREQ_ERR_INT_EN BIT(29)
#define B_BE_PLE_DATCHN_ADRERR_ERR_INT_EN BIT(28)
@@ -5429,6 +5546,21 @@
#define B_BE_DROP_NONDMA_PPDU BIT(2)
#define B_BE_APPEND_FCS BIT(0)
+#define R_BE_FWD_ERR 0x9C10
+#define R_BE_FWD_ACTN0 0x9C14
+#define R_BE_FWD_ACTN1 0x9C18
+#define R_BE_FWD_ACTN2 0x9C1C
+#define R_BE_FWD_TF0 0x9C20
+#define R_BE_FWD_TF1 0x9C24
+
+#define R_BE_HW_PPDU_STATUS 0x9C30
+#define B_BE_FWD_RPKTTYPE_MASK GENMASK(31, 26)
+#define B_BE_FWD_PPDU_PRTID_MASK GENMASK(25, 23)
+#define B_BE_FWD_PPDU_FW_RLS BIT(22)
+#define B_BE_FWD_PPDU_QUEID_MASK GENMASK(21, 16)
+#define B_BE_FWD_OTHER_RPKT_MASK GENMASK(15, 8)
+#define B_BE_FWD_PPDU_STAT_MASK GENMASK(7, 0)
+
#define R_BE_CUT_AMSDU_CTRL 0x9C94
#define B_BE_EN_CUT_AMSDU BIT(31)
#define B_BE_CUT_AMSDU_CHKLEN_EN BIT(30)
@@ -5437,6 +5569,12 @@
#define B_BE_CUT_AMSDU_CHKLEN_L_TH_MASK GENMASK(23, 16)
#define B_BE_CUT_AMSDU_CHKLEN_H_TH_MASK GENMASK(15, 0)
+#define R_BE_WOW_CTRL 0x9CB8
+#define B_BE_WOW_HCI BIT(5)
+#define B_BE_WOW_DROP BIT(2)
+#define B_BE_WOW_WOWEN BIT(1)
+#define B_BE_WOW_FORCE_WAKEUP BIT(0)
+
#define R_BE_RX_HDRTRNS 0x9CC0
#define B_BE_RX_MGN_MLD_ADDR_EN BIT(6)
#define B_BE_HDR_INFO_MASK GENMASK(5, 4)
@@ -5727,6 +5865,9 @@
#define B_BE_STOP_CH1 BIT(1)
#define B_BE_STOP_CH0 BIT(0)
+#define R_BE_HAXI_MST_WDT_TIMEOUT_SEL_V1 0xB02C
+#define B_BE_HAXI_MST_WDT_TIMEOUT_SEL_MASK GENMASK(4, 0)
+
#define R_BE_HAXI_IDCT_MSK 0xB0B8
#define B_BE_HAXI_RRESP_ERR_IDCT_MSK BIT(7)
#define B_BE_HAXI_BRESP_ERR_IDCT_MSK BIT(6)
@@ -5777,6 +5918,15 @@
#define B_BE_PREC_PAGE_CH12_V1_MASK GENMASK(21, 16)
#define B_BE_PREC_PAGE_CH011_V1_MASK GENMASK(5, 0)
+#define R_BE_CH0_PAGE_CTRL 0xB718
+#define B_BE_CH0_GRP BIT(31)
+#define B_BE_CH0_MAX_PG_MASK GENMASK(28, 16)
+#define B_BE_CH0_MIN_PG_MASK GENMASK(12, 0)
+
+#define R_BE_CH0_PAGE_INFO 0xB750
+#define B_BE_CH0_AVAL_PG_MASK GENMASK(28, 16)
+#define B_BE_CH0_USE_PG_MASK GENMASK(12, 0)
+
#define R_BE_PUB_PAGE_INFO3 0xB78C
#define B_BE_G1_AVAL_PG_MASK GENMASK(28, 16)
#define B_BE_G0_AVAL_PG_MASK GENMASK(12, 0)
@@ -5822,6 +5972,39 @@
#define B_BE_MACID_ACQ_GRP0_CLR_P BIT(2)
#define B_BE_R_MACID_ACQ_CHK_EN BIT(0)
+#define R_BE_BT_BREAK_TABLE 0x0E344
+
+#define R_BE_GNT_SW_CTRL 0x0E348
+#define B_BE_WL_ACT2_VAL BIT(25)
+#define B_BE_WL_ACT2_SWCTRL BIT(24)
+#define B_BE_WL_ACT_VAL BIT(23)
+#define B_BE_WL_ACT_SWCTRL BIT(22)
+#define B_BE_GNT_BT_RX_BB1_VAL BIT(21)
+#define B_BE_GNT_BT_RX_BB1_SWCTRL BIT(20)
+#define B_BE_GNT_BT_TX_BB1_VAL BIT(19)
+#define B_BE_GNT_BT_TX_BB1_SWCTRL BIT(18)
+#define B_BE_GNT_BT_RX_BB0_VAL BIT(17)
+#define B_BE_GNT_BT_RX_BB0_SWCTRL BIT(16)
+#define B_BE_GNT_BT_TX_BB0_VAL BIT(15)
+#define B_BE_GNT_BT_TX_BB0_SWCTRL BIT(14)
+#define B_BE_GNT_WL_RX_VAL BIT(13)
+#define B_BE_GNT_WL_RX_SWCTRL BIT(12)
+#define B_BE_GNT_WL_TX_VAL BIT(11)
+#define B_BE_GNT_WL_TX_SWCTRL BIT(10)
+#define B_BE_GNT_BT_BB1_VAL BIT(9)
+#define B_BE_GNT_BT_BB1_SWCTRL BIT(8)
+#define B_BE_GNT_WL_BB1_VAL BIT(7)
+#define B_BE_GNT_WL_BB1_SWCTRL BIT(6)
+#define B_BE_GNT_BT_BB0_VAL BIT(5)
+#define B_BE_GNT_BT_BB0_SWCTRL BIT(4)
+#define B_BE_GNT_WL_BB0_VAL BIT(3)
+#define B_BE_GNT_WL_BB0_SWCTRL BIT(2)
+#define B_BE_GNT_WL_BB_PWR_VAL BIT(1)
+#define B_BE_GNT_WL_BB_PWR_SWCTRL BIT(0)
+
+#define R_BE_PWR_MACID_PATH_BASE 0x0E500
+#define R_BE_PWR_MACID_LMT_BASE 0x0ED00
+
#define R_BE_CMAC_FUNC_EN 0x10000
#define R_BE_CMAC_FUNC_EN_C1 0x14000
#define B_BE_CMAC_CRPRT BIT(31)
@@ -5873,6 +6056,16 @@
B_BE_RMAC_CKEN | B_BE_TXTIME_CKEN | B_BE_RESP_PKTCTL_CKEN | \
B_BE_SIGB_CKEN)
+#define R_BE_WMAC_RFMOD 0x10010
+#define R_BE_WMAC_RFMOD_C1 0x14010
+#define B_BE_CMAC_ASSERTION BIT(31)
+#define B_BE_WMAC_RFMOD_MASK GENMASK(2, 0)
+#define BE_WMAC_RFMOD_20M 0
+#define BE_WMAC_RFMOD_40M 1
+#define BE_WMAC_RFMOD_80M 2
+#define BE_WMAC_RFMOD_160M 3
+#define BE_WMAC_RFMOD_320M 4
+
#define R_BE_TX_SUB_BAND_VALUE 0x10088
#define R_BE_TX_SUB_BAND_VALUE_C1 0x14088
#define B_BE_PRI20_BITMAP_MASK GENMASK(31, 16)
@@ -6009,6 +6202,13 @@
#define B_BE_MACTX_LATENCY_MASK GENMASK(10, 8)
#define B_BE_PREBKF_TIME_MASK GENMASK(4, 0)
+#define R_BE_PREBKF_CFG_1 0x1033C
+#define R_BE_PREBKF_CFG_1_C1 0x1433C
+#define B_BE_SIFS_TIMEOUT_TB_AGGR_MASK GENMASK(31, 24)
+#define B_BE_SIFS_PREBKF_MASK GENMASK(23, 16)
+#define B_BE_SIFS_TIMEOUT_T2_MASK GENMASK(14, 8)
+#define B_BE_SIFS_MACTXEN_T1_MASK GENMASK(6, 0)
+
#define R_BE_CCA_CFG_0 0x10340
#define R_BE_CCA_CFG_0_C1 0x14340
#define B_BE_R_SIFS_AGGR_TIME_V1_MASK GENMASK(31, 24)
@@ -6050,11 +6250,36 @@
#define R_BE_MUEDCA_EN 0x10370
#define R_BE_MUEDCA_EN_C1 0x14370
+#define B_BE_SIFS_TIMEOUT_TB_T2_MASK GENMASK(30, 24)
+#define B_BE_SIFS_MACTXEN_TB_T1_MASK GENMASK(22, 16)
#define B_BE_MUEDCA_WMM_SEL BIT(8)
-#define B_BE_SET_MUEDCATIMER_TF_1 BIT(5)
+#define B_BE_SET_MUEDCATIMER_TF_MASK GENMASK(5, 4)
#define B_BE_SET_MUEDCATIMER_TF_0 BIT(4)
+#define B_BE_MUEDCA_EN_MASK GENMASK(1, 0)
#define B_BE_MUEDCA_EN_0 BIT(0)
+#define R_BE_CTN_DRV_TXEN 0x10398
+#define R_BE_CTN_DRV_TXEN_C1 0x14398
+#define B_BE_CTN_TXEN_TWT_3 BIT(17)
+#define B_BE_CTN_TXEN_TWT_2 BIT(16)
+#define B_BE_CTN_TXEN_TWT_1 BIT(15)
+#define B_BE_CTN_TXEN_TWT_0 BIT(14)
+#define B_BE_CTN_TXEN_ULQ BIT(13)
+#define B_BE_CTN_TXEN_BCNQ BIT(12)
+#define B_BE_CTN_TXEN_HGQ BIT(11)
+#define B_BE_CTN_TXEN_CPUMGQ BIT(10)
+#define B_BE_CTN_TXEN_MGQ1 BIT(9)
+#define B_BE_CTN_TXEN_MGQ BIT(8)
+#define B_BE_CTN_TXEN_VO_1 BIT(7)
+#define B_BE_CTN_TXEN_VI_1 BIT(6)
+#define B_BE_CTN_TXEN_BK_1 BIT(5)
+#define B_BE_CTN_TXEN_BE_1 BIT(4)
+#define B_BE_CTN_TXEN_VO_0 BIT(3)
+#define B_BE_CTN_TXEN_VI_0 BIT(2)
+#define B_BE_CTN_TXEN_BK_0 BIT(1)
+#define B_BE_CTN_TXEN_BE_0 BIT(0)
+#define B_BE_CTN_TXEN_ALL_MASK GENMASK(17, 0)
+
#define R_BE_TB_CHK_CCA_NAV 0x103AC
#define R_BE_TB_CHK_CCA_NAV_C1 0x143AC
#define B_BE_TB_CHK_TX_NAV BIT(15)
@@ -6212,6 +6437,8 @@
#define R_BE_TSFTR_HIGH_P0_C1 0x1443C
#define B_BE_TSFTR_HIGH_P0_MASK GENMASK(31, 0)
+#define R_BE_BCN_DROP_ALL0 0x10560
+
#define R_BE_MBSSID_CTRL 0x10568
#define R_BE_MBSSID_CTRL_C1 0x14568
#define B_BE_MBSSID_MODE_SEL BIT(20)
@@ -6282,6 +6509,17 @@
#define B_BE_SPEC_SIFS_OFDM_PTCL_MASK GENMASK(15, 8)
#define B_BE_SPEC_SIFS_CCK_PTCL_MASK GENMASK(7, 0)
+#define R_BE_TXRATE_CHK 0x10828
+#define R_BE_TXRATE_CHK_C1 0x14828
+#define B_BE_LATENCY_PADDING_PKT_TH_MASK GENMASK(31, 24)
+#define B_BE_PLCP_FETCH_BUFF_MASK GENMASK(23, 16)
+#define B_BE_OFDM_CCK_ERR_PROC BIT(6)
+#define B_BE_PKT_LAST_TX BIT(5)
+#define B_BE_BAND_MODE BIT(4)
+#define B_BE_MAX_TXNSS_MASK GENMASK(3, 2)
+#define B_BE_RTS_LIMIT_IN_OFDM6 BIT(1)
+#define B_BE_CHECK_CCK_EN BIT(0)
+
#define R_BE_MBSSID_DROP_0 0x1083C
#define R_BE_MBSSID_DROP_0_C1 0x1483C
#define B_BE_GI_LTF_FB_SEL BIT(30)
@@ -6289,6 +6527,20 @@
#define B_BE_PORT_DROP_4_0_MASK GENMASK(20, 16)
#define B_BE_MBSSID_DROP_15_0_MASK GENMASK(15, 0)
+#define R_BE_BT_PLT 0x1087C
+#define R_BE_BT_PLT_C1 0x1487C
+#define B_BE_BT_PLT_PKT_CNT_MASK GENMASK(31, 16)
+#define B_BE_BT_PLT_RST BIT(9)
+#define B_BE_PLT_EN BIT(8)
+#define B_BE_RX_PLT_GNT_LTE_RX BIT(7)
+#define B_BE_RX_PLT_GNT_BT_RX BIT(6)
+#define B_BE_RX_PLT_GNT_BT_TX BIT(5)
+#define B_BE_RX_PLT_GNT_WL BIT(4)
+#define B_BE_TX_PLT_GNT_LTE_RX BIT(3)
+#define B_BE_TX_PLT_GNT_BT_RX BIT(2)
+#define B_BE_TX_PLT_GNT_BT_TX BIT(1)
+#define B_BE_TX_PLT_GNT_WL BIT(0)
+
#define R_BE_PTCL_BSS_COLOR_0 0x108A0
#define R_BE_PTCL_BSS_COLOR_0_C1 0x148A0
#define B_BE_BSS_COLOB_BE_PORT_3_MASK GENMASK(29, 24)
@@ -6398,6 +6650,10 @@
#define B_BE_PTCL_DROP BIT(5)
#define B_BE_PTCL_TX_QUEUE_IDX_MASK GENMASK(4, 0)
+#define R_BE_PTCL_DBG_INFO 0x108F0
+
+#define R_BE_PTCL_DBG 0x108F4
+
#define R_BE_RX_ERROR_FLAG 0x10C00
#define R_BE_RX_ERROR_FLAG_C1 0x14C00
#define B_BE_RX_CSI_NOT_RELEASE_ERROR BIT(31)
@@ -6676,6 +6932,9 @@
#define B_BE_UPD_HGQMD BIT(1)
#define B_BE_UPD_TIMIE BIT(0)
+#define R_BE_WMTX_POWER_BE_BIT_CTL 0x10E0C
+#define R_BE_WMTX_POWER_BE_BIT_CTL_C1 0x14E0C
+
#define R_BE_WMTX_TCR_BE_4 0x10E2C
#define R_BE_WMTX_TCR_BE_4_C1 0x14E2C
#define B_BE_UL_EHT_MUMIMO_LTF_MODE BIT(30)
@@ -7056,6 +7315,20 @@
#define S_BE_BACAM_RST_ENT 1
#define S_BE_BACAM_RST_ALL 2
+#define R_BE_PPDU_STAT 0x11440
+#define R_BE_PPDU_STAT_C1 0x15440
+#define B_BE_STAT_IORST BIT(13)
+#define B_BE_STAT_GCKDIS BIT(12)
+#define B_BE_PPDU_STAT_WR_BW_MASK GENMASK(11, 10)
+#define B_BE_PPDU_STAT_RPT_TRIG BIT(8)
+#define B_BE_PPDU_STAT_RPT_DMA BIT(6)
+#define B_BE_PPDU_STAT_RPT_CRC32 BIT(5)
+#define B_BE_PPDU_STAT_RPT_ADDR BIT(4)
+#define B_BE_APP_PLCP_HDR_RPT BIT(3)
+#define B_BE_APP_RX_CNT_RPT BIT(2)
+#define B_BE_PPDU_MAC_INFO BIT(1)
+#define B_BE_PPDU_STAT_RPT_EN BIT(0)
+
#define R_BE_RX_SR_CTRL 0x1144A
#define R_BE_RX_SR_CTRL_C1 0x1544A
#define B_BE_SR_OP_MODE_MASK GENMASK(5, 4)
@@ -7063,6 +7336,13 @@
#define B_BE_SR_CTRL_PLCP_EN BIT(1)
#define B_BE_SR_EN BIT(0)
+#define R_BE_BSSID_SRC_CTRL 0x1144B
+#define R_BE_BSSID_SRC_CTRL_C1 0x1544B
+#define B_BE_BSSID_MATCH BIT(3)
+#define B_BE_PARTIAL_AID_MATCH BIT(2)
+#define B_BE_BSSCOLOR_MATCH BIT(1)
+#define B_BE_PLCP_SRC_EN BIT(0)
+
#define R_BE_CSIRPT_OPTION 0x11464
#define R_BE_CSIRPT_OPTION_C1 0x15464
#define B_BE_CSIPRT_EHTSU_AID_EN BIT(26)
@@ -7178,12 +7458,56 @@
#define R_BE_PWR_MODULE 0x11900
#define R_BE_PWR_MODULE_C1 0x15900
+#define R_BE_PWR_LISTEN_PATH 0x11988
+#define B_BE_PWR_LISTEN_PATH_EN GENMASK(31, 28)
+
+#define R_BE_PWR_REF_CTRL 0x11A20
+#define B_BE_PWR_REF_CTRL_OFDM GENMASK(9, 1)
+#define B_BE_PWR_REF_CTRL_CCK GENMASK(18, 10)
+#define B_BE_PWR_OFST_LMT_DB GENMASK(27, 19)
+#define R_BE_PWR_OFST_LMTBF 0x11A24
+#define B_BE_PWR_OFST_LMTBF_DB GENMASK(8, 0)
+#define R_BE_PWR_FORCE_LMT 0x11A28
+#define B_BE_PWR_FORCE_LMT_ON BIT(6)
+
+#define R_BE_PWR_RATE_CTRL 0x11A2C
+#define B_BE_PWR_OFST_BYRATE_DB GENMASK(8, 0)
+#define B_BE_FORCE_PWR_BY_RATE_EN BIT(19)
+#define B_BE_FORCE_PWR_BY_RATE_VAL GENMASK(28, 20)
#define R_BE_PWR_RATE_OFST_CTRL 0x11A30
+#define R_BE_PWR_RATE_OFST_END 0x11A38
+#define R_BE_PWR_RULMT_START 0x12048
+#define R_BE_PWR_RULMT_END 0x120e4
+
+#define R_BE_PWR_BOOST 0x11A40
+#define B_BE_PWR_CTRL_SEL BIT(16)
+#define B_BE_PWR_FORCE_RATE_ON BIT(29)
+#define R_BE_PWR_OFST_RULMT 0x11A44
+#define B_BE_PWR_OFST_RULMT_DB GENMASK(17, 9)
+#define B_BE_PWR_FORCE_RU_ON BIT(18)
+#define B_BE_PWR_FORCE_RU_ENON BIT(28)
+#define R_BE_PWR_FORCE_MACID 0x11A48
+#define B_BE_PWR_FORCE_MACID_ON BIT(9)
+
+#define R_BE_PWR_REG_CTRL 0x11A50
+#define B_BE_PWR_BT_EN BIT(23)
+
+#define R_BE_PWR_COEX_CTRL 0x11A54
+#define B_BE_PWR_BT_VAL GENMASK(8, 0)
+#define B_BE_PWR_FORCE_COEX_ON GENMASK(29, 27)
+
+#define R_BE_PWR_OFST_SW 0x11AE8
+#define B_BE_PWR_OFST_SW_DB GENMASK(27, 24)
+
+#define R_BE_PWR_FTM 0x11B00
+#define R_BE_PWR_FTM_SS 0x11B04
+
#define R_BE_PWR_BY_RATE 0x11E00
#define R_BE_PWR_BY_RATE_MAX 0x11FA8
#define R_BE_PWR_LMT 0x11FAC
#define R_BE_PWR_LMT_MAX 0x12040
+#define R_BE_PWR_BY_RATE_END 0x12044
#define R_BE_PWR_RU_LMT 0x12048
#define R_BE_PWR_RU_LMT_MAX 0x120E4
@@ -7223,6 +7547,7 @@
#define RR_MOD_M_RXBB GENMASK(9, 5)
#define RR_MOD_LO_SEL BIT(1)
#define RR_MODOPT 0x01
+#define RR_TXG_SEL GENMASK(19, 17)
#define RR_MODOPT_M_TXPWR GENMASK(5, 0)
#define RR_WLSEL 0x02
#define RR_WLSEL_AG GENMASK(18, 16)
@@ -7256,6 +7581,12 @@
#define CFGCH_BAND0_2G 0
#define CFGCH_BAND0_5G 1
#define CFGCH_BAND0_6G 0
+#define RR_CFGCH_BW_V2 GENMASK(12, 10)
+#define CFGCH_BW_V2_20M 0
+#define CFGCH_BW_V2_40M 1
+#define CFGCH_BW_V2_80M 2
+#define CFGCH_BW_V2_160M 3
+#define CFGCH_BW_V2_320M 4
#define RR_CFGCH_BW GENMASK(11, 10)
#define RR_CFGCH_CH GENMASK(7, 0)
#define CFGCH_BW_20M 3
@@ -7292,6 +7623,7 @@
#define RR_LUTWD0_LB GENMASK(5, 0)
#define RR_TM 0x42
#define RR_TM_TRI BIT(19)
+#define RR_TM_VAL_V1 GENMASK(7, 0)
#define RR_TM_VAL GENMASK(6, 1)
#define RR_TM2 0x43
#define RR_TM2_OFF GENMASK(19, 16)
@@ -7325,8 +7657,12 @@
#define RR_TXAC 0x5f
#define RR_TXAC_IQG GENMASK(3, 0)
#define RR_BIASA 0x60
-#define RR_BIASA_TXG GENMASK(15, 12)
#define RR_BIASA_TXA GENMASK(19, 16)
+#define RR_BIASA_TXG GENMASK(15, 12)
+#define RR_BIASD_TXA_V1 GENMASK(15, 12)
+#define RR_BIASA_TXA_V1 GENMASK(11, 8)
+#define RR_BIASD_TXG_V1 GENMASK(7, 4)
+#define RR_BIASA_TXG_V1 GENMASK(3, 0)
#define RR_BIASA_A GENMASK(2, 0)
#define RR_BIASA2 0x63
#define RR_BIASA2_LB GENMASK(4, 2)
@@ -7410,6 +7746,7 @@
#define RR_MIXER_GN GENMASK(4, 3)
#define RR_POW 0xa0
#define RR_POW_SYN GENMASK(3, 2)
+#define RR_POW_SYN_V1 GENMASK(3, 0)
#define RR_LOGEN 0xa3
#define RR_LOGEN_RPT GENMASK(19, 16)
#define RR_SX 0xaf
@@ -7436,6 +7773,8 @@
#define RR_MMD 0xd5
#define RR_MMD_RST_EN BIT(8)
#define RR_MMD_RST_SYN BIT(6)
+#define RR_SMD 0xd6
+#define RR_VCO2 BIT(19)
#define RR_IQKPLL 0xdc
#define RR_IQKPLL_MOD GENMASK(9, 8)
#define RR_SYNLUT 0xdd
@@ -7459,15 +7798,24 @@
#define RR_RFC_CKEN BIT(1)
#define R_UPD_P0 0x0000
+#define R_BBCLK 0x0000
+#define B_CLK_640M BIT(2)
#define R_RSTB_WATCH_DOG 0x000C
#define B_P0_RSTB_WATCH_DOG BIT(0)
#define B_P1_RSTB_WATCH_DOG BIT(1)
#define B_UPD_P0_EN BIT(31)
+#define R_EMLSR 0x0044
+#define B_EMLSR_PARM GENMASK(27, 12)
#define R_SPOOF_CG 0x00B4
#define B_SPOOF_CG_EN BIT(17)
+#define R_CHINFO_SEG 0x00B4
+#define B_CHINFO_SEG_LEN GENMASK(2, 0)
+#define B_CHINFO_SEG GENMASK(16, 7)
#define R_DFS_FFT_CG 0x00B8
#define B_DFS_CG_EN BIT(1)
#define B_DFS_FFT_EN BIT(0)
+#define R_CHINFO_DATA 0x00C0
+#define B_CHINFO_DATA_BITMAP GENMASK(22, 0)
#define R_ANAPAR_PW15 0x030C
#define B_ANAPAR_PW15 GENMASK(31, 24)
#define B_ANAPAR_PW15_H GENMASK(27, 24)
@@ -7497,6 +7845,23 @@
#define B_SWSI_READ_ADDR_ADDR_V1 GENMASK(7, 0)
#define B_SWSI_READ_ADDR_PATH_V1 GENMASK(10, 8)
#define B_SWSI_READ_ADDR_V1 GENMASK(10, 0)
+#define R_BRK_R 0x0418
+#define B_VHTMCS_LMT GENMASK(22, 21)
+#define B_HTMCS_LMT GENMASK(9, 8)
+#define R_BRK_EHT 0x0474
+#define B_RXEHT_NSS_MAX GENMASK(4, 2)
+#define R_BRK_RXEHT 0x0478
+#define B_RXEHT_N_USER_MAX GENMASK(31, 24)
+#define B_RXEHTTB_NSS_MAX GENMASK(16, 14)
+#define R_EN_SND_WO_NDP 0x047c
+#define R_EN_SND_WO_NDP_C1 0x147c
+#define B_EN_SND_WO_NDP BIT(1)
+#define R_BRK_HE 0x0480
+#define B_TB_NSS_MAX GENMASK(25, 23)
+#define B_NSS_MAX GENMASK(16, 14)
+#define B_N_USR_MAX GENMASK(13, 6)
+#define R_RXCCA_BE1 0x0520
+#define B_RXCCA_BE1_DIS BIT(0)
#define R_UPD_CLK_ADC 0x0700
#define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
#define B_UPD_CLK_ADC_ON BIT(24)
@@ -7543,6 +7908,7 @@
#define B_PMAC_RXMOD_MSK GENMASK(7, 4)
#define R_MAC_SEL 0x09A4
#define B_MAC_SEL_OFDM_TRI_FILTER BIT(31)
+#define B_MAC_SEL GENMASK(19, 17)
#define B_MAC_SEL_PWR_EN BIT(16)
#define B_MAC_SEL_DPD_EN BIT(10)
#define B_MAC_SEL_MOD GENMASK(4, 2)
@@ -7588,19 +7954,28 @@
#define R_PD_CTRL 0x0C3C
#define B_PD_HIT_DIS BIT(9)
#define R_IOQ_IQK_DPK 0x0C60
+#define B_IOQ_IQK_DPK_CLKEN GENMASK(1, 0)
#define B_IOQ_IQK_DPK_EN BIT(1)
#define R_GNT_BT_WGT_EN 0x0C6C
#define B_GNT_BT_WGT_EN BIT(21)
+#define R_IQK_DPK_RST 0x0C6C
+#define R_IQK_DPK_RST_C1 0x1C6C
+#define B_IQK_DPK_RST BIT(0)
#define R_TX_COLLISION_T2R_ST 0x0C70
#define B_TX_COLLISION_T2R_ST_M GENMASK(25, 20)
#define R_TXGATING 0x0C74
#define B_TXGATING_EN BIT(4)
+#define R_TXRFC 0x0C7C
+#define R_TXRFC_C1 0x1C7C
+#define B_TXRFC_RST GENMASK(23, 21)
#define R_PD_ARBITER_OFF 0x0C80
#define B_PD_ARBITER_OFF BIT(31)
#define R_SNDCCA_A1 0x0C9C
#define B_SNDCCA_A1_EN GENMASK(19, 12)
#define R_SNDCCA_A2 0x0CA0
#define B_SNDCCA_A2_VAL GENMASK(19, 12)
+#define R_UDP_COEEF 0x0CBC
+#define B_UDP_COEEF BIT(19)
#define R_TX_COLLISION_T2R_ST_BE 0x0CC8
#define B_TX_COLLISION_T2R_ST_BE_M GENMASK(13, 8)
#define R_RXHT_MCS_LIMIT 0x0D18
@@ -7624,7 +7999,11 @@
#define R_CTLTOP 0x1008
#define B_CTLTOP_ON BIT(23)
#define B_CTLTOP_VAL GENMASK(15, 12)
+#define R_CLK_GCK 0x1008
+#define B_CLK_GCK GENMASK(24, 0)
#define R_EDCCA_RPT_SEL_BE 0x10CC
+#define R_ADC_FIFO_V1 0x10FC
+#define B_ADC_FIFO_EN_V1 GENMASK(31, 24)
#define R_S0_HW_SI_DIS 0x1200
#define B_S0_HW_SI_DIS_W_R_TRIG GENMASK(30, 28)
#define R_P0_RXCK 0x12A0
@@ -7771,6 +8150,27 @@
#define B_P80_AT_HIGH_FREQ_RU_ALLOC_PHY0 BIT(13)
#define R_DBCC_80P80_SEL_EVM_RPT2 0x2A10
#define B_DBCC_80P80_SEL_EVM_RPT2_EN BIT(0)
+#define R_AFEDAC0 0x2A5C
+#define B_AFEDAC0 GENMASK(31, 27)
+#define R_AFEDAC1 0x2A60
+#define B_AFEDAC1 GENMASK(2, 0)
+#define R_IQKDPK_HC 0x2AB8
+#define B_IQKDPK_HC BIT(28)
+#define R_HWSI_ADD0 0x2ADC
+#define R_HWSI_ADD1 0x2BDC
+#define B_HWSI_ADD_MASK GENMASK(11, 4)
+#define B_HWSI_ADD_CTL_MASK GENMASK(2, 0)
+#define B_HWSI_ADD_RD BIT(2)
+#define B_HWSI_ADD_POLL_MASK GENMASK(1, 0)
+#define B_HWSI_ADD_RUN BIT(1)
+#define B_HWSI_ADD_BUSY BIT(0)
+#define R_HWSI_DATA 0x2AE0
+#define B_HWSI_DATA_VAL GENMASK(27, 8)
+#define B_HWSI_DATA_ADDR GENMASK(7, 0)
+#define R_HWSI_VAL0 0x2C24
+#define R_HWSI_VAL1 0x2D24
+#define B_HWSI_VAL_RDONE BIT(31)
+#define B_HWSI_VAL_BUSY BIT(29)
#define R_P1_EN_SOUND_WO_NDP 0x2D7C
#define B_P1_EN_SOUND_WO_NDP BIT(1)
#define R_EDCCA_RPT_A_BE 0x2E38
@@ -7806,8 +8206,30 @@
#define R_S1_ADDCK 0x3E00
#define B_S1_ADDCK_I GENMASK(9, 0)
#define B_S1_ADDCK_Q GENMASK(19, 10)
+#define R_OP1DB_A 0x40B0
+#define B_OP1DB_A GENMASK(31, 24)
+#define R_OP1DB1_A 0x40BC
+#define B_TIA10_A GENMASK(15, 0)
+#define B_TIA1_A GENMASK(15, 8)
+#define B_TIA0_A GENMASK(7, 0)
+#define R_BKOFF_A 0x40E0
+#define B_BKOFF_IBADC_A GENMASK(23, 18)
+#define R_BACKOFF_A 0x40E4
+#define B_LNA_IBADC_A GENMASK(29, 18)
+#define B_BACKOFF_LNA_A GENMASK(29, 24)
+#define B_BACKOFF_IBADC_A GENMASK(23, 18)
+#define R_RXBY_WBADC_A 0x40F4
+#define B_RXBY_WBADC_A GENMASK(14, 10)
#define R_MUIC 0x40F8
#define B_MUIC_EN BIT(0)
+#define R_BT_RXBY_WBADC_A 0x4160
+#define B_BT_RXBY_WBADC_A BIT(31)
+#define R_BT_SHARE_A 0x4164
+#define B_BT_SHARE_A BIT(0)
+#define B_BT_TRK_OFF_A BIT(1)
+#define B_BTG_PATH_A BIT(4)
+#define R_FORCE_FIR_A 0x418C
+#define B_FORCE_FIR_A GENMASK(1, 0)
#define R_DCFO 0x4264
#define B_DCFO GENMASK(7, 0)
#define R_SEG0CSI 0x42AC
@@ -7846,8 +8268,30 @@
#define R_DPD_BF 0x44a0
#define B_DPD_BF_OFDM GENMASK(16, 12)
#define B_DPD_BF_SCA GENMASK(6, 0)
+#define R_LNA_OP 0x44B0
+#define B_LNA6 GENMASK(31, 24)
+#define R_LNA_TIA 0x44BC
+#define B_TIA10_B GENMASK(15, 0)
+#define B_TIA1_B GENMASK(15, 8)
+#define B_TIA0_B GENMASK(7, 0)
+#define R_BKOFF_B 0x44E0
+#define B_BKOFF_IBADC_B GENMASK(23, 18)
+#define R_BACKOFF_B 0x44E4
+#define B_LNA_IBADC_B GENMASK(29, 18)
+#define B_BACKOFF_LNA_B GENMASK(29, 24)
+#define B_BACKOFF_IBADC_B GENMASK(23, 18)
+#define R_RXBY_WBADC_B 0x44F4
+#define B_RXBY_WBADC_B GENMASK(14, 10)
+#define R_BT_RXBY_WBADC_B 0x4560
+#define B_BT_RXBY_WBADC_B BIT(31)
+#define R_BT_SHARE_B 0x4564
+#define B_BT_SHARE_B BIT(0)
+#define B_BT_TRK_OFF_B BIT(1)
+#define B_BTG_PATH_B BIT(4)
#define R_TXPATH_SEL 0x458C
#define B_TXPATH_SEL_MSK GENMASK(31, 28)
+#define R_FORCE_FIR_B 0x458C
+#define B_FORCE_FIR_B GENMASK(1, 0)
#define R_TXPWR 0x4594
#define B_TXPWR_MSK GENMASK(30, 22)
#define R_TXNSS_MAP 0x45B4
@@ -7910,10 +8354,12 @@
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC 0x46A0
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V1 0x4C24
#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V2 0x46E8
+#define R_PATH0_P20_FOLLOW_BY_PAGCUGC_V3 0x41C8
#define B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC 0x46A4
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V1 0x4C28
#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V2 0x46EC
+#define R_PATH0_S20_FOLLOW_BY_PAGCUGC_V3 0x41CC
#define B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH0_RXB_INIT_V1 0x46A8
#define B_PATH0_RXB_INIT_IDX_MSK_V1 GENMASK(14, 10)
@@ -7958,10 +8404,12 @@
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC 0x4774
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V1 0x4CE8
#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V2 0x47A8
+#define R_PATH1_P20_FOLLOW_BY_PAGCUGC_V3 0x45C8
#define B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC 0x4778
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V1 0x4CEC
#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V2 0x47AC
+#define R_PATH1_S20_FOLLOW_BY_PAGCUGC_V3 0x45CC
#define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
#define R_PATH1_G_TIA0_LNA6_OP1DB_V1 0x4778
#define B_PATH1_G_TIA0_LNA6_OP1DB_V1 GENMASK(7, 0)
@@ -8092,6 +8540,15 @@
#define B_PATH1_5MDET_SB2 BIT(8)
#define B_PATH1_5MDET_SB0 BIT(6)
#define B_PATH1_5MDET_TH GENMASK(5, 0)
+#define R_S0S1_CSI_WGT 0x4D34
+#define B_S0S1_CSI_WGT_EN BIT(0)
+#define B_S0S1_CSI_WGT_TONE_IDX GENMASK(31, 20)
+#define R_CHINFO_ELM_SRC 0x4D84
+#define B_CHINFO_ELM_BITMAP GENMASK(22, 0)
+#define B_CHINFO_SRC GENMASK(31, 30)
+#define R_CHINFO_TYPE_SCAL 0x4D88
+#define B_CHINFO_TYPE GENMASK(2, 1)
+#define B_CHINFO_SCAL BIT(8)
#define R_RPL_BIAS_COMP 0x4DF0
#define B_RPL_BIAS_COMP_MASK GENMASK(7, 0)
#define R_RPL_PATHAB 0x4E0C
@@ -8239,14 +8696,90 @@
#define B_S0_DACKQ8_K GENMASK(15, 8)
#define R_DCFO_WEIGHT_V1 0x6244
#define B_DCFO_WEIGHT_MSK_V1 GENMASK(31, 28)
+#define R_DAC_CLK 0x625C
+#define B_DAC_CLK GENMASK(31, 30)
#define R_DCFO_OPT_V1 0x6260
#define B_DCFO_OPT_EN_V1 BIT(17)
+#define R_TXFCTR 0x627C
+#define B_TXFCTR_THD GENMASK(19, 10)
+#define R_TXSCALE 0x6284
+#define B_TXFCTR_EN BIT(19)
+#define R_PCOEFF01 0x6684
+#define B_PCOEFF01 GENMASK(23, 0)
+#define R_PCOEFF23 0x6688
+#define B_PCOEFF23 GENMASK(23, 0)
+#define R_PCOEFF45 0x668c
+#define B_PCOEFF45 GENMASK(23, 0)
+#define R_PCOEFF67 0x6690
+#define B_PCOEFF67 GENMASK(23, 0)
+#define R_PCOEFF89 0x6694
+#define B_PCOEFF89 GENMASK(23, 0)
+#define R_PCOEFFAB 0x6698
+#define B_PCOEFFAB GENMASK(23, 0)
+#define R_PCOEFFCD 0x669c
+#define B_PCOEFFCD GENMASK(23, 0)
+#define R_PCOEFFEF 0x66a0
+#define B_PCOEFFEF GENMASK(23, 0)
+#define R_MGAIN_BIAS 0x672c
+#define B_MGAIN_BIAS_BW20 GENMASK(3, 0)
+#define B_MGAIN_BIAS_BW40 GENMASK(7, 4)
+#define R_CCK_RPL_OFST 0x6750
+#define B_CCK_RPL_OFST GENMASK(7, 0)
+#define R_BK_FC0INV 0x6758
+#define B_BK_FC0INV GENMASK(18, 0)
+#define R_CCK_FC0INV 0x675c
+#define B_CCK_FC0INV GENMASK(18, 0)
#define R_SEG0R_EDCCA_LVL_BE 0x69EC
#define R_SEG0R_PPDU_LVL_BE 0x69F0
#define R_SEGSND 0x6A14
#define B_SEGSND_EN BIT(31)
+#define R_DBCC 0x6B48
+#define B_DBCC_EN BIT(0)
+#define R_FC0 0x6B4C
+#define B_BW40_2XFFT BIT(31)
+#define B_FC0 GENMASK(12, 0)
+#define R_FC0INV_SBW 0x6B50
+#define B_SMALLBW GENMASK(31, 30)
+#define B_RX_BT_SG0 GENMASK(25, 22)
+#define B_RX_1RCCA GENMASK(17, 14)
+#define B_FC0_INV GENMASK(6, 0)
+#define R_ANT_CHBW 0x6B54
+#define B_ANT_BT_SHARE BIT(16)
+#define B_CHBW_BW GENMASK(14, 12)
+#define B_CHBW_PRICH GENMASK(11, 8)
+#define B_ANT_RX_SG0 GENMASK(3, 0)
+#define R_SLOPE 0x6B6C
+#define B_EHT_RATE_TH GENMASK(31, 28)
+#define B_SLOPE_B GENMASK(27, 14)
+#define B_SLOPE_A GENMASK(13, 0)
+#define R_SC_CORNER 0x6B70
+#define B_SC_CORNER GENMASK(10, 0)
+#define R_MAG_A 0x6BF4
+#define B_MGA_AEND GENMASK(31, 24)
+#define R_MAG_AB 0x6BF8
+#define B_BY_SLOPE GENMASK(31, 24)
+#define B_MAG_AB GENMASK(23, 0)
+#define R_BEDGE 0x6BFC
+#define B_EHT_MCS14 BIT(31)
+#define B_HE_RATE_TH GENMASK(30, 27)
+#define R_BEDGE2 0x6C00
+#define B_EHT_MCS15 BIT(31)
+#define B_HT_VHT_TH GENMASK(11, 0)
+#define R_BEDGE3 0x6C04
+#define B_TB_EN BIT(23)
+#define B_HEMU_EN BIT(21)
+#define B_HEERSU_EN BIT(19)
+#define B_EHTTB_EN BIT(15)
+#define B_BEDGE_CFG GENMASK(1, 0)
+#define R_SU_PUNC 0x6C08
+#define B_SU_PUNC_EN BIT(1)
+#define R_BEDGE5 0x6C10
+#define B_HWGEN_EN BIT(25)
+#define B_PWROFST_COMP BIT(20)
#define R_RPL_BIAS_COMP1 0x6DF0
#define B_RPL_BIAS_COMP1_MASK GENMASK(7, 0)
+#define R_DBCC_FA 0x703C
+#define B_DBCC_FA BIT(12)
#define R_P1_TSSI_ALIM1 0x7630
#define B_P1_TSSI_ALIM1 GENMASK(29, 0)
#define B_P1_TSSI_ALIM11 GENMASK(29, 20)
@@ -8389,8 +8922,12 @@
#define B_PRT_COM_RXBB_V1 GENMASK(4, 0)
#define B_PRT_COM_DONE BIT(0)
#define R_COEF_SEL 0x8104
+#define R_COEF_SEL_C1 0x8204
#define B_COEF_SEL_IQC BIT(0)
+#define B_COEF_SEL_IQC_V1 GENMASK(1, 0)
#define B_COEF_SEL_MDPD BIT(8)
+#define B_COEF_SEL_MDPD_V1 GENMASK(9, 8)
+#define B_COEF_SEL_EN BIT(31)
#define R_CFIR_SYS 0x8120
#define R_IQK_RES 0x8124
#define B_IQK_RES_K BIT(28)
@@ -8412,8 +8949,10 @@
#define B_RFGAIN_BND GENMASK(4, 0)
#define R_CFIR_MAP 0x8150
#define R_CFIR_LUT 0x8154
+#define R_CFIR_LUT_C1 0x8254
#define B_CFIR_LUT_SEL BIT(8)
#define B_CFIR_LUT_SET BIT(4)
+#define B_CFIR_LUT_G5 BIT(5)
#define B_CFIR_LUT_G3 BIT(3)
#define B_CFIR_LUT_G2 BIT(2)
#define B_CFIR_LUT_GP_V1 GENMASK(2, 0)
@@ -8626,6 +9165,35 @@
#define B_DACKN0_V GENMASK(21, 14)
#define R_DACKN1_CTL 0xC224
#define B_DACKN1_V GENMASK(21, 14)
+#define R_GAIN_MAP0 0xE44C
+#define B_GAIN_MAP0_EN BIT(0)
+#define R_GAIN_MAP1 0xE54C
+#define B_GAIN_MAP1_EN BIT(0)
+#define R_GOTX_IQKDPK_C0 0xE464
+#define R_GOTX_IQKDPK_C1 0xE564
+#define B_GOTX_IQKDPK GENMASK(28, 27)
+#define R_IQK_DPK_PRST 0xE4AC
+#define R_IQK_DPK_PRST_C1 0xE5AC
+#define B_IQK_DPK_PRST BIT(27)
+#define R_TXPWR_RSTA 0xE60C
+#define B_TXPWR_RSTA BIT(16)
+#define R_TSSI_PWR_P0 0xE610
+#define R_TSSI_PWR_P1 0xE710
+#define B_TSSI_CONT_EN BIT(3)
+#define R_TSSI_MAP_OFST_P0 0xE620
+#define R_TSSI_MAP_OFST_P1 0xE720
+#define B_TSSI_MAP_OFST_OFDM GENMASK(17, 9)
+#define B_TSSI_MAP_OFST_CCK GENMASK(26, 18)
+#define R_TXAGC_REF0_P0 0xE628
+#define R_TXAGC_REF0_P1 0xE728
+#define B_TXAGC_REF0_OFDM_DBM GENMASK(8, 0)
+#define B_TXAGC_REF0_CCK_DBM GENMASK(17, 9)
+#define B_TXAGC_REF0_OFDM_CW GENMASK(26, 18)
+#define R_TXAGC_REF1_P0 0xE62C
+#define R_TXAGC_REF1_P1 0xE72C
+#define B_TXAGC_REF1_CCK_CW GENMASK(8, 0)
+#define R_TXPWR_RSTB 0xE70C
+#define B_TXPWR_RSTB BIT(16)
/* WiFi CPU local domain */
#define R_AX_WDT_CTRL 0x0040
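The new R_BE_*/B_BE_* pairs above follow the header's existing convention: R_* macros are register offsets and B_* macros are BIT()/GENMASK() field masks within them. A minimal sketch of how such a pair is typically consumed with the generic <linux/bitfield.h> helpers (not part of the patch; the function name and the raw readl() access are illustrative stand-ins for the driver's own MMIO helpers):

#include <linux/bitfield.h>
#include <linux/io.h>

/* Editorial sketch: extract the "available pages" field from a raw
 * read of R_BE_CH0_PAGE_INFO (0xB750).
 */
static u16 example_ch0_avail_pages(void __iomem *base)
{
	u32 val = readl(base + R_BE_CH0_PAGE_INFO);

	return FIELD_GET(B_BE_CH0_AVAL_PG_MASK, val);	/* bits 28:16 */
}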
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 5c167a9278ce..51d3e61eaa1d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -901,7 +901,7 @@ static void rtw8851b_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
@@ -987,7 +987,7 @@ next:
static
void rtw8851b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 band = rtw89_subband_to_bb_gain_band(subband);
u32 val;
@@ -1921,41 +1921,81 @@ static u8 rtw8851b_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
static void rtw8851b_btc_set_rfe(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
- module->rfe_type = rtwdev->efuse.rfe_type;
- module->cv = rtwdev->hal.cv;
- module->bt_solo = 0;
- module->switch_type = BTC_SWITCH_INTERNAL;
- module->ant.isolation = 10;
- module->kt_ver_adie = rtwdev->hal.acv;
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
+ md->md_v7.ant.isolation = 10;
+ md->md_v7.kt_ver_adie = rtwdev->hal.acv;
- if (module->rfe_type == 0)
- return;
+ if (md->md_v7.rfe_type == 0)
+ return;
- /* rfe_type 3*n+1: 1-Ant(shared),
- * 3*n+2: 2-Ant+Div(non-shared),
- * 3*n+3: 2-Ant+no-Div(non-shared)
- */
- module->ant.num = (module->rfe_type % 3 == 1) ? 1 : 2;
- /* WL-1ss at S0, btg at s0 (On 1 WL RF) */
- module->ant.single_pos = RF_PATH_A;
- module->ant.btg_pos = RF_PATH_A;
- module->ant.stream_cnt = 1;
-
- if (module->ant.num == 1) {
- module->ant.type = BTC_ANT_SHARED;
- module->bt_pos = BTC_BT_BTG;
- module->wa_type = 1;
- module->ant.diversity = 0;
- } else { /* ant.num == 2 */
- module->ant.type = BTC_ANT_DEDICATED;
- module->bt_pos = BTC_BT_ALONE;
- module->switch_type = BTC_SWITCH_EXTERNAL;
- module->wa_type = 0;
- if (module->rfe_type % 3 == 2)
- module->ant.diversity = 1;
+ /* rfe_type 3*n+1: 1-Ant(shared),
+ * 3*n+2: 2-Ant+Div(non-shared),
+ * 3*n+3: 2-Ant+no-Div(non-shared)
+ */
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 3 == 1) ? 1 : 2;
+ /* WL-1ss at S0, btg at s0 (On 1 WL RF) */
+ md->md_v7.ant.single_pos = RF_PATH_A;
+ md->md_v7.ant.btg_pos = RF_PATH_A;
+ md->md_v7.ant.stream_cnt = 1;
+
+ if (md->md_v7.ant.num == 1) {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ md->md_v7.wa_type = 1;
+ md->md_v7.ant.diversity = 0;
+ } else { /* ant.num == 2 */
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ md->md_v7.switch_type = BTC_SWITCH_EXTERNAL;
+ md->md_v7.wa_type = 0;
+ if (md->md_v7.rfe_type % 3 == 2)
+ md->md_v7.ant.diversity = 1;
+ }
+ rtwdev->btc.btg_pos = md->md_v7.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md_v7.ant.type;
+ } else {
+ md->md.rfe_type = rtwdev->efuse.rfe_type;
+ md->md.cv = rtwdev->hal.cv;
+ md->md.bt_solo = 0;
+ md->md.switch_type = BTC_SWITCH_INTERNAL;
+ md->md.ant.isolation = 10;
+ md->md.kt_ver_adie = rtwdev->hal.acv;
+
+ if (md->md.rfe_type == 0)
+ return;
+
+ /* rfe_type 3*n+1: 1-Ant(shared),
+ * 3*n+2: 2-Ant+Div(non-shared),
+ * 3*n+3: 2-Ant+no-Div(non-shared)
+ */
+ md->md.ant.num = (md->md.rfe_type % 3 == 1) ? 1 : 2;
+ /* WL-1ss at S0, btg at s0 (On 1 WL RF) */
+ md->md.ant.single_pos = RF_PATH_A;
+ md->md.ant.btg_pos = RF_PATH_A;
+ md->md.ant.stream_cnt = 1;
+
+ if (md->md.ant.num == 1) {
+ md->md.ant.type = BTC_ANT_SHARED;
+ md->md.bt_pos = BTC_BT_BTG;
+ md->md.wa_type = 1;
+ md->md.ant.diversity = 0;
+ } else { /* ant.num == 2 */
+ md->md.ant.type = BTC_ANT_DEDICATED;
+ md->md.bt_pos = BTC_BT_ALONE;
+ md->md.switch_type = BTC_SWITCH_EXTERNAL;
+ md->md.wa_type = 0;
+ if (md->md.rfe_type % 3 == 2)
+ md->md.ant.diversity = 1;
+ }
+ rtwdev->btc.btg_pos = md->md.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md.ant.type;
}
}
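Both branches of the rewritten rtw8851b_btc_set_rfe() above encode the same antenna-layout rule spelled out in the in-code comment (rfe_type modulo 3). A condensed sketch of that decoding, with a hypothetical helper name:

/* Editorial sketch of the 8851B rfe_type decoding used in both the
 * md_v7 and legacy branches:
 *   rfe_type % 3 == 1 -> one shared antenna
 *   rfe_type % 3 == 2 -> two antennas with diversity
 *   otherwise         -> two antennas without diversity
 */
static void example_decode_8851b_rfe(u8 rfe_type, u8 *ant_num, u8 *diversity)
{
	*ant_num = (rfe_type % 3 == 1) ? 1 : 2;
	*diversity = (rfe_type % 3 == 2) ? 1 : 0;
}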
@@ -1965,7 +2005,7 @@ void rtw8851b_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
if (group > BTC_BT_SS_GROUP)
group--; /* Tx-group=1, Rx-group=2 */
- if (rtwdev->btc.mdinfo.ant.type == BTC_ANT_SHARED) /* 1-Ant */
+ if (rtwdev->btc.ant_type == BTC_ANT_SHARED) /* 1-Ant */
group += 3;
rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
@@ -1980,9 +2020,9 @@ static void rtw8851b_btc_init_cfg(struct rtw89_dev *rtwdev)
};
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
- struct rtw89_btc_ant_info *ant = &module->ant;
- u8 path, path_min, path_max;
+ union rtw89_btc_module_info *md = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ u8 path, path_min, path_max, str_cnt, ant_sing_pos;
/* PTA init */
rtw89_mac_coex_init(rtwdev, &coex_params);
@@ -1991,9 +2031,17 @@ static void rtw8851b_btc_init_cfg(struct rtw89_dev *rtwdev)
chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true);
chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true);
+ if (ver->fcxinit == 7) {
+ str_cnt = md->md_v7.ant.stream_cnt;
+ ant_sing_pos = md->md_v7.ant.single_pos;
+ } else {
+ str_cnt = md->md.ant.stream_cnt;
+ ant_sing_pos = md->md.ant.single_pos;
+ }
+
/* for 1-Ant && 1-ss case: only 1-path */
- if (ant->stream_cnt == 1) {
- path_min = ant->single_pos;
+ if (str_cnt == 1) {
+ path_min = ant_sing_pos;
path_max = path_min;
} else {
path_min = RF_PATH_A;
@@ -2016,7 +2064,7 @@ static void rtw8851b_btc_init_cfg(struct rtw89_dev *rtwdev)
/* if GNT_WL = 0 && BT = Tx_group -->
* Shared-Ant && BTG-path:WL mask(0x55f), others:WL THRU(0x5ff)
*/
- if (ant->type == BTC_ANT_SHARED && ant->btg_pos == path)
+ if (btc->ant_type == BTC_ANT_SHARED && btc->btg_pos == path)
rtw8851b_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x55f);
else
rtw8851b_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
@@ -2148,19 +2196,18 @@ void rtw8851b_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
static void rtw8851b_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_ant_info *ant = &btc->mdinfo.ant;
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWE, RFREG_MASK, 0x80000);
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWA, RFREG_MASK, 0x1);
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWD1, RFREG_MASK, 0x110);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWD1, RFREG_MASK, 0x110);
/* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */
if (state)
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWD0, RFREG_MASK, 0x179c);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWD0, RFREG_MASK, 0x179c);
else
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWD0, RFREG_MASK, 0x208);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWD0, RFREG_MASK, 0x208);
- rtw89_write_rf(rtwdev, ant->btg_pos, RR_LUTWE, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, btc->btg_pos, RR_LUTWE, RFREG_MASK, 0x0);
}
#define LNA2_51B_MA 0x700
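The standby toggle above uses the driver's indirect RF LUT write sequence; a minimal sketch of that pattern, mirroring the rtw89_write_rf() calls in the hunk (the wrapper name is illustrative):

/* Editorial sketch of the RF LUT access pattern: unlock the table
 * (RR_LUTWE), select an entry (RR_LUTWA), write the data word
 * (RR_LUTWD0), then lock the table again.
 */
static void example_rf_lut_write(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path path, u32 ent, u32 data)
{
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x80000);
	rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, ent);
	rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, data);
	rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
}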
@@ -2175,7 +2222,6 @@ static void rtw8851b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
* level=1 Fix LNA2=5: TIA 1/0= (LNA2,TIAN6) = (5,0)/(5,1) = 18dB/12dB
*/
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_ant_info *ant = &btc->mdinfo.ant;
const struct rtw89_reg2_def *rf;
u32 n, i, val;
@@ -2203,10 +2249,10 @@ static void rtw8851b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
for (i = 0; i < n; i++, rf++) {
val = rf->data;
/* bit[10] = 1 if non-shared-ant for 8851b */
- if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED)
+ if (btc->ant_type == BTC_ANT_DEDICATED)
val |= 0x4;
- rtw89_write_rf(rtwdev, ant->btg_pos, rf->addr, LNA2_51B_MA, val);
+ rtw89_write_rf(rtwdev, btc->btg_pos, rf->addr, LNA2_51B_MA, val);
}
}
@@ -2299,6 +2345,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.enable_bb_rf = rtw8851b_mac_enable_bb_rf,
.disable_bb_rf = rtw8851b_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8851b_bb_reset,
.bb_sethw = rtw8851b_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
@@ -2309,7 +2356,9 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.read_phycap = rtw8851b_read_phycap,
.fem_setup = NULL,
.rfe_gpio = rtw8851b_rfe_gpio,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8851b_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8851b_rfk_channel,
.rfk_band_changed = rtw8851b_rfk_band_changed,
.rfk_scan = rtw8851b_rfk_scan,
@@ -2334,6 +2383,12 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
.h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8851b_btc_set_rfe,
.btc_init_cfg = rtw8851b_btc_init_cfg,
@@ -2394,7 +2449,9 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.support_chanctx_num = 0,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
- .support_bw160 = false,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
@@ -2449,6 +2506,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8851b_c2h_regs,
.page_regs = &rtw8851b_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = &rtw8851b_dcfo_comp,
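The chip_info change above replaces the single support_bw160 flag with a support_bandwidths bitmap keyed by enum nl80211_chan_width, so each supported width becomes one bit. A sketch of the resulting capability test (the function name is illustrative, not a driver API):

/* Editorial sketch: with the bitmap, a width-capability check is a
 * single bit test instead of per-width booleans.
 */
static bool example_chip_supports_width(const struct rtw89_chip_info *chip,
					enum nl80211_chan_width width)
{
	return chip->support_bandwidths & BIT(width);
}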
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
index 8cb5bde8f625..522883c8dfb9 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b_table.c
@@ -5345,7 +5345,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 72,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 72,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CN][48] = 127,
@@ -5353,7 +5353,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 72,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 72,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CN][50] = 127,
@@ -5361,7 +5361,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 72,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 72,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CN][52] = 127,
@@ -5793,7 +5793,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 74,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 74,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CN][48] = 127,
@@ -5801,7 +5801,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 76,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 76,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CN][50] = 127,
@@ -5809,7 +5809,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 76,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 76,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CN][52] = 127,
@@ -6361,7 +6361,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 84,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 84,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CN][47] = 127,
@@ -6369,7 +6369,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 84,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 84,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CN][51] = 127,
@@ -6649,7 +6649,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 74,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 74,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CN][49] = 127,
@@ -7975,7 +7975,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 42,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 42,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CN][48] = 127,
@@ -7983,7 +7983,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 42,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 42,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CN][50] = 127,
@@ -7991,7 +7991,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 40,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 40,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CN][52] = 127,
@@ -8423,7 +8423,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 52,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 52,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CN][48] = 127,
@@ -8431,7 +8431,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 52,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 52,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CN][50] = 127,
@@ -8439,7 +8439,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 52,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 52,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CN][52] = 127,
@@ -8871,7 +8871,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][48] = 64,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
- [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_IC][48] = 64,
[2][0][RTW89_KCC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CN][48] = 127,
@@ -8879,7 +8879,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][50] = 64,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
- [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_IC][50] = 64,
[2][0][RTW89_KCC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CN][50] = 127,
@@ -8887,7 +8887,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][52] = 60,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
- [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_IC][52] = 60,
[2][0][RTW89_KCC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CN][52] = 127,
@@ -11055,7 +11055,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 72,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 72,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CN][48] = 127,
@@ -11063,7 +11063,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 72,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 72,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CN][50] = 127,
@@ -11071,7 +11071,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 72,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 72,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CN][52] = 127,
@@ -11503,7 +11503,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 74,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 74,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CN][48] = 127,
@@ -11511,7 +11511,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 74,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 74,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CN][50] = 127,
@@ -11519,7 +11519,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 74,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 74,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CN][52] = 127,
@@ -12071,7 +12071,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 80,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 80,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CN][47] = 127,
@@ -12079,7 +12079,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 80,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 80,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CN][51] = 127,
@@ -12359,7 +12359,7 @@ const s8 rtw89_8851b_txpwr_lmt_5g_type2[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 72,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 72,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CN][49] = 127,
@@ -13685,7 +13685,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 40,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 40,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CN][48] = 127,
@@ -13693,7 +13693,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 42,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 42,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CN][50] = 127,
@@ -13701,7 +13701,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 38,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 38,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CN][52] = 127,
@@ -14133,7 +14133,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 52,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 52,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CN][48] = 127,
@@ -14141,7 +14141,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 52,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 52,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CN][50] = 127,
@@ -14149,7 +14149,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 50,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 50,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CN][52] = 127,
@@ -14581,7 +14581,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][48] = 62,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
- [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_IC][48] = 62,
[2][0][RTW89_KCC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CN][48] = 127,
@@ -14589,7 +14589,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][50] = 62,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
- [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_IC][50] = 62,
[2][0][RTW89_KCC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CN][50] = 127,
@@ -14597,7 +14597,7 @@ const s8 rtw89_8851b_txpwr_lmt_ru_5g_type2[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][52] = 60,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
- [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_IC][52] = 60,
[2][0][RTW89_KCC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CN][52] = 127,
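The table edits above consistently replace 127 in the RTW89_IC column with the corresponding RTW89_FCC value, so 127 evidently serves as the "no limit programmed" sentinel. The six indices visible in each entry suggest an order of [bandwidth][ntx][rate section][beamforming][regulatory domain][channel index]; a lookup sketch under that assumption (helper name illustrative):

/* Editorial sketch, assuming the index order inferred from the
 * entries above; returns the raw s8 limit (127 = unprogrammed).
 */
static s8 example_lookup_lmt_5g(u8 bw, u8 ntx, u8 rs, u8 bf,
				u8 regd, u8 ch_idx)
{
	return rtw89_8851b_txpwr_lmt_5g[bw][ntx][rs][bf][regd][ch_idx];
}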
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index ade69bd30fc8..ca1374a71727 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -25,6 +25,8 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.autok_en = MAC_AX_PCIE_DISABLE,
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
+ .rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 0c76c52ce22c..2deadec715cf 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -1665,28 +1665,55 @@ static u8 rtw8852a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
static void rtw8852a_btc_set_rfe(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
- module->rfe_type = rtwdev->efuse.rfe_type;
- module->cv = rtwdev->hal.cv;
- module->bt_solo = 0;
- module->switch_type = BTC_SWITCH_INTERNAL;
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
- if (module->rfe_type > 0)
- module->ant.num = (module->rfe_type % 2 ? 2 : 3);
- else
- module->ant.num = 2;
+ if (md->md_v7.rfe_type > 0)
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 2 ? 2 : 3);
+ else
+ md->md_v7.ant.num = 2;
- module->ant.diversity = 0;
- module->ant.isolation = 10;
+ md->md_v7.ant.diversity = 0;
+ md->md_v7.ant.isolation = 10;
- if (module->ant.num == 3) {
- module->ant.type = BTC_ANT_DEDICATED;
- module->bt_pos = BTC_BT_ALONE;
+ if (md->md_v7.ant.num == 3) {
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md_v7.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md_v7.ant.type;
} else {
- module->ant.type = BTC_ANT_SHARED;
- module->bt_pos = BTC_BT_BTG;
+ md->md.rfe_type = rtwdev->efuse.rfe_type;
+ md->md.cv = rtwdev->hal.cv;
+ md->md.bt_solo = 0;
+ md->md.switch_type = BTC_SWITCH_INTERNAL;
+
+ if (md->md.rfe_type > 0)
+ md->md.ant.num = (md->md.rfe_type % 2 ? 2 : 3);
+ else
+ md->md.ant.num = 2;
+
+ md->md.ant.diversity = 0;
+ md->md.ant.isolation = 10;
+
+ if (md->md.ant.num == 3) {
+ md->md.ant.type = BTC_ANT_DEDICATED;
+ md->md.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md.ant.type = BTC_ANT_SHARED;
+ md->md.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md.ant.type;
}
}
@@ -1717,7 +1744,6 @@ static void rtw8852a_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
static void rtw8852a_btc_init_cfg(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_mac_ax_coex coex_params = {
.pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
@@ -1736,7 +1762,7 @@ static void rtw8852a_btc_init_cfg(struct rtw89_dev *rtwdev)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, 0xfffff, 0x0);
/* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
- if (module->ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
rtw8852a_set_trx_mask(rtwdev,
RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
rtw8852a_set_trx_mask(rtwdev,
@@ -2043,6 +2069,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.enable_bb_rf = rtw89_mac_enable_bb_rf,
.disable_bb_rf = rtw89_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8852a_bb_reset,
.bb_sethw = rtw8852a_bb_sethw,
.read_rf = rtw89_phy_read_rf,
@@ -2053,7 +2080,9 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.read_phycap = rtw8852a_read_phycap,
.fem_setup = rtw8852a_fem_setup,
.rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8852a_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8852a_rfk_channel,
.rfk_band_changed = rtw8852a_rfk_band_changed,
.rfk_scan = rtw8852a_rfk_scan,
@@ -2078,6 +2107,12 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
.h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852a_btc_set_rfe,
.btc_init_cfg = rtw8852a_btc_init_cfg,
@@ -2130,7 +2165,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.support_chanctx_num = 1,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
- .support_bw160 = false,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = false,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
@@ -2186,6 +2223,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.c2h_regs = rtw8852a_c2h_regs,
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.page_regs = &rtw8852a_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
.cfo_src_fd = false,
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852a_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index f1e890bde049..7c6ffedb77e2 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -26,6 +26,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
+ .check_rx_tag = false,
.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index de887a35f3fb..d025c4135e1c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -988,7 +988,7 @@ static void rtw8852b_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
@@ -1086,7 +1086,7 @@ next:
static
void rtw8852b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 band = rtw89_subband_to_bb_gain_band(subband);
u32 val;
@@ -2125,28 +2125,55 @@ static u8 rtw8852b_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
static void rtw8852b_btc_set_rfe(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
- module->rfe_type = rtwdev->efuse.rfe_type;
- module->cv = rtwdev->hal.cv;
- module->bt_solo = 0;
- module->switch_type = BTC_SWITCH_INTERNAL;
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;
- if (module->rfe_type > 0)
- module->ant.num = module->rfe_type % 2 ? 2 : 3;
- else
- module->ant.num = 2;
+ if (md->md_v7.rfe_type > 0)
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 2 ? 2 : 3);
+ else
+ md->md_v7.ant.num = 2;
- module->ant.diversity = 0;
- module->ant.isolation = 10;
+ md->md_v7.ant.diversity = 0;
+ md->md_v7.ant.isolation = 10;
- if (module->ant.num == 3) {
- module->ant.type = BTC_ANT_DEDICATED;
- module->bt_pos = BTC_BT_ALONE;
+ if (md->md_v7.ant.num == 3) {
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md_v7.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md_v7.ant.type;
} else {
- module->ant.type = BTC_ANT_SHARED;
- module->bt_pos = BTC_BT_BTG;
+ md->md.rfe_type = rtwdev->efuse.rfe_type;
+ md->md.cv = rtwdev->hal.cv;
+ md->md.bt_solo = 0;
+ md->md.switch_type = BTC_SWITCH_INTERNAL;
+
+ if (md->md.rfe_type > 0)
+ md->md.ant.num = (md->md.rfe_type % 2 ? 2 : 3);
+ else
+ md->md.ant.num = 2;
+
+ md->md.ant.diversity = 0;
+ md->md.ant.isolation = 10;
+
+ if (md->md.ant.num == 3) {
+ md->md.ant.type = BTC_ANT_DEDICATED;
+ md->md.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md.ant.type = BTC_ANT_SHARED;
+ md->md.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md.ant.type;
}
}
@@ -2162,7 +2189,6 @@ void rtw8852b_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
static void rtw8852b_btc_init_cfg(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_mac_ax_coex coex_params = {
.pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
@@ -2181,7 +2207,7 @@ static void rtw8852b_btc_init_cfg(struct rtw89_dev *rtwdev)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);
/* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
- if (module->ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
/* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
@@ -2468,6 +2494,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.enable_bb_rf = rtw8852b_mac_enable_bb_rf,
.disable_bb_rf = rtw8852b_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8852b_bb_reset,
.bb_sethw = rtw8852b_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
@@ -2478,7 +2505,9 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.read_phycap = rtw8852b_read_phycap,
.fem_setup = NULL,
.rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8852b_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8852b_rfk_channel,
.rfk_band_changed = rtw8852b_rfk_band_changed,
.rfk_scan = rtw8852b_rfk_scan,
@@ -2503,6 +2532,12 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx,
.resume_sch_tx = rtw89_mac_resume_sch_tx,
.h2c_dctl_sec_cam = NULL,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852b_btc_set_rfe,
.btc_init_cfg = rtw8852b_btc_init_cfg,
@@ -2564,7 +2599,9 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.support_chanctx_num = 0,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
- .support_bw160 = false,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
.support_unii4 = true,
.ul_tb_waveform_ctrl = true,
.ul_tb_pwr_diff = false,
@@ -2620,6 +2657,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8852b_c2h_regs,
.page_regs = &rtw8852b_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = &rtw8852b_dcfo_comp,
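The new wow_reason_reg field added to each chip_info above points at a single byte lane (R_AX_C2HREG_DATA3 + 3) of a 32-bit C2H data register, so the wake reason can be fetched with one 8-bit read. A sketch of that access (the helper name is illustrative; rtw89_read8() is the driver's existing byte-read accessor):

/* Editorial sketch: read the WoWLAN wake reason byte via the
 * per-chip register offset introduced by this patch.
 */
static u8 example_read_wow_reason(struct rtw89_dev *rtwdev)
{
	return rtw89_read8(rtwdev, rtwdev->chip->wow_reason_reg);
}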
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
index d2ce16e98bac..07945d06dc59 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_table.c
@@ -16936,7 +16936,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_WW][8] = 52,
[0][0][1][0][RTW89_WW][10] = 52,
[0][0][1][0][RTW89_WW][12] = 52,
- [0][0][1][0][RTW89_WW][14] = 1,
+ [0][0][1][0][RTW89_WW][14] = 52,
[0][0][1][0][RTW89_WW][15] = 52,
[0][0][1][0][RTW89_WW][17] = 52,
[0][0][1][0][RTW89_WW][19] = 52,
@@ -16954,10 +16954,10 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_WW][42] = 28,
[0][0][1][0][RTW89_WW][44] = 28,
[0][0][1][0][RTW89_WW][46] = 28,
- [0][0][1][0][RTW89_WW][48] = 78,
- [0][0][1][0][RTW89_WW][50] = 78,
- [0][0][1][0][RTW89_WW][52] = 78,
- [0][1][1][0][RTW89_WW][0] = 1,
+ [0][0][1][0][RTW89_WW][48] = 76,
+ [0][0][1][0][RTW89_WW][50] = 76,
+ [0][0][1][0][RTW89_WW][52] = 76,
+ [0][1][1][0][RTW89_WW][0] = 30,
[0][1][1][0][RTW89_WW][2] = 32,
[0][1][1][0][RTW89_WW][4] = 30,
[0][1][1][0][RTW89_WW][6] = 30,
@@ -16982,9 +16982,9 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_WW][42] = 16,
[0][1][1][0][RTW89_WW][44] = 16,
[0][1][1][0][RTW89_WW][46] = 16,
- [0][1][1][0][RTW89_WW][48] = 56,
- [0][1][1][0][RTW89_WW][50] = 56,
- [0][1][1][0][RTW89_WW][52] = 56,
+ [0][1][1][0][RTW89_WW][48] = 50,
+ [0][1][1][0][RTW89_WW][50] = 50,
+ [0][1][1][0][RTW89_WW][52] = 50,
[0][0][2][0][RTW89_WW][0] = 42,
[0][0][2][0][RTW89_WW][2] = 42,
[0][0][2][0][RTW89_WW][4] = 42,
@@ -17038,9 +17038,9 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_WW][42] = 16,
[0][1][2][0][RTW89_WW][44] = 16,
[0][1][2][0][RTW89_WW][46] = 16,
- [0][1][2][0][RTW89_WW][48] = 58,
- [0][1][2][0][RTW89_WW][50] = 58,
- [0][1][2][0][RTW89_WW][52] = 58,
+ [0][1][2][0][RTW89_WW][48] = 50,
+ [0][1][2][0][RTW89_WW][50] = 52,
+ [0][1][2][0][RTW89_WW][52] = 52,
[0][1][2][1][RTW89_WW][0] = 14,
[0][1][2][1][RTW89_WW][2] = 14,
[0][1][2][1][RTW89_WW][4] = 14,
@@ -17066,9 +17066,9 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_WW][42] = 4,
[0][1][2][1][RTW89_WW][44] = 4,
[0][1][2][1][RTW89_WW][46] = 4,
- [0][1][2][1][RTW89_WW][48] = 58,
- [0][1][2][1][RTW89_WW][50] = 58,
- [0][1][2][1][RTW89_WW][52] = 58,
+ [0][1][2][1][RTW89_WW][48] = 50,
+ [0][1][2][1][RTW89_WW][50] = 52,
+ [0][1][2][1][RTW89_WW][52] = 52,
[1][0][2][0][RTW89_WW][1] = 42,
[1][0][2][0][RTW89_WW][5] = 42,
[1][0][2][0][RTW89_WW][9] = 52,
@@ -17095,8 +17095,8 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_WW][36] = 50,
[1][1][2][0][RTW89_WW][39] = 16,
[1][1][2][0][RTW89_WW][43] = 16,
- [1][1][2][0][RTW89_WW][47] = 68,
- [1][1][2][0][RTW89_WW][51] = 66,
+ [1][1][2][0][RTW89_WW][47] = 62,
+ [1][1][2][0][RTW89_WW][51] = 62,
[1][1][2][1][RTW89_WW][1] = 16,
[1][1][2][1][RTW89_WW][5] = 16,
[1][1][2][1][RTW89_WW][9] = 28,
@@ -17109,8 +17109,8 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_WW][36] = 36,
[1][1][2][1][RTW89_WW][39] = 4,
[1][1][2][1][RTW89_WW][43] = 4,
- [1][1][2][1][RTW89_WW][47] = 68,
- [1][1][2][1][RTW89_WW][51] = 66,
+ [1][1][2][1][RTW89_WW][47] = 62,
+ [1][1][2][1][RTW89_WW][51] = 62,
[2][0][2][0][RTW89_WW][3] = 42,
[2][0][2][0][RTW89_WW][11] = 52,
[2][0][2][0][RTW89_WW][18] = 52,
@@ -17227,7 +17227,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_MEXICO][14] = 78,
[0][0][1][0][RTW89_CN][14] = 58,
[0][0][1][0][RTW89_QATAR][14] = 58,
- [0][0][1][0][RTW89_UK][14] = 1,
+ [0][0][1][0][RTW89_UK][14] = 58,
[0][0][1][0][RTW89_FCC][15] = 76,
[0][0][1][0][RTW89_ETSI][15] = 58,
[0][0][1][0][RTW89_MKK][15] = 76,
@@ -17435,7 +17435,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 78,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 76,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CHILE][48] = 127,
@@ -17447,7 +17447,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 78,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 76,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CHILE][50] = 127,
@@ -17459,7 +17459,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 78,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 76,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CHILE][52] = 127,
@@ -17479,7 +17479,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_MEXICO][0] = 50,
[0][1][1][0][RTW89_CN][0] = 46,
[0][1][1][0][RTW89_QATAR][0] = 46,
- [0][1][1][0][RTW89_UK][0] = 1,
+ [0][1][1][0][RTW89_UK][0] = 46,
[0][1][1][0][RTW89_FCC][2] = 68,
[0][1][1][0][RTW89_ETSI][2] = 46,
[0][1][1][0][RTW89_MKK][2] = 48,
@@ -17771,7 +17771,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][48] = 56,
[0][1][1][0][RTW89_ETSI][48] = 127,
[0][1][1][0][RTW89_MKK][48] = 127,
- [0][1][1][0][RTW89_IC][48] = 127,
+ [0][1][1][0][RTW89_IC][48] = 50,
[0][1][1][0][RTW89_KCC][48] = 127,
[0][1][1][0][RTW89_ACMA][48] = 127,
[0][1][1][0][RTW89_CHILE][48] = 127,
@@ -17783,7 +17783,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][50] = 56,
[0][1][1][0][RTW89_ETSI][50] = 127,
[0][1][1][0][RTW89_MKK][50] = 127,
- [0][1][1][0][RTW89_IC][50] = 127,
+ [0][1][1][0][RTW89_IC][50] = 50,
[0][1][1][0][RTW89_KCC][50] = 127,
[0][1][1][0][RTW89_ACMA][50] = 127,
[0][1][1][0][RTW89_CHILE][50] = 127,
@@ -17795,7 +17795,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][52] = 56,
[0][1][1][0][RTW89_ETSI][52] = 127,
[0][1][1][0][RTW89_MKK][52] = 127,
- [0][1][1][0][RTW89_IC][52] = 127,
+ [0][1][1][0][RTW89_IC][52] = 50,
[0][1][1][0][RTW89_KCC][52] = 127,
[0][1][1][0][RTW89_ACMA][52] = 127,
[0][1][1][0][RTW89_CHILE][52] = 127,
@@ -18107,7 +18107,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 78,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 78,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CHILE][48] = 127,
@@ -18119,7 +18119,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 78,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 78,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CHILE][50] = 127,
@@ -18131,7 +18131,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 78,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 78,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CHILE][52] = 127,
@@ -18443,7 +18443,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][48] = 58,
[0][1][2][0][RTW89_ETSI][48] = 127,
[0][1][2][0][RTW89_MKK][48] = 127,
- [0][1][2][0][RTW89_IC][48] = 127,
+ [0][1][2][0][RTW89_IC][48] = 50,
[0][1][2][0][RTW89_KCC][48] = 127,
[0][1][2][0][RTW89_ACMA][48] = 127,
[0][1][2][0][RTW89_CHILE][48] = 127,
@@ -18455,7 +18455,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][50] = 58,
[0][1][2][0][RTW89_ETSI][50] = 127,
[0][1][2][0][RTW89_MKK][50] = 127,
- [0][1][2][0][RTW89_IC][50] = 127,
+ [0][1][2][0][RTW89_IC][50] = 52,
[0][1][2][0][RTW89_KCC][50] = 127,
[0][1][2][0][RTW89_ACMA][50] = 127,
[0][1][2][0][RTW89_CHILE][50] = 127,
@@ -18467,7 +18467,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][52] = 58,
[0][1][2][0][RTW89_ETSI][52] = 127,
[0][1][2][0][RTW89_MKK][52] = 127,
- [0][1][2][0][RTW89_IC][52] = 127,
+ [0][1][2][0][RTW89_IC][52] = 52,
[0][1][2][0][RTW89_KCC][52] = 127,
[0][1][2][0][RTW89_ACMA][52] = 127,
[0][1][2][0][RTW89_CHILE][52] = 127,
@@ -18779,7 +18779,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][48] = 58,
[0][1][2][1][RTW89_ETSI][48] = 127,
[0][1][2][1][RTW89_MKK][48] = 127,
- [0][1][2][1][RTW89_IC][48] = 127,
+ [0][1][2][1][RTW89_IC][48] = 50,
[0][1][2][1][RTW89_KCC][48] = 127,
[0][1][2][1][RTW89_ACMA][48] = 127,
[0][1][2][1][RTW89_CHILE][48] = 127,
@@ -18791,7 +18791,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][50] = 58,
[0][1][2][1][RTW89_ETSI][50] = 127,
[0][1][2][1][RTW89_MKK][50] = 127,
- [0][1][2][1][RTW89_IC][50] = 127,
+ [0][1][2][1][RTW89_IC][50] = 52,
[0][1][2][1][RTW89_KCC][50] = 127,
[0][1][2][1][RTW89_ACMA][50] = 127,
[0][1][2][1][RTW89_CHILE][50] = 127,
@@ -18803,7 +18803,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][52] = 58,
[0][1][2][1][RTW89_ETSI][52] = 127,
[0][1][2][1][RTW89_MKK][52] = 127,
- [0][1][2][1][RTW89_IC][52] = 127,
+ [0][1][2][1][RTW89_IC][52] = 52,
[0][1][2][1][RTW89_KCC][52] = 127,
[0][1][2][1][RTW89_ACMA][52] = 127,
[0][1][2][1][RTW89_CHILE][52] = 127,
@@ -18959,7 +18959,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 78,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 78,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CHILE][47] = 127,
@@ -18971,7 +18971,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 70,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 78,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CHILE][51] = 127,
@@ -19127,7 +19127,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_FCC][47] = 68,
[1][1][2][0][RTW89_ETSI][47] = 127,
[1][1][2][0][RTW89_MKK][47] = 127,
- [1][1][2][0][RTW89_IC][47] = 127,
+ [1][1][2][0][RTW89_IC][47] = 62,
[1][1][2][0][RTW89_KCC][47] = 127,
[1][1][2][0][RTW89_ACMA][47] = 127,
[1][1][2][0][RTW89_CHILE][47] = 127,
@@ -19139,7 +19139,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_FCC][51] = 66,
[1][1][2][0][RTW89_ETSI][51] = 127,
[1][1][2][0][RTW89_MKK][51] = 127,
- [1][1][2][0][RTW89_IC][51] = 127,
+ [1][1][2][0][RTW89_IC][51] = 62,
[1][1][2][0][RTW89_KCC][51] = 127,
[1][1][2][0][RTW89_ACMA][51] = 127,
[1][1][2][0][RTW89_CHILE][51] = 127,
@@ -19295,7 +19295,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_FCC][47] = 68,
[1][1][2][1][RTW89_ETSI][47] = 127,
[1][1][2][1][RTW89_MKK][47] = 127,
- [1][1][2][1][RTW89_IC][47] = 127,
+ [1][1][2][1][RTW89_IC][47] = 62,
[1][1][2][1][RTW89_KCC][47] = 127,
[1][1][2][1][RTW89_ACMA][47] = 127,
[1][1][2][1][RTW89_CHILE][47] = 127,
@@ -19307,7 +19307,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_FCC][51] = 66,
[1][1][2][1][RTW89_ETSI][51] = 127,
[1][1][2][1][RTW89_MKK][51] = 127,
- [1][1][2][1][RTW89_IC][51] = 127,
+ [1][1][2][1][RTW89_IC][51] = 62,
[1][1][2][1][RTW89_KCC][51] = 127,
[1][1][2][1][RTW89_ACMA][51] = 127,
[1][1][2][1][RTW89_CHILE][51] = 127,
@@ -19391,7 +19391,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 64,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 74,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CHILE][49] = 127,
@@ -19475,7 +19475,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_FCC][49] = 58,
[2][1][2][0][RTW89_ETSI][49] = 127,
[2][1][2][0][RTW89_MKK][49] = 127,
- [2][1][2][0][RTW89_IC][49] = 127,
+ [2][1][2][0][RTW89_IC][49] = 66,
[2][1][2][0][RTW89_KCC][49] = 127,
[2][1][2][0][RTW89_ACMA][49] = 127,
[2][1][2][0][RTW89_CHILE][49] = 127,
@@ -19559,7 +19559,7 @@ const s8 rtw89_8852b_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_FCC][49] = 58,
[2][1][2][1][RTW89_ETSI][49] = 127,
[2][1][2][1][RTW89_MKK][49] = 127,
- [2][1][2][1][RTW89_IC][49] = 127,
+ [2][1][2][1][RTW89_IC][49] = 66,
[2][1][2][1][RTW89_KCC][49] = 127,
[2][1][2][1][RTW89_ACMA][49] = 127,
[2][1][2][1][RTW89_CHILE][49] = 127,
@@ -20723,9 +20723,9 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_WW][42] = 14,
[0][1][RTW89_WW][44] = 14,
[0][1][RTW89_WW][46] = 14,
- [0][1][RTW89_WW][48] = 20,
- [0][1][RTW89_WW][50] = 20,
- [0][1][RTW89_WW][52] = 20,
+ [0][1][RTW89_WW][48] = 16,
+ [0][1][RTW89_WW][50] = 16,
+ [0][1][RTW89_WW][52] = 16,
[1][0][RTW89_WW][0] = 34,
[1][0][RTW89_WW][2] = 34,
[1][0][RTW89_WW][4] = 34,
@@ -20779,9 +20779,9 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_WW][42] = 16,
[1][1][RTW89_WW][44] = 16,
[1][1][RTW89_WW][46] = 16,
- [1][1][RTW89_WW][48] = 32,
- [1][1][RTW89_WW][50] = 32,
- [1][1][RTW89_WW][52] = 32,
+ [1][1][RTW89_WW][48] = 28,
+ [1][1][RTW89_WW][50] = 30,
+ [1][1][RTW89_WW][52] = 30,
[2][0][RTW89_WW][0] = 44,
[2][0][RTW89_WW][2] = 44,
[2][0][RTW89_WW][4] = 44,
@@ -20835,9 +20835,9 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_WW][42] = 16,
[2][1][RTW89_WW][44] = 16,
[2][1][RTW89_WW][46] = 16,
- [2][1][RTW89_WW][48] = 44,
- [2][1][RTW89_WW][50] = 44,
- [2][1][RTW89_WW][52] = 44,
+ [2][1][RTW89_WW][48] = 40,
+ [2][1][RTW89_WW][50] = 40,
+ [2][1][RTW89_WW][52] = 40,
[0][0][RTW89_FCC][0] = 52,
[0][0][RTW89_ETSI][0] = 24,
[0][0][RTW89_MKK][0] = 26,
@@ -21141,7 +21141,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 32,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 42,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CHILE][48] = 127,
@@ -21153,7 +21153,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 32,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 42,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CHILE][50] = 127,
@@ -21165,7 +21165,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 32,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 40,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CHILE][52] = 127,
@@ -21477,7 +21477,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][48] = 20,
[0][1][RTW89_ETSI][48] = 127,
[0][1][RTW89_MKK][48] = 127,
- [0][1][RTW89_IC][48] = 127,
+ [0][1][RTW89_IC][48] = 16,
[0][1][RTW89_KCC][48] = 127,
[0][1][RTW89_ACMA][48] = 127,
[0][1][RTW89_CHILE][48] = 127,
@@ -21489,7 +21489,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][50] = 20,
[0][1][RTW89_ETSI][50] = 127,
[0][1][RTW89_MKK][50] = 127,
- [0][1][RTW89_IC][50] = 127,
+ [0][1][RTW89_IC][50] = 16,
[0][1][RTW89_KCC][50] = 127,
[0][1][RTW89_ACMA][50] = 127,
[0][1][RTW89_CHILE][50] = 127,
@@ -21501,7 +21501,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][52] = 20,
[0][1][RTW89_ETSI][52] = 127,
[0][1][RTW89_MKK][52] = 127,
- [0][1][RTW89_IC][52] = 127,
+ [0][1][RTW89_IC][52] = 16,
[0][1][RTW89_KCC][52] = 127,
[0][1][RTW89_ACMA][52] = 127,
[0][1][RTW89_CHILE][52] = 127,
@@ -21813,7 +21813,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 44,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 54,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CHILE][48] = 127,
@@ -21825,7 +21825,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 44,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 54,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CHILE][50] = 127,
@@ -21837,7 +21837,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 44,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 52,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CHILE][52] = 127,
@@ -22149,7 +22149,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][48] = 32,
[1][1][RTW89_ETSI][48] = 127,
[1][1][RTW89_MKK][48] = 127,
- [1][1][RTW89_IC][48] = 127,
+ [1][1][RTW89_IC][48] = 28,
[1][1][RTW89_KCC][48] = 127,
[1][1][RTW89_ACMA][48] = 127,
[1][1][RTW89_CHILE][48] = 127,
@@ -22161,7 +22161,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][50] = 32,
[1][1][RTW89_ETSI][50] = 127,
[1][1][RTW89_MKK][50] = 127,
- [1][1][RTW89_IC][50] = 127,
+ [1][1][RTW89_IC][50] = 30,
[1][1][RTW89_KCC][50] = 127,
[1][1][RTW89_ACMA][50] = 127,
[1][1][RTW89_CHILE][50] = 127,
@@ -22173,7 +22173,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][52] = 32,
[1][1][RTW89_ETSI][52] = 127,
[1][1][RTW89_MKK][52] = 127,
- [1][1][RTW89_IC][52] = 127,
+ [1][1][RTW89_IC][52] = 30,
[1][1][RTW89_KCC][52] = 127,
[1][1][RTW89_ACMA][52] = 127,
[1][1][RTW89_CHILE][52] = 127,
@@ -22486,7 +22486,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
[2][0][RTW89_IC][48] = 127,
- [2][0][RTW89_KCC][48] = 127,
+ [2][0][RTW89_KCC][48] = 66,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CHILE][48] = 127,
[2][0][RTW89_UKRAINE][48] = 127,
@@ -22498,7 +22498,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
[2][0][RTW89_IC][50] = 127,
- [2][0][RTW89_KCC][50] = 127,
+ [2][0][RTW89_KCC][50] = 66,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CHILE][50] = 127,
[2][0][RTW89_UKRAINE][50] = 127,
@@ -22510,7 +22510,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
[2][0][RTW89_IC][52] = 127,
- [2][0][RTW89_KCC][52] = 127,
+ [2][0][RTW89_KCC][52] = 66,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CHILE][52] = 127,
[2][0][RTW89_UKRAINE][52] = 127,
@@ -22821,7 +22821,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][48] = 44,
[2][1][RTW89_ETSI][48] = 127,
[2][1][RTW89_MKK][48] = 127,
- [2][1][RTW89_IC][48] = 127,
+ [2][1][RTW89_IC][48] = 40,
[2][1][RTW89_KCC][48] = 127,
[2][1][RTW89_ACMA][48] = 127,
[2][1][RTW89_CHILE][48] = 127,
@@ -22833,7 +22833,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][50] = 44,
[2][1][RTW89_ETSI][50] = 127,
[2][1][RTW89_MKK][50] = 127,
- [2][1][RTW89_IC][50] = 127,
+ [2][1][RTW89_IC][50] = 40,
[2][1][RTW89_KCC][50] = 127,
[2][1][RTW89_ACMA][50] = 127,
[2][1][RTW89_CHILE][50] = 127,
@@ -22845,7 +22845,7 @@ const s8 rtw89_8852b_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][52] = 44,
[2][1][RTW89_ETSI][52] = 127,
[2][1][RTW89_MKK][52] = 127,
- [2][1][RTW89_IC][52] = 127,
+ [2][1][RTW89_IC][52] = 40,
[2][1][RTW89_KCC][52] = 127,
[2][1][RTW89_ACMA][52] = 127,
[2][1][RTW89_CHILE][52] = 127,
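Context for the rtw8852b_table.c data changes above: each entry is an s8
transmit-power limit (s8 being the kernel's signed 8-bit type), and the
RTW89_WW ("worldwide") rows are expected to carry the most conservative value
across the concrete regulatory domains at the same index. A sketch of that
derivation, assuming 127 acts as an "undefined entry" sentinel (the helper
name is illustrative):

	/* Sketch only: worldwide limit = minimum over defined domain limits. */
	static s8 ww_txpwr_lmt(const s8 *lmt_by_regd, unsigned int n_regd)
	{
		s8 min = 127;	/* assumed sentinel: no limit defined */
		unsigned int i;

		for (i = 0; i < n_regd; i++)
			if (lmt_by_regd[i] != 127 && lmt_by_regd[i] < min)
				min = lmt_by_regd[i];

		return min;
	}

Under that reading, correcting the stray UK entries (1 -> 58/46) and filling
in the previously undefined IC/KCC entries is what reflows the matching
RTW89_WW values in the hunks above.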
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index 920b20bbcfb7..ed71364e6437 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -26,6 +26,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.io_rcy_en = MAC_AX_PCIE_DISABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
+ .check_rx_tag = false,

.init_cfg_reg = R_AX_PCIE_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN,
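Both PCIe glue files (rtw8852be.c here, rtw8852ce.c below) initialize the new
rtw89_pci_info field check_rx_tag to false, keeping the existing behavior on
these chips while newer parts can opt in. A hedged sketch of how such a
per-chip flag is typically consumed on the RX path; rx_tag_matches and the
surrounding flow are illustrative, not taken from this patch:

	/* Sketch only: gate RX-tag validation on the per-chip capability. */
	if (rtwpci->info->check_rx_tag) {
		/* hypothetical check of the DMA completion tag before
		 * trusting the RX descriptor contents
		 */
		if (!rx_tag_matches(rtwdev, rx_desc))
			return -EAGAIN;	/* descriptor not ready yet */
	}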
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 8618d0204f66..17e6164855fa 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -842,7 +842,7 @@ static void rtw8852c_set_gain_error(struct rtw89_dev *rtwdev,
enum rtw89_subband subband,
enum rtw89_rf_path path)
{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain;
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
s32 val;
u32 reg;
@@ -2365,28 +2365,55 @@ static u8 rtw8852c_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_p
static void rtw8852c_btc_set_rfe(struct rtw89_dev *rtwdev)
{
- struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;

- module->rfe_type = rtwdev->efuse.rfe_type;
- module->cv = rtwdev->hal.cv;
- module->bt_solo = 0;
- module->switch_type = BTC_SWITCH_INTERNAL;
+ if (ver->fcxinit == 7) {
+ md->md_v7.rfe_type = rtwdev->efuse.rfe_type;
+ md->md_v7.kt_ver = rtwdev->hal.cv;
+ md->md_v7.bt_solo = 0;
+ md->md_v7.switch_type = BTC_SWITCH_INTERNAL;

- if (module->rfe_type > 0)
- module->ant.num = (module->rfe_type % 2 ? 2 : 3);
- else
- module->ant.num = 2;
+ if (md->md_v7.rfe_type > 0)
+ md->md_v7.ant.num = (md->md_v7.rfe_type % 2 ? 2 : 3);
+ else
+ md->md_v7.ant.num = 2;

- module->ant.diversity = 0;
- module->ant.isolation = 10;
+ md->md_v7.ant.diversity = 0;
+ md->md_v7.ant.isolation = 10;

- if (module->ant.num == 3) {
- module->ant.type = BTC_ANT_DEDICATED;
- module->bt_pos = BTC_BT_ALONE;
+ if (md->md_v7.ant.num == 3) {
+ md->md_v7.ant.type = BTC_ANT_DEDICATED;
+ md->md_v7.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md_v7.ant.type = BTC_ANT_SHARED;
+ md->md_v7.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md_v7.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md_v7.ant.type;
} else {
- module->ant.type = BTC_ANT_SHARED;
- module->bt_pos = BTC_BT_BTG;
+ md->md.rfe_type = rtwdev->efuse.rfe_type;
+ md->md.cv = rtwdev->hal.cv;
+ md->md.bt_solo = 0;
+ md->md.switch_type = BTC_SWITCH_INTERNAL;
+
+ if (md->md.rfe_type > 0)
+ md->md.ant.num = (md->md.rfe_type % 2 ? 2 : 3);
+ else
+ md->md.ant.num = 2;
+
+ md->md.ant.diversity = 0;
+ md->md.ant.isolation = 10;
+
+ if (md->md.ant.num == 3) {
+ md->md.ant.type = BTC_ANT_DEDICATED;
+ md->md.bt_pos = BTC_BT_ALONE;
+ } else {
+ md->md.ant.type = BTC_ANT_SHARED;
+ md->md.bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = md->md.ant.btg_pos;
+ rtwdev->btc.ant_type = md->md.ant.type;
}
}
@@ -2449,7 +2476,6 @@ void rtw8852c_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
static void rtw8852c_btc_init_cfg(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_module *module = &btc->mdinfo;
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_mac_ax_coex coex_params = {
.pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
@@ -2468,7 +2494,7 @@ static void rtw8852c_btc_init_cfg(struct rtw89_dev *rtwdev)
rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);

/* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
- if (module->ant.type == BTC_ANT_SHARED) {
+ if (btc->ant_type == BTC_ANT_SHARED) {
rtw8852c_set_trx_mask(rtwdev,
RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
rtw8852c_set_trx_mask(rtwdev,
@@ -2813,6 +2839,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.enable_bb_rf = rtw8852c_mac_enable_bb_rf,
.disable_bb_rf = rtw8852c_mac_disable_bb_rf,
.bb_preinit = NULL,
+ .bb_postinit = NULL,
.bb_reset = rtw8852c_bb_reset,
.bb_sethw = rtw8852c_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
@@ -2823,7 +2850,9 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.read_phycap = rtw8852c_read_phycap,
.fem_setup = NULL,
.rfe_gpio = NULL,
+ .rfk_hw_init = NULL,
.rfk_init = rtw8852c_rfk_init,
+ .rfk_init_late = NULL,
.rfk_channel = rtw8852c_rfk_channel,
.rfk_band_changed = rtw8852c_rfk_band_changed,
.rfk_scan = rtw8852c_rfk_scan,
@@ -2848,6 +2877,12 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = {
.stop_sch_tx = rtw89_mac_stop_sch_tx_v1,
.resume_sch_tx = rtw89_mac_resume_sch_tx_v1,
.h2c_dctl_sec_cam = rtw89_fw_h2c_dctl_sec_cam_v1,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl,
+ .h2c_ampdu_cmac_tbl = NULL,
+ .h2c_default_dmac_tbl = NULL,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam,

.btc_set_rfe = rtw8852c_btc_set_rfe,
.btc_init_cfg = rtw8852c_btc_init_cfg,
@@ -2902,7 +2937,10 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
- .support_bw160 = true,
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = true,
@@ -2959,6 +2997,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8852c_c2h_regs,
.page_regs = &rtw8852c_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3,
.cfo_src_fd = false,
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852c_dcfo_comp,
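The rtw8852c.c coexistence hunks above share one pattern: btc mdinfo becomes a
union (md for the legacy layout, md_v7 when ver->fcxinit == 7), each branch
fills its own layout, and the fields later code actually needs are mirrored
into version-neutral caches (btc->btg_pos, btc->ant_type) so that consumers
such as btc_init_cfg no longer dereference the union. Reduced to a sketch
grounded in those hunks:

	/* Sketch only: version-dispatched fill, version-neutral cache. */
	union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;

	if (rtwdev->btc.ver->fcxinit == 7)	/* v7 firmware info layout */
		rtwdev->btc.ant_type = md->md_v7.ant.type;
	else					/* legacy layout */
		rtwdev->btc.ant_type = md->md.ant.type;

	/* consumers now test the cache, never the union: */
	if (rtwdev->btc.ant_type == BTC_ANT_SHARED)
		rtw8852c_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);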
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 4592de3dbd94..583ea673a4f5 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -35,6 +35,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.io_rcy_en = MAC_AX_PCIE_ENABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_6MS,
.rx_ring_eq_is_full = false,
+ .check_rx_tag = false,

.init_cfg_reg = R_AX_HAXI_INIT_CFG1,
.txhci_en_bit = B_AX_TXHCI_EN_V1,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 0e7300cc6d9e..367459bd1345 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2023 Realtek Corporation
*/

+#include "coex.h"
#include "debug.h"
#include "efuse.h"
#include "fw.h"
@@ -9,12 +10,16 @@
#include "phy.h"
#include "reg.h"
#include "rtw8922a.h"
+#include "rtw8922a_rfk.h"
+#include "util.h"
#define RTW8922A_FW_FORMAT_MAX 0
#define RTW8922A_FW_BASENAME "rtw89/rtw8922a_fw"
#define RTW8922A_MODULE_FIRMWARE \
RTW8922A_FW_BASENAME ".bin"

+#define HE_N_USER_MAX_8922A 4
+
static const struct rtw89_hfc_ch_cfg rtw8922a_hfc_chcfg_pcie[] = {
{2, 1641, grp_0}, /* ACH 0 */
{2, 1641, grp_0}, /* ACH 1 */
@@ -43,6 +48,8 @@ static const struct rtw89_hfc_pub_cfg rtw8922a_hfc_pubcfg_pcie = {
static const struct rtw89_hfc_param_ini rtw8922a_hfc_param_ini_pcie[] = {
[RTW89_QTA_SCC] = {rtw8922a_hfc_chcfg_pcie, &rtw8922a_hfc_pubcfg_pcie,
&rtw89_mac_size.hfc_prec_cfg_c0, RTW89_HCIFC_POH},
+ [RTW89_QTA_DBCC] = {rtw8922a_hfc_chcfg_pcie, &rtw8922a_hfc_pubcfg_pcie,
+ &rtw89_mac_size.hfc_prec_cfg_c0, RTW89_HCIFC_POH},
[RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_mac_size.hfc_prec_cfg_c2,
RTW89_HCIFC_POH},
[RTW89_QTA_INVALID] = {NULL},
@@ -54,6 +61,11 @@ static const struct rtw89_dle_mem rtw8922a_dle_mem_pcie[] = {
&rtw89_mac_size.wde_qt0_v1, &rtw89_mac_size.ple_qt0,
&rtw89_mac_size.ple_qt1, &rtw89_mac_size.ple_rsvd_qt0,
&rtw89_mac_size.rsvd0_size0, &rtw89_mac_size.rsvd1_size0},
+ [RTW89_QTA_DBCC] = {RTW89_QTA_DBCC, &rtw89_mac_size.wde_size0_v1,
+ &rtw89_mac_size.ple_size0_v1, &rtw89_mac_size.wde_qt0_v1,
+ &rtw89_mac_size.wde_qt0_v1, &rtw89_mac_size.ple_qt0,
+ &rtw89_mac_size.ple_qt1, &rtw89_mac_size.ple_rsvd_qt0,
+ &rtw89_mac_size.rsvd0_size0, &rtw89_mac_size.rsvd1_size0},
[RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size4_v1,
&rtw89_mac_size.ple_size3_v1, &rtw89_mac_size.wde_qt4,
&rtw89_mac_size.wde_qt4, &rtw89_mac_size.ple_qt9,
@@ -63,6 +75,31 @@ static const struct rtw89_dle_mem rtw8922a_dle_mem_pcie[] = {
NULL},
};

+static const u32 rtw8922a_h2c_regs[RTW89_H2CREG_MAX] = {
+ R_BE_H2CREG_DATA0, R_BE_H2CREG_DATA1, R_BE_H2CREG_DATA2,
+ R_BE_H2CREG_DATA3
+};
+
+static const u32 rtw8922a_c2h_regs[RTW89_H2CREG_MAX] = {
+ R_BE_C2HREG_DATA0, R_BE_C2HREG_DATA1, R_BE_C2HREG_DATA2,
+ R_BE_C2HREG_DATA3
+};
+
+static const struct rtw89_page_regs rtw8922a_page_regs = {
+ .hci_fc_ctrl = R_BE_HCI_FC_CTRL,
+ .ch_page_ctrl = R_BE_CH_PAGE_CTRL,
+ .ach_page_ctrl = R_BE_CH0_PAGE_CTRL,
+ .ach_page_info = R_BE_CH0_PAGE_INFO,
+ .pub_page_info3 = R_BE_PUB_PAGE_INFO3,
+ .pub_page_ctrl1 = R_BE_PUB_PAGE_CTRL1,
+ .pub_page_ctrl2 = R_BE_PUB_PAGE_CTRL2,
+ .pub_page_info1 = R_BE_PUB_PAGE_INFO1,
+ .pub_page_info2 = R_BE_PUB_PAGE_INFO2,
+ .wp_page_ctrl1 = R_BE_WP_PAGE_CTRL1,
+ .wp_page_ctrl2 = R_BE_WP_PAGE_CTRL2,
+ .wp_page_info1 = R_BE_WP_PAGE_INFO1,
+};
+
static const struct rtw89_reg_imr rtw8922a_imr_dmac_regs[] = {
{R_BE_DISP_HOST_IMR, B_BE_DISP_HOST_IMR_CLR, B_BE_DISP_HOST_IMR_SET},
{R_BE_DISP_CPU_IMR, B_BE_DISP_CPU_IMR_CLR, B_BE_DISP_CPU_IMR_SET},
@@ -119,6 +156,51 @@ static const struct rtw89_imr_table rtw8922a_imr_cmac_table = {
.n_regs = ARRAY_SIZE(rtw8922a_imr_cmac_regs),
};

+static const struct rtw89_rrsr_cfgs rtw8922a_rrsr_cfgs = {
+ .ref_rate = {R_BE_TRXPTCL_RESP_1, B_BE_WMAC_RESP_REF_RATE_SEL, 0},
+ .rsc = {R_BE_PTCL_RRSR1, B_BE_RSC_MASK, 2},
+};
+
+static const struct rtw89_dig_regs rtw8922a_dig_regs = {
+ .seg0_pd_reg = R_SEG0R_PD_V2,
+ .pd_lower_bound_mask = B_SEG0R_PD_LOWER_BOUND_MSK,
+ .pd_spatial_reuse_en = B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1,
+ .bmode_pd_reg = R_BMODE_PDTH_EN_V2,
+ .bmode_cca_rssi_limit_en = B_BMODE_PDTH_LIMIT_EN_MSK_V1,
+ .bmode_pd_lower_bound_reg = R_BMODE_PDTH_V2,
+ .bmode_rssi_nocca_low_th_mask = B_BMODE_PDTH_LOWER_BOUND_MSK_V1,
+ .p0_lna_init = {R_PATH0_LNA_INIT_V1, B_PATH0_LNA_INIT_IDX_MSK},
+ .p1_lna_init = {R_PATH1_LNA_INIT_V1, B_PATH1_LNA_INIT_IDX_MSK},
+ .p0_tia_init = {R_PATH0_TIA_INIT_V1, B_PATH0_TIA_INIT_IDX_MSK_V1},
+ .p1_tia_init = {R_PATH1_TIA_INIT_V1, B_PATH1_TIA_INIT_IDX_MSK_V1},
+ .p0_rxb_init = {R_PATH0_RXB_INIT_V1, B_PATH0_RXB_INIT_IDX_MSK_V1},
+ .p1_rxb_init = {R_PATH1_RXB_INIT_V1, B_PATH1_RXB_INIT_IDX_MSK_V1},
+ .p0_p20_pagcugc_en = {R_PATH0_P20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p0_s20_pagcugc_en = {R_PATH0_S20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_p20_pagcugc_en = {R_PATH1_P20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ .p1_s20_pagcugc_en = {R_PATH1_S20_FOLLOW_BY_PAGCUGC_V3,
+ B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
+static const struct rtw89_edcca_regs rtw8922a_edcca_regs = {
+ .edcca_level = R_SEG0R_EDCCA_LVL_BE,
+ .edcca_mask = B_EDCCA_LVL_MSK0,
+ .edcca_p_mask = B_EDCCA_LVL_MSK1,
+ .ppdu_level = R_SEG0R_PPDU_LVL_BE,
+ .ppdu_mask = B_EDCCA_LVL_MSK1,
+ .rpt_a = R_EDCCA_RPT_A_BE,
+ .rpt_b = R_EDCCA_RPT_B_BE,
+ .rpt_sel = R_EDCCA_RPT_SEL_BE,
+ .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK,
+ .rpt_sel_be = R_EDCCA_RPTREG_SEL_BE,
+ .rpt_sel_be_mask = B_EDCCA_RPTREG_SEL_BE_MSK,
+ .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST_BE,
+ .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_BE_M,
+};
+
static const struct rtw89_efuse_block_cfg rtw8922a_efuse_blocks[] = {
[RTW89_EFUSE_BLOCK_SYS] = {.offset = 0x00000, .size = 0x310},
[RTW89_EFUSE_BLOCK_RF] = {.offset = 0x10000, .size = 0x240},
@@ -130,6 +212,36 @@ static const struct rtw89_efuse_block_cfg rtw8922a_efuse_blocks[] = {
[RTW89_EFUSE_BLOCK_ADIE] = {.offset = 0x70000, .size = 0x10},
};

+static void rtw8922a_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_SHARE_A, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BTG_PATH_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_SHARE_B, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BTG_PATH_B, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x20, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA0_B, 0x30, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_BT_SHARE, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_BT_SG0, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN,
+ 0x1, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_SHARE_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BTG_PATH_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_SHARE_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BTG_PATH_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x1a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA0_B, 0x2a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_BT_SHARE, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_BT_SG0, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN,
+ 0x0, phy_idx);
+ }
+}
+
static int rtw8922a_pwr_on_func(struct rtw89_dev *rtwdev)
{
struct rtw89_hal *hal = &rtwdev->hal;
@@ -273,6 +385,9 @@ static int rtw8922a_pwr_on_func(struct rtw89_dev *rtwdev)
rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_FEN_BB_IP_RSTN |
B_BE_FEN_BBPLAT_RSTB);
+ if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags))
+ rtw89_efuse_read_fw_secure_be(rtwdev);
+
return 0;
}
@@ -574,6 +689,32 @@ static void rtw8922a_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
}
}
+static void rtw8922a_pa_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pabias_2g, pabias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
+ pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pabias_2g, pabias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG_V1, pabias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA_V1, pabias_5g);
+ }
+}
+
static void rtw8922a_phycap_parsing_pad_bias_trim(struct rtw89_dev *rtwdev,
u8 *phycap_map)
{
@@ -591,6 +732,31 @@ static void rtw8922a_phycap_parsing_pad_bias_trim(struct rtw89_dev *rtwdev,
}
}
+static void rtw8922a_pad_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pad_bias_2g, pad_bias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PAD_BIAS][TRIM] no PG, do nothing\n");
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8922A; i++) {
+ pad_bias_2g = u8_get_bits(info->pad_bias_trim[i], GENMASK(3, 0));
+ pad_bias_5g = u8_get_bits(info->pad_bias_trim[i], GENMASK(7, 4));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PAD_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pad_bias_2g, pad_bias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASD_TXG_V1, pad_bias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASD_TXA_V1, pad_bias_5g);
+ }
+}
+
static int rtw8922a_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
{
rtw8922a_phycap_parsing_thermal_trim(rtwdev, phycap_map);
@@ -600,6 +766,1547 @@ static int rtw8922a_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
return 0;
}
+static void rtw8922a_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8922a_pa_bias_trim(rtwdev);
+ rtw8922a_pad_bias_trim(rtwdev);
+}
+
+static void rtw8922a_set_channel_mac(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ u8 mac_idx)
+{
+ u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_BE_TX_SUB_BAND_VALUE, mac_idx);
+ u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_BE_TXRATE_CHK, mac_idx);
+ u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_BE_WMAC_RFMOD, mac_idx);
+ u8 txsb20 = 0, txsb40 = 0, txsb80 = 0;
+ u8 rf_mod_val, chk_rate_mask;
+ u32 txsb;
+ u32 reg;
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_160:
+ txsb80 = rtw89_phy_get_txsb(rtwdev, chan, RTW89_CHANNEL_WIDTH_80);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_80:
+ txsb40 = rtw89_phy_get_txsb(rtwdev, chan, RTW89_CHANNEL_WIDTH_40);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_40:
+ txsb20 = rtw89_phy_get_txsb(rtwdev, chan, RTW89_CHANNEL_WIDTH_20);
+ break;
+ default:
+ break;
+ }
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_160:
+ rf_mod_val = BE_WMAC_RFMOD_160M;
+ txsb = u32_encode_bits(txsb20, B_BE_TXSB_20M_MASK) |
+ u32_encode_bits(txsb40, B_BE_TXSB_40M_MASK) |
+ u32_encode_bits(txsb80, B_BE_TXSB_80M_MASK);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_mod_val = BE_WMAC_RFMOD_80M;
+ txsb = u32_encode_bits(txsb20, B_BE_TXSB_20M_MASK) |
+ u32_encode_bits(txsb40, B_BE_TXSB_40M_MASK);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_mod_val = BE_WMAC_RFMOD_40M;
+ txsb = u32_encode_bits(txsb20, B_BE_TXSB_20M_MASK);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ default:
+ rf_mod_val = BE_WMAC_RFMOD_20M;
+ txsb = 0;
+ break;
+ }
+
+ if (txsb20 <= BE_PRI20_BITMAP_MAX)
+ txsb |= u32_encode_bits(BIT(txsb20), B_BE_PRI20_BITMAP_MASK);
+
+ rtw89_write8_mask(rtwdev, rf_mod, B_BE_WMAC_RFMOD_MASK, rf_mod_val);
+ rtw89_write32(rtwdev, sub_carr, txsb);
+
+ switch (chan->band_type) {
+ case RTW89_BAND_2G:
+ chk_rate_mask = B_BE_BAND_MODE;
+ break;
+ case RTW89_BAND_5G:
+ case RTW89_BAND_6G:
+ chk_rate_mask = B_BE_CHECK_CCK_EN | B_BE_RTS_LIMIT_IN_OFDM6;
+ break;
+ default:
+ rtw89_warn(rtwdev, "Invalid band_type:%d\n", chan->band_type);
+ return;
+ }
+
+ rtw89_write8_clr(rtwdev, chk_rate, B_BE_BAND_MODE | B_BE_CHECK_CCK_EN |
+ B_BE_RTS_LIMIT_IN_OFDM6);
+ rtw89_write8_set(rtwdev, chk_rate, chk_rate_mask);
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_320:
+ case RTW89_CHANNEL_WIDTH_160:
+ case RTW89_CHANNEL_WIDTH_80:
+ case RTW89_CHANNEL_WIDTH_40:
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PREBKF_CFG_1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_T1_MASK, 0x41);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_MUEDCA_EN, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_TB_T1_MASK, 0x41);
+ break;
+ default:
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PREBKF_CFG_1, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_T1_MASK, 0x3f);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_MUEDCA_EN, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_BE_SIFS_MACTXEN_TB_T1_MASK, 0x3e);
+ break;
+ }
+}
+
+static const u32 rtw8922a_sco_barker_threshold[14] = {
+ 0x1fe4f, 0x1ff5e, 0x2006c, 0x2017b, 0x2028a, 0x20399, 0x204a8, 0x205b6,
+ 0x206c5, 0x207d4, 0x208e3, 0x209f2, 0x20b00, 0x20d8a
+};
+
+static const u32 rtw8922a_sco_cck_threshold[14] = {
+ 0x2bdac, 0x2bf21, 0x2c095, 0x2c209, 0x2c37e, 0x2c4f2, 0x2c666, 0x2c7db,
+ 0x2c94f, 0x2cac3, 0x2cc38, 0x2cdac, 0x2cf21, 0x2d29e
+};
+
+static int rtw8922a_ctrl_sco_cck(struct rtw89_dev *rtwdev,
+ u8 primary_ch, enum rtw89_bandwidth bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 ch_element;
+
+ if (primary_ch >= 14)
+ return -EINVAL;
+
+ ch_element = primary_ch - 1;
+
+ rtw89_phy_write32_idx(rtwdev, R_BK_FC0INV, B_BK_FC0INV,
+ rtw8922a_sco_barker_threshold[ch_element],
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CCK_FC0INV, B_CCK_FC0INV,
+ rtw8922a_sco_cck_threshold[ch_element],
+ phy_idx);
+
+ return 0;
+}
+
+struct rtw8922a_bb_gain {
+ u32 gain_g[BB_PATH_NUM_8922A];
+ u32 gain_a[BB_PATH_NUM_8922A];
+ u32 gain_g_mask;
+ u32 gain_a_mask;
+};
+
+static const struct rtw89_reg_def rpl_comp_bw160[RTW89_BW20_SC_160M] = {
+ { .addr = 0x41E8, .mask = 0xFF00},
+ { .addr = 0x41E8, .mask = 0xFF0000},
+ { .addr = 0x41E8, .mask = 0xFF000000},
+ { .addr = 0x41EC, .mask = 0xFF},
+ { .addr = 0x41EC, .mask = 0xFF00},
+ { .addr = 0x41EC, .mask = 0xFF0000},
+ { .addr = 0x41EC, .mask = 0xFF000000},
+ { .addr = 0x41F0, .mask = 0xFF}
+};
+
+static const struct rtw89_reg_def rpl_comp_bw80[RTW89_BW20_SC_80M] = {
+ { .addr = 0x41F4, .mask = 0xFF},
+ { .addr = 0x41F4, .mask = 0xFF00},
+ { .addr = 0x41F4, .mask = 0xFF0000},
+ { .addr = 0x41F4, .mask = 0xFF000000}
+};
+
+static const struct rtw89_reg_def rpl_comp_bw40[RTW89_BW20_SC_40M] = {
+ { .addr = 0x41F0, .mask = 0xFF0000},
+ { .addr = 0x41F0, .mask = 0xFF000000}
+};
+
+static const struct rtw89_reg_def rpl_comp_bw20[RTW89_BW20_SC_20M] = {
+ { .addr = 0x41F0, .mask = 0xFF00}
+};
+
+static const struct rtw8922a_bb_gain bb_gain_lna[LNA_GAIN_NUM] = {
+ { .gain_g = {0x409c, 0x449c}, .gain_a = {0x406C, 0x446C},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+ { .gain_g = {0x409c, 0x449c}, .gain_a = {0x406C, 0x446C},
+ .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF0000},
+ { .gain_g = {0x40a0, 0x44a0}, .gain_a = {0x4070, 0x4470},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+ { .gain_g = {0x40a0, 0x44a0}, .gain_a = {0x4070, 0x4470},
+ .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF0000},
+ { .gain_g = {0x40a4, 0x44a4}, .gain_a = {0x4074, 0x4474},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+ { .gain_g = {0x40a4, 0x44a4}, .gain_a = {0x4074, 0x4474},
+ .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF0000},
+ { .gain_g = {0x40a8, 0x44a8}, .gain_a = {0x4078, 0x4478},
+ .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF},
+};
+
+static const struct rtw8922a_bb_gain bb_gain_tia[TIA_GAIN_NUM] = {
+ { .gain_g = {0x4054, 0x4454}, .gain_a = {0x4054, 0x4454},
+ .gain_g_mask = 0x7FC0000, .gain_a_mask = 0x1FF},
+ { .gain_g = {0x4058, 0x4458}, .gain_a = {0x4054, 0x4454},
+ .gain_g_mask = 0x1FF, .gain_a_mask = 0x3FE00 },
+};
+
+struct rtw8922a_bb_gain_bypass {
+ u32 gain_g[BB_PATH_NUM_8922A];
+ u32 gain_a[BB_PATH_NUM_8922A];
+ u32 gain_mask_g;
+ u32 gain_mask_a;
+};
+
+static void rtw8922a_set_rpl_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
+ u32 reg_path_ofst = 0;
+ u32 mask;
+ s32 val;
+ u32 reg;
+ int i;
+
+ if (path == RF_PATH_B)
+ reg_path_ofst = 0x400;
+
+ for (i = 0; i < RTW89_BW20_SC_160M; i++) {
+ reg = rpl_comp_bw160[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw160[i].mask;
+ val = gain->rpl_ofst_160[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < RTW89_BW20_SC_80M; i++) {
+ reg = rpl_comp_bw80[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw80[i].mask;
+ val = gain->rpl_ofst_80[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < RTW89_BW20_SC_40M; i++) {
+ reg = rpl_comp_bw40[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw40[i].mask;
+ val = gain->rpl_ofst_40[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < RTW89_BW20_SC_20M; i++) {
+ reg = rpl_comp_bw20[i].addr | reg_path_ofst;
+ mask = rpl_comp_bw20[i].mask;
+ val = gain->rpl_ofst_20[gain_band][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+}
+
+static void rtw8922a_set_lna_tia_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
+ u8 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
+ enum rtw89_phy_bb_bw_be bw_type;
+ s32 val;
+ u32 reg;
+ u32 mask;
+ int i;
+
+ bw_type = chan->band_width <= RTW89_CHANNEL_WIDTH_40 ?
+ RTW89_BB_BW_20_40 : RTW89_BB_BW_80_160_320;
+
+ for (i = 0; i < LNA_GAIN_NUM; i++) {
+ if (chan->band_type == RTW89_BAND_2G) {
+ reg = bb_gain_lna[i].gain_g[path];
+ mask = bb_gain_lna[i].gain_g_mask;
+ } else {
+ reg = bb_gain_lna[i].gain_a[path];
+ mask = bb_gain_lna[i].gain_a_mask;
+ }
+ val = gain->lna_gain[gain_band][bw_type][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ if (chan->band_type == RTW89_BAND_2G) {
+ reg = bb_gain_tia[i].gain_g[path];
+ mask = bb_gain_tia[i].gain_g_mask;
+ } else {
+ reg = bb_gain_tia[i].gain_a[path];
+ mask = bb_gain_tia[i].gain_a_mask;
+ }
+ val = gain->tia_gain[gain_band][bw_type][path][i];
+ rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx);
+ }
+}
+
+static void rtw8922a_set_gain(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_lna_tia_gain(rtwdev, chan, path, phy_idx);
+ rtw8922a_set_rpl_gain(rtwdev, chan, path, phy_idx);
+}
+
+static void rtw8922a_set_rx_gain_normal_cck(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ s8 value = -gain->offset[path][RTW89_GAIN_OFFSET_2G_CCK]; /* S(8,2) */
+ u8 fraction = value & 0x3;
+
+ if (fraction) {
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW20,
+ (0x4 - fraction) << 1);
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW40,
+ (0x4 - fraction) << 1);
+
+ value >>= 2;
+ rtw89_phy_write32_mask(rtwdev, R_CCK_RPL_OFST, B_CCK_RPL_OFST,
+ value + 1 + 0xdc);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW20, 0);
+ rtw89_phy_write32_mask(rtwdev, R_MGAIN_BIAS, B_MGAIN_BIAS_BW40, 0);
+
+ value >>= 2;
+ rtw89_phy_write32_mask(rtwdev, R_CCK_RPL_OFST, B_CCK_RPL_OFST,
+ value + 0xdc);
+ }
+}
+
+static void rtw8922a_set_rx_gain_normal_ofdm(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ static const u32 rssi_tb_bias_comp[2] = {0x41f8, 0x45f8};
+ static const u32 rssi_tb_ext_comp[2] = {0x4208, 0x4608};
+ static const u32 rssi_ofst_addr[2] = {0x40c8, 0x44c8};
+ static const u32 rpl_bias_comp[2] = {0x41e8, 0x45e8};
+ static const u32 rpl_ext_comp[2] = {0x41f8, 0x45f8};
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ enum rtw89_gain_offset gain_band;
+ s8 v1, v2, v3;
+ s32 value;
+
+ gain_band = rtw89_subband_to_gain_offset_band_of_ofdm(chan->subband_type);
+ value = gain->offset[path][gain_band];
+ rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[path], 0xff000000, value + 0xF8);
+
+ value *= -4;
+ v1 = clamp_t(s32, value, S8_MIN, S8_MAX);
+ value -= v1;
+ v2 = clamp_t(s32, value, S8_MIN, S8_MAX);
+ value -= v2;
+ v3 = clamp_t(s32, value, S8_MIN, S8_MAX);
+
+ rtw89_phy_write32_mask(rtwdev, rpl_bias_comp[path], 0xff, v1);
+ rtw89_phy_write32_mask(rtwdev, rpl_ext_comp[path], 0xff, v2);
+ rtw89_phy_write32_mask(rtwdev, rpl_ext_comp[path], 0xff00, v3);
+
+ rtw89_phy_write32_mask(rtwdev, rssi_tb_bias_comp[path], 0xff0000, v1);
+ rtw89_phy_write32_mask(rtwdev, rssi_tb_ext_comp[path], 0xff0000, v2);
+ rtw89_phy_write32_mask(rtwdev, rssi_tb_ext_comp[path], 0xff000000, v3);
+}
+
+static void rtw8922a_set_rx_gain_normal(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+
+ if (!gain->offset_valid)
+ return;
+
+ if (chan->band_type == RTW89_BAND_2G)
+ rtw8922a_set_rx_gain_normal_cck(rtwdev, chan, path);
+
+ rtw8922a_set_rx_gain_normal_ofdm(rtwdev, chan, path);
+}
+
+static void rtw8922a_set_cck_parameters(struct rtw89_dev *rtwdev, u8 central_ch,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (central_ch == 14) {
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF01, B_PCOEFF01, 0x3b13ff, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF23, B_PCOEFF23, 0x1c42de, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF45, B_PCOEFF45, 0xfdb0ad, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF67, B_PCOEFF67, 0xf60f6e, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF89, B_PCOEFF89, 0xfd8f92, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFAB, B_PCOEFFAB, 0x02d011, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFCD, B_PCOEFFCD, 0x01c02c, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFEF, B_PCOEFFEF, 0xfff00a, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF01, B_PCOEFF01, 0x3a63ca, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF23, B_PCOEFF23, 0x2a833f, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF45, B_PCOEFF45, 0x1491f8, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF67, B_PCOEFF67, 0x03c0b0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFF89, B_PCOEFF89, 0xfccff1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFAB, B_PCOEFFAB, 0xfccfc3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFCD, B_PCOEFFCD, 0xfebfdc, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PCOEFFEF, B_PCOEFFEF, 0xffdff7, phy_idx);
+ }
+}
+
+static void rtw8922a_ctrl_ch(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 band_sel[2] = {0x4160, 0x4560};
+ u16 central_freq = chan->freq;
+ u8 central_ch = chan->channel;
+ u8 band = chan->band_type;
+ bool is_2g = band == RTW89_BAND_2G;
+ u8 chan_idx;
+ u8 path;
+ u8 sco;
+
+ if (!central_freq) {
+ rtw89_warn(rtwdev, "Invalid central_freq\n");
+ return;
+ }
+
+ rtw8922a_set_gain(rtwdev, chan, RF_PATH_A, phy_idx);
+ rtw8922a_set_gain(rtwdev, chan, RF_PATH_B, phy_idx);
+
+ for (path = RF_PATH_A; path < BB_PATH_NUM_8922A; path++)
+ rtw89_phy_write32_idx(rtwdev, band_sel[path], BIT((26)), is_2g, phy_idx);
+
+ rtw8922a_set_rx_gain_normal(rtwdev, chan, RF_PATH_A);
+ rtw8922a_set_rx_gain_normal(rtwdev, chan, RF_PATH_B);
+
+ rtw89_phy_write32_idx(rtwdev, R_FC0, B_FC0, central_freq, phy_idx);
+ sco = DIV_ROUND_CLOSEST(1 << 18, central_freq);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_FC0_INV, sco, phy_idx);
+
+ if (band == RTW89_BAND_2G)
+ rtw8922a_set_cck_parameters(rtwdev, central_ch, phy_idx);
+
+ chan_idx = rtw89_encode_chan_idx(rtwdev, chan->primary_channel, band);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx, phy_idx);
+}
+
+static void
+rtw8922a_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_sb, u8 bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, pri_sb, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, pri_sb, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x1, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_BW, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_SMALLBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_CHBW_PRICH, pri_sb, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_DAC_CLK, B_DAC_CLK, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP0, B_GAIN_MAP0_EN, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_GAIN_MAP1, B_GAIN_MAP1_EN, 0x1, phy_idx);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri_sb:%d)\n", bw,
+ pri_sb);
+ break;
+ }
+
+ if (bw == RTW89_CHANNEL_WIDTH_40)
+ rtw89_phy_write32_idx(rtwdev, R_FC0, B_BW40_2XFFT, 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_FC0, B_BW40_2XFFT, 0, phy_idx);
+}
+
+static u32 rtw8922a_spur_freq(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ return 0;
+}
+
+#define CARRIER_SPACING_312_5 312500 /* 312.5 kHz */
+#define CARRIER_SPACING_78_125 78125 /* 78.125 kHz */
+#define MAX_TONE_NUM 2048
+
+static void rtw8922a_set_csi_tone_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s32 freq_diff, csi_idx, csi_tone_idx;
+ u32 spur_freq;
+
+ spur_freq = rtw8922a_spur_freq(rtwdev, chan);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_idx(rtwdev, R_S0S1_CSI_WGT, B_S0S1_CSI_WGT_EN,
+ 0, phy_idx);
+ return;
+ }
+
+ freq_diff = (spur_freq - chan->freq) * 1000000;
+ csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125);
+ s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx);
+
+ rtw89_phy_write32_idx(rtwdev, R_S0S1_CSI_WGT, B_S0S1_CSI_WGT_TONE_IDX,
+ csi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S0S1_CSI_WGT, B_S0S1_CSI_WGT_EN, 1, phy_idx);
+}
+
+static const struct rtw89_nbi_reg_def rtw8922a_nbi_reg_def[] = {
+ [RF_PATH_A] = {
+ .notch1_idx = {0x41a0, 0xFF},
+ .notch1_frac_idx = {0x41a0, 0xC00},
+ .notch1_en = {0x41a0, 0x1000},
+ .notch2_idx = {0x41ac, 0xFF},
+ .notch2_frac_idx = {0x41ac, 0xC00},
+ .notch2_en = {0x41ac, 0x1000},
+ },
+ [RF_PATH_B] = {
+ .notch1_idx = {0x45a0, 0xFF},
+ .notch1_frac_idx = {0x45a0, 0xC00},
+ .notch1_en = {0x45a0, 0x1000},
+ .notch2_idx = {0x45ac, 0xFF},
+ .notch2_frac_idx = {0x45ac, 0xC00},
+ .notch2_en = {0x45ac, 0x1000},
+ },
+};
+
+static void rtw8922a_set_nbi_tone_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_nbi_reg_def *nbi = &rtw8922a_nbi_reg_def[path];
+ s32 nbi_frac_idx, nbi_frac_tone_idx;
+ s32 nbi_idx, nbi_tone_idx;
+ bool notch2_chk = false;
+ u32 spur_freq, fc;
+ s32 freq_diff;
+
+ spur_freq = rtw8922a_spur_freq(rtwdev, chan);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 0, phy_idx);
+ return;
+ }
+
+ fc = chan->freq;
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160) {
+ fc = (spur_freq > fc) ? fc + 40 : fc - 40;
+ if ((fc > spur_freq &&
+ chan->channel < chan->primary_channel) ||
+ (fc < spur_freq &&
+ chan->channel > chan->primary_channel))
+ notch2_chk = true;
+ }
+
+ freq_diff = (spur_freq - fc) * 1000000;
+ nbi_idx = s32_div_u32_round_down(freq_diff, CARRIER_SPACING_312_5,
+ &nbi_frac_idx);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_20) {
+ s32_div_u32_round_down(nbi_idx + 32, 64, &nbi_tone_idx);
+ } else {
+ u16 tone_para = (chan->band_width == RTW89_CHANNEL_WIDTH_40) ?
+ 128 : 256;
+
+ s32_div_u32_round_down(nbi_idx, tone_para, &nbi_tone_idx);
+ }
+ nbi_frac_tone_idx =
+ s32_div_u32_round_closest(nbi_frac_idx, CARRIER_SPACING_78_125);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_160 && notch2_chk) {
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_idx.addr,
+ nbi->notch2_idx.mask, nbi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_frac_idx.addr,
+ nbi->notch2_frac_idx.mask, nbi_frac_tone_idx,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_idx.addr,
+ nbi->notch1_idx.mask, nbi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_frac_idx.addr,
+ nbi->notch1_frac_idx.mask, nbi_frac_tone_idx,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch1_en.addr,
+ nbi->notch1_en.mask, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, nbi->notch2_en.addr,
+ nbi->notch2_en.mask, 0, phy_idx);
+ }
+}
+
+static void rtw8922a_spur_elimination(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_csi_tone_idx(rtwdev, chan, phy_idx);
+ rtw8922a_set_nbi_tone_idx(rtwdev, chan, RF_PATH_A, phy_idx);
+ rtw8922a_set_nbi_tone_idx(rtwdev, chan, RF_PATH_B, phy_idx);
+}
+
+static void rtw8922a_ctrl_afe_dac(struct rtw89_dev *rtwdev, enum rtw89_bandwidth bw,
+ enum rtw89_rf_path path)
+{
+ u32 cr_ofst = 0x0;
+
+ if (path == RF_PATH_B)
+ cr_ofst = 0x100;
+
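+	/* One DAC setting covers bandwidths up to 80 MHz; 160 MHz needs its own */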
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ case RTW89_CHANNEL_WIDTH_40:
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC0 + cr_ofst, B_AFEDAC0, 0xE);
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC1 + cr_ofst, B_AFEDAC1, 0x7);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC0 + cr_ofst, B_AFEDAC0, 0xD);
+ rtw89_phy_write32_mask(rtwdev, R_AFEDAC1 + cr_ofst, B_AFEDAC1, 0x6);
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct rtw89_reg2_def bb_mcu0_init_reg[] = {
+ {0x6990, 0x00000000},
+ {0x6994, 0x00000000},
+ {0x6998, 0x00000000},
+ {0x6820, 0xFFFFFFFE},
+ {0x6800, 0xC0000FFE},
+ {0x6808, 0x76543210},
+ {0x6814, 0xBFBFB000},
+ {0x6818, 0x0478C009},
+ {0x6800, 0xC0000FFF},
+ {0x6820, 0xFFFFFFFF},
+};
+
+static const struct rtw89_reg2_def bb_mcu1_init_reg[] = {
+ {0x6990, 0x00000000},
+ {0x6994, 0x00000000},
+ {0x6998, 0x00000000},
+ {0x6820, 0xFFFFFFFE},
+ {0x6800, 0xC0000FFE},
+ {0x6808, 0x76543210},
+ {0x6814, 0xBFBFB000},
+ {0x6818, 0x0478C009},
+ {0x6800, 0xC0000FFF},
+ {0x6820, 0xFFFFFFFF},
+};
+
+static void rtw8922a_bbmcu_cr_init(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_reg2_def *reg;
+ int size;
+ int i;
+
+ if (phy_idx == RTW89_PHY_0) {
+ reg = bb_mcu0_init_reg;
+ size = ARRAY_SIZE(bb_mcu0_init_reg);
+ } else {
+ reg = bb_mcu1_init_reg;
+ size = ARRAY_SIZE(bb_mcu1_init_reg);
+ }
+
+ for (i = 0; i < size; i++, reg++)
+ rtw89_bbmcu_write32(rtwdev, reg->addr, reg->data, phy_idx);
+}
+
+static const u32 dmac_sys_mask[2] = {B_BE_DMAC_BB_PHY0_MASK, B_BE_DMAC_BB_PHY1_MASK};
+static const u32 bbrst_mask[2] = {B_BE_FEN_BBPLAT_RSTB, B_BE_FEN_BB1PLAT_RSTB};
+static const u32 glbrst_mask[2] = {B_BE_FEN_BB_IP_RSTN, B_BE_FEN_BB1_IP_RSTN};
+static const u32 mcu_bootrdy_mask[2] = {B_BE_BOOT_RDY0, B_BE_BOOT_RDY1};
+
+static void rtw8922a_bb_preinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u32 rdy = 0;
+
+ if (phy_idx == RTW89_PHY_1)
+ rdy = 1;
+
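+	/* Pulse the BB global reset, flag boot-ready (PHY1 only) and take the
+	 * BB MCU memory out of deep sleep before loading its CRs.
+	 */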
+ rtw89_write32_mask(rtwdev, R_BE_DMAC_SYS_CR32B, dmac_sys_mask[phy_idx], 0x7FF9);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, glbrst_mask[phy_idx], 0x0);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, bbrst_mask[phy_idx], 0x0);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, glbrst_mask[phy_idx], 0x1);
+ rtw89_write32_mask(rtwdev, R_BE_FEN_RST_ENABLE, mcu_bootrdy_mask[phy_idx], rdy);
+ rtw89_write32_mask(rtwdev, R_BE_MEM_PWR_CTRL, B_BE_MEM_BBMCU0_DS_V1, 0);
+
+ fsleep(1);
+ rtw8922a_bbmcu_cr_init(rtwdev, phy_idx);
+}
+
+static void rtw8922a_bb_postinit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ if (phy_idx == RTW89_PHY_0)
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, mcu_bootrdy_mask[phy_idx]);
+ rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, bbrst_mask[phy_idx]);
+
+ rtw89_phy_write32_set(rtwdev, R_BBCLK, B_CLK_640M);
+ rtw89_phy_write32_clr(rtwdev, R_TXSCALE, B_TXFCTR_EN);
+ rtw89_phy_set_phy_regs(rtwdev, R_TXFCTR, B_TXFCTR_THD, 0x200);
+ rtw89_phy_set_phy_regs(rtwdev, R_SLOPE, B_EHT_RATE_TH, 0xA);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE, B_HE_RATE_TH, 0xA);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE2, B_HT_VHT_TH, 0xAAA);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE, B_EHT_MCS14, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE2, B_EHT_MCS15, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_EHTTB_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_HEERSU_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_HEMU_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE3, B_TB_EN, 0x0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SU_PUNC, B_SU_PUNC_EN, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE5, B_HWGEN_EN, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_BEDGE5, B_PWROFST_COMP, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_MAG_AB, B_BY_SLOPE, 0x1);
+ rtw89_phy_set_phy_regs(rtwdev, R_MAG_A, B_MGA_AEND, 0xe0);
+ rtw89_phy_set_phy_regs(rtwdev, R_MAG_AB, B_MAG_AB, 0xe0c000);
+ rtw89_phy_set_phy_regs(rtwdev, R_SLOPE, B_SLOPE_A, 0x3FE0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SLOPE, B_SLOPE_B, 0x3FE0);
+ rtw89_phy_set_phy_regs(rtwdev, R_SC_CORNER, B_SC_CORNER, 0x200);
+ rtw89_phy_write32_idx(rtwdev, R_UDP_COEEF, B_UDP_COEEF, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_UDP_COEEF, B_UDP_COEEF, 0x1, phy_idx);
+}
+
+static void rtw8922a_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band,
+ bool en, enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ if (band == RTW89_BAND_2G)
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1,
+ B_RXCCA_BE1_DIS, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1, B_RXCCA_BE1_DIS, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1, phy_idx);
+ fsleep(1);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx);
+ }
+}
+
+static int rtw8922a_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path tx_path,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_reg2_def path_com_cr[] = {
+ {0x11A00, 0x21C86900},
+ {0x11A04, 0x00E4E433},
+ {0x11A08, 0x39390CC9},
+ {0x11A0C, 0x4E433240},
+ {0x11A10, 0x90CC900E},
+ {0x11A14, 0x00240393},
+ {0x11A18, 0x201C8600},
+ };
+ int ret = 0;
+ u32 reg;
+ int i;
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL, 0x0, phy_idx);
+
+ if (phy_idx == RTW89_PHY_1 && !rtwdev->dbcc_en)
+ return 0;
+
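+	/* The table defaults to dual-path (A+B) values; patch the entries
+	 * that differ for single-path TX.
+	 */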
+ if (tx_path == RF_PATH_A) {
+ path_com_cr[0].data = 0x21C82900;
+ path_com_cr[1].data = 0x00E4E431;
+ path_com_cr[2].data = 0x39390C49;
+ path_com_cr[3].data = 0x4E431240;
+ path_com_cr[4].data = 0x90C4900E;
+ path_com_cr[6].data = 0x201C8200;
+ } else if (tx_path == RF_PATH_B) {
+ path_com_cr[0].data = 0x21C04900;
+ path_com_cr[1].data = 0x00E4E032;
+ path_com_cr[2].data = 0x39380C89;
+ path_com_cr[3].data = 0x4E032240;
+ path_com_cr[4].data = 0x80C8900E;
+ path_com_cr[6].data = 0x201C0400;
+ } else if (tx_path == RF_PATH_AB) {
+ path_com_cr[0].data = 0x21C86900;
+ path_com_cr[1].data = 0x00E4E433;
+ path_com_cr[2].data = 0x39390CC9;
+ path_com_cr[3].data = 0x4E433240;
+ path_com_cr[4].data = 0x90CC900E;
+ path_com_cr[6].data = 0x201C8600;
+ } else {
+ ret = -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(path_com_cr); i++) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, path_com_cr[i].addr, phy_idx);
+ rtw89_write32(rtwdev, reg, path_com_cr[i].data);
+ }
+
+ return ret;
+}
+
+static void rtw8922a_bb_reset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+}
+
+static int rtw8922a_cfg_rx_nss_limit(struct rtw89_dev *rtwdev, u8 rx_nss,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (rx_nss == 1) {
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_HTMCS_LMT, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_VHTMCS_LMT, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_N_USR_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_NSS_MAX, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_TB_NSS_MAX, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_EHT, B_RXEHT_NSS_MAX, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHTTB_NSS_MAX, 0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHT_N_USER_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ } else if (rx_nss == 2) {
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_HTMCS_LMT, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_R, B_VHTMCS_LMT, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_N_USR_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_NSS_MAX, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_HE, B_TB_NSS_MAX, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_EHT, B_RXEHT_NSS_MAX, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHTTB_NSS_MAX, 1,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BRK_RXEHT, B_RXEHT_N_USER_MAX,
+ HE_N_USER_MAX_8922A, phy_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void rtw8922a_tssi_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path,
+ enum rtw89_phy_idx phy_idx)
+{
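+	/* In MLO 1+1 single-RF mode each PHY owns one path, so reset only
+	 * that PHY's TSSI; otherwise reset both paths.
+	 */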
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x1);
+ }
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTA, B_TXPWR_RSTA, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXPWR_RSTB, B_TXPWR_RSTB, 0x1);
+ }
+}
+
+static int rtw8922a_ctrl_rx_path_tmac(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rx_path,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 rx_nss = (rx_path == RF_PATH_AB) ? 2 : 1;
+
+ /* Set to 0 first to avoid abnormal EDCCA report */
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x0, phy_idx);
+
+ if (rx_path == RF_PATH_A) {
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_1RCCA, 1, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, rx_path, phy_idx);
+ } else if (rx_path == RF_PATH_B) {
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_1RCCA, 2, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, rx_path, phy_idx);
+ } else if (rx_path == RF_PATH_AB) {
+ rtw89_phy_write32_idx(rtwdev, R_ANT_CHBW, B_ANT_RX_SG0, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FC0INV_SBW, B_RX_1RCCA, 3, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, rx_path, phy_idx);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtw8922a_ctrl_mlo(struct rtw89_dev *rtwdev, enum rtw89_mlo_dbcc_mode mode)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
+ if (mode == MLO_1_PLUS_1_1RF || mode == DBCC_LEGACY) {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DBCC_FA, B_DBCC_FA, 0x0);
+ } else if (mode == MLO_2_PLUS_0_1RF || mode == MLO_0_PLUS_2_1RF ||
+ mode == MLO_DBCC_NOT_SUPPORT) {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DBCC_FA, B_DBCC_FA, 0x1);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (mode == MLO_2_PLUS_0_1RF) {
+ rtw8922a_ctrl_afe_dac(rtwdev, chan->band_width, RF_PATH_A);
+ rtw8922a_ctrl_afe_dac(rtwdev, chan->band_width, RF_PATH_B);
+ } else {
+ rtw89_warn(rtwdev, "unsupported MLO mode %d\n", mode);
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x6180);
+
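+	/* Step the EMLSR parameter through intermediate states toward the
+	 * final value for this MLO mode.
+	 */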
+ if (mode == MLO_2_PLUS_0_1RF) {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xABA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEBA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEAA9);
+ } else if (mode == MLO_0_PLUS_2_1RF) {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xAFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEEFF);
+ } else if ((mode == MLO_1_PLUS_1_1RF) || (mode == DBCC_LEGACY)) {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x7BAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x3BAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x3AAB);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x180);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x0);
+ }
+
+ return 0;
+}
+
+static void rtw8922a_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ u32 reg;
+
+ rtw89_phy_write32_clr(rtwdev, R_EN_SND_WO_NDP, B_EN_SND_WO_NDP);
+ rtw89_phy_write32_clr(rtwdev, R_EN_SND_WO_NDP_C1, B_EN_SND_WO_NDP);
+
+ rtw89_write32_mask(rtwdev, R_BE_PWR_BOOST, B_BE_PWR_CTRL_SEL, 0);
+ if (rtwdev->dbcc_en) {
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_BOOST, RTW89_MAC_1);
+ rtw89_write32_mask(rtwdev, reg, B_BE_PWR_CTRL_SEL, 0);
+ }
+
+ rtw8922a_ctrl_mlo(rtwdev, rtwdev->mlo_dbcc_mode);
+}
+
+static void rtw8922a_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (cck_en) {
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1, B_RXCCA_BE1_DIS, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF, B_PD_ARBITER_OFF,
+ 0, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_RXCCA_BE1, B_RXCCA_BE1_DIS, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_ARBITER_OFF, B_PD_ARBITER_OFF,
+ 1, phy_idx);
+ }
+}
+
+static void rtw8922a_set_channel_bb(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ bool cck_en = chan->band_type == RTW89_BAND_2G;
+ u8 pri_sb = chan->pri_sb_idx;
+
+ if (cck_en)
+ rtw8922a_ctrl_sco_cck(rtwdev, chan->primary_channel,
+ chan->band_width, phy_idx);
+
+ rtw8922a_ctrl_ch(rtwdev, chan, phy_idx);
+ rtw8922a_ctrl_bw(rtwdev, pri_sb, chan->band_width, phy_idx);
+ rtw8922a_ctrl_cck_en(rtwdev, cck_en, phy_idx);
+ rtw8922a_spur_elimination(rtwdev, chan, phy_idx);
+
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ rtw8922a_tssi_reset(rtwdev, RF_PATH_AB, phy_idx);
+}
+
+static void rtw8922a_pre_set_channel_bb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (!rtwdev->dbcc_en)
+ return;
+
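+	/* Before switching channel under DBCC, disable DBCC and walk EMLSR
+	 * to the corresponding single-PHY configuration.
+	 */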
+ if (phy_idx == RTW89_PHY_0) {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x6180);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xABA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEBA9);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEAA9);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xBBAB);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xAFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEFFF);
+ rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0xEEFF);
+ }
+}
+
+static void rtw8922a_post_set_channel_bb(struct rtw89_dev *rtwdev,
+ enum rtw89_mlo_dbcc_mode mode)
+{
+ if (!rtwdev->dbcc_en)
+ return;
+
+ rtw8922a_ctrl_mlo(rtwdev, mode);
+}
+
+static void rtw8922a_set_channel(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8922a_set_channel_bb(rtwdev, chan, phy_idx);
+ rtw8922a_set_channel_rf(rtwdev, chan, phy_idx);
+}
+
+static void rtw8922a_dfs_en_idx(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, enum rtw89_rf_path path,
+ bool en)
+{
+ u32 path_ofst = (path == RF_PATH_B) ? 0x100 : 0x0;
+
+	rtw89_phy_write32_idx(rtwdev, 0x2800 + path_ofst, BIT(1), en ? 1 : 0,
+			      phy_idx);
+}
+
+static void rtw8922a_dfs_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_dfs_en_idx(rtwdev, phy_idx, RF_PATH_A, en);
+ rtw8922a_dfs_en_idx(rtwdev, phy_idx, RF_PATH_B, en);
+}
+
+static void rtw8922a_adc_en_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool en)
+{
+ u32 val;
+
+ val = rtw89_phy_read32_mask(rtwdev, R_ADC_FIFO_V1, B_ADC_FIFO_EN_V1);
+
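+	/* The FIFO enable bits act as disables: clear a path's bit to turn
+	 * its ADC on, set it to turn it off.
+	 */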
+ if (en) {
+ if (path == RF_PATH_A)
+ val &= ~0x1;
+ else
+ val &= ~0x2;
+ } else {
+ if (path == RF_PATH_A)
+ val |= 0x1;
+ else
+ val |= 0x2;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO_V1, B_ADC_FIFO_EN_V1, val);
+}
+
+static void rtw8922a_adc_en(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+{
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_A, en);
+ else
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_B, en);
+ } else {
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_A, en);
+ rtw8922a_adc_en_path(rtwdev, RF_PATH_B, en);
+ }
+}
+
+static
+void rtw8922a_hal_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, enum rtw89_mac_idx mac_idx,
+ enum rtw89_band band, u32 *tx_en, bool enter)
+{
+ if (enter) {
+ rtw89_chip_stop_sch_tx(rtwdev, mac_idx, tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, false);
+ rtw8922a_dfs_en(rtwdev, false, phy_idx);
+ rtw8922a_tssi_cont_en_phyidx(rtwdev, false, phy_idx);
+ rtw8922a_adc_en(rtwdev, false, phy_idx);
+ fsleep(40);
+ rtw8922a_bb_reset_en(rtwdev, band, false, phy_idx);
+ } else {
+ rtw89_mac_cfg_ppdu_status(rtwdev, mac_idx, true);
+ rtw8922a_adc_en(rtwdev, true, phy_idx);
+ rtw8922a_dfs_en(rtwdev, true, phy_idx);
+ rtw8922a_tssi_cont_en_phyidx(rtwdev, true, phy_idx);
+ rtw8922a_bb_reset_en(rtwdev, band, true, phy_idx);
+ rtw89_chip_resume_sch_tx(rtwdev, mac_idx, *tx_en);
+ }
+}
+
+static void rtw8922a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
+ struct rtw89_channel_help_params *p,
+ const struct rtw89_chan *chan,
+ enum rtw89_mac_idx mac_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (enter) {
+ rtw8922a_pre_set_channel_bb(rtwdev, phy_idx);
+ rtw8922a_pre_set_channel_rf(rtwdev, phy_idx);
+ }
+
+ rtw8922a_hal_reset(rtwdev, phy_idx, mac_idx, chan->band_type, &p->tx_en, enter);
+
+ if (!enter) {
+ rtw8922a_post_set_channel_bb(rtwdev, rtwdev->mlo_dbcc_mode);
+ rtw8922a_post_set_channel_rf(rtwdev, phy_idx);
+ }
+}
+
+static void rtw8922a_rfk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+ memset(rfk_mcc, 0, sizeof(*rfk_mcc));
+}
+
+static void rtw8922a_rfk_init_late(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5);
+
+ rtw89_phy_rfk_dack_and_wait(rtwdev, RTW89_PHY_0, 58);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+}
+
+static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
+{
+ u32 rf_mode;
+ u8 path;
+ int ret;
+
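+	/* Poll until each calibrated path leaves TX (RF mode 2) and settles in RX */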
+ for (path = 0; path < RF_PATH_NUM_8922A; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
+ 2, 5000, false, rtwdev, path, 0x00,
+ RR_MOD_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
+ path, ret);
+ }
+}
+
+static void rtw8922a_rfk_channel(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+ u32 tx_en;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, RF_AB);
+
+ rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5);
+ rtw89_phy_rfk_txgapk_and_wait(rtwdev, phy_idx, 54);
+ rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, 84);
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_NORMAL, 6);
+ rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, 34);
+ rtw89_phy_rfk_rxdck_and_wait(rtwdev, RTW89_PHY_0, 32);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_CHLK, BTC_WRFK_STOP);
+}
+
+static void rtw8922a_rfk_band_changed(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_rfk_tssi_and_wait(rtwdev, phy_idx, RTW89_TSSI_SCAN, 6);
+}
+
+static void rtw8922a_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+{
+}
+
+static void rtw8922a_rfk_track(struct rtw89_dev *rtwdev)
+{
+}
+
+static void rtw8922a_set_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ s16 ref_ofdm = 0;
+ s16 ref_cck = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n");
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_BE_PWR_REF_CTRL,
+ B_BE_PWR_REF_CTRL_OFDM, ref_ofdm);
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_BE_PWR_REF_CTRL,
+ B_BE_PWR_REF_CTRL_CCK, ref_cck);
+}
+
+static void rtw8922a_bb_tx_triangular(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 ctrl = en ? 0x1 : 0x0;
+
+ rtw89_phy_write32_idx(rtwdev, R_BEDGE3, B_BEDGE_CFG, ctrl, phy_idx);
+}
+
+static void rtw8922a_set_tx_shape(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
+ const struct rtw89_tx_shape *tx_shape = &rfe_parms->tx_shape;
+ u8 tx_shape_idx;
+ u8 band, regd;
+
+ band = chan->band_type;
+ regd = rtw89_regd_get(rtwdev, band);
+ tx_shape_idx = (*tx_shape->lmt)[band][RTW89_RS_OFDM][regd];
+
+ if (tx_shape_idx == 0)
+ rtw8922a_bb_tx_triangular(rtwdev, false, phy_idx);
+ else
+ rtw8922a_bb_tx_triangular(rtwdev, true, phy_idx);
+}
+
+static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8922a_set_tx_shape(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+}
+
+static void rtw8922a_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_set_txpwr_ref(rtwdev, phy_idx);
+}
+
+static void rtw8922a_ctrl_trx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path tx_path, u8 tx_nss,
+ enum rtw89_rf_path rx_path, u8 rx_nss)
+{
+ enum rtw89_phy_idx phy_idx;
+
+ for (phy_idx = RTW89_PHY_0; phy_idx <= RTW89_PHY_1; phy_idx++) {
+ rtw8922a_ctrl_tx_path_tmac(rtwdev, tx_path, phy_idx);
+ rtw8922a_ctrl_rx_path_tmac(rtwdev, rx_path, phy_idx);
+ rtw8922a_cfg_rx_nss_limit(rtwdev, rx_nss, phy_idx);
+ }
+}
+
+static void rtw8922a_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_A, B_FORCE_FIR_A, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_A, B_RXBY_WBADC_A,
+ 0xf, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_A, B_BT_RXBY_WBADC_A,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_TRK_OFF_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB_A, B_OP1DB_A, 0x80, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB1_A, B_TIA10_A, 0x8080, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_A, B_LNA_IBADC_A, 0x34, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_A, B_BKOFF_IBADC_A, 0x34, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_B, B_FORCE_FIR_B, 0x3, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_B, B_RXBY_WBADC_B,
+ 0xf, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_B, B_BT_RXBY_WBADC_B,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_TRK_OFF_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x80, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA10_B, 0x8080, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_B, B_LNA_IBADC_B, 0x34, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_B, B_BKOFF_IBADC_B, 0x34, phy_idx);
+ } else {
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_A, B_FORCE_FIR_A, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_A, B_RXBY_WBADC_A,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_A, B_BT_RXBY_WBADC_A,
+ 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_A, B_BT_TRK_OFF_A, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB_A, B_OP1DB_A, 0x1a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_OP1DB1_A, B_TIA10_A, 0x2a2a, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_A, B_LNA_IBADC_A, 0x7a6, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_A, B_BKOFF_IBADC_A, 0x26, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_FORCE_FIR_B, B_FORCE_FIR_B, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RXBY_WBADC_B, B_RXBY_WBADC_B,
+ 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_RXBY_WBADC_B, B_BT_RXBY_WBADC_B,
+ 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BT_SHARE_B, B_BT_TRK_OFF_B, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_OP, B_LNA6, 0x20, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_LNA_TIA, B_TIA10_B, 0x2a30, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BACKOFF_B, B_LNA_IBADC_B, 0x7a6, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BKOFF_B, B_BKOFF_IBADC_B, 0x26, phy_idx);
+ }
+}
+
+static void rtw8922a_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 ntx_path = RF_PATH_AB;
+ u32 tx_en0, tx_en1;
+
+ if (hal->antenna_tx == RF_A)
+ ntx_path = RF_PATH_A;
+ else if (hal->antenna_tx == RF_B)
+ ntx_path = RF_PATH_B;
+
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_0, RTW89_MAC_0, band, &tx_en0, true);
+ if (rtwdev->dbcc_en)
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_1, RTW89_MAC_1, band,
+ &tx_en1, true);
+
+ rtw8922a_ctrl_trx_path(rtwdev, ntx_path, 2, RF_PATH_AB, 2);
+
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_0, RTW89_MAC_0, band, &tx_en0, false);
+ if (rtwdev->dbcc_en)
+ rtw8922a_hal_reset(rtwdev, RTW89_PHY_1, RTW89_MAC_1, band,
+				   &tx_en1, false);
+}
+
+static u8 rtw8922a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ int th;
+
+ /* read thermal only if debugging */
+ if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_CFO | RTW89_DBG_RFK_TRACK))
+ return 80;
+
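+	/* Pulse the trigger bit to start a one-shot thermal measurement */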
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+
+ fsleep(200);
+
+ th = rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL_V1);
+ th += (s8)info->thermal_trim[rf_path];
+
+ return clamp_t(int, th, 0, U8_MAX);
+}
+
+static void rtw8922a_btc_set_rfe(struct rtw89_dev *rtwdev)
+{
+ union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo;
+ struct rtw89_btc_module_v7 *module = &md->md_v7;
+
+ module->rfe_type = rtwdev->efuse.rfe_type;
+ module->kt_ver = rtwdev->hal.cv;
+ module->bt_solo = 0;
+ module->switch_type = BTC_SWITCH_INTERNAL;
+ module->wa_type = 0;
+
+ module->ant.type = BTC_ANT_SHARED;
+ module->ant.num = 2;
+ module->ant.isolation = 10;
+ module->ant.diversity = 0;
+ module->ant.single_pos = RF_PATH_A;
+ module->ant.btg_pos = RF_PATH_B;
+
+ if (module->kt_ver <= 1)
+ module->wa_type |= BTC_WA_HFP_ZB;
+
+ rtwdev->btc.cx.other.type = BTC_3CX_NONE;
+
+ if (module->rfe_type == 0) {
+ rtwdev->btc.dm.error.map.rfe_type0 = true;
+ return;
+ }
+
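+	/* Odd RFE types use a 2-antenna shared layout; even types have a
+	 * third, dedicated BT antenna.
+	 */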
+ module->ant.num = (module->rfe_type % 2) ? 2 : 3;
+
+ if (module->kt_ver == 0)
+ module->ant.num = 2;
+
+ if (module->ant.num == 3) {
+ module->ant.type = BTC_ANT_DEDICATED;
+ module->bt_pos = BTC_BT_ALONE;
+ } else {
+ module->ant.type = BTC_ANT_SHARED;
+ module->bt_pos = BTC_BT_BTG;
+ }
+ rtwdev->btc.btg_pos = module->ant.btg_pos;
+ rtwdev->btc.ant_type = module->ant.type;
+}
+
+static
+void rtw8922a_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
+{
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val);
+}
+
+static void rtw8922a_btc_init_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_ant_info_v7 *ant = &btc->mdinfo.md_v7.ant;
+ u32 wl_pri, path_min, path_max;
+ u8 path;
+
+ /* for 1-Ant && 1-ss case: only 1-path */
+ if (ant->num == 1) {
+ path_min = ant->single_pos;
+ path_max = path_min;
+ } else {
+ path_min = RF_PATH_A;
+ path_max = RF_PATH_B;
+ }
+
+ for (path = path_min; path <= path_max; path++) {
+ /* set DEBUG_LUT_RFMODE_MASK = 1 to start trx-mask-setup */
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, BIT(17));
+
+ /* if GNT_WL=0 && BT=SS_group --> WL Tx/Rx = THRU */
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_SS_GROUP, 0x5ff);
+
+ /* if GNT_WL=0 && BT=Rx_group --> WL-Rx = THRU + WL-Tx = MASK */
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_RX_GROUP, 0x5df);
+
+ /* if GNT_WL = 0 && BT = Tx_group -->
+ * Shared-Ant && BTG-path:WL mask(0x55f), others:WL THRU(0x5ff)
+ */
+ if (btc->ant_type == BTC_ANT_SHARED && btc->btg_pos == path)
+			rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x55f);
+ else
+ rtw8922a_set_trx_mask(rtwdev, path, BTC_BT_TX_GROUP, 0x5ff);
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0);
+ }
+
+	/* set WL PTA Hi-Pri: Ack-Tx, beacon-Tx, Trig-frame-Tx, Null-Tx */
+ wl_pri = B_BTC_RSP_ACK_HI | B_BTC_TX_BCN_HI | B_BTC_TX_TRI_HI |
+ B_BTC_TX_NULL_HI;
+ rtw89_write32(rtwdev, R_BTC_COEX_WL_REQ_BE, wl_pri);
+
+ /* set PTA break table */
+ rtw89_write32(rtwdev, R_BE_BT_BREAK_TABLE, BTC_BREAK_PARAM);
+
+	/* ZB coex table init for HFP PTA req-cmd bit-4 define issue COEX-900 */
+	rtw89_write32(rtwdev, R_BTC_ZB_COEX_TBL_0, 0xda5a5a5a);
+	rtw89_write32(rtwdev, R_BTC_ZB_COEX_TBL_1, 0xda5a5a5a);
+
+ rtw89_write32(rtwdev, R_BTC_ZB_BREAK_TBL, 0xf0ffffff);
+ btc->cx.wl.status.map.init_ok = true;
+}
+
+static void rtw8922a_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 chan_idx = phy_ppdu->chan_idx;
+ enum nl80211_band band;
+ u8 ch;
+
+ if (chan_idx == 0)
+ return;
+
+ rtw89_decode_chan_idx(rtwdev, chan_idx, &ch, &band);
+ status->freq = ieee80211_channel_to_frequency(ch, band);
+ status->band = band;
+}
+
+static void rtw8922a_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 path;
+ u8 *rx_power = phy_ppdu->rssi;
+
+ status->signal =
+ RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ status->chains |= BIT(path);
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
+ }
+ if (phy_ppdu->valid)
+ rtw8922a_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
+}
+
+static int rtw8922a_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ rtw89_write8_set(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_FEN_BBPLAT_RSTB | B_BE_FEN_BB_IP_RSTN);
+ rtw89_write32(rtwdev, R_BE_DMAC_SYS_CR32B, 0x7FF97FF9);
+
+ return 0;
+}
+
+static int rtw8922a_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ rtw89_write8_clr(rtwdev, R_BE_FEN_RST_ENABLE,
+ B_BE_FEN_BBPLAT_RSTB | B_BE_FEN_BB_IP_RSTN);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
@@ -610,10 +2317,56 @@ static const struct wiphy_wowlan_support rtw_wowlan_stub_8922a = {
#endif
static const struct rtw89_chip_ops rtw8922a_chip_ops = {
+ .enable_bb_rf = rtw8922a_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8922a_mac_disable_bb_rf,
+ .bb_preinit = rtw8922a_bb_preinit,
+ .bb_postinit = rtw8922a_bb_postinit,
+ .bb_reset = rtw8922a_bb_reset,
+ .bb_sethw = rtw8922a_bb_sethw,
+ .read_rf = rtw89_phy_read_rf_v2,
+ .write_rf = rtw89_phy_write_rf_v2,
+ .set_channel = rtw8922a_set_channel,
+ .set_channel_help = rtw8922a_set_channel_help,
.read_efuse = rtw8922a_read_efuse,
.read_phycap = rtw8922a_read_phycap,
+ .fem_setup = NULL,
+ .rfe_gpio = NULL,
+ .rfk_hw_init = rtw8922a_rfk_hw_init,
+ .rfk_init = rtw8922a_rfk_init,
+ .rfk_init_late = rtw8922a_rfk_init_late,
+ .rfk_channel = rtw8922a_rfk_channel,
+ .rfk_band_changed = rtw8922a_rfk_band_changed,
+ .rfk_scan = rtw8922a_rfk_scan,
+ .rfk_track = rtw8922a_rfk_track,
+ .power_trim = rtw8922a_power_trim,
+ .set_txpwr = rtw8922a_set_txpwr,
+ .set_txpwr_ctrl = rtw8922a_set_txpwr_ctrl,
+ .init_txpwr_unit = NULL,
+ .get_thermal = rtw8922a_get_thermal,
+ .ctrl_btg_bt_rx = rtw8922a_ctrl_btg_bt_rx,
+ .query_ppdu = rtw8922a_query_ppdu,
+ .ctrl_nbtg_bt_tx = rtw8922a_ctrl_nbtg_bt_tx,
+ .cfg_txrx_path = rtw8922a_bb_cfg_txrx_path,
+ .set_txpwr_ul_tb_offset = NULL,
.pwr_on_func = rtw8922a_pwr_on_func,
.pwr_off_func = rtw8922a_pwr_off_func,
+ .query_rxdesc = rtw89_core_query_rxdesc_v2,
+ .fill_txdesc = rtw89_core_fill_txdesc_v2,
+ .fill_txdesc_fwcmd = rtw89_core_fill_txdesc_fwcmd_v2,
+ .cfg_ctrl_path = rtw89_mac_cfg_ctrl_path_v2,
+ .mac_cfg_gnt = rtw89_mac_cfg_gnt_v2,
+ .stop_sch_tx = rtw89_mac_stop_sch_tx_v2,
+ .resume_sch_tx = rtw89_mac_resume_sch_tx_v2,
+ .h2c_dctl_sec_cam = rtw89_fw_h2c_dctl_sec_cam_v2,
+ .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl_g7,
+ .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl_g7,
+ .h2c_ampdu_cmac_tbl = rtw89_fw_h2c_ampdu_cmac_tbl_g7,
+ .h2c_default_dmac_tbl = rtw89_fw_h2c_default_dmac_tbl_v2,
+ .h2c_update_beacon = rtw89_fw_h2c_update_beacon_be,
+ .h2c_ba_cam = rtw89_fw_h2c_ba_cam_v1,
+
+ .btc_set_rfe = rtw8922a_btc_set_rfe,
+ .btc_init_cfg = rtw8922a_btc_init_cfg,
};
const struct rtw89_chip_info rtw8922a_chip_info = {
@@ -650,11 +2403,16 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = NULL,
+ .dig_regs = &rtw8922a_dig_regs,
.tssi_dbw_table = NULL,
- .support_chanctx_num = 1,
+ .support_chanctx_num = 2,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
+ .support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
.support_unii4 = true,
.ul_tb_waveform_ctrl = false,
.ul_tb_pwr_diff = false,
@@ -665,7 +2423,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.acam_num = 128,
.bcam_num = 20,
.scam_num = 32,
- .bacam_num = 8,
+ .bacam_num = 24,
.bacam_dynamic_num = 8,
.bacam_ver = RTW89_BACAM_V1,
.ppdu_max_usr = 16,
@@ -683,10 +2441,19 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
.low_power_hci_modes = 0,
+ .h2c_cctl_func_id = H2C_FUNC_MAC_CCTLINFO_UD_G7,
.hci_func_en_addr = R_BE_HCI_FUNC_EN,
.h2c_desc_size = sizeof(struct rtw89_rxdesc_short_v2),
.txwd_body_size = sizeof(struct rtw89_txwd_body_v2),
.txwd_info_size = sizeof(struct rtw89_txwd_info_v2),
+ .h2c_ctrl_reg = R_BE_H2CREG_CTRL,
+ .h2c_counter_reg = {R_BE_UDM1 + 1, B_BE_UDM1_HALMAC_H2C_DEQ_CNT_MASK >> 8},
+ .h2c_regs = rtw8922a_h2c_regs,
+ .c2h_ctrl_reg = R_BE_C2HREG_CTRL,
+ .c2h_counter_reg = {R_BE_UDM1 + 1, B_BE_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
+ .c2h_regs = rtw8922a_c2h_regs,
+ .page_regs = &rtw8922a_page_regs,
+ .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = NULL,
@@ -694,9 +2461,11 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.imr_info = NULL,
.imr_dmac_table = &rtw8922a_imr_dmac_table,
.imr_cmac_table = &rtw8922a_imr_cmac_table,
+ .rrsr_cfgs = &rtw8922a_rrsr_cfgs,
.bss_clr_vld = {R_BSS_CLR_VLD_V2, B_BSS_CLR_VLD0_V2},
.bss_clr_map_reg = R_BSS_CLR_MAP_V2,
.dma_ch_mask = 0,
+ .edcca_regs = &rtw8922a_edcca_regs,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8922a,
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
new file mode 100644
index 000000000000..2a371829268c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#include "chan.h"
+#include "debug.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8922a.h"
+#include "rtw8922a_rfk.h"
+
+static void rtw8922a_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_rf_path path)
+{
+ static const u32 tssi_trk_man[2] = {R_TSSI_PWR_P0, R_TSSI_PWR_P1};
+
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, tssi_trk_man[path], B_TSSI_CONT_EN, 0);
+ else
+ rtw89_phy_write32_mask(rtwdev, tssi_trk_man[path], B_TSSI_CONT_EN, 1);
+}
+
+void rtw8922a_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
+{
+ if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ else
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ } else {
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ }
+}
+
+static
+void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ u8 central_ch, enum rtw89_band band,
+ enum rtw89_bandwidth bw)
+{
+ const u32 rf_addr[2] = {RR_CFGCH, RR_CFGCH_V1};
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 rf_reg[RF_PATH_NUM_8922A][2];
+ u8 synpath;
+ u32 rf18;
+ u8 kpath;
+ u8 path;
+ u8 i;
+
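+	/* CFGCH is exposed through two register views; cache both for each
+	 * path before rebuilding the band/bandwidth/channel fields.
+	 */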
+ rf_reg[RF_PATH_A][0] = rtw89_read_rf(rtwdev, RF_PATH_A, rf_addr[0], RFREG_MASK);
+ rf_reg[RF_PATH_A][1] = rtw89_read_rf(rtwdev, RF_PATH_A, rf_addr[1], RFREG_MASK);
+ rf_reg[RF_PATH_B][0] = rtw89_read_rf(rtwdev, RF_PATH_B, rf_addr[0], RFREG_MASK);
+ rf_reg[RF_PATH_B][1] = rtw89_read_rf(rtwdev, RF_PATH_B, rf_addr[1], RFREG_MASK);
+
+ kpath = rtw89_phy_get_kpath(rtwdev, phy);
+ synpath = rtw89_phy_get_syn_sel(rtwdev, phy);
+
+ rf18 = rtw89_read_rf(rtwdev, synpath, RR_CFGCH, RFREG_MASK);
+ if (rf18 == INV_RF_DATA) {
+ rtw89_warn(rtwdev, "[RFK] Invalid RF18 value\n");
+ return;
+ }
+
+ for (path = 0; path < RF_PATH_NUM_8922A; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ for (i = 0; i < 2; i++) {
+ if (rf_reg[path][i] == INV_RF_DATA) {
+ rtw89_warn(rtwdev,
+ "[RFK] Invalid RF_0x18 for Path-%d\n", path);
+ return;
+ }
+
+ rf_reg[path][i] &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BW |
+ RR_CFGCH_BAND0 | RR_CFGCH_CH);
+ rf_reg[path][i] |= u32_encode_bits(central_ch, RR_CFGCH_CH);
+
+ if (band == RTW89_BAND_2G)
+ rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x0);
+ else
+ rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x1);
+
+ switch (band) {
+ case RTW89_BAND_2G:
+ default:
+ break;
+ case RTW89_BAND_5G:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BAND1_5G, RR_CFGCH_BAND1) |
+ u32_encode_bits(CFGCH_BAND0_5G, RR_CFGCH_BAND0);
+ break;
+ case RTW89_BAND_6G:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BAND1_6G, RR_CFGCH_BAND1) |
+ u32_encode_bits(CFGCH_BAND0_6G, RR_CFGCH_BAND0);
+ break;
+ }
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ default:
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_40M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_80M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_160M, RR_CFGCH_BW_V2);
+ break;
+ case RTW89_CHANNEL_WIDTH_320:
+ rf_reg[path][i] |=
+ u32_encode_bits(CFGCH_BW_V2_320M, RR_CFGCH_BW_V2);
+ break;
+ }
+
+ rtw89_write_rf(rtwdev, path, rf_addr[i],
+ RFREG_MASK, rf_reg[path][i]);
+ fsleep(100);
+ }
+ }
+
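+	/* CAV silicon needs an extra band-dependent LUT fix-up on path A */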
+ if (hal->cv != CHIP_CAV)
+ return;
+
+ if (band == RTW89_BAND_2G) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x00003);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c990);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0xebe38);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
+ } else {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x00003);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c190);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0xebe38);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
+ }
+}
+
+void rtw8922a_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_ctl_band_ch_bw(rtwdev, phy_idx, chan->channel, chan->band_type,
+ chan->band_width);
+}
+
+enum _rf_syn_pow {
+ RF_SYN_ON_OFF,
+ RF_SYN_OFF_ON,
+ RF_SYN_ALLON,
+ RF_SYN_ALLOFF,
+};
+
+static void rtw8922a_set_syn01_cav(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
+{
+ if (syn == RF_SYN_ALLON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ } else if (syn == RF_SYN_ON_OFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x0);
+ } else if (syn == RF_SYN_OFF_ON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
+ } else if (syn == RF_SYN_ALLOFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x0);
+ }
+}
+
+static void rtw8922a_set_syn01_cbv(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
+{
+ if (syn == RF_SYN_ALLON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0xf);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0xf);
+ } else if (syn == RF_SYN_ON_OFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0xf);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0x0);
+ } else if (syn == RF_SYN_OFF_ON) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0xf);
+ } else if (syn == RF_SYN_ALLOFF) {
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0x0);
+ }
+}
+
+static void rtw8922a_set_syn01(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "SYN config=%d\n", syn);
+
+ if (hal->cv == CHIP_CAV)
+ rtw8922a_set_syn01_cav(rtwdev, syn);
+ else
+ rtw8922a_set_syn01_cbv(rtwdev, syn);
+}
+
+static void rtw8922a_chlk_ktbl_sel(struct rtw89_dev *rtwdev, u8 kpath, u8 idx)
+{
+ u32 tmp;
+
+ if (idx > 2) {
+		rtw89_warn(rtwdev, "[DBCC][ERROR] idx is out of limit!! index(%d)\n", idx);
+ return;
+ }
+
+ if (kpath & RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1, idx);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_MDPD_V1, idx);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RR_TXG_SEL, 0x4 | idx);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, BIT(0));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, BIT(1));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G5, tmp);
+ }
+
+ if (kpath & RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1, idx);
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_MDPD_V1, idx);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RR_TXG_SEL, 0x4 | idx);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, BIT(0));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT_C1, B_CFIR_LUT_G3, tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, BIT(1));
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT_C1, B_CFIR_LUT_G5, tmp);
+ }
+}
+
+static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ enum rtw89_sub_entity_idx sub_entity_idx;
+ const struct rtw89_chan *chan;
+ enum rtw89_entity_mode mode;
+ u8 s0_tbl, s1_tbl;
+ u8 tbl_sel;
+
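+	/* While preparing MCC, stage the secondary channel into K-table 1;
+	 * otherwise table 0 tracks the primary channel.
+	 */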
+ mode = rtw89_get_entity_mode(rtwdev);
+ switch (mode) {
+ case RTW89_ENTITY_MODE_MCC_PREPARE:
+ sub_entity_idx = RTW89_SUB_ENTITY_1;
+ tbl_sel = 1;
+ break;
+ default:
+ sub_entity_idx = RTW89_SUB_ENTITY_0;
+ tbl_sel = 0;
+ break;
+ }
+
+ chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+
+ rfk_mcc->ch[tbl_sel] = chan->channel;
+ rfk_mcc->band[tbl_sel] = chan->band_type;
+ rfk_mcc->bw[tbl_sel] = chan->band_width;
+ rfk_mcc->table_idx = tbl_sel;
+
+ s0_tbl = tbl_sel;
+ s1_tbl = tbl_sel;
+
+ rtw8922a_chlk_ktbl_sel(rtwdev, RF_A, s0_tbl);
+ rtw8922a_chlk_ktbl_sel(rtwdev, RF_B, s1_tbl);
+}
+
+static void rtw8922a_rfk_mlo_ctrl(struct rtw89_dev *rtwdev)
+{
+ enum _rf_syn_pow syn_pow;
+
+ if (!rtwdev->dbcc_en)
+ goto set_rfk_reload;
+
+ switch (rtwdev->mlo_dbcc_mode) {
+ case MLO_0_PLUS_2_1RF:
+ syn_pow = RF_SYN_OFF_ON;
+ break;
+ case MLO_0_PLUS_2_2RF:
+ case MLO_1_PLUS_1_2RF:
+ case MLO_2_PLUS_0_1RF:
+ case MLO_2_PLUS_0_2RF:
+ case MLO_2_PLUS_2_2RF:
+ case MLO_DBCC_NOT_SUPPORT:
+ default:
+ syn_pow = RF_SYN_ON_OFF;
+ break;
+ case MLO_1_PLUS_1_1RF:
+ case DBCC_LEGACY:
+ syn_pow = RF_SYN_ALLON;
+ break;
+ }
+
+ rtw8922a_set_syn01(rtwdev, syn_pow);
+
+set_rfk_reload:
+ rtw8922a_chlk_reload(rtwdev);
+}
+
+static void rtw8922a_rfk_pll_init(struct rtw89_dev *rtwdev)
+{
+ int ret;
+ u8 tmp;
+
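+	/* Program the XTAL/PLL registers over the SI bus, bailing out on the
+	 * first failed access.
+	 */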
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_PLL_1, &tmp);
+ if (ret)
+ return;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_PLL_1, tmp | 0xf8, 0xFF);
+ if (ret)
+ return;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_APBT, &tmp);
+ if (ret)
+ return;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_APBT, tmp & ~0x60, 0xFF);
+ if (ret)
+ return;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_PLL, &tmp);
+ if (ret)
+ return;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_PLL, tmp | 0x38, 0xFF);
+ if (ret)
+ return;
+}
+
+void rtw8922a_rfk_hw_init(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->dbcc_en)
+ rtw8922a_rfk_mlo_ctrl(rtwdev);
+
+ rtw8922a_rfk_pll_init(rtwdev);
+}
+
+void rtw8922a_pre_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ bool mlo_1_1;
+
+ if (!rtwdev->dbcc_en)
+ return;
+
+ mlo_1_1 = rtw89_is_mlo_1_1(rtwdev);
+ if (mlo_1_1)
+ rtw8922a_set_syn01(rtwdev, RF_SYN_ALLON);
+ else if (phy_idx == RTW89_PHY_0)
+ rtw8922a_set_syn01(rtwdev, RF_SYN_ON_OFF);
+ else
+ rtw8922a_set_syn01(rtwdev, RF_SYN_OFF_ON);
+
+ fsleep(1000);
+}
+
+void rtw8922a_post_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw8922a_rfk_mlo_ctrl(rtwdev);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h
new file mode 100644
index 000000000000..66bdd57c1eea
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2023 Realtek Corporation
+ */
+
+#ifndef __RTW89_8922A_RFK_H__
+#define __RTW89_8922A_RFK_H__
+
+#include "core.h"
+
+void rtw8922a_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx);
+void rtw8922a_set_channel_rf(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+void rtw8922a_rfk_hw_init(struct rtw89_dev *rtwdev);
+void rtw8922a_pre_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8922a_post_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index 7b3d98d2c402..4981b657bd7b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -26,6 +26,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.io_rcy_en = MAC_AX_PCIE_ENABLE,
.io_rcy_tmr = MAC_AX_IO_RCY_ANA_TMR_DEF,
.rx_ring_eq_is_full = true,
+ .check_rx_tag = true,
.init_cfg_reg = R_BE_HAXI_INIT_CFG1,
.txhci_en_bit = B_BE_TXDMA_EN,
@@ -79,7 +80,7 @@ static struct pci_driver rtw89_8922ae_driver = {
.id_table = rtw89_8922ae_id_table,
.probe = rtw89_pci_probe,
.remove = rtw89_pci_remove,
- .driver.pm = &rtw89_pm_ops,
+ .driver.pm = &rtw89_pm_ops_be,
};
module_pci_driver(rtw89_8922ae_driver);
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index 5c7ca36c09b6..ccad026defb5 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -41,34 +41,8 @@ static void rtw89_wow_leave_lps(struct rtw89_dev *rtwdev)
static int rtw89_wow_config_mac(struct rtw89_dev *rtwdev, bool enable_wow)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
- int ret;
-
- if (enable_wow) {
- ret = rtw89_mac_resize_ple_rx_quota(rtwdev, true);
- if (ret) {
- rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
- return ret;
- }
- rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
- rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE);
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
- rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0);
- rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0);
- rtw89_write32(rtwdev, R_AX_TF_FWD, 0);
- rtw89_write32(rtwdev, R_AX_HW_RPT_FWD, 0);
- } else {
- ret = rtw89_mac_resize_ple_rx_quota(rtwdev, false);
- if (ret) {
- rtw89_err(rtwdev, "[ERR]patch rx qta %d\n", ret);
- return ret;
- }
- rtw89_write32_clr(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
- rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
- rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
- rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD);
- }
- return 0;
+ return mac->wow_config_mac(rtwdev, enable_wow);
}
static void rtw89_wow_set_rx_filter(struct rtw89_dev *rtwdev, bool enable)
@@ -85,21 +59,14 @@ static void rtw89_wow_set_rx_filter(struct rtw89_dev *rtwdev, bool enable)
static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 wow_reason_reg = rtwdev->chip->wow_reason_reg;
struct cfg80211_wowlan_nd_info nd_info;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
- u32 wow_reason_reg;
u8 reason;
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
- wow_reason_reg = R_AX_C2HREG_DATA3 + 3;
- else
- wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3;
-
reason = rtw89_read8(rtwdev, wow_reason_reg);
-
switch (reason) {
case RTW89_WOW_RSN_RX_DEAUTH:
wakeup.disconnect = true;
@@ -470,13 +437,14 @@ static int rtw89_wow_cfg_wake(struct rtw89_dev *rtwdev, bool wow)
static int rtw89_wow_check_fw_status(struct rtw89_dev *rtwdev, bool wow_enable)
{
+ const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
u8 polling;
int ret;
ret = read_poll_timeout_atomic(rtw89_read8_mask, polling,
wow_enable == !!polling,
50, 50000, false, rtwdev,
- R_AX_WOW_CTRL, B_AX_WOW_WOWEN);
+ mac->wow_ctrl.addr, mac->wow_ctrl.mask);
if (ret)
rtw89_err(rtwdev, "failed to check wow status %s\n",
wow_enable ? "enabled" : "disabled");
@@ -519,7 +487,7 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
return ret;
}
- ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, wow_vif, wow_sta);
+ ret = rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, wow_vif, wow_sta);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c assoc cmac tbl\n");
return ret;
@@ -566,7 +534,7 @@ static int rtw89_wow_enable_trx_pre(struct rtw89_dev *rtwdev)
rtw89_mac_ptk_drop_by_band_and_wait(rtwdev, RTW89_MAC_0);
- ret = rtw89_hci_poll_txdma_ch(rtwdev);
+ ret = rtw89_hci_poll_txdma_ch_idle(rtwdev);
if (ret) {
rtw89_err(rtwdev, "txdma ch busy\n");
return ret;
@@ -589,7 +557,7 @@ static int rtw89_wow_enable_trx_post(struct rtw89_dev *rtwdev)
rtw89_hci_disable_intr(rtwdev);
rtw89_hci_ctrl_trxhci(rtwdev, false);
- ret = rtw89_hci_poll_txdma_ch(rtwdev);
+ ret = rtw89_hci_poll_txdma_ch_idle(rtwdev);
if (ret) {
rtw89_err(rtwdev, "failed to poll txdma ch idle pcie\n");
return ret;
@@ -699,14 +667,14 @@ static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
goto out;
}
+ rtw89_fw_release_general_pkt_list(rtwdev, true);
+
ret = rtw89_wow_cfg_wake(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to disable config wake\n");
goto out;
}
- rtw89_fw_release_general_pkt_list(rtwdev, true);
-
ret = rtw89_wow_check_fw_status(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to check disable fw ready\n");
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 05890536e353..211fa25b9a78 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -740,7 +740,7 @@ u16 rsi_get_connected_channel(struct ieee80211_vif *vif)
return 0;
bss = &vif->bss_conf;
- channel = bss->chandef.chan;
+ channel = bss->chanreq.oper.chan;
if (!channel)
return 0;
@@ -759,7 +759,7 @@ static void rsi_switch_channel(struct rsi_hw *adapter,
if (!vif)
return;
- channel = vif->bss_conf.chandef.chan;
+ channel = vif->bss_conf.chanreq.oper.chan;
if (!channel)
return;
@@ -1957,6 +1957,10 @@ static int rsi_mac80211_resume(struct ieee80211_hw *hw)
#endif
static const struct ieee80211_ops mac80211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = rsi_mac80211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = rsi_mac80211_start,
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 10a465686439..dccc139cabb2 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -232,17 +232,17 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
if (!usb_reg_buf)
return status;
- usb_reg_buf[0] = (cpu_to_le32(value) & 0x00ff);
- usb_reg_buf[1] = (cpu_to_le32(value) & 0xff00) >> 8;
- usb_reg_buf[2] = (cpu_to_le32(value) & 0x00ff0000) >> 16;
- usb_reg_buf[3] = (cpu_to_le32(value) & 0xff000000) >> 24;
+ usb_reg_buf[0] = value & 0x00ff;
+ usb_reg_buf[1] = (value & 0xff00) >> 8;
+ usb_reg_buf[2] = (value & 0x00ff0000) >> 16;
+ usb_reg_buf[3] = (value & 0xff000000) >> 24;
status = usb_control_msg(usbdev,
usb_sndctrlpipe(usbdev, 0),
USB_VENDOR_REGISTER_WRITE,
RSI_USB_REQ_OUT,
- ((cpu_to_le32(reg) & 0xffff0000) >> 16),
- (cpu_to_le32(reg) & 0xffff),
+ (reg & 0xffff0000) >> 16,
+ reg & 0xffff,
(void *)usb_reg_buf,
len,
USB_CTRL_SET_TIMEOUT);
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index 537caf9d914a..a904602f02ce 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -144,13 +144,13 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
struct wfx_vif *wvif_ch0 = wdev_to_wvif(wvif->wdev, 0);
struct ieee80211_vif *vif_ch0 = wvif_to_vif(wvif_ch0);
- chan0 = vif_ch0->bss_conf.chandef.chan;
+ chan0 = vif_ch0->bss_conf.chanreq.oper.chan;
}
if (wdev_to_wvif(wvif->wdev, 1)) {
struct wfx_vif *wvif_ch1 = wdev_to_wvif(wvif->wdev, 1);
struct ieee80211_vif *vif_ch1 = wvif_to_vif(wvif_ch1);
- chan1 = vif_ch1->bss_conf.chandef.chan;
+ chan1 = vif_ch1->bss_conf.chanreq.oper.chan;
}
if (chan0 && chan1 && vif->type != NL80211_IFTYPE_AP) {
if (chan0->hw_value == chan1->hw_value) {
@@ -344,6 +344,7 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
const int pairwise_cipher_suite_size = 4 / sizeof(u16);
const int akm_suite_size = 4 / sizeof(u16);
+ int ret = -EINVAL;
const u16 *ptr;
if (unlikely(!skb))
@@ -352,22 +353,26 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
skb->len - ieoffset);
if (unlikely(!ptr))
- return -EINVAL;
+ goto free_skb;
ptr += pairwise_cipher_suite_count_offset;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
ptr += 1 + pairwise_cipher_suite_size * *ptr;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
ptr += 1 + akm_suite_size * *ptr;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
- return 0;
+ ret = 0;
+
+free_skb:
+ dev_kfree_skb(skb);
+ return ret;
}
int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
index 4c30b5772ce0..00c4731d8f8e 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
@@ -178,12 +178,15 @@ static int cw1200_sdio_irq_unsubscribe(struct hwbus_priv *self)
return ret;
}
+/* Like the rest of the driver, this only supports one device per system */
+static struct gpio_desc *cw1200_reset;
+static struct gpio_desc *cw1200_powerup;
+
static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata)
{
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 0);
+ if (cw1200_reset) {
+ gpiod_set_value(cw1200_reset, 0);
msleep(30); /* Min is 2 * CLK32K cycles */
- gpio_free(pdata->reset);
}
if (pdata->power_ctrl)
@@ -196,16 +199,21 @@ static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata)
static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata)
{
- /* Ensure I/Os are pulled low */
- if (pdata->reset) {
- gpio_request(pdata->reset, "cw1200_wlan_reset");
- gpio_direction_output(pdata->reset, 0);
+ /* Ensure I/Os are pulled low (reset is active low) */
+ cw1200_reset = devm_gpiod_get_optional(NULL, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(cw1200_reset)) {
+ pr_err("could not get CW1200 SDIO reset GPIO\n");
+ return PTR_ERR(cw1200_reset);
}
- if (pdata->powerup) {
- gpio_request(pdata->powerup, "cw1200_wlan_powerup");
- gpio_direction_output(pdata->powerup, 0);
+ gpiod_set_consumer_name(cw1200_reset, "cw1200_wlan_reset");
+ cw1200_powerup = devm_gpiod_get_optional(NULL, "powerup", GPIOD_OUT_LOW);
+ if (IS_ERR(cw1200_powerup)) {
+ pr_err("could not get CW1200 SDIO powerup GPIO\n");
+ return PTR_ERR(cw1200_powerup);
}
- if (pdata->reset || pdata->powerup)
+ gpiod_set_consumer_name(cw1200_powerup, "cw1200_wlan_powerup");
+
+ if (cw1200_reset || cw1200_powerup)
msleep(10); /* Settle time? */
/* Enable 3v3 and 1v8 to hardware */
@@ -226,13 +234,13 @@ static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata)
}
/* Enable POWERUP signal */
- if (pdata->powerup) {
- gpio_set_value(pdata->powerup, 1);
+ if (cw1200_powerup) {
+ gpiod_set_value(cw1200_powerup, 1);
msleep(250); /* or more..? */
}
- /* Enable RSTn signal */
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 1);
+ /* Deassert RSTn signal, note active low */
+ if (cw1200_reset) {
+ gpiod_set_value(cw1200_reset, 0);
msleep(50); /* Or more..? */
}
return 0;
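The cw1200_sdio conversion above replaces integer GPIO numbers (gpio_request()/gpio_set_value()) with GPIO descriptors. Descriptors carry the line's polarity from the machine description, so gpiod_set_value() takes logical values, which is why the comments now call out that RSTn is active low. A hedged, generic sketch of the consumer API (not the driver's exact flow):

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Request an optional reset line already asserted, hold it, then
 * release the chip. Logical 1 = asserted, whatever the wiring.
 */
static int demo_reset_pulse(struct device *dev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);
	if (reset) {
		msleep(30);			/* hold in reset */
		gpiod_set_value(reset, 0);	/* deassert */
	}
	return 0;
}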
diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c
index c82c0688b549..4f346fb977a9 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c
@@ -11,7 +11,7 @@
*/
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -38,6 +38,8 @@ struct hwbus_priv {
const struct cw1200_platform_data_spi *pdata;
spinlock_t lock; /* Serialize all bus operations */
wait_queue_head_t wq;
+ struct gpio_desc *reset;
+ struct gpio_desc *powerup;
int claimed;
};
@@ -80,7 +82,7 @@ static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self,
#endif
/* Header is LE16 */
- regaddr = cpu_to_le16(regaddr);
+ regaddr = (__force u16)cpu_to_le16(regaddr);
/* We have to byteswap if the SPI bus is limited to 8b operation
or we are running on a Big Endian system
@@ -145,7 +147,7 @@ static int cw1200_spi_memcpy_toio(struct hwbus_priv *self,
#endif
/* Header is LE16 */
- regaddr = cpu_to_le16(regaddr);
+ regaddr = (__force u16)cpu_to_le16(regaddr);
/* We have to byteswap if the SPI bus is limited to 8b operation
or we are running on a Big Endian system
@@ -275,12 +277,12 @@ static void cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
free_irq(self->func->irq, self);
}
-static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
+static int cw1200_spi_off(struct hwbus_priv *self, const struct cw1200_platform_data_spi *pdata)
{
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 0);
+ if (self->reset) {
+ /* Assert RESET, note active low */
+ gpiod_set_value(self->reset, 1);
msleep(30); /* Min is 2 * CLK32K cycles */
- gpio_free(pdata->reset);
}
if (pdata->power_ctrl)
@@ -291,18 +293,12 @@ static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
return 0;
}
-static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata)
+static int cw1200_spi_on(struct hwbus_priv *self, const struct cw1200_platform_data_spi *pdata)
{
/* Ensure I/Os are pulled low */
- if (pdata->reset) {
- gpio_request(pdata->reset, "cw1200_wlan_reset");
- gpio_direction_output(pdata->reset, 0);
- }
- if (pdata->powerup) {
- gpio_request(pdata->powerup, "cw1200_wlan_powerup");
- gpio_direction_output(pdata->powerup, 0);
- }
- if (pdata->reset || pdata->powerup)
+ gpiod_direction_output(self->reset, 1); /* Active low */
+ gpiod_direction_output(self->powerup, 0);
+ if (self->reset || self->powerup)
msleep(10); /* Settle time? */
/* Enable 3v3 and 1v8 to hardware */
@@ -323,13 +319,13 @@ static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata)
}
/* Enable POWERUP signal */
- if (pdata->powerup) {
- gpio_set_value(pdata->powerup, 1);
+ if (self->powerup) {
+ gpiod_set_value(self->powerup, 1);
msleep(250); /* or more..? */
}
- /* Enable RSTn signal */
- if (pdata->reset) {
- gpio_set_value(pdata->reset, 1);
+ /* Deassert RSTn signal, note active low */
+ if (self->reset) {
+ gpiod_set_value(self->reset, 0);
msleep(50); /* Or more..? */
}
return 0;
@@ -381,20 +377,33 @@ static int cw1200_spi_probe(struct spi_device *func)
spi_get_chipselect(func, 0), func->mode, func->bits_per_word,
func->max_speed_hz);
- if (cw1200_spi_on(plat_data)) {
+ self = devm_kzalloc(&func->dev, sizeof(*self), GFP_KERNEL);
+ if (!self) {
+ pr_err("Can't allocate SPI hwbus_priv.");
+ return -ENOMEM;
+ }
+
+ /* Request reset asserted */
+ self->reset = devm_gpiod_get_optional(&func->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(self->reset))
+ return dev_err_probe(&func->dev, PTR_ERR(self->reset),
+ "could not get reset GPIO\n");
+ gpiod_set_consumer_name(self->reset, "cw1200_wlan_reset");
+
+ self->powerup = devm_gpiod_get_optional(&func->dev, "powerup", GPIOD_OUT_LOW);
+ if (IS_ERR(self->powerup))
+ return dev_err_probe(&func->dev, PTR_ERR(self->powerup),
+ "could not get powerup GPIO\n");
+ gpiod_set_consumer_name(self->powerup, "cw1200_wlan_powerup");
+
+ if (cw1200_spi_on(self, plat_data)) {
pr_err("spi_on() failed!\n");
- return -1;
+ return -ENODEV;
}
if (spi_setup(func)) {
pr_err("spi_setup() failed!\n");
- return -1;
- }
-
- self = devm_kzalloc(&func->dev, sizeof(*self), GFP_KERNEL);
- if (!self) {
- pr_err("Can't allocate SPI hwbus_priv.");
- return -ENOMEM;
+ return -ENODEV;
}
self->pdata = plat_data;
@@ -416,7 +425,7 @@ static int cw1200_spi_probe(struct spi_device *func)
if (status) {
cw1200_spi_irq_unsubscribe(self);
- cw1200_spi_off(plat_data);
+ cw1200_spi_off(self, plat_data);
}
return status;
@@ -434,7 +443,7 @@ static void cw1200_spi_disconnect(struct spi_device *func)
self->core = NULL;
}
}
- cw1200_spi_off(dev_get_platdata(&func->dev));
+ cw1200_spi_off(self, dev_get_platdata(&func->dev));
}
static int __maybe_unused cw1200_spi_suspend(struct device *dev)
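The SPI variant above moves the descriptors into hwbus_priv (now passed to the on/off helpers) and reports failures with dev_err_probe(). That helper returns the error it is given and logs it, except for -EPROBE_DEFER, which it records silently for later inspection, so the usual log-then-return pair collapses into one statement. A minimal sketch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* dev_err_probe() pattern for an optional GPIO at probe time;
 * returns 0 or a negative errno suitable for probe().
 */
static int demo_request_reset(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *d;

	d = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(d))
		return dev_err_probe(dev, PTR_ERR(d),
				     "could not get reset GPIO\n");
	*out = d;
	return 0;
}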
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index 381013e0db63..a54a7b86864f 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -203,6 +203,10 @@ static const unsigned long cw1200_ttl[] = {
};
static const struct ieee80211_ops cw1200_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = cw1200_start,
.stop = cw1200_stop,
.add_interface = cw1200_add_interface,
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index cd9a41f59f32..0da2d29dd7bd 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1351,6 +1351,10 @@ static struct ieee80211_supported_band wl1251_band_2ghz = {
};
static const struct ieee80211_ops wl1251_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.start = wl1251_op_start,
.stop = wl1251_op_stop,
.add_interface = wl1251_op_add_interface,
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 301bd0043a43..4e5b351f80f0 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -343,5 +343,6 @@ static void __exit wl1251_sdio_exit(void)
module_init(wl1251_sdio_init);
module_exit(wl1251_sdio_exit);
+MODULE_DESCRIPTION("TI WL1251 SDIO helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 29292f06bd3d..1936bb3af54a 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -342,6 +342,7 @@ static struct spi_driver wl1251_spi_driver = {
module_spi_driver(wl1251_spi_driver);
+MODULE_DESCRIPTION("TI WL1251 SPI helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
MODULE_ALIAS("spi:wl1251");
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index de045fe4ca1e..b26d42b4e3cc 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1955,6 +1955,7 @@ module_param_named(tcxo, tcxo_param, charp, 0);
MODULE_PARM_DESC(tcxo,
"TCXO clock: 19.2, 26, 38.4, 52, 16.368, 32.736, 16.8, 33.6");
+MODULE_DESCRIPTION("TI WL12xx wireless driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 20d9181b3410..2ccac1cdec01 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -2086,6 +2086,7 @@ module_param_named(num_rx_desc, num_rx_desc_param, int, 0400);
MODULE_PARM_DESC(num_rx_desc_param,
"Number of Rx descriptors: u8 (default is 32)");
+MODULE_DESCRIPTION("TI WiLink 8 wireless driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL18XX_FW_NAME);
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 1e082d039b82..2499dc908305 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -233,7 +233,7 @@ void wlcore_event_channel_switch(struct wl1271 *wl,
cancel_delayed_work(&wlvif->channel_switch_work);
} else {
set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
- ieee80211_csa_finish(vif);
+ ieee80211_csa_finish(vif, 0);
}
}
}
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index fb9ed97774c7..ef12169f8044 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -2910,7 +2910,7 @@ static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int ret;
wlvif->aid = vif->cfg.aid;
- wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
+ wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chanreq.oper);
wlvif->beacon_int = bss_conf->beacon_int;
wlvif->wmm_enabled = bss_conf->qos;
@@ -4242,7 +4242,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
/* Handle HT information change */
if ((changed & BSS_CHANGED_HT) &&
- (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
+ (bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)) {
ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
@@ -4515,7 +4515,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
/* Handle new association with HT. Do this after join. */
if (sta_exists) {
bool enabled =
- bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
+ bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT;
ret = wlcore_hw_set_peer_cap(wl,
&sta_ht_cap,
@@ -6793,6 +6793,7 @@ MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
module_param(no_recovery, int, 0600);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
+MODULE_DESCRIPTION("TI WLAN core driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index f0686635db46..92fb5b8dcdae 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -16,7 +16,6 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
-#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/printk.h>
#include <linux/of.h>
@@ -75,8 +74,8 @@ static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
sdio_release_host(func);
- if (WARN_ON(ret))
- dev_err(child->parent, "sdio read failed (%d)\n", ret);
+ if (ret)
+ dev_err_ratelimited(child->parent, "sdio read failed (%d)\n", ret);
if (unlikely(dump)) {
printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
@@ -120,8 +119,8 @@ static int __must_check wl12xx_sdio_raw_write(struct device *child, int addr,
sdio_release_host(func);
- if (WARN_ON(ret))
- dev_err(child->parent, "sdio write failed (%d)\n", ret);
+ if (ret)
+ dev_err_ratelimited(child->parent, "sdio write failed (%d)\n", ret);
return ret;
}
@@ -447,6 +446,7 @@ module_sdio_driver(wl1271_sdio_driver);
module_param(dump, bool, 0600);
MODULE_PARM_DESC(dump, "Enable sdio read/write dumps.");
+MODULE_DESCRIPTION("TI WLAN SDIO helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 7d9a139db59e..0aa2b2f3c5c9 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -562,6 +562,7 @@ static struct spi_driver wl1271_spi_driver = {
};
module_spi_driver(wl1271_spi_driver);
+MODULE_DESCRIPTION("TI WLAN SPI helpers");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index a84340c2075f..b55fe320633c 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -4,7 +4,7 @@
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
/*
@@ -196,8 +196,11 @@ static const struct ieee80211_regdomain hwsim_world_regdom_custom_04 = {
.reg_rules = {
REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, 0),
- REG_RULE(5150 - 10, 5240 + 10, 80, 0, 30, 0),
+ REG_RULE(5150 - 10, 5240 + 10, 80, 0, 30, NL80211_RRF_AUTO_BW),
REG_RULE(5260 - 10, 5320 + 10, 80, 0, 30,
+ NL80211_RRF_DFS_CONCURRENT | NL80211_RRF_DFS |
+ NL80211_RRF_AUTO_BW),
+ REG_RULE(5500 - 10, 5720 + 10, 160, 0, 30,
NL80211_RRF_DFS_CONCURRENT | NL80211_RRF_DFS),
REG_RULE(5745 - 10, 5825 + 10, 80, 0, 30, 0),
REG_RULE(5855 - 10, 5925 + 10, 80, 0, 33, 0),
@@ -213,6 +216,7 @@ static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
struct hwsim_vif_priv {
u32 magic;
+ u32 skip_beacons;
u8 bssid[ETH_ALEN];
bool assoc;
bool bcn_en;
@@ -2128,6 +2132,16 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
return 0;
}
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void mac80211_hwsim_vif_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
+
+ debugfs_create_u32("skip_beacons", 0600, vif->debugfs_dir,
+ &vp->skip_beacons);
+}
+#endif
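The per-vif knob above is wired up with debugfs_create_u32(), which binds a u32 straight to a file with no custom fops; the beacon TX path then decrements the counter once per suppressed beacon. The helper in isolation:

#include <linux/debugfs.h>

static u32 demo_skip_beacons;

/* Exposes the variable read/write at <parent>/skip_beacons;
 * writes land directly in the u32.
 */
static void demo_add_debugfs(struct dentry *parent)
{
	debugfs_create_u32("skip_beacons", 0600, parent,
			   &demo_skip_beacons);
}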
static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2193,12 +2207,19 @@ static void __mac80211_hwsim_beacon_tx(struct ieee80211_bss_conf *link_conf,
struct ieee80211_vif *vif,
struct sk_buff *skb)
{
+ struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct ieee80211_tx_info *info;
struct ieee80211_rate *txrate;
struct ieee80211_mgmt *mgmt;
/* TODO: get MCS */
int bitrate = 100;
+ if (vp->skip_beacons) {
+ vp->skip_beacons--;
+ dev_kfree_skb(skb);
+ return;
+ }
+
info = IEEE80211_SKB_CB(skb);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE))
ieee80211_get_tx_rates(vif, NULL, skb,
@@ -2284,8 +2305,8 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
rcu_dereference(link_conf->chanctx_conf)->def.chan);
}
- if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
- ieee80211_csa_finish(vif);
+ if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif, link_id))
+ ieee80211_csa_finish(vif, link_id);
}
static enum hrtimer_restart
@@ -2462,7 +2483,7 @@ static void mac80211_hwsim_vif_info_changed(struct ieee80211_hw *hw,
}
if (vif->type == NL80211_IFTYPE_STATION &&
- changed & BSS_CHANGED_MLD_VALID_LINKS) {
+ changed & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM)) {
u16 usable_links = ieee80211_vif_usable_links(vif);
if (vif->active_links != usable_links)
@@ -2653,10 +2674,11 @@ static int mac80211_hwsim_sta_state(struct ieee80211_hw *hw,
return mac80211_hwsim_sta_add(hw, vif, sta);
/*
- * when client is authorized (AP station marked as such),
- * enable all links
+ * in an MLO connection, when client is authorized
+ * (AP station marked as such), enable all links
*/
- if (vif->type == NL80211_IFTYPE_STATION &&
+ if (ieee80211_vif_is_mld(vif) &&
+ vif->type == NL80211_IFTYPE_STATION &&
new_state == IEEE80211_STA_AUTHORIZED && !sta->tdls)
ieee80211_set_active_links_async(vif,
ieee80211_vif_usable_links(vif));
@@ -2738,6 +2760,24 @@ static int mac80211_hwsim_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
+static enum ieee80211_neg_ttlm_res
+mac80211_hwsim_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_neg_ttlm *neg_ttlm)
+{
+ u32 i;
+
+ /* For testing purposes, accept if all TIDs are mapped to the same links
+ * set, otherwise reject.
+ */
+ for (i = 0; i < IEEE80211_TTLM_NUM_TIDS; i++) {
+ if (neg_ttlm->downlink[i] != neg_ttlm->uplink[i] ||
+ neg_ttlm->downlink[i] != neg_ttlm->downlink[0])
+ return NEG_TTLM_RES_REJECT;
+ }
+
+ return NEG_TTLM_RES_ACCEPT;
+}
+
#ifdef CONFIG_NL80211_TESTMODE
/*
* This section contains example code for using netlink
@@ -3175,6 +3215,47 @@ static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
}
+static int mac80211_hwsim_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ int i;
+
+ if (n_vifs <= 0)
+ return -EINVAL;
+
+ wiphy_dbg(hw->wiphy,
+ "switch vif channel context mode: %u\n", mode);
+
+ for (i = 0; i < n_vifs; i++) {
+ hwsim_check_chanctx_magic(vifs[i].old_ctx);
+ wiphy_dbg(hw->wiphy,
+ "switch vif channel context: %d MHz/width: %d/cfreqs:%d/%d MHz -> %d MHz/width: %d/cfreqs:%d/%d MHz\n",
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].old_ctx->def.center_freq1,
+ vifs[i].old_ctx->def.center_freq2,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.width,
+ vifs[i].new_ctx->def.center_freq1,
+ vifs[i].new_ctx->def.center_freq2);
+
+ switch (mode) {
+ case CHANCTX_SWMODE_REASSIGN_VIF:
+ hwsim_check_chanctx_magic(vifs[i].new_ctx);
+ break;
+ case CHANCTX_SWMODE_SWAP_CONTEXTS:
+ hwsim_set_chanctx_magic(vifs[i].new_ctx);
+ hwsim_clear_chanctx_magic(vifs[i].old_ctx);
+ break;
+ default:
+ WARN_ON("Invalid mode");
+ }
+ }
+ return 0;
+}
+
static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx_pkts_nic",
"tx_bytes_nic",
@@ -3839,6 +3920,13 @@ out:
return err;
}
+#ifdef CONFIG_MAC80211_DEBUGFS
+#define HWSIM_DEBUGFS_OPS \
+ .vif_add_debugfs = mac80211_hwsim_vif_add_debugfs,
+#else
+#define HWSIM_DEBUGFS_OPS
+#endif
+
#define HWSIM_COMMON_OPS \
.tx = mac80211_hwsim_tx, \
.wake_tx_queue = ieee80211_handle_wake_tx_queue, \
@@ -3863,7 +3951,8 @@ out:
.get_et_stats = mac80211_hwsim_get_et_stats, \
.get_et_strings = mac80211_hwsim_get_et_strings, \
.start_pmsr = mac80211_hwsim_start_pmsr, \
- .abort_pmsr = mac80211_hwsim_abort_pmsr,
+ .abort_pmsr = mac80211_hwsim_abort_pmsr, \
+ HWSIM_DEBUGFS_OPS
#define HWSIM_NON_MLO_OPS \
.sta_add = mac80211_hwsim_sta_add, \
@@ -3877,6 +3966,10 @@ static const struct ieee80211_ops mac80211_hwsim_ops = {
HWSIM_NON_MLO_OPS
.sw_scan_start = mac80211_hwsim_sw_scan,
.sw_scan_complete = mac80211_hwsim_sw_scan_complete,
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
};
#define HWSIM_CHANCTX_OPS \
@@ -3888,7 +3981,8 @@ static const struct ieee80211_ops mac80211_hwsim_ops = {
.remove_chanctx = mac80211_hwsim_remove_chanctx, \
.change_chanctx = mac80211_hwsim_change_chanctx, \
.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx,\
- .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx,
+ .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx, \
+ .switch_vif_chanctx = mac80211_hwsim_switch_vif_chanctx,
static const struct ieee80211_ops mac80211_hwsim_mchan_ops = {
HWSIM_COMMON_OPS
@@ -3903,6 +3997,7 @@ static const struct ieee80211_ops mac80211_hwsim_mlo_ops = {
.change_vif_links = mac80211_hwsim_change_vif_links,
.change_sta_links = mac80211_hwsim_change_sta_links,
.sta_state = mac80211_hwsim_sta_state,
+ .can_neg_ttlm = mac80211_hwsim_can_neg_ttlm,
};
struct hwsim_new_radio_params {
@@ -4965,6 +5060,33 @@ static void mac80211_hwsim_sband_capab(struct ieee80211_supported_band *sband)
BIT(NL80211_IFTYPE_MESH_POINT) | \
BIT(NL80211_IFTYPE_OCB))
+static const u8 iftypes_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF |
+ WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB,
+ [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB,
+ [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+};
+
+#define MAC80211_HWSIM_MLD_CAPA_OPS \
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \
+ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \
+ FIELD_PREP_CONST(IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS, \
+ IEEE80211_MLD_MAX_NUM_LINKS - 1)
+
+static const struct wiphy_iftype_ext_capab mac80211_hwsim_iftypes_ext_capa[] = {
+ {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = iftypes_ext_capa_ap,
+ .extended_capabilities_mask = iftypes_ext_capa_ap,
+ .extended_capabilities_len = sizeof(iftypes_ext_capa_ap),
+ .eml_capabilities = IEEE80211_EML_CAP_EMLSR_SUPP |
+ IEEE80211_EML_CAP_EMLMR_SUPPORT,
+ .mld_capa_and_ops = MAC80211_HWSIM_MLD_CAPA_OPS,
+ },
+};
+
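FIELD_PREP_CONST() above is the constant-expression flavor of FIELD_PREP(), which is what allows the MLD capability word (TID-to-link negotiation restricted to the same link set, plus the maximum simultaneous links) to be built in a static initializer. A generic sketch with made-up fields:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_FIELD_A	GENMASK(3, 0)
#define DEMO_FIELD_B	GENMASK(7, 4)

/* FIELD_PREP() is not usable in a static initializer;
 * FIELD_PREP_CONST() is, with somewhat weaker sanity checks.
 */
static const u16 demo_caps = FIELD_PREP_CONST(DEMO_FIELD_A, 2) |
			     FIELD_PREP_CONST(DEMO_FIELD_B, 5);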
static int mac80211_hwsim_new_radio(struct genl_info *info,
struct hwsim_new_radio_params *param)
{
@@ -5159,6 +5281,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(hw, CONNECTION_MONITOR);
ieee80211_hw_set(hw, AP_LINK_PS);
+
+ hw->wiphy->iftype_ext_capab = mac80211_hwsim_iftypes_ext_capa;
+ hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(mac80211_hwsim_iftypes_ext_capa);
} else {
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
@@ -5309,7 +5435,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
schedule_timeout_interruptible(1);
}
- /* TODO: Add param */
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_DFS_CONCURRENT);
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.h b/drivers/net/wireless/virtual/mac80211_hwsim.h
index 4676cdaf4cfd..21b1afd83dc1 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.h
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.h
@@ -3,7 +3,7 @@
* mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211
* Copyright (c) 2008, Jouni Malinen <j@w1.fi>
* Copyright (c) 2011, Javier Lopez <jlopex@gmail.com>
- * Copyright (C) 2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2020, 2022-2024 Intel Corporation
*/
#ifndef __MAC80211_HWSIM_H
@@ -84,6 +84,8 @@ enum hwsim_tx_control_flags {
* @HWSIM_CMD_START_PMSR: request to start peer measurement with the
* %HWSIM_ATTR_PMSR_REQUEST. Result will be sent back asynchronously
* with %HWSIM_CMD_REPORT_PMSR.
+ * @HWSIM_CMD_ABORT_PMSR: Abort previously started peer measurement.
+ * @HWSIM_CMD_REPORT_PMSR: Report peer measurement data.
* @__HWSIM_CMD_MAX: enum limit
*/
enum hwsim_commands {
@@ -298,6 +300,7 @@ enum hwsim_vqs {
* Information about a receiving or transmitting bitrate
* that can be mapped to struct rate_info
*
+ * @__HWSIM_RATE_INFO_ATTR_INVALID: reserved, netlink attribute 0 is invalid
* @HWSIM_RATE_INFO_ATTR_FLAGS: bitflag of flags from &enum rate_info_flags
* @HWSIM_RATE_INFO_ATTR_MCS: mcs index if struct describes an HT/VHT/HE rate
* @HWSIM_RATE_INFO_ATTR_LEGACY: bitrate in 100kbit/s for 802.11abg
diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c
index ba14d83353a4..6a84ec58d618 100644
--- a/drivers/net/wireless/virtual/virt_wifi.c
+++ b/drivers/net/wireless/virtual/virt_wifi.c
@@ -453,7 +453,7 @@ static int virt_wifi_net_device_get_iflink(const struct net_device *dev)
{
struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
- return priv->lowerdev->ifindex;
+ return READ_ONCE(priv->lowerdev->ifindex);
}
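The one-line virt_wifi change above annotates a lockless read: get_iflink() can race with an ifindex update, and READ_ONCE(), paired with a WRITE_ONCE() on the writer side, keeps the compiler from tearing, fusing, or re-reading the access. The bare pattern:

#include <linux/compiler.h>

static int demo_ifindex;

/* Marked accesses: reader and writer may race, but each sees a
 * single, untorn load/store.
 */
static int demo_get_iflink(void)
{
	return READ_ONCE(demo_ifindex);
}

static void demo_set_ifindex(int v)
{
	WRITE_ONCE(demo_ifindex, v);
}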
static const struct net_device_ops virt_wifi_ops = {
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_def.h b/drivers/net/wireless/zydas/zd1211rw/zd_def.h
index 8ca2d0aab170..2f55e8deee82 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_def.h
@@ -12,7 +12,7 @@
#include <linux/stringify.h>
#include <linux/device.h>
-typedef u16 __nocast zd_addr_t;
+typedef u16 zd_addr_t;
#define dev_printk_f(level, dev, fmt, args...) \
dev_printk(level, dev, "%s() " fmt, __func__, ##args)
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 5d534e15a844..900c063bd724 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -1343,6 +1343,10 @@ static u64 zd_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops zd_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = zd_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = zd_op_start,
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 8505d84eeed6..f3b567a13ded 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -380,7 +380,7 @@ static inline void handle_regs_int(struct urb *urb)
spin_lock_irqsave(&intr->lock, flags);
int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
- if (int_num == CR_INTERRUPT) {
+ if (int_num == (u16)CR_INTERRUPT) {
struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
spin_lock(&mac->lock);
memcpy(&mac->intr_buffer, urb->transfer_buffer,
@@ -416,7 +416,8 @@ out:
spin_unlock_irqrestore(&intr->lock, flags);
/* CR_INTERRUPT might override read_reg too. */
- if (int_num == CR_INTERRUPT && atomic_read(&intr->read_regs_enabled))
+ if (int_num == (u16)CR_INTERRUPT &&
+ atomic_read(&intr->read_regs_enabled))
handle_regs_int_override(urb);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index cc70360364b7..abc41a7089fa 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -57,8 +57,6 @@
#define CHECK_Q_STOP_TIMEOUT_US 1000000
#define CHECK_Q_STOP_STEP_US 10000
-#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
-
static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
enum mtk_txrx tx_rx, unsigned int index)
{
@@ -161,7 +159,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
skb_reset_tail_pointer(skb);
skb_put(skb, le16_to_cpu(gpd->data_buff_len));
- ret = md_ctrl->recv_skb(queue, skb);
+ ret = queue->recv_skb(queue, skb);
/* Break processing, will try again later */
if (ret < 0)
return ret;
@@ -897,13 +895,13 @@ static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
/**
* t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
- * @md_ctrl: CLDMA context structure.
+ * @queue: CLDMA queue.
* @recv_skb: Receiving skb callback.
*/
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{
- md_ctrl->recv_skb = recv_skb;
+ queue->recv_skb = recv_skb;
}
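Moving recv_skb from cldma_ctrl into each cldma_queue is what lets the dump queue run a dedicated handler while the remaining RX queues share the port-proxy one. The shape of the per-queue callback, with illustrative names:

#include <linux/types.h>

struct demo_queue {
	int index;
	int (*recv)(struct demo_queue *q, void *buf, size_t len);
};

/* Per-queue dispatch: the RX path calls q->recv() directly, so a
 * queue carrying special traffic gets its own handler.
 */
static void demo_set_recv(struct demo_queue *q,
			  int (*recv)(struct demo_queue *, void *, size_t))
{
	q->recv = recv;
}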
/**
@@ -993,6 +991,28 @@ allow_sleep:
return ret;
}
+static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
+{
+ int qno;
+
+ for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) {
+ md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+ t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb);
+ }
+
+ md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ;
+
+ for (qno = 0; qno < CLDMA_TXQ_NUM; qno++)
+ md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+
+ if (cfg_id == CLDMA_DEDICATED_Q_CFG) {
+ md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+ md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+ t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
+ t7xx_port_proxy_recv_skb_from_dedicated_queue);
+ }
+}
+
static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
char dma_pool_name[32];
@@ -1018,16 +1038,9 @@ static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
dev_err(md_ctrl->dev, "control TX ring init fail\n");
goto err_free_tx_ring;
}
-
- md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
}
for (j = 0; j < CLDMA_RXQ_NUM; j++) {
- md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;
-
- if (j == CLDMA_RXQ_NUM - 1)
- md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;
-
ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
if (ret) {
dev_err(md_ctrl->dev, "Control RX ring init fail\n");
@@ -1094,6 +1107,7 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
{
struct device *dev = &t7xx_dev->pdev->dev;
struct cldma_ctrl *md_ctrl;
+ int qno;
md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
if (!md_ctrl)
@@ -1102,7 +1116,9 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
md_ctrl->t7xx_dev = t7xx_dev;
md_ctrl->dev = dev;
md_ctrl->hif_id = hif_id;
- md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
+ for (qno = 0; qno < CLDMA_RXQ_NUM; qno++)
+ md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb;
+
t7xx_hw_info_init(md_ctrl);
t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
return 0;
@@ -1332,9 +1348,10 @@ err_workqueue:
return -ENOMEM;
}
-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{
t7xx_cldma_late_release(md_ctrl);
+ t7xx_cldma_adjust_config(md_ctrl, cfg_id);
t7xx_cldma_late_init(md_ctrl);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
index 4410bac6993a..f2d9941be9c8 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.h
@@ -31,6 +31,10 @@
#include "t7xx_cldma.h"
#include "t7xx_pci.h"
+#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
+#define CLDMA_SHARED_Q_BUFF_SZ 3584
+#define CLDMA_DEDICATED_Q_BUFF_SZ 2048
+
/**
* enum cldma_id - Identifiers for CLDMA HW units.
* @CLDMA_ID_MD: Modem control channel.
@@ -55,6 +59,11 @@ struct cldma_gpd {
__le16 not_used2;
};
+enum cldma_cfg {
+ CLDMA_SHARED_Q_CFG,
+ CLDMA_DEDICATED_Q_CFG,
+};
+
struct cldma_request {
struct cldma_gpd *gpd; /* Virtual address for CPU */
dma_addr_t gpd_addr; /* Physical address for DMA */
@@ -82,6 +91,7 @@ struct cldma_queue {
wait_queue_head_t req_wq; /* Only for TX */
struct workqueue_struct *worker;
struct work_struct cldma_work;
+ int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};
struct cldma_ctrl {
@@ -101,24 +111,22 @@ struct cldma_ctrl {
struct md_pm_entity *pm_entity;
struct t7xx_cldma_hw hw_info;
bool is_late_init;
- int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb);
};
+#define CLDMA_Q_IDX_DUMP 1
#define GPD_FLAGS_HWO BIT(0)
#define GPD_FLAGS_IOC BIT(7)
#define GPD_DMAPOOL_ALIGN 16
-#define CLDMA_MTU 3584 /* 3.5kB */
-
int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev);
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl);
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id);
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl);
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl);
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl);
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb));
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb);
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx);
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.c b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
index 24e7d491468e..8d864d4ed77f 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.c
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.c
@@ -177,6 +177,11 @@ int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
return t7xx_acpi_reset(t7xx_dev, "_RST");
}
+int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev)
+{
+ return t7xx_acpi_reset(t7xx_dev, "MRST._RST");
+}
+
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
{
u32 val;
@@ -192,6 +197,7 @@ static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
struct t7xx_pci_dev *t7xx_dev = data;
+ t7xx_mode_update(t7xx_dev, T7XX_RESET);
msleep(RGU_RESET_DELAY_MS);
t7xx_reset_device_via_pmic(t7xx_dev);
return IRQ_HANDLED;
@@ -529,7 +535,7 @@ static void t7xx_md_hk_wq(struct work_struct *work)
/* Clear the HS2 EXIT event appended in core_reset() */
t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
- t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD], CLDMA_SHARED_Q_CFG);
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
md->core_md.handshake_ongoing = true;
@@ -544,7 +550,7 @@ static void t7xx_ap_hk_wq(struct work_struct *work)
/* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
- t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP], CLDMA_SHARED_Q_CFG);
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
md->core_ap.handshake_ongoing = true;
t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
@@ -758,6 +764,7 @@ err_destroy_hswq:
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
{
+ enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
struct t7xx_modem *md = t7xx_dev->md;
t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
@@ -765,7 +772,8 @@ void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
if (!md->md_init_finish)
return;
- t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
+ if (mode != T7XX_RESET && mode != T7XX_UNKNOWN)
+ t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
t7xx_port_proxy_uninit(md->port_prox);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
diff --git a/drivers/net/wwan/t7xx/t7xx_modem_ops.h b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
index abe633cf7adc..b39e945a92e0 100644
--- a/drivers/net/wwan/t7xx/t7xx_modem_ops.h
+++ b/drivers/net/wwan/t7xx/t7xx_modem_ops.h
@@ -85,6 +85,7 @@ int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev);
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev);
void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev);
int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_acpi_pldr_func(struct t7xx_pci_dev *t7xx_dev);
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev);
#endif /* __T7XX_MODEM_OPS_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 91256e005b84..e0b1e7a616ca 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -52,6 +52,81 @@
#define PM_RESOURCE_POLL_TIMEOUT_US 10000
#define PM_RESOURCE_POLL_STEP_US 100
+static const char * const t7xx_mode_names[] = {
+ [T7XX_UNKNOWN] = "unknown",
+ [T7XX_READY] = "ready",
+ [T7XX_RESET] = "reset",
+ [T7XX_FASTBOOT_SWITCHING] = "fastboot_switching",
+ [T7XX_FASTBOOT_DOWNLOAD] = "fastboot_download",
+ [T7XX_FASTBOOT_DUMP] = "fastboot_dump",
+};
+
+static_assert(ARRAY_SIZE(t7xx_mode_names) == T7XX_MODE_LAST);
+
+static ssize_t t7xx_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct t7xx_pci_dev *t7xx_dev;
+ struct pci_dev *pdev;
+ int index = 0;
+
+ pdev = to_pci_dev(dev);
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (!t7xx_dev)
+ return -ENODEV;
+
+ index = sysfs_match_string(t7xx_mode_names, buf);
+ if (index == T7XX_FASTBOOT_SWITCHING) {
+ WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING);
+ } else if (index == T7XX_RESET) {
+ WRITE_ONCE(t7xx_dev->mode, T7XX_RESET);
+ t7xx_acpi_pldr_func(t7xx_dev);
+ }
+
+ return count;
+}
+
+static ssize_t t7xx_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum t7xx_mode mode = T7XX_UNKNOWN;
+ struct t7xx_pci_dev *t7xx_dev;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+ t7xx_dev = pci_get_drvdata(pdev);
+ if (!t7xx_dev)
+ return -ENODEV;
+
+ mode = READ_ONCE(t7xx_dev->mode);
+ if (mode < T7XX_MODE_LAST)
+ return sysfs_emit(buf, "%s\n", t7xx_mode_names[mode]);
+
+ return sysfs_emit(buf, "%s\n", t7xx_mode_names[T7XX_UNKNOWN]);
+}
+
+static DEVICE_ATTR_RW(t7xx_mode);
+
+static struct attribute *t7xx_mode_attr[] = {
+ &dev_attr_t7xx_mode.attr,
+ NULL
+};
+
+static const struct attribute_group t7xx_mode_attribute_group = {
+ .attrs = t7xx_mode_attr,
+};
+
+void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode)
+{
+ if (!t7xx_dev)
+ return;
+
+ WRITE_ONCE(t7xx_dev->mode, mode);
+ sysfs_notify(&t7xx_dev->pdev->dev.kobj, NULL, "t7xx_mode");
+}
+
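The t7xx_mode attribute above is an ordinary device sysfs group: reads emit the current mode name, and writes are parsed with sysfs_match_string(), which accepts the trailing newline a shell echo appends and returns the index into the names array (a write of "reset" then triggers the partial reset via MRST._RST). A parsing sketch:

#include <linux/string.h>
#include <linux/sysfs.h>

static const char * const demo_modes[] = { "unknown", "ready", "reset" };

/* sysfs_match_string() returns the matching array index or
 * -EINVAL; "echo reset > .../t7xx_mode" matches "reset".
 */
static int demo_parse_mode(const char *buf)
{
	return sysfs_match_string(demo_modes, buf);
}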
enum t7xx_pm_state {
MTK_PM_EXCEPTION,
MTK_PM_INIT, /* Device initialized, but handshake not completed */
@@ -108,7 +183,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
pm_runtime_use_autosuspend(&pdev->dev);
- return t7xx_wait_pm_config(t7xx_dev);
+ return 0;
}
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
@@ -279,7 +354,8 @@ static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
int ret;
t7xx_dev = pci_get_drvdata(pdev);
- if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
+ if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT ||
+ READ_ONCE(t7xx_dev->mode) != T7XX_READY) {
dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
return -EFAULT;
}
@@ -729,16 +805,28 @@ static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
t7xx_pcie_mac_interrupts_dis(t7xx_dev);
+ ret = sysfs_create_group(&t7xx_dev->pdev->dev.kobj,
+ &t7xx_mode_attribute_group);
+ if (ret)
+ goto err_md_exit;
+
ret = t7xx_interrupt_init(t7xx_dev);
- if (ret) {
- t7xx_md_exit(t7xx_dev);
- return ret;
- }
+ if (ret)
+ goto err_remove_group;
+
t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
t7xx_pcie_mac_interrupts_en(t7xx_dev);
return 0;
+
+err_remove_group:
+ sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
+ &t7xx_mode_attribute_group);
+
+err_md_exit:
+ t7xx_md_exit(t7xx_dev);
+ return ret;
}
static void t7xx_pci_remove(struct pci_dev *pdev)
@@ -747,6 +835,9 @@ static void t7xx_pci_remove(struct pci_dev *pdev)
int i;
t7xx_dev = pci_get_drvdata(pdev);
+
+ sysfs_remove_group(&t7xx_dev->pdev->dev.kobj,
+ &t7xx_mode_attribute_group);
t7xx_md_exit(t7xx_dev);
for (i = 0; i < EXT_INT_NUM; i++) {
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
index f08f1ab74469..49a11586d8d8 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -43,6 +43,16 @@ struct t7xx_addr_base {
typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);
+enum t7xx_mode {
+ T7XX_UNKNOWN,
+ T7XX_READY,
+ T7XX_RESET,
+ T7XX_FASTBOOT_SWITCHING,
+ T7XX_FASTBOOT_DOWNLOAD,
+ T7XX_FASTBOOT_DUMP,
+ T7XX_MODE_LAST, /* must always be last */
+};
+
/* struct t7xx_pci_dev - MTK device context structure
* @intr_handler: array of handler function for request_threaded_irq
* @intr_thread: array of thread_fn for request_threaded_irq
@@ -59,6 +69,7 @@ typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);
* @md_pm_lock: protects PCIe sleep lock
* @sleep_disable_count: PCIe L1.2 lock counter
* @sleep_lock_acquire: indicates that sleep has been disabled
+ * @mode: indicates the device mode
*/
struct t7xx_pci_dev {
t7xx_intr_callback intr_handler[EXT_INT_NUM];
@@ -82,6 +93,7 @@ struct t7xx_pci_dev {
#ifdef CONFIG_WWAN_DEBUGFS
struct dentry *debugfs_dir;
#endif
+ u32 mode;
};
enum t7xx_pm_id {
@@ -120,5 +132,5 @@ int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_enti
int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev);
-
+void t7xx_mode_update(struct t7xx_pci_dev *t7xx_dev, enum t7xx_mode mode);
#endif /* __T7XX_PCI_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port.h b/drivers/net/wwan/t7xx/t7xx_port.h
index 4ae8a00a8532..f74d3bab810d 100644
--- a/drivers/net/wwan/t7xx/t7xx_port.h
+++ b/drivers/net/wwan/t7xx/t7xx_port.h
@@ -75,6 +75,8 @@ enum port_ch {
PORT_CH_DSS6_TX = 0x20df,
PORT_CH_DSS7_RX = 0x20e0,
PORT_CH_DSS7_TX = 0x20e1,
+
+ PORT_CH_UNIMPORTANT = 0xffff,
};
struct t7xx_port;
@@ -135,11 +137,13 @@ struct t7xx_port {
};
};
+int t7xx_get_port_mtu(struct t7xx_port *port);
struct sk_buff *t7xx_port_alloc_skb(int payload);
struct sk_buff *t7xx_ctrl_alloc_skb(int payload);
int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb);
int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
unsigned int ex_msg);
+int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb);
int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
unsigned int ex_msg);
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.c b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
index 274846d39fbf..7d6388bf1d7c 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.c
@@ -48,6 +48,9 @@
i < (proxy)->port_count; \
i++, (p) = &(proxy)->ports[i])
+#define T7XX_MAX_POSSIBLE_PORTS_NUM \
+ (max(ARRAY_SIZE(t7xx_port_conf), ARRAY_SIZE(t7xx_early_port_conf)))
+
static const struct t7xx_port_conf t7xx_port_conf[] = {
{
.tx_ch = PORT_CH_UART2_TX,
@@ -100,6 +103,21 @@ static const struct t7xx_port_conf t7xx_port_conf[] = {
},
};
+static const struct t7xx_port_conf t7xx_early_port_conf[] = {
+ {
+ .tx_ch = PORT_CH_UNIMPORTANT,
+ .rx_ch = PORT_CH_UNIMPORTANT,
+ .txq_index = CLDMA_Q_IDX_DUMP,
+ .rxq_index = CLDMA_Q_IDX_DUMP,
+ .txq_exp_index = CLDMA_Q_IDX_DUMP,
+ .rxq_exp_index = CLDMA_Q_IDX_DUMP,
+ .path_id = CLDMA_ID_AP,
+ .ops = &wwan_sub_port_ops,
+ .name = "fastboot",
+ .port_type = WWAN_PORT_FASTBOOT,
+ },
+};
+
static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
{
const struct t7xx_port_conf *port_conf;
@@ -214,7 +232,17 @@ int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
return 0;
}
-static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
+int t7xx_get_port_mtu(struct t7xx_port *port)
+{
+ enum cldma_id path_id = port->port_conf->path_id;
+ int tx_qno = t7xx_port_get_queue_no(port);
+ struct cldma_ctrl *md_ctrl;
+
+ md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
+ return md_ctrl->tx_ring[tx_qno].pkt_size;
+}
+
+int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
{
enum cldma_id path_id = port->port_conf->path_id;
struct cldma_ctrl *md_ctrl;
@@ -329,6 +357,39 @@ static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
}
}
+/**
+ * t7xx_port_proxy_recv_skb_from_dedicated_queue() - Dispatch early port received skb.
+ * @queue: CLDMA queue.
+ * @skb: Socket buffer.
+ *
+ * Return:
+ ** 0 - Packet consumed.
+ ** -ERROR - Failed to process skb.
+ */
+int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb)
+{
+ struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
+ struct port_proxy *port_prox = t7xx_dev->md->port_prox;
+ const struct t7xx_port_conf *port_conf;
+ struct t7xx_port *port;
+ int ret;
+
+ port = &port_prox->ports[0];
+ if (WARN_ON_ONCE(port->port_conf->rxq_index != queue->index)) {
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ port_conf = port->port_conf;
+ ret = port_conf->ops->recv_skb(port, skb);
+ if (ret < 0 && ret != -ENOBUFS) {
+ dev_err(port->dev, "drop on RX ch %d, %d\n", port_conf->rx_ch, ret);
+ dev_kfree_skb_any(skb);
+ }
+
+ return ret;
+}
+
static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
struct cldma_queue *queue, u16 channel)
{
@@ -359,7 +420,7 @@ static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev
** 0 - Packet consumed.
** -ERROR - Failed to process skb.
*/
-static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
+int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
@@ -444,33 +505,56 @@ static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
spin_lock_init(&port->port_update_lock);
port->chan_enable = false;
- if (port_conf->ops->init)
+ if (port_conf->ops && port_conf->ops->init)
port_conf->ops->init(port);
}
t7xx_proxy_setup_ch_mapping(port_prox);
}
+void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id)
+{
+ struct port_proxy *port_prox = md->port_prox;
+ const struct t7xx_port_conf *port_conf;
+ u32 port_count;
+ int i;
+
+ t7xx_port_proxy_uninit(port_prox);
+
+ if (cfg_id == PORT_CFG_ID_EARLY) {
+ port_conf = t7xx_early_port_conf;
+ port_count = ARRAY_SIZE(t7xx_early_port_conf);
+ } else {
+ port_conf = t7xx_port_conf;
+ port_count = ARRAY_SIZE(t7xx_port_conf);
+ }
+
+ for (i = 0; i < port_count; i++)
+ port_prox->ports[i].port_conf = &port_conf[i];
+
+ port_prox->cfg_id = cfg_id;
+ port_prox->port_count = port_count;
+
+ t7xx_proxy_init_all_ports(md);
+}
+
static int t7xx_proxy_alloc(struct t7xx_modem *md)
{
- unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
struct device *dev = &md->t7xx_dev->pdev->dev;
struct port_proxy *port_prox;
- int i;
- port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count,
+ port_prox = devm_kzalloc(dev,
+ struct_size(port_prox,
+ ports,
+ T7XX_MAX_POSSIBLE_PORTS_NUM),
GFP_KERNEL);
if (!port_prox)
return -ENOMEM;
md->port_prox = port_prox;
port_prox->dev = dev;
+ t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);
- for (i = 0; i < port_count; i++)
- port_prox->ports[i].port_conf = &t7xx_port_conf[i];
-
- port_prox->port_count = port_count;
- t7xx_proxy_init_all_ports(md);
return 0;
}
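t7xx_proxy_alloc() above now sizes the proxy for the larger of the two port tables and computes the allocation with struct_size(), the overflow-checked idiom for a struct ending in a flexible array member. A generic sketch:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo {
	size_t count;
	int items[];	/* flexible array member */
};

/* struct_size(d, items, n) == sizeof(*d) + n * sizeof(d->items[0]),
 * saturating instead of wrapping on overflow.
 */
static struct demo *demo_alloc(size_t n)
{
	struct demo *d;

	d = kzalloc(struct_size(d, items, n), GFP_KERNEL);
	if (d)
		d->count = n;
	return d;
}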
@@ -492,8 +576,6 @@ int t7xx_port_proxy_init(struct t7xx_modem *md)
if (ret)
return ret;
- t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
- t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
return 0;
}
@@ -505,7 +587,7 @@ void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
for_each_proxy_port(i, port, port_prox) {
const struct t7xx_port_conf *port_conf = port->port_conf;
- if (port_conf->ops->uninit)
+ if (port_conf->ops && port_conf->ops->uninit)
port_conf->ops->uninit(port);
}
}
diff --git a/drivers/net/wwan/t7xx/t7xx_port_proxy.h b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
index 81d059fbc0fb..7f5706811445 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_proxy.h
+++ b/drivers/net/wwan/t7xx/t7xx_port_proxy.h
@@ -31,11 +31,18 @@
#define RX_QUEUE_MAXLEN 32
#define CTRL_QUEUE_MAXLEN 16
+enum port_cfg_id {
+ PORT_CFG_ID_INVALID,
+ PORT_CFG_ID_NORMAL,
+ PORT_CFG_ID_EARLY,
+};
+
struct port_proxy {
int port_count;
struct list_head rx_ch_ports[PORT_CH_ID_MASK + 1];
struct list_head queue_ports[CLDMA_NUM][MTK_QUEUES];
struct device *dev;
+ enum port_cfg_id cfg_id;
struct t7xx_port ports[];
};
@@ -98,5 +105,8 @@ void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int
int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg);
int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
bool en_flag);
+void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id);
+int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb);
+int t7xx_port_proxy_recv_skb_from_dedicated_queue(struct cldma_queue *queue, struct sk_buff *skb);
#endif /* __T7XX_PORT_PROXY_H__ */
diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
index 17389c8f6600..4b23ba693f3f 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -2,6 +2,7 @@
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
+ * Copyright (c) 2024, Fibocom Wireless Inc.
*
* Authors:
* Amir Hanania <amir.hanania@intel.com>
@@ -15,6 +16,7 @@
* Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
* Eliot Lee <eliot.lee@intel.com>
* Sreehari Kancharla <sreehari.kancharla@intel.com>
+ * Jinjian Song <jinjian.song@fibocom.com>
*/
#include <linux/atomic.h>
@@ -33,7 +35,7 @@
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"
-static int t7xx_port_ctrl_start(struct wwan_port *port)
+static int t7xx_port_wwan_start(struct wwan_port *port)
{
struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
@@ -44,30 +46,60 @@ static int t7xx_port_ctrl_start(struct wwan_port *port)
return 0;
}
-static void t7xx_port_ctrl_stop(struct wwan_port *port)
+static void t7xx_port_wwan_stop(struct wwan_port *port)
{
struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
atomic_dec(&port_mtk->usage_cnt);
}
-static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+static int t7xx_port_fastboot_tx(struct t7xx_port *port, struct sk_buff *skb)
+{
+ struct sk_buff *cur = skb, *tx_skb;
+ size_t actual, len, offset = 0;
+ int txq_mtu;
+ int ret;
+
+ txq_mtu = t7xx_get_port_mtu(port);
+ if (txq_mtu < 0)
+ return -EINVAL;
+
+ actual = cur->len;
+ while (actual) {
+ len = min_t(size_t, actual, txq_mtu);
+ tx_skb = __dev_alloc_skb(len, GFP_KERNEL);
+ if (!tx_skb)
+ return -ENOMEM;
+
+ skb_put_data(tx_skb, cur->data + offset, len);
+
+ ret = t7xx_port_send_raw_skb(port, tx_skb);
+ if (ret) {
+ dev_kfree_skb(tx_skb);
+ dev_err(port->dev, "Write error on fastboot port, %d\n", ret);
+ break;
+ }
+ offset += len;
+ actual -= len;
+ }
+
+ dev_kfree_skb(skb);
+ return 0;
+}
+
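t7xx_port_fastboot_tx() above must fragment a user write of arbitrary size into MTU-sized skbs, since the dump queue's buffers are fixed at CLDMA_DEDICATED_Q_BUFF_SZ. The chunking loop in the abstract (hypothetical send hook, not the driver's skb path):

#include <linux/minmax.h>
#include <linux/types.h>

/* Split len bytes into mtu-sized pieces; stop on the first error
 * from the caller-supplied send hook.
 */
static int demo_send_chunked(const u8 *data, size_t len, size_t mtu,
			     int (*send)(const u8 *chunk, size_t n))
{
	size_t off = 0;

	while (off < len) {
		size_t n = min(mtu, len - off);
		int ret = send(data + off, n);

		if (ret)
			return ret;
		off += n;
	}
	return 0;
}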
+static int t7xx_port_ctrl_tx(struct t7xx_port *port, struct sk_buff *skb)
{
- struct t7xx_port *port_private = wwan_port_get_drvdata(port);
const struct t7xx_port_conf *port_conf;
struct sk_buff *cur = skb, *cloned;
struct t7xx_fsm_ctl *ctl;
enum md_state md_state;
int cnt = 0, ret;
- if (!port_private->chan_enable)
- return -EINVAL;
-
- port_conf = port_private->port_conf;
- ctl = port_private->t7xx_dev->md->fsm_ctl;
+ port_conf = port->port_conf;
+ ctl = port->t7xx_dev->md->fsm_ctl;
md_state = t7xx_fsm_get_md_state(ctl);
if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) {
- dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n",
+ dev_warn(port->dev, "Cannot write to %s port when md_state=%d\n",
port_conf->name, md_state);
return -ENODEV;
}
@@ -75,10 +107,10 @@ static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
while (cur) {
cloned = skb_clone(cur, GFP_KERNEL);
cloned->len = skb_headlen(cur);
- ret = t7xx_port_send_skb(port_private, cloned, 0, 0);
+ ret = t7xx_port_send_skb(port, cloned, 0, 0);
if (ret) {
dev_kfree_skb(cloned);
- dev_err(port_private->dev, "Write error on %s port, %d\n",
+ dev_err(port->dev, "Write error on %s port, %d\n",
port_conf->name, ret);
return cnt ? cnt + ret : ret;
}
@@ -93,14 +125,53 @@ static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
return 0;
}
+static int t7xx_port_wwan_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct t7xx_port *port_private = wwan_port_get_drvdata(port);
+ const struct t7xx_port_conf *port_conf = port_private->port_conf;
+ int ret;
+
+ if (!port_private->chan_enable)
+ return -EINVAL;
+
+ if (port_conf->port_type != WWAN_PORT_FASTBOOT)
+ ret = t7xx_port_ctrl_tx(port_private, skb);
+ else
+ ret = t7xx_port_fastboot_tx(port_private, skb);
+
+ return ret;
+}
+
static const struct wwan_port_ops wwan_ops = {
- .start = t7xx_port_ctrl_start,
- .stop = t7xx_port_ctrl_stop,
- .tx = t7xx_port_ctrl_tx,
+ .start = t7xx_port_wwan_start,
+ .stop = t7xx_port_wwan_stop,
+ .tx = t7xx_port_wwan_tx,
};
+static void t7xx_port_wwan_create(struct t7xx_port *port)
+{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+ unsigned int header_len = sizeof(struct ccci_header), mtu;
+ struct wwan_port_caps caps;
+
+ if (!port->wwan.wwan_port) {
+ mtu = t7xx_get_port_mtu(port);
+ caps.frag_len = mtu - header_len;
+ caps.headroom_len = header_len;
+ port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
+ &wwan_ops, &caps, port);
+ if (IS_ERR(port->wwan.wwan_port))
+ dev_err(port->dev, "Unable to create WWAN port %s", port_conf->name);
+ }
+}
+
static int t7xx_port_wwan_init(struct t7xx_port *port)
{
+ const struct t7xx_port_conf *port_conf = port->port_conf;
+
+ if (port_conf->port_type == WWAN_PORT_FASTBOOT)
+ t7xx_port_wwan_create(port);
+
port->rx_length_th = RX_QUEUE_MAXLEN;
return 0;
}
@@ -152,20 +223,14 @@ static int t7xx_port_wwan_disable_chl(struct t7xx_port *port)
static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
{
const struct t7xx_port_conf *port_conf = port->port_conf;
- unsigned int header_len = sizeof(struct ccci_header);
- struct wwan_port_caps caps;
+
+ if (port_conf->port_type == WWAN_PORT_FASTBOOT)
+ return;
if (state != MD_STATE_READY)
return;
- if (!port->wwan.wwan_port) {
- caps.frag_len = CLDMA_MTU - header_len;
- caps.headroom_len = header_len;
- port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
- &wwan_ops, &caps, port);
- if (IS_ERR(port->wwan.wwan_port))
- dev_err(port->dev, "Unable to create WWWAN port %s", port_conf->name);
- }
+ t7xx_port_wwan_create(port);
}
struct port_ops wwan_sub_port_ops = {
diff --git a/drivers/net/wwan/t7xx/t7xx_reg.h b/drivers/net/wwan/t7xx/t7xx_reg.h
index c41d7d094c08..9c7dc72ac6f6 100644
--- a/drivers/net/wwan/t7xx/t7xx_reg.h
+++ b/drivers/net/wwan/t7xx/t7xx_reg.h
@@ -101,11 +101,33 @@ enum t7xx_pm_resume_state {
PM_RESUME_REG_STATE_L2_EXP,
};
+enum host_event_e {
+ HOST_EVENT_INIT = 0,
+ FASTBOOT_DL_NOTIFY = 0x3,
+};
+
#define T7XX_PCIE_MISC_DEV_STATUS 0x0d1c
#define MISC_STAGE_MASK GENMASK(2, 0)
#define MISC_RESET_TYPE_PLDR BIT(26)
#define MISC_RESET_TYPE_FLDR BIT(27)
-#define LINUX_STAGE 4
+#define MISC_LK_EVENT_MASK GENMASK(11, 8)
+#define HOST_EVENT_MASK GENMASK(31, 28)
+
+enum lk_event_id {
+ LK_EVENT_NORMAL = 0,
+ LK_EVENT_CREATE_PD_PORT = 1,
+ LK_EVENT_CREATE_POST_DL_PORT = 2,
+ LK_EVENT_RESET = 7,
+};
+
+enum t7xx_device_stage {
+ T7XX_DEV_STAGE_INIT = 0,
+ T7XX_DEV_STAGE_BROM_PRE = 1,
+ T7XX_DEV_STAGE_BROM_POST = 2,
+ T7XX_DEV_STAGE_LK = 3,
+ T7XX_DEV_STAGE_LINUX = 4,
+};
#define T7XX_PCIE_RESOURCE_STATUS 0x0d28
#define T7XX_PCIE_RESOURCE_STS_MSK GENMASK(4, 0)
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
index 0bc97430211b..9889ca4621cf 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
@@ -47,6 +47,13 @@
#define FSM_MD_EX_PASS_TIMEOUT_MS 45000
#define FSM_CMD_TIMEOUT_MS 2000
+#define wait_for_expected_dev_stage(status) \
+ read_poll_timeout(ioread32, status, \
+ ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LINUX) || \
+ ((status & MISC_STAGE_MASK) == T7XX_DEV_STAGE_LK), 100000, \
+ 20000000, false, IREG_BASE(md->t7xx_dev) + \
+ T7XX_PCIE_MISC_DEV_STATUS)
+
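The macro above re-reads the status register every 100000 us (100 ms) for up to 20000000 us (20 s) until the stage field reports either the LK or the Linux stage, and returns -ETIMEDOUT otherwise. A minimal userspace analogue of that poll loop, with a hypothetical mock_read() standing in for ioread32() of the real register:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MISC_STAGE_MASK	0x7	/* GENMASK(2, 0) */
#define STAGE_LK	3
#define STAGE_LINUX	4

static uint32_t mock_read(void)	/* pretend the device advances one stage per read */
{
	static uint32_t val;
	return ++val;
}

static int wait_for_stage(uint32_t *status, long interval_us, long timeout_us)
{
	for (long waited = 0; ; waited += interval_us) {
		*status = mock_read();
		uint32_t stage = *status & MISC_STAGE_MASK;

		if (stage == STAGE_LK || stage == STAGE_LINUX)
			return 0;
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		usleep(interval_us);
	}
}

int main(void)
{
	uint32_t status;
	int ret = wait_for_stage(&status, 100000, 20000000);

	printf("ret=%d stage=%u\n", ret, status & MISC_STAGE_MASK);
	return 0;
}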
void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
@@ -206,6 +213,55 @@ static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comm
fsm_finish_command(ctl, cmd, 0);
}
+static void t7xx_host_event_notify(struct t7xx_modem *md, unsigned int event_id)
+{
+ u32 value;
+
+ value = ioread32(IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ value &= ~HOST_EVENT_MASK;
+ value |= FIELD_PREP(HOST_EVENT_MASK, event_id);
+ iowrite32(value, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+}
+
+static void t7xx_lk_stage_event_handling(struct t7xx_fsm_ctl *ctl, unsigned int status)
+{
+ struct t7xx_modem *md = ctl->md;
+ struct cldma_ctrl *md_ctrl;
+ enum lk_event_id lk_event;
+ struct device *dev;
+ struct t7xx_port *port;
+
+ dev = &md->t7xx_dev->pdev->dev;
+ lk_event = FIELD_GET(MISC_LK_EVENT_MASK, status);
+ switch (lk_event) {
+ case LK_EVENT_NORMAL:
+ case LK_EVENT_RESET:
+ break;
+
+ case LK_EVENT_CREATE_PD_PORT:
+ case LK_EVENT_CREATE_POST_DL_PORT:
+ md_ctrl = md->md_ctrl[CLDMA_ID_AP];
+ t7xx_cldma_hif_hw_init(md_ctrl);
+ t7xx_cldma_stop(md_ctrl);
+ t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG);
+
+ port = &ctl->md->port_prox->ports[0];
+ port->port_conf->ops->enable_chl(port);
+
+ t7xx_cldma_start(md_ctrl);
+
+ if (lk_event == LK_EVENT_CREATE_POST_DL_PORT)
+ t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DOWNLOAD);
+ else
+ t7xx_mode_update(md->t7xx_dev, T7XX_FASTBOOT_DUMP);
+ break;
+
+ default:
+ dev_err(dev, "Invalid LK event %d\n", lk_event);
+ break;
+ }
+}
+
static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
ctl->curr_state = FSM_STATE_STOPPED;
@@ -226,8 +282,9 @@ static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comman
static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
- struct t7xx_pci_dev *t7xx_dev;
- struct cldma_ctrl *md_ctrl;
+ struct cldma_ctrl *md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
+ struct t7xx_pci_dev *t7xx_dev = ctl->md->t7xx_dev;
+ enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode);
int err;
if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
@@ -235,18 +292,20 @@ static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_comma
return;
}
- md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
- t7xx_dev = ctl->md->t7xx_dev;
-
ctl->curr_state = FSM_STATE_STOPPING;
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
t7xx_cldma_stop(md_ctrl);
- if (!ctl->md->rgu_irq_asserted) {
- t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
- /* Wait for the DRM disable to take effect */
- msleep(FSM_DRM_DISABLE_DELAY_MS);
+ if (mode == T7XX_FASTBOOT_SWITCHING)
+ t7xx_host_event_notify(ctl->md, FASTBOOT_DL_NOTIFY);
+
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
+ /* Wait for the DRM disable to take effect */
+ msleep(FSM_DRM_DISABLE_DELAY_MS);
+ if (mode == T7XX_FASTBOOT_SWITCHING) {
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
+ } else {
err = t7xx_acpi_fldr_func(t7xx_dev);
if (err)
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
@@ -272,6 +331,7 @@ static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
ctl->curr_state = FSM_STATE_READY;
t7xx_fsm_broadcast_ready_state(ctl);
+ t7xx_mode_update(md->t7xx_dev, T7XX_READY);
t7xx_md_event_notify(md, FSM_READY);
}
@@ -317,7 +377,8 @@ static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
struct t7xx_modem *md = ctl->md;
- u32 dev_status;
+ struct device *dev;
+ u32 status;
int ret;
if (!md)
@@ -329,23 +390,53 @@ static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command
return;
}
+ dev = &md->t7xx_dev->pdev->dev;
ctl->curr_state = FSM_STATE_PRE_START;
t7xx_md_event_notify(md, FSM_PRE_START);
- ret = read_poll_timeout(ioread32, dev_status,
- (dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
- false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
+ ret = wait_for_expected_dev_stage(status);
+
if (ret) {
- struct device *dev = &md->t7xx_dev->pdev->dev;
+ dev_err(dev, "read poll timeout %d\n", ret);
+ goto finish_command;
+ }
- fsm_finish_command(ctl, cmd, -ETIMEDOUT);
- dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
- return;
+ if (status != ctl->status || cmd->flag != 0) {
+ u32 stage = FIELD_GET(MISC_STAGE_MASK, status);
+
+ switch (stage) {
+ case T7XX_DEV_STAGE_INIT:
+ case T7XX_DEV_STAGE_BROM_PRE:
+ case T7XX_DEV_STAGE_BROM_POST:
+ dev_dbg(dev, "BROM_STAGE Entered\n");
+ ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_START, 0);
+ break;
+
+ case T7XX_DEV_STAGE_LK:
+ dev_dbg(dev, "LK_STAGE Entered\n");
+ t7xx_lk_stage_event_handling(ctl, status);
+ break;
+
+ case T7XX_DEV_STAGE_LINUX:
+ dev_dbg(dev, "LINUX_STAGE Entered\n");
+ t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM |
+ D2H_INT_ASYNC_MD_HK | D2H_INT_ASYNC_AP_HK);
+ if (cmd->flag == 0)
+ break;
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
+ t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
+ t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_NORMAL);
+ ret = fsm_routine_starting(ctl);
+ break;
+
+ default:
+ break;
+ }
+ ctl->status = status;
}
- t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
- t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
- fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
+finish_command:
+ fsm_finish_command(ctl, cmd, ret);
}
static int fsm_main_thread(void *data)
@@ -517,6 +608,7 @@ void t7xx_fsm_reset(struct t7xx_modem *md)
fsm_flush_event_cmd_qs(ctl);
ctl->curr_state = FSM_STATE_STOPPED;
ctl->exp_flg = false;
+ ctl->status = T7XX_DEV_STAGE_INIT;
}
int t7xx_fsm_init(struct t7xx_modem *md)
diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
index b0b3662ae6d7..7b0a9baf488c 100644
--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
@@ -96,6 +96,7 @@ struct t7xx_fsm_ctl {
bool exp_flg;
spinlock_t notifier_lock; /* Protects notifier list */
struct list_head notifier_list;
+ u32 status; /* Device boot stage */
};
struct t7xx_fsm_event {
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 72e01e550a16..17431f1b1a0c 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -26,7 +26,9 @@
static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
-static struct class *wwan_class;
+static const struct class wwan_class = {
+ .name = "wwan",
+};
static int wwan_major;
static struct dentry *wwan_debugfs_dir;
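This hunk, like the matching ones below for wwan_hwsim, nvme, nvme-subsystem, nvme-generic and nvme-fabrics, converts a class allocated at runtime with class_create() into a statically defined one registered with class_register(). A minimal sketch of the pattern in module form; the demo_* names are invented:

#include <linux/module.h>
#include <linux/device/class.h>

static const struct class demo_class = {
	.name = "demo",			/* appears as /sys/class/demo */
};

static int __init demo_init(void)
{
	/* replaces: demo_class = class_create("demo") plus the IS_ERR() check */
	return class_register(&demo_class);
}

static void __exit demo_exit(void)
{
	class_unregister(&demo_class);	/* replaces class_destroy() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");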
@@ -130,7 +132,7 @@ static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
+ dev = class_find_device(&wwan_class, NULL, parent, wwan_dev_parent_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -147,7 +149,7 @@ static struct wwan_device *wwan_dev_get_by_name(const char *name)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match);
+ dev = class_find_device(&wwan_class, NULL, name, wwan_dev_name_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -183,7 +185,7 @@ static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
+ dev = class_find_device(&wwan_class, NULL, dir, wwan_dev_debugfs_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -239,7 +241,7 @@ static struct wwan_device *wwan_create_dev(struct device *parent)
}
wwandev->dev.parent = parent;
- wwandev->dev.class = wwan_class;
+ wwandev->dev.class = &wwan_class;
wwandev->dev.type = &wwan_dev_type;
wwandev->id = id;
dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);
@@ -265,7 +267,7 @@ done_unlock:
static int is_wwan_child(struct device *dev, void *data)
{
- return dev->class == wwan_class;
+ return dev->class == &wwan_class;
}
static void wwan_remove_dev(struct wwan_device *wwandev)
@@ -328,6 +330,10 @@ static const struct {
.name = "XMMRPC",
.devsuf = "xmmrpc",
},
+ [WWAN_PORT_FASTBOOT] = {
+ .name = "FASTBOOT",
+ .devsuf = "fastboot",
+ },
};
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -371,7 +377,7 @@ static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
{
struct device *dev;
- dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
+ dev = class_find_device(&wwan_class, NULL, &minor, wwan_port_minor_match);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -401,7 +407,7 @@ static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
return -ENOMEM;
/* Collect ids of same name format ports */
- class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
+ class_dev_iter_init(&iter, &wwan_class, NULL, &wwan_port_dev_type);
while ((dev = class_dev_iter_next(&iter))) {
if (dev->parent != &wwandev->dev)
continue;
@@ -473,7 +479,7 @@ struct wwan_port *wwan_create_port(struct device *parent,
mutex_init(&port->data_lock);
port->dev.parent = &wwandev->dev;
- port->dev.class = wwan_class;
+ port->dev.class = &wwan_class;
port->dev.type = &wwan_port_dev_type;
port->dev.devt = MKDEV(wwan_major, minor);
dev_set_drvdata(&port->dev, drvdata);
@@ -916,7 +922,7 @@ static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-static struct device_type wwan_type = { .name = "wwan" };
+static const struct device_type wwan_type = { .name = "wwan" };
static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
const char *ifname,
@@ -1208,11 +1214,9 @@ static int __init wwan_init(void)
if (err)
return err;
- wwan_class = class_create("wwan");
- if (IS_ERR(wwan_class)) {
- err = PTR_ERR(wwan_class);
+ err = class_register(&wwan_class);
+ if (err)
goto unregister;
- }
/* chrdev used for wwan ports */
wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
@@ -1229,7 +1233,7 @@ static int __init wwan_init(void)
return 0;
destroy:
- class_destroy(wwan_class);
+ class_unregister(&wwan_class);
unregister:
rtnl_link_unregister(&wwan_rtnl_link_ops);
return err;
@@ -1240,7 +1244,7 @@ static void __exit wwan_exit(void)
debugfs_remove_recursive(wwan_debugfs_dir);
__unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
rtnl_link_unregister(&wwan_rtnl_link_ops);
- class_destroy(wwan_class);
+ class_unregister(&wwan_class);
}
module_init(wwan_init);
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
index ff3dd24ddb33..b02befd1b6fb 100644
--- a/drivers/net/wwan/wwan_hwsim.c
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -25,7 +25,9 @@ static int wwan_hwsim_devsnum = 2;
module_param_named(devices, wwan_hwsim_devsnum, int, 0444);
MODULE_PARM_DESC(devices, "Number of simulated devices");
-static struct class *wwan_hwsim_class;
+static const struct class wwan_hwsim_class = {
+ .name = "wwan_hwsim",
+};
static struct dentry *wwan_hwsim_debugfs_topdir;
static struct dentry *wwan_hwsim_debugfs_devcreate;
@@ -277,7 +279,7 @@ static struct wwan_hwsim_dev *wwan_hwsim_dev_new(void)
spin_unlock(&wwan_hwsim_devs_lock);
dev->dev.release = wwan_hwsim_dev_release;
- dev->dev.class = wwan_hwsim_class;
+ dev->dev.class = &wwan_hwsim_class;
dev_set_name(&dev->dev, "hwsim%u", dev->id);
spin_lock_init(&dev->ports_lock);
@@ -511,11 +513,9 @@ static int __init wwan_hwsim_init(void)
if (!wwan_wq)
return -ENOMEM;
- wwan_hwsim_class = class_create("wwan_hwsim");
- if (IS_ERR(wwan_hwsim_class)) {
- err = PTR_ERR(wwan_hwsim_class);
+ err = class_register(&wwan_hwsim_class);
+ if (err)
goto err_wq_destroy;
- }
wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL);
wwan_hwsim_debugfs_devcreate =
@@ -534,7 +534,7 @@ err_clean_devs:
wwan_hwsim_free_devs();
flush_workqueue(wwan_wq); /* Wait for deletion works to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
- class_destroy(wwan_hwsim_class);
+ class_unregister(&wwan_hwsim_class);
err_wq_destroy:
destroy_workqueue(wwan_wq);
@@ -547,7 +547,7 @@ static void __exit wwan_hwsim_exit(void)
wwan_hwsim_free_devs();
flush_workqueue(wwan_wq); /* Wait for deletion works to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
- class_destroy(wwan_hwsim_class);
+ class_unregister(&wwan_hwsim_class);
destroy_workqueue(wwan_wq);
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index fab361a250d6..ef76850d9bcd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1778,5 +1778,6 @@ static void __exit netback_fini(void)
}
module_exit(netback_fini);
+MODULE_DESCRIPTION("Xen backend network device module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index bb3726b622ad..4d0c527e8576 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1496,19 +1496,21 @@ static int btt_blk_init(struct btt *btt)
{
struct nd_btt *nd_btt = btt->nd_btt;
struct nd_namespace_common *ndns = nd_btt->ndns;
- int rc = -ENOMEM;
+ struct queue_limits lim = {
+ .logical_block_size = btt->sector_size,
+ .max_hw_sectors = UINT_MAX,
+ };
+ int rc;
- btt->btt_disk = blk_alloc_disk(NUMA_NO_NODE);
- if (!btt->btt_disk)
- return -ENOMEM;
+ btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(btt->btt_disk))
+ return PTR_ERR(btt->btt_disk);
nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
btt->btt_disk->first_minor = 0;
btt->btt_disk->fops = &btt_fops;
btt->btt_disk->private_data = btt;
- blk_queue_logical_block_size(btt->btt_disk->queue, btt->sector_size);
- blk_queue_max_hw_sectors(btt->btt_disk->queue, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);
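As of this series, blk_alloc_disk() takes the queue limits up front and returns an ERR_PTR() on failure instead of NULL, so callers set limits declaratively rather than through blk_queue_*() calls after allocation. A hedged sketch of the new calling convention; the values and the demo_ name are illustrative:

#include <linux/blkdev.h>

/* Sketch: allocate a disk with its limits fixed at creation time. */
static int demo_alloc_disk(struct gendisk **diskp)
{
	struct queue_limits lim = {
		.logical_block_size	= 4096,		/* illustrative */
		.max_hw_sectors		= UINT_MAX,
	};
	struct gendisk *disk;

	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(disk))
		return PTR_ERR(disk);	/* ERR_PTR now, no longer NULL */

	*diskp = disk;
	return 0;
}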
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 4e8fdcb3f1c8..8dcc10b6db5b 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -451,6 +451,11 @@ static int pmem_attach_disk(struct device *dev,
{
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct queue_limits lim = {
+ .logical_block_size = pmem_sector_size(ndns),
+ .physical_block_size = PAGE_SIZE,
+ .max_hw_sectors = UINT_MAX,
+ };
int nid = dev_to_node(dev), fua;
struct resource *res = &nsio->res;
struct range bb_range;
@@ -497,9 +502,9 @@ static int pmem_attach_disk(struct device *dev,
return -EBUSY;
}
- disk = blk_alloc_disk(nid);
- if (!disk)
- return -ENOMEM;
+ disk = blk_alloc_disk(&lim, nid);
+ if (IS_ERR(disk))
+ return PTR_ERR(disk);
q = disk->queue;
pmem->disk = disk;
@@ -539,9 +544,6 @@ static int pmem_attach_disk(struct device *dev,
pmem->virt_addr = addr;
blk_queue_write_cache(q, true, fua);
- blk_queue_physical_block_size(q, PAGE_SIZE);
- blk_queue_logical_block_size(q, pmem_sector_size(ndns));
- blk_queue_max_hw_sectors(q, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q);
if (pmem->pfn_flags & PFN_MAP)
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index c727cd1f264b..a480cdeac288 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1516,7 +1516,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
goto put_dev;
}
- anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
+ anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL);
if (IS_ERR(anv->ctrl.admin_q)) {
ret = -ENOMEM;
goto put_dev;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0d124a8ca9c3..00864a634470 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -114,12 +114,21 @@ static DEFINE_MUTEX(nvme_subsystems_lock);
static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_ctrl_base_chr_devt;
-static struct class *nvme_class;
-static struct class *nvme_subsys_class;
+static int nvme_class_uevent(const struct device *dev, struct kobj_uevent_env *env);
+static const struct class nvme_class = {
+ .name = "nvme",
+ .dev_uevent = nvme_class_uevent,
+};
+
+static const struct class nvme_subsys_class = {
+ .name = "nvme-subsystem",
+};
static DEFINE_IDA(nvme_ns_chr_minor_ida);
static dev_t nvme_ns_chr_devt;
-static struct class *nvme_ns_chr_class;
+static const struct class nvme_ns_chr_class = {
+ .name = "nvme-generic",
+};
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
@@ -713,7 +722,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd)
if (req->q->queuedata) {
struct nvme_ns *ns = req->q->disk->private_data;
- logging_enabled = ns->passthru_err_log_enabled;
+ logging_enabled = ns->head->passthru_err_log_enabled;
req->timeout = NVME_IO_TIMEOUT;
} else { /* no queuedata implies admin queue */
logging_enabled = nr->ctrl->passthru_err_log_enabled;
@@ -1153,6 +1162,10 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
} else {
effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+
+ /* Ignore execution restrictions if any relaxation bits are set */
+ if (effects & NVME_CMD_EFFECTS_CSER_MASK)
+ effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
}
return effects;
@@ -1394,8 +1407,10 @@ static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
sizeof(struct nvme_id_ctrl));
- if (error)
+ if (error) {
kfree(*id);
+ *id = NULL;
+ }
return error;
}
@@ -1524,6 +1539,7 @@ int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
if (error) {
dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
kfree(*id);
+ *id = NULL;
}
return error;
}
@@ -1723,12 +1739,23 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
-#ifdef CONFIG_BLK_DEV_INTEGRITY
-static void nvme_init_integrity(struct gendisk *disk,
- struct nvme_ns_head *head, u32 max_integrity_segments)
+static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head)
{
struct blk_integrity integrity = { };
+ blk_integrity_unregister(disk);
+
+ if (!head->ms)
+ return true;
+
+ /*
+ * PI can always be supported as we can ask the controller to simply
+ * insert/strip it, which is not possible for other kinds of metadata.
+ */
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) ||
+ !(head->features & NVME_NS_METADATA_SUPPORTED))
+ return nvme_ns_has_pi(head);
+
switch (head->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
switch (head->guard_type) {
@@ -1771,53 +1798,32 @@ static void nvme_init_integrity(struct gendisk *disk,
}
integrity.tuple_size = head->ms;
+ integrity.pi_offset = head->pi_offset;
blk_integrity_register(disk, &integrity);
- blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
-}
-#else
-static void nvme_init_integrity(struct gendisk *disk,
- struct nvme_ns_head *head, u32 max_integrity_segments)
-{
+ return true;
}
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk,
- struct nvme_ns_head *head)
+static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
{
- struct request_queue *queue = disk->queue;
- u32 max_discard_sectors;
-
- if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX)) {
- max_discard_sectors = nvme_lba_to_sect(head, ctrl->dmrsl);
- } else if (ctrl->oncs & NVME_CTRL_ONCS_DSM) {
- max_discard_sectors = UINT_MAX;
- } else {
- blk_queue_max_discard_sectors(queue, 0);
- return;
- }
+ struct nvme_ctrl *ctrl = ns->ctrl;
BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
NVME_DSM_MAX_RANGES);
- /*
- * If discard is already enabled, don't reset queue limits.
- *
- * This works around the fact that the block layer can't cope well with
- * updating the hardware limits when overridden through sysfs. This is
- * harmless because discard limits in NVMe are purely advisory.
- */
- if (queue->limits.max_discard_sectors)
- return;
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
+ lim->max_hw_discard_sectors =
+ nvme_lba_to_sect(ns->head, ctrl->dmrsl);
+ else if (ctrl->oncs & NVME_CTRL_ONCS_DSM)
+ lim->max_hw_discard_sectors = UINT_MAX;
+ else
+ lim->max_hw_discard_sectors = 0;
+
+ lim->discard_granularity = lim->logical_block_size;
- blk_queue_max_discard_sectors(queue, max_discard_sectors);
if (ctrl->dmrl)
- blk_queue_max_discard_segments(queue, ctrl->dmrl);
+ lim->max_discard_segments = ctrl->dmrl;
else
- blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
- queue->limits.discard_granularity = queue_logical_block_size(queue);
-
- if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
- blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
+ lim->max_discard_segments = NVME_DSM_MAX_RANGES;
}
static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
@@ -1828,42 +1834,38 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
a->csi == b->csi;
}
-static int nvme_init_ms(struct nvme_ctrl *ctrl, struct nvme_ns_head *head,
- struct nvme_id_ns *id)
+static int nvme_identify_ns_nvm(struct nvme_ctrl *ctrl, unsigned int nsid,
+ struct nvme_id_ns_nvm **nvmp)
{
- bool first = id->dps & NVME_NS_DPS_PI_FIRST;
- unsigned lbaf = nvme_lbaf_index(id->flbas);
- struct nvme_command c = { };
+ struct nvme_command c = {
+ .identify.opcode = nvme_admin_identify,
+ .identify.nsid = cpu_to_le32(nsid),
+ .identify.cns = NVME_ID_CNS_CS_NS,
+ .identify.csi = NVME_CSI_NVM,
+ };
struct nvme_id_ns_nvm *nvm;
- int ret = 0;
- u32 elbaf;
-
- head->pi_size = 0;
- head->ms = le16_to_cpu(id->lbaf[lbaf].ms);
- if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
- head->pi_size = sizeof(struct t10_pi_tuple);
- head->guard_type = NVME_NVM_NS_16B_GUARD;
- goto set_pi;
- }
+ int ret;
nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
if (!nvm)
return -ENOMEM;
- c.identify.opcode = nvme_admin_identify;
- c.identify.nsid = cpu_to_le32(head->ns_id);
- c.identify.cns = NVME_ID_CNS_CS_NS;
- c.identify.csi = NVME_CSI_NVM;
-
ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm));
if (ret)
- goto free_data;
+ kfree(nvm);
+ else
+ *nvmp = nvm;
+ return ret;
+}
- elbaf = le32_to_cpu(nvm->elbaf[lbaf]);
+static void nvme_configure_pi_elbas(struct nvme_ns_head *head,
+ struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm)
+{
+ u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]);
/* no support for storage tag formats right now */
if (nvme_elbaf_sts(elbaf))
- goto free_data;
+ return;
head->guard_type = nvme_elbaf_guard_type(elbaf);
switch (head->guard_type) {
@@ -1876,30 +1878,31 @@ static int nvme_init_ms(struct nvme_ctrl *ctrl, struct nvme_ns_head *head,
default:
break;
}
-
-free_data:
- kfree(nvm);
-set_pi:
- if (head->pi_size && (first || head->ms == head->pi_size))
- head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
- else
- head->pi_type = 0;
-
- return ret;
}
-static int nvme_configure_metadata(struct nvme_ctrl *ctrl,
- struct nvme_ns_head *head, struct nvme_id_ns *id)
+static void nvme_configure_metadata(struct nvme_ctrl *ctrl,
+ struct nvme_ns_head *head, struct nvme_id_ns *id,
+ struct nvme_id_ns_nvm *nvm)
{
- int ret;
-
- ret = nvme_init_ms(ctrl, head, id);
- if (ret)
- return ret;
-
head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+ head->pi_type = 0;
+ head->pi_size = 0;
+ head->pi_offset = 0;
+ head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms);
if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
- return 0;
+ return;
+
+ if (nvm && (ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
+ nvme_configure_pi_elbas(head, id, nvm);
+ } else {
+ head->pi_size = sizeof(struct t10_pi_tuple);
+ head->guard_type = NVME_NVM_NS_16B_GUARD;
+ }
+
+ if (head->pi_size && head->ms >= head->pi_size)
+ head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+ if (!(id->dps & NVME_NS_DPS_PI_FIRST))
+ head->pi_offset = head->ms - head->pi_size;
if (ctrl->ops->flags & NVME_F_FABRICS) {
/*
@@ -1908,7 +1911,7 @@ static int nvme_configure_metadata(struct nvme_ctrl *ctrl,
* remap the separate metadata buffer from the block layer.
*/
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
- return 0;
+ return;
head->features |= NVME_NS_EXT_LBAS;
@@ -1935,33 +1938,32 @@ static int nvme_configure_metadata(struct nvme_ctrl *ctrl,
else
head->features |= NVME_NS_METADATA_SUPPORTED;
}
- return 0;
}
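The pi_offset computation above records where the protection-information tuple sits inside each metadata buffer: when NVME_NS_DPS_PI_FIRST is clear, the tuple occupies the trailing pi_size bytes. A runnable check of that arithmetic for a hypothetical format with 16 metadata bytes and an 8-byte (16b guard) tuple:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical format: ms = 16 metadata bytes, 8-byte PI tuple. */
	uint16_t ms = 16, pi_size = 8;
	int pi_first = 0;		/* NVME_NS_DPS_PI_FIRST clear */

	uint8_t pi_offset = pi_first ? 0 : ms - pi_size;

	assert(pi_offset == 8);		/* tuple fills bytes 8..15 of the buffer */
	return 0;
}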
-static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
- struct request_queue *q)
+static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
{
- bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT;
-
- if (ctrl->max_hw_sectors) {
- u32 max_segments =
- (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1;
+ return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1;
+}
- max_segments = min_not_zero(max_segments, ctrl->max_segments);
- blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
- blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
- }
- blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, 3);
- blk_queue_write_cache(q, vwc, vwc);
+static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
+ struct queue_limits *lim)
+{
+ lim->max_hw_sectors = ctrl->max_hw_sectors;
+ lim->max_segments = min_t(u32, USHRT_MAX,
+ min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments));
+ lim->max_integrity_segments = ctrl->max_integrity_segments;
+ lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1;
+ lim->max_segment_size = UINT_MAX;
+ lim->dma_alignment = 3;
}
-static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk,
- struct nvme_ns_head *head, struct nvme_id_ns *id)
+static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id,
+ struct queue_limits *lim)
{
- sector_t capacity = nvme_lba_to_sect(head, le64_to_cpu(id->nsze));
+ struct nvme_ns_head *head = ns->head;
u32 bs = 1U << head->lba_shift;
u32 atomic_bs, phys_bs, io_opt = 0;
+ bool valid = true;
/*
* The block layer can't support LBA sizes larger than the page size
@@ -1969,12 +1971,10 @@ static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk,
* allow block I/O.
*/
if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) {
- capacity = 0;
bs = (1 << 9);
+ valid = false;
}
- blk_integrity_unregister(disk);
-
atomic_bs = phys_bs = bs;
if (id->nabo == 0) {
/*
@@ -1985,7 +1985,7 @@ static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk,
if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
else
- atomic_bs = (1 + ctrl->subsys->awupf) * bs;
+ atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs;
}
if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
@@ -1995,36 +1995,20 @@ static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk,
io_opt = bs * (1 + le16_to_cpu(id->nows));
}
- blk_queue_logical_block_size(disk->queue, bs);
/*
* Linux filesystems assume writing a single physical block is
* an atomic operation. Hence limit the physical block size to the
* value of the Atomic Write Unit Power Fail parameter.
*/
- blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs));
- blk_queue_io_min(disk->queue, phys_bs);
- blk_queue_io_opt(disk->queue, io_opt);
-
- /*
- * Register a metadata profile for PI, or the plain non-integrity NVMe
- * metadata masquerading as Type 0 if supported, otherwise reject block
- * I/O to namespaces with metadata except when the namespace supports
- * PI, as it can strip/insert in that case.
- */
- if (head->ms) {
- if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
- (head->features & NVME_NS_METADATA_SUPPORTED))
- nvme_init_integrity(disk, head,
- ctrl->max_integrity_segments);
- else if (!nvme_ns_has_pi(head))
- capacity = 0;
- }
-
- set_capacity_and_notify(disk, capacity);
-
- nvme_config_discard(ctrl, disk, head);
- blk_queue_max_write_zeroes_sectors(disk->queue,
- ctrl->max_zeroes_sectors);
+ lim->logical_block_size = bs;
+ lim->physical_block_size = min(phys_bs, atomic_bs);
+ lim->io_min = phys_bs;
+ lim->io_opt = io_opt;
+ if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
+ lim->max_write_zeroes_sectors = UINT_MAX;
+ else
+ lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors;
+ return valid;
}
static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
@@ -2038,7 +2022,8 @@ static inline bool nvme_first_scan(struct gendisk *disk)
return !disk_live(disk);
}
-static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
+static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id,
+ struct queue_limits *lim)
{
struct nvme_ctrl *ctrl = ns->ctrl;
u32 iob;
@@ -2066,38 +2051,36 @@ static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
return;
}
- blk_queue_chunk_sectors(ns->queue, iob);
+ lim->chunk_sectors = iob;
}
static int nvme_update_ns_info_generic(struct nvme_ns *ns,
struct nvme_ns_info *info)
{
+ struct queue_limits lim;
+ int ret;
+
blk_mq_freeze_queue(ns->disk->queue);
- nvme_set_queue_limits(ns->ctrl, ns->queue);
+ lim = queue_limits_start_update(ns->disk->queue);
+ nvme_set_ctrl_limits(ns->ctrl, &lim);
+ ret = queue_limits_commit_update(ns->disk->queue, &lim);
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
blk_mq_unfreeze_queue(ns->disk->queue);
- if (nvme_ns_head_multipath(ns->head)) {
- blk_mq_freeze_queue(ns->head->disk->queue);
- set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
- nvme_mpath_revalidate_paths(ns);
- blk_stack_limits(&ns->head->disk->queue->limits,
- &ns->queue->limits, 0);
- ns->head->disk->flags |= GENHD_FL_HIDDEN;
- blk_mq_unfreeze_queue(ns->head->disk->queue);
- }
-
/* Hide the block-interface for these devices */
- ns->disk->flags |= GENHD_FL_HIDDEN;
- set_bit(NVME_NS_READY, &ns->flags);
-
- return 0;
+ if (!ret)
+ ret = -ENODEV;
+ return ret;
}
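queue_limits_start_update()/queue_limits_commit_update() replace the old blk_queue_*() setters with a transaction: the driver edits a private copy of the limits and the block layer validates and publishes it atomically on commit. A sketch of the idiom as used throughout this file; the function and new_max are illustrative:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Sketch: atomically update a queue's limits. */
static int demo_update_limits(struct request_queue *q, unsigned int new_max)
{
	struct queue_limits lim;
	int ret;

	blk_mq_freeze_queue(q);			/* quiesce I/O, as the driver does */
	lim = queue_limits_start_update(q);	/* locks and snapshots q->limits */
	lim.max_hw_sectors = new_max;
	ret = queue_limits_commit_update(q, &lim); /* validate + publish, unlock */
	blk_mq_unfreeze_queue(q);

	return ret;
}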
static int nvme_update_ns_info_block(struct nvme_ns *ns,
struct nvme_ns_info *info)
{
+ bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT;
+ struct queue_limits lim;
+ struct nvme_id_ns_nvm *nvm = NULL;
struct nvme_id_ns *id;
+ sector_t capacity;
unsigned lbaf;
int ret;
@@ -2109,30 +2092,52 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
/* namespace not allocated or attached */
info->is_removed = true;
ret = -ENODEV;
- goto error;
+ goto out;
+ }
+
+ if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
+ ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
+ if (ret < 0)
+ goto out;
}
blk_mq_freeze_queue(ns->disk->queue);
lbaf = nvme_lbaf_index(id->flbas);
ns->head->lba_shift = id->lbaf[lbaf].ds;
ns->head->nuse = le64_to_cpu(id->nuse);
- nvme_set_queue_limits(ns->ctrl, ns->queue);
-
- ret = nvme_configure_metadata(ns->ctrl, ns->head, id);
- if (ret < 0) {
- blk_mq_unfreeze_queue(ns->disk->queue);
- goto out;
- }
- nvme_set_chunk_sectors(ns, id);
- nvme_update_disk_info(ns->ctrl, ns->disk, ns->head, id);
+ capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
- if (ns->head->ids.csi == NVME_CSI_ZNS) {
- ret = nvme_update_zone_info(ns, lbaf);
+ lim = queue_limits_start_update(ns->disk->queue);
+ nvme_set_ctrl_limits(ns->ctrl, &lim);
+ nvme_configure_metadata(ns->ctrl, ns->head, id, nvm);
+ nvme_set_chunk_sectors(ns, id, &lim);
+ if (!nvme_update_disk_info(ns, id, &lim))
+ capacity = 0;
+ nvme_config_discard(ns, &lim);
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ ns->head->ids.csi == NVME_CSI_ZNS) {
+ ret = nvme_update_zone_info(ns, lbaf, &lim);
if (ret) {
blk_mq_unfreeze_queue(ns->disk->queue);
goto out;
}
}
+ ret = queue_limits_commit_update(ns->disk->queue, &lim);
+ if (ret) {
+ blk_mq_unfreeze_queue(ns->disk->queue);
+ goto out;
+ }
+
+ /*
+ * Register a metadata profile for PI, or the plain non-integrity NVMe
+ * metadata masquerading as Type 0 if supported, otherwise reject block
+ * I/O to namespaces with metadata except when the namespace supports
+ * PI, as it can strip/insert in that case.
+ */
+ if (!nvme_init_integrity(ns->disk, ns->head))
+ capacity = 0;
+
+ set_capacity_and_notify(ns->disk, capacity);
/*
* Only set the DEAC bit if the device guarantees that reads from
@@ -2143,62 +2148,81 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
ns->head->features |= NVME_NS_DEAC;
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
+ blk_queue_write_cache(ns->disk->queue, vwc, vwc);
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue);
if (blk_queue_is_zoned(ns->queue)) {
- ret = nvme_revalidate_zones(ns);
+ ret = blk_revalidate_disk_zones(ns->disk, NULL);
if (ret && !nvme_first_scan(ns->disk))
goto out;
}
- if (nvme_ns_head_multipath(ns->head)) {
- blk_mq_freeze_queue(ns->head->disk->queue);
- nvme_update_disk_info(ns->ctrl, ns->head->disk, ns->head, id);
- set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
- nvme_mpath_revalidate_paths(ns);
- blk_stack_limits(&ns->head->disk->queue->limits,
- &ns->queue->limits, 0);
- disk_update_readahead(ns->head->disk);
- blk_mq_unfreeze_queue(ns->head->disk->queue);
- }
-
ret = 0;
out:
- /*
- * If probing fails due an unsupported feature, hide the block device,
- * but still allow other access.
- */
- if (ret == -ENODEV) {
- ns->disk->flags |= GENHD_FL_HIDDEN;
- set_bit(NVME_NS_READY, &ns->flags);
- ret = 0;
- }
-
-error:
+ kfree(nvm);
kfree(id);
return ret;
}
static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
{
+ bool unsupported = false;
+ int ret;
+
switch (info->ids.csi) {
case NVME_CSI_ZNS:
if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
dev_info(ns->ctrl->device,
"block device for nsid %u not supported without CONFIG_BLK_DEV_ZONED\n",
info->nsid);
- return nvme_update_ns_info_generic(ns, info);
+ ret = nvme_update_ns_info_generic(ns, info);
+ break;
}
- return nvme_update_ns_info_block(ns, info);
+ ret = nvme_update_ns_info_block(ns, info);
+ break;
case NVME_CSI_NVM:
- return nvme_update_ns_info_block(ns, info);
+ ret = nvme_update_ns_info_block(ns, info);
+ break;
default:
dev_info(ns->ctrl->device,
"block device for nsid %u not supported (csi %u)\n",
info->nsid, info->ids.csi);
- return nvme_update_ns_info_generic(ns, info);
+ ret = nvme_update_ns_info_generic(ns, info);
+ break;
+ }
+
+ /*
+ * If probing fails due an unsupported feature, hide the block device,
+ * but still allow other access.
+ */
+ if (ret == -ENODEV) {
+ ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
+ unsupported = true;
+ ret = 0;
}
+
+ if (!ret && nvme_ns_head_multipath(ns->head)) {
+ struct queue_limits lim;
+
+ blk_mq_freeze_queue(ns->head->disk->queue);
+ if (unsupported)
+ ns->head->disk->flags |= GENHD_FL_HIDDEN;
+ else
+ nvme_init_integrity(ns->head->disk, ns->head);
+ set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk));
+ set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
+ nvme_mpath_revalidate_paths(ns);
+
+ lim = queue_limits_start_update(ns->head->disk->queue);
+ queue_limits_stack_bdev(&lim, ns->disk->part0, 0,
+ ns->head->disk->disk_name);
+ ret = queue_limits_commit_update(ns->head->disk->queue, &lim);
+ blk_mq_unfreeze_queue(ns->head->disk->queue);
+ }
+
+ return ret;
}
#ifdef CONFIG_BLK_SED_OPAL
@@ -2873,7 +2897,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
subsys->awupf = le16_to_cpu(id->awupf);
nvme_mpath_default_iopolicy(subsys);
- subsys->dev.class = nvme_subsys_class;
+ subsys->dev.class = &nvme_subsys_class;
subsys->dev.release = nvme_release_subsystem;
subsys->dev.groups = nvme_subsys_attrs_groups;
dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance);
@@ -3113,11 +3137,17 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ct
return -EINVAL;
}
+ if (!ctrl->maxcmd) {
+ dev_err(ctrl->device, "Maximum outstanding commands is 0\n");
+ return -EINVAL;
+ }
+
return 0;
}
static int nvme_init_identify(struct nvme_ctrl *ctrl)
{
+ struct queue_limits lim;
struct nvme_id_ctrl *id;
u32 max_hw_sectors;
bool prev_apst_enabled;
@@ -3184,7 +3214,12 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->max_hw_sectors =
min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
- nvme_set_queue_limits(ctrl, ctrl->admin_q);
+ lim = queue_limits_start_update(ctrl->admin_q);
+ nvme_set_ctrl_limits(ctrl, &lim);
+ ret = queue_limits_commit_update(ctrl->admin_q, &lim);
+ if (ret)
+ goto out_free;
+
ctrl->sgls = le32_to_cpu(id->sgls);
ctrl->kas = le16_to_cpu(id->kas);
ctrl->max_namespaces = le32_to_cpu(id->mnan);
@@ -3416,7 +3451,7 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
if (minor < 0)
return minor;
cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
- cdev_device->class = nvme_ns_chr_class;
+ cdev_device->class = &nvme_ns_chr_class;
cdev_device->release = nvme_cdev_rel;
device_initialize(cdev_device);
cdev_init(cdev, fops);
@@ -3688,7 +3723,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
if (!ns)
return;
- disk = blk_mq_alloc_disk(ctrl->tagset, ns);
+ disk = blk_mq_alloc_disk(ctrl->tagset, NULL, ns);
if (IS_ERR(disk))
goto out_free_ns;
disk->fops = &nvme_bdev_ops;
@@ -3696,7 +3731,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
ns->disk = disk;
ns->queue = disk->queue;
- ns->passthru_err_log_enabled = false;
if (ctrl->opts && ctrl->opts->data_digest)
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
@@ -3762,8 +3796,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
/*
* Set ns->disk->device->driver_data to ns so we can access
- * ns->logging_enabled in nvme_passthru_err_log_enabled_store() and
- * nvme_passthru_err_log_enabled_show().
+ * ns->head->passthru_err_log_enabled in
+ * nvme_io_passthru_err_log_enabled_[store | show]().
*/
dev_set_drvdata(disk_to_dev(ns->disk), ns);
@@ -4191,6 +4225,7 @@ static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
struct nvme_fw_slot_info_log *log;
+ u8 next_fw_slot, cur_fw_slot;
log = kmalloc(sizeof(*log), GFP_KERNEL);
if (!log)
@@ -4202,13 +4237,15 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
goto out_free_log;
}
- if (log->afi & 0x70 || !(log->afi & 0x7)) {
+ cur_fw_slot = log->afi & 0x7;
+ next_fw_slot = (log->afi & 0x70) >> 4;
+ if (!cur_fw_slot || (next_fw_slot && (cur_fw_slot != next_fw_slot))) {
dev_info(ctrl->device,
"Firmware is activated after next Controller Level Reset\n");
goto out_free_log;
}
- memcpy(ctrl->subsys->firmware_rev, &log->frs[(log->afi & 0x7) - 1],
+ memcpy(ctrl->subsys->firmware_rev, &log->frs[cur_fw_slot - 1],
sizeof(ctrl->subsys->firmware_rev));
out_free_log:
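The rewritten check decodes the Active Firmware Info byte explicitly: bits 2:0 name the running slot, bits 6:4 the slot that becomes active after the next reset, and a pending activation is now reported only when the two differ. A runnable decode of a made-up afi value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t afi = 0x21;			/* hypothetical: next slot 2, current slot 1 */

	uint8_t cur_fw_slot  = afi & 0x7;	/* bits 2:0 */
	uint8_t next_fw_slot = (afi & 0x70) >> 4; /* bits 6:4 */

	assert(cur_fw_slot == 1 && next_fw_slot == 2);
	/* firmware_rev must not be updated yet: an activation is pending */
	assert(!cur_fw_slot || (next_fw_slot && cur_fw_slot != next_fw_slot));
	return 0;
}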
@@ -4347,6 +4384,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, unsigned int cmd_size)
{
+ struct queue_limits lim = {};
int ret;
memset(set, 0, sizeof(*set));
@@ -4366,14 +4404,14 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
if (ret)
return ret;
- ctrl->admin_q = blk_mq_init_queue(set);
+ ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL);
if (IS_ERR(ctrl->admin_q)) {
ret = PTR_ERR(ctrl->admin_q);
goto out_free_tagset;
}
if (ctrl->ops->flags & NVME_F_FABRICS) {
- ctrl->fabrics_q = blk_mq_init_queue(set);
+ ctrl->fabrics_q = blk_mq_alloc_queue(set, NULL, NULL);
if (IS_ERR(ctrl->fabrics_q)) {
ret = PTR_ERR(ctrl->fabrics_q);
goto out_cleanup_admin_q;
@@ -4437,7 +4475,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
return ret;
if (ctrl->ops->flags & NVME_F_FABRICS) {
- ctrl->connect_q = blk_mq_init_queue(set);
+ ctrl->connect_q = blk_mq_alloc_queue(set, NULL, NULL);
if (IS_ERR(ctrl->connect_q)) {
ret = PTR_ERR(ctrl->connect_q);
goto out_free_tag_set;
@@ -4607,7 +4645,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->device = &ctrl->ctrl_device;
ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt),
ctrl->instance);
- ctrl->device->class = nvme_class;
+ ctrl->device->class = &nvme_class;
ctrl->device->parent = ctrl->dev;
if (ops->dev_attr_groups)
ctrl->device->groups = ops->dev_attr_groups;
@@ -4840,42 +4878,36 @@ static int __init nvme_core_init(void)
if (result < 0)
goto destroy_delete_wq;
- nvme_class = class_create("nvme");
- if (IS_ERR(nvme_class)) {
- result = PTR_ERR(nvme_class);
+ result = class_register(&nvme_class);
+ if (result)
goto unregister_chrdev;
- }
- nvme_class->dev_uevent = nvme_class_uevent;
- nvme_subsys_class = class_create("nvme-subsystem");
- if (IS_ERR(nvme_subsys_class)) {
- result = PTR_ERR(nvme_subsys_class);
+ result = class_register(&nvme_subsys_class);
+ if (result)
goto destroy_class;
- }
result = alloc_chrdev_region(&nvme_ns_chr_devt, 0, NVME_MINORS,
"nvme-generic");
if (result < 0)
goto destroy_subsys_class;
- nvme_ns_chr_class = class_create("nvme-generic");
- if (IS_ERR(nvme_ns_chr_class)) {
- result = PTR_ERR(nvme_ns_chr_class);
+ result = class_register(&nvme_ns_chr_class);
+ if (result)
goto unregister_generic_ns;
- }
+
result = nvme_init_auth();
if (result)
goto destroy_ns_chr;
return 0;
destroy_ns_chr:
- class_destroy(nvme_ns_chr_class);
+ class_unregister(&nvme_ns_chr_class);
unregister_generic_ns:
unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
destroy_subsys_class:
- class_destroy(nvme_subsys_class);
+ class_unregister(&nvme_subsys_class);
destroy_class:
- class_destroy(nvme_class);
+ class_unregister(&nvme_class);
unregister_chrdev:
unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_delete_wq:
@@ -4891,9 +4923,9 @@ out:
static void __exit nvme_core_exit(void)
{
nvme_exit_auth();
- class_destroy(nvme_ns_chr_class);
- class_destroy(nvme_subsys_class);
- class_destroy(nvme_class);
+ class_unregister(&nvme_ns_chr_class);
+ class_unregister(&nvme_subsys_class);
+ class_unregister(&nvme_class);
unregister_chrdev_region(nvme_ns_chr_devt, NVME_MINORS);
unregister_chrdev_region(nvme_ctrl_base_chr_devt, NVME_MINORS);
destroy_workqueue(nvme_delete_wq);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 3499acbf6a82..1f0ea1f32d22 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -534,6 +534,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
+ goto out_free_data;
}
result = le32_to_cpu(res.u32);
if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
@@ -637,7 +638,7 @@ static struct key *nvmf_parse_key(int key_id)
}
key = key_lookup(key_id);
- if (!IS_ERR(key))
+ if (IS_ERR(key))
pr_err("key id %08x not found\n", key_id);
else
pr_debug("Using key id %08x\n", key_id);
@@ -1318,7 +1319,10 @@ out_free_opts:
return ERR_PTR(ret);
}
-static struct class *nvmf_class;
+static const struct class nvmf_class = {
+ .name = "nvme-fabrics",
+};
+
static struct device *nvmf_device;
static DEFINE_MUTEX(nvmf_dev_mutex);
@@ -1438,15 +1442,14 @@ static int __init nvmf_init(void)
if (!nvmf_default_host)
return -ENOMEM;
- nvmf_class = class_create("nvme-fabrics");
- if (IS_ERR(nvmf_class)) {
+ ret = class_register(&nvmf_class);
+ if (ret) {
pr_err("couldn't register class nvme-fabrics\n");
- ret = PTR_ERR(nvmf_class);
goto out_free_host;
}
nvmf_device =
- device_create(nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
+ device_create(&nvmf_class, NULL, MKDEV(0, 0), NULL, "ctl");
if (IS_ERR(nvmf_device)) {
pr_err("couldn't create nvme-fabrics device!\n");
ret = PTR_ERR(nvmf_device);
@@ -1462,9 +1465,9 @@ static int __init nvmf_init(void)
return 0;
out_destroy_device:
- device_destroy(nvmf_class, MKDEV(0, 0));
+ device_destroy(&nvmf_class, MKDEV(0, 0));
out_destroy_class:
- class_destroy(nvmf_class);
+ class_unregister(&nvmf_class);
out_free_host:
nvmf_host_put(nvmf_default_host);
return ret;
@@ -1473,8 +1476,8 @@ out_free_host:
static void __exit nvmf_exit(void)
{
misc_deregister(&nvmf_misc);
- device_destroy(nvmf_class, MKDEV(0, 0));
- class_destroy(nvmf_class);
+ device_destroy(&nvmf_class, MKDEV(0, 0));
+ class_unregister(&nvmf_class);
nvmf_host_put(nvmf_default_host);
BUILD_BUG_ON(sizeof(struct nvmf_common_command) != 64);
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 18f5c1be5d67..3dfd5ae99ae0 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -228,7 +228,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
length = (io.nblocks + 1) << ns->head->lba_shift;
if ((io.control & NVME_RW_PRINFO_PRACT) &&
- ns->head->ms == sizeof(struct t10_pi_tuple)) {
+ (ns->head->ms == ns->head->pi_size)) {
/*
* Protection information is stripped/inserted by the
* controller.
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 74de1e64aeea..5397fb428b24 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -516,6 +516,7 @@ static void nvme_requeue_work(struct work_struct *work)
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
+ struct queue_limits lim;
bool vwc = false;
mutex_init(&head->lock);
@@ -532,9 +533,14 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
!nvme_is_unique_nsid(ctrl, head) || !multipath)
return 0;
- head->disk = blk_alloc_disk(ctrl->numa_node);
- if (!head->disk)
- return -ENOMEM;
+ blk_set_stacking_limits(&lim);
+ lim.dma_alignment = 3;
+ if (head->ids.csi != NVME_CSI_ZNS)
+ lim.max_zone_append_sectors = 0;
+
+ head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
+ if (IS_ERR(head->disk))
+ return PTR_ERR(head->disk);
head->disk->fops = &nvme_ns_head_ops;
head->disk->private_data = head;
sprintf(head->disk->disk_name, "nvme%dn%d",
@@ -553,11 +559,6 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
- /* set to a default value of 512 until the disk is validated */
- blk_queue_logical_block_size(head->disk->queue, 512);
- blk_set_stacking_limits(&head->disk->queue->limits);
- blk_queue_dma_alignment(head->disk->queue, 3);
-
/* we need to propagate up the VMC settings */
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
vwc = true;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 3897334e3950..24193fcb8bd5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -455,6 +455,7 @@ struct nvme_ns_head {
struct list_head entry;
struct kref ref;
bool shared;
+ bool passthru_err_log_enabled;
int instance;
struct nvme_effects_log *effects;
u64 nuse;
@@ -463,6 +464,7 @@ struct nvme_ns_head {
u16 ms;
u16 pi_size;
u8 pi_type;
+ u8 pi_offset;
u8 guard_type;
u16 sgs;
u32 sws;
@@ -523,7 +525,6 @@ struct nvme_ns {
struct device cdev_device;
struct nvme_fault_inject fault_inject;
- bool passthru_err_log_enabled;
};
/* NVMe ns supports metadata actions by the controller (generate/strip) */
@@ -1035,11 +1036,11 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
}
#endif /* CONFIG_NVME_MULTIPATH */
-int nvme_revalidate_zones(struct nvme_ns *ns);
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data);
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
+ struct queue_limits *lim);
#ifdef CONFIG_BLK_DEV_ZONED
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmnd,
enum nvme_zone_mgmt_action action);
@@ -1050,13 +1051,6 @@ static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
{
return BLK_STS_NOTSUPP;
}
-
-static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
-{
- dev_warn(ns->ctrl->device,
- "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
- return -EPROTONOSUPPORT;
-}
#endif
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 20fdd40b1879..366f0bb4ebfc 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1006,6 +1006,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
{
int ret;
bool changed;
+ u16 max_queue_size;
ret = nvme_rdma_configure_admin_queue(ctrl, new);
if (ret)
@@ -1030,11 +1031,16 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
}
- if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+ if (ctrl->ctrl.max_integrity_segments)
+ max_queue_size = NVME_RDMA_MAX_METADATA_QUEUE_SIZE;
+ else
+ max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE;
+
+ if (ctrl->ctrl.sqsize + 1 > max_queue_size) {
dev_warn(ctrl->ctrl.device,
- "ctrl sqsize %u > max queue size %u, clamping down\n",
- ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
- ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+ "ctrl sqsize %u > max queue size %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, max_queue_size);
+ ctrl->ctrl.sqsize = max_queue_size - 1;
}
if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index d099218e494a..09fcaa519e5b 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -48,8 +48,8 @@ static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- int err;
bool passthru_err_log_enabled;
+ int err;
err = kstrtobool(buf, &passthru_err_log_enabled);
if (err)
@@ -60,25 +60,34 @@ static ssize_t nvme_adm_passthru_err_log_enabled_store(struct device *dev,
return count;
}
+static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+
+ if (nvme_disk_is_ns_head(disk))
+ return disk->private_data;
+ return nvme_get_ns_from_dev(dev)->head;
+}
+
static ssize_t nvme_io_passthru_err_log_enabled_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nvme_ns *n = dev_get_drvdata(dev);
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
- return sysfs_emit(buf, n->passthru_err_log_enabled ? "on\n" : "off\n");
+ return sysfs_emit(buf, head->passthru_err_log_enabled ? "on\n" : "off\n");
}
static ssize_t nvme_io_passthru_err_log_enabled_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct nvme_ns *ns = dev_get_drvdata(dev);
- int err;
+ struct nvme_ns_head *head = dev_to_ns_head(dev);
bool passthru_err_log_enabled;
+ int err;
err = kstrtobool(buf, &passthru_err_log_enabled);
if (err)
return -EINVAL;
- ns->passthru_err_log_enabled = passthru_err_log_enabled;
+ head->passthru_err_log_enabled = passthru_err_log_enabled;
return count;
}
@@ -91,15 +100,6 @@ static struct device_attribute dev_attr_io_passthru_err_log_enabled = \
__ATTR(passthru_err_log_enabled, S_IRUGO | S_IWUSR, \
nvme_io_passthru_err_log_enabled_show, nvme_io_passthru_err_log_enabled_store);
-static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev)
-{
- struct gendisk *disk = dev_to_disk(dev);
-
- if (nvme_disk_is_ns_head(disk))
- return disk->private_data;
- return nvme_get_ns_from_dev(dev)->head;
-}
-
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -221,14 +221,11 @@ static int ns_update_nuse(struct nvme_ns *ns)
ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, &id);
if (ret)
- goto out_free_id;
+ return ret;
ns->head->nuse = le64_to_cpu(id->nuse);
-
-out_free_id:
kfree(id);
-
- return ret;
+ return 0;
}
static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index a6d596e05602..3692b56cb58d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1344,7 +1344,6 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
- struct page *page;
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
unsigned int noreclaim_flag;
@@ -1355,11 +1354,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
if (queue->hdr_digest || queue->data_digest)
nvme_tcp_free_crypto(queue);
- if (queue->pf_cache.va) {
- page = virt_to_head_page(queue->pf_cache.va);
- __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
- queue->pf_cache.va = NULL;
- }
+ page_frag_cache_drain(&queue->pf_cache);
noreclaim_flag = memalloc_noreclaim_save();
/* ->sock will be released by fput() */
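page_frag_cache_drain() folds the removed open-coded sequence into a single helper. A sketch of the equivalence, restating the deleted lines:

#include <linux/gfp.h>
#include <linux/mm.h>

/* What page_frag_cache_drain(&queue->pf_cache) does, per the lines removed above. */
static void demo_drain(struct page_frag_cache *nc)
{
	if (nc->va) {
		struct page *page = virt_to_head_page(nc->va);

		__page_frag_cache_drain(page, nc->pagecnt_bias);
		nc->va = NULL;
	}
}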
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index 499bbb0eee8d..722384bcc765 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -7,16 +7,6 @@
#include <linux/vmalloc.h>
#include "nvme.h"
-int nvme_revalidate_zones(struct nvme_ns *ns)
-{
- struct request_queue *q = ns->queue;
-
- blk_queue_chunk_sectors(q, ns->head->zsze);
- blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
-
- return blk_revalidate_disk_zones(ns->disk, NULL);
-}
-
static int nvme_set_max_append(struct nvme_ctrl *ctrl)
{
struct nvme_command c = { };
@@ -45,10 +35,10 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
return 0;
}
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
+int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
+ struct queue_limits *lim)
{
struct nvme_effects_log *log = ns->head->effects;
- struct request_queue *q = ns->queue;
struct nvme_command c = { };
struct nvme_id_ns_zns *id;
int status;
@@ -109,10 +99,12 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
goto free_data;
}
- disk_set_zoned(ns->disk);
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
- disk_set_max_open_zones(ns->disk, le32_to_cpu(id->mor) + 1);
- disk_set_max_active_zones(ns->disk, le32_to_cpu(id->mar) + 1);
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
+ lim->zoned = 1;
+ lim->max_open_zones = le32_to_cpu(id->mor) + 1;
+ lim->max_active_zones = le32_to_cpu(id->mar) + 1;
+ lim->chunk_sectors = ns->head->zsze;
+ lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
free_data:
kfree(id);
return status;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 39cb570f833d..f5b7054a4a05 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -428,7 +428,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->cqes = (0x4 << 4) | 0x4;
/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
- id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 2482a0db2504..77a6e817b315 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -273,6 +273,32 @@ static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
CONFIGFS_ATTR(nvmet_, param_inline_data_size);
+static ssize_t nvmet_param_max_queue_size_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", port->max_queue_size);
+}
+
+static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+ int ret;
+
+ if (nvmet_is_port_enabled(port, __func__))
+ return -EACCES;
+ ret = kstrtoint(page, 0, &port->max_queue_size);
+ if (ret) {
+ pr_err("Invalid value '%s' for max_queue_size\n", page);
+ return -EINVAL;
+ }
+ return count;
+}
+
+CONFIGFS_ATTR(nvmet_, param_max_queue_size);
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
char *page)
@@ -1859,6 +1885,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = {
&nvmet_attr_addr_trtype,
&nvmet_attr_addr_tsas,
&nvmet_attr_param_inline_data_size,
+ &nvmet_attr_param_max_queue_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_attr_param_pi_enable,
#endif
@@ -1917,6 +1944,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
INIT_LIST_HEAD(&port->subsystems);
INIT_LIST_HEAD(&port->referrals);
port->inline_data_size = -1; /* < 0 == let the transport choose */
+ port->max_queue_size = -1; /* < 0 == let the transport choose */
port->disc_addr.portid = cpu_to_le16(portid);
port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 8658e9c08534..6bbe4df0166c 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -358,6 +358,18 @@ int nvmet_enable_port(struct nvmet_port *port)
if (port->inline_data_size < 0)
port->inline_data_size = 0;
+ /*
+ * If the transport set max_queue_size, clamp it to the target
+ * limits; fall back to the target default if the transport
+ * didn't set it at all.
+ */
+ if (port->max_queue_size < 0)
+ port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
+ else
+ port->max_queue_size = clamp_t(int, port->max_queue_size,
+ NVMET_MIN_QUEUE_SIZE,
+ NVMET_MAX_QUEUE_SIZE);
+
port->enabled = true;
port->tr_ops = ops;
return 0;
@@ -1223,9 +1235,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
if (ctrl->ops->get_max_queue_size)
- ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+ ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
+ ctrl->port->max_queue_size) - 1;
else
- ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+ ctrl->cap |= ctrl->port->max_queue_size - 1;
if (nvmet_is_passthru_subsys(ctrl->subsys))
nvmet_passthrough_override_cap(ctrl);
@@ -1411,6 +1424,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
+ ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
nvmet_init_cap(ctrl);
WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
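
The clamping policy added to nvmet_enable_port() is easy to check in isolation; a self-contained userspace mirror of the clamp_t() logic (illustrative values only):

    #include <stdio.h>

    #define NVMET_MIN_QUEUE_SIZE 16
    #define NVMET_MAX_QUEUE_SIZE 1024

    static int clamp_queue_size(int requested)
    {
            if (requested < 0)              /* transport left it unset */
                    return NVMET_MAX_QUEUE_SIZE;
            if (requested < NVMET_MIN_QUEUE_SIZE)
                    return NVMET_MIN_QUEUE_SIZE;
            if (requested > NVMET_MAX_QUEUE_SIZE)
                    return NVMET_MAX_QUEUE_SIZE;
            return requested;
    }

    int main(void)
    {
            int tests[] = { -1, 8, 128, 4096 };

            for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
                    printf("%5d -> %d\n", tests[i], clamp_queue_size(tests[i]));
            return 0;
    }

-1 maps to 1024 (the default), 8 is raised to 16, 128 passes through, and 4096 is capped at 1024.
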
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 68e82ccc0e4e..ce54da8c6b36 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -282,7 +282,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
id->lpa = (1 << 2);
/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
- id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
if (ctrl->ops->flags & NVMF_KEYED_SGLS)
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index d8da840a1c0e..b23f4cf840bd 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -157,7 +157,8 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
}
- if (sqsize > mqes) {
+ /* for fabrics, this value applies only to the I/O Submission Queues */
+ if (qid && sqsize > mqes) {
pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
sqsize, mqes, ctrl->cntlid);
req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
@@ -209,7 +210,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
struct nvmf_connect_command *c = &req->cmd->connect;
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl = NULL;
- u16 status = 0;
+ u16 status;
int ret;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
@@ -251,8 +252,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
if (status)
goto out;
- ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
-
uuid_copy(&ctrl->hostid, &d->hostid);
ret = nvmet_setup_auth(ctrl);
@@ -290,7 +289,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
struct nvmf_connect_data *d;
struct nvmet_ctrl *ctrl;
u16 qid = le16_to_cpu(c->qid);
- u16 status = 0;
+ u16 status;
if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data)))
return;
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 1471af250ea6..913cd2ec7a6f 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1556,7 +1556,9 @@ static const struct attribute_group *fcloop_dev_attr_groups[] = {
NULL,
};
-static struct class *fcloop_class;
+static const struct class fcloop_class = {
+ .name = "fcloop",
+};
static struct device *fcloop_device;
@@ -1564,15 +1566,14 @@ static int __init fcloop_init(void)
{
int ret;
- fcloop_class = class_create("fcloop");
- if (IS_ERR(fcloop_class)) {
+ ret = class_register(&fcloop_class);
+ if (ret) {
pr_err("couldn't register class fcloop\n");
- ret = PTR_ERR(fcloop_class);
return ret;
}
fcloop_device = device_create_with_groups(
- fcloop_class, NULL, MKDEV(0, 0), NULL,
+ &fcloop_class, NULL, MKDEV(0, 0), NULL,
fcloop_dev_attr_groups, "ctl");
if (IS_ERR(fcloop_device)) {
pr_err("couldn't create ctl device!\n");
@@ -1585,7 +1586,7 @@ static int __init fcloop_init(void)
return 0;
out_destroy_class:
- class_destroy(fcloop_class);
+ class_unregister(&fcloop_class);
return ret;
}
@@ -1643,8 +1644,8 @@ static void __exit fcloop_exit(void)
put_device(fcloop_device);
- device_destroy(fcloop_class, MKDEV(0, 0));
- class_destroy(fcloop_class);
+ device_destroy(&fcloop_class, MKDEV(0, 0));
+ class_unregister(&fcloop_class);
}
module_init(fcloop_init);
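
The fcloop conversion follows the driver-core move away from class_create() toward statically defined classes; a minimal sketch of the pattern (hypothetical module, assuming the class_register()/class_unregister() API used in the diff):

    #include <linux/device/class.h>
    #include <linux/module.h>

    static const struct class example_class = {
            .name = "example",
    };

    static int __init example_init(void)
    {
            /* registers a statically defined class; nothing to allocate or free */
            return class_register(&example_class);
    }

    static void __exit example_exit(void)
    {
            class_unregister(&example_class);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

Because the class object is no longer returned from an allocating constructor, the IS_ERR()/PTR_ERR() dance disappears and error handling reduces to checking the int returned by class_register().
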
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index f11400a908f2..6426aac2634a 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -50,10 +50,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
- if (ns->bdev_handle) {
- bdev_release(ns->bdev_handle);
+ if (ns->bdev_file) {
+ fput(ns->bdev_file);
ns->bdev = NULL;
- ns->bdev_handle = NULL;
+ ns->bdev_file = NULL;
}
}
@@ -85,18 +85,18 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
if (ns->buffered_io)
return -ENOTBLK;
- ns->bdev_handle = bdev_open_by_path(ns->device_path,
+ ns->bdev_file = bdev_file_open_by_path(ns->device_path,
BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
- if (IS_ERR(ns->bdev_handle)) {
- ret = PTR_ERR(ns->bdev_handle);
+ if (IS_ERR(ns->bdev_file)) {
+ ret = PTR_ERR(ns->bdev_file);
if (ret != -ENOTBLK) {
pr_err("failed to open block device %s: (%d)\n",
ns->device_path, ret);
}
- ns->bdev_handle = NULL;
+ ns->bdev_file = NULL;
return ret;
}
- ns->bdev = ns->bdev_handle->bdev;
+ ns->bdev = file_bdev(ns->bdev_file);
ns->size = bdev_nr_bytes(ns->bdev);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
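
For reference, the struct file based open pattern this diff converts to, condensed into a hypothetical helper (the calls are the ones used above; the helper itself is illustrative):

    #include <linux/blkdev.h>
    #include <linux/err.h>
    #include <linux/file.h>

    static int example_probe_bdev(const char *path)
    {
            struct file *bdev_file;
            struct block_device *bdev;

            bdev_file = bdev_file_open_by_path(path,
                            BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
            if (IS_ERR(bdev_file))
                    return PTR_ERR(bdev_file);

            bdev = file_bdev(bdev_file);
            pr_info("%s: %lld bytes\n", path, bdev_nr_bytes(bdev));

            fput(bdev_file);        /* closes the device, replaces bdev_release() */
            return 0;
    }
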
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 6c8acebe1a1a..f460728e1df1 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -58,7 +58,7 @@
struct nvmet_ns {
struct percpu_ref ref;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct block_device *bdev;
struct file *file;
bool readonly;
@@ -163,6 +163,7 @@ struct nvmet_port {
void *priv;
bool enabled;
int inline_data_size;
+ int max_queue_size;
const struct nvmet_fabrics_ops *tr_ops;
bool pi_enable;
};
@@ -543,9 +544,10 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
u8 event_info, u8 log_page);
-#define NVMET_QUEUE_SIZE 1024
+#define NVMET_MIN_QUEUE_SIZE 16
+#define NVMET_MAX_QUEUE_SIZE 1024
#define NVMET_NR_QUEUES 128
-#define NVMET_MAX_CMD NVMET_QUEUE_SIZE
+#define NVMET_MAX_CMD(ctrl) (NVME_CAP_MQES(ctrl->cap) + 1)
/*
* Nice round number that makes a list of nsids fit into a page.
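
NVMET_MAX_CMD() now derives maxcmd from the controller's advertised CAP.MQES instead of a fixed constant. MQES occupies CAP bits 15:0 and is zero's-based, hence the +1; a standalone illustration (the accessor mirrors the kernel's NVME_CAP_MQES()):

    #include <stdint.h>
    #include <stdio.h>

    /* CAP.MQES is bits 15:0 of the 64-bit CAP register, zero's-based */
    static uint32_t nvmet_max_cmd(uint64_t cap)
    {
            return (uint32_t)(cap & 0xffff) + 1;
    }

    int main(void)
    {
            uint64_t cap = 1023;    /* MQES = 1023 -> 1024-entry queues */

            printf("maxcmd = %u\n", nvmet_max_cmd(cap));
            return 0;
    }
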
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index f2d963e1fe94..bb4a69d538fd 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -132,7 +132,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);
- id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);
+ id->maxcmd = cpu_to_le16(NVMET_MAX_CMD(ctrl));
/* don't support fuse commands */
id->fuses = 0;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3a0f2c170f4c..f2bb9d95ecf4 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1956,6 +1956,14 @@ static int nvmet_rdma_add_port(struct nvmet_port *nport)
nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
}
+ if (nport->max_queue_size < 0) {
+ nport->max_queue_size = NVME_RDMA_DEFAULT_QUEUE_SIZE;
+ } else if (nport->max_queue_size > NVME_RDMA_MAX_QUEUE_SIZE) {
+ pr_warn("max_queue_size %u is too large, reducing to %u\n",
+ nport->max_queue_size, NVME_RDMA_MAX_QUEUE_SIZE);
+ nport->max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE;
+ }
+
ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
nport->disc_addr.trsvcid, &port->addr);
if (ret) {
@@ -2015,6 +2023,8 @@ static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
{
+ if (ctrl->pi_support)
+ return NVME_RDMA_MAX_METADATA_QUEUE_SIZE;
return NVME_RDMA_MAX_QUEUE_SIZE;
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index c8655fc5aa5b..2aa5762e9f50 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1591,7 +1591,6 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
- struct page *page;
struct nvmet_tcp_queue *queue =
container_of(w, struct nvmet_tcp_queue, release_work);
@@ -1615,8 +1614,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
if (queue->hdr_digest || queue->data_digest)
nvmet_tcp_free_crypto(queue);
ida_free(&nvmet_tcp_queue_ida, queue->idx);
- page = virt_to_head_page(queue->pf_cache.va);
- __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+ page_frag_cache_drain(&queue->pf_cache);
kfree(queue);
}
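
page_frag_cache_drain() folds the virt_to_head_page()/__page_frag_cache_drain() pair into one call that also clears the cache; roughly what the helper does (paraphrased from the mm implementation introduced alongside this series):

    void page_frag_cache_drain(struct page_frag_cache *nc)
    {
            if (!nc->va)
                    return;

            __page_frag_cache_drain(virt_to_head_page(nc->va),
                                    nc->pagecnt_bias);
            nc->va = NULL;
    }

Besides being shorter at the call site, the NULL check and the reset of nc->va make the drain safe on a never-used or already-drained cache.
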
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 5b5c1e481722..3148d9f1bde6 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -456,8 +456,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req)
switch (zsa_req_op(req->cmd->zms.zsa)) {
case REQ_OP_ZONE_RESET:
ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
- get_capacity(req->ns->bdev->bd_disk),
- GFP_KERNEL);
+ get_capacity(req->ns->bdev->bd_disk));
if (ret < 0)
return blkdev_zone_mgmt_errno_to_nvme_status(ret);
break;
@@ -508,7 +507,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w)
goto out;
}
- ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors, GFP_KERNEL);
+ ret = blkdev_zone_mgmt(bdev, op, sect, zone_sectors);
if (ret < 0)
status = blkdev_zone_mgmt_errno_to_nvme_status(ret);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 980123fb4dde..eb357ac2e54a 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -460,8 +460,9 @@ static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
list_for_each_entry(entry, &nvmem->cells, node) {
sysfs_bin_attr_init(&attrs[i]);
attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
- "%s@%x", entry->name,
- entry->offset);
+ "%s@%x,%x", entry->name,
+ entry->offset,
+ entry->bit_offset);
attrs[i].attr.mode = 0444;
attrs[i].size = entry->bytes;
attrs[i].read = &nvmem_cell_attr_read;
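
Appending the bit offset keeps sysfs names unique when two cells share a byte offset and differ only in bit position; a quick userspace illustration of the old and new formats (the cell name and offsets are made up):

    #include <stdio.h>

    int main(void)
    {
            char name[64];

            /* old scheme: two cells at byte 0x16 would collide */
            snprintf(name, sizeof(name), "%s@%x", "mac-address", 0x16);
            printf("old: %s\n", name);

            /* new scheme: the bit offset disambiguates them */
            snprintf(name, sizeof(name), "%s@%x,%x", "mac-address", 0x16, 4);
            printf("new: %s\n", name);
            return 0;
    }
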
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 641a40cf5cf3..fa8cd33be131 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -763,7 +763,9 @@ struct device_node *of_graph_get_port_parent(struct device_node *node)
/* Walk 3 levels up only if there is 'ports' node. */
for (depth = 3; depth && node; depth--) {
node = of_get_next_parent(node);
- if (depth == 2 && !of_node_name_eq(node, "ports"))
+ if (depth == 2 && !of_node_name_eq(node, "ports") &&
+ !of_node_name_eq(node, "in-ports") &&
+ !of_node_name_eq(node, "out-ports"))
break;
}
return node;
@@ -1063,36 +1065,6 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
return of_device_get_match_data(dev);
}
-static struct device_node *of_get_compat_node(struct device_node *np)
-{
- of_node_get(np);
-
- while (np) {
- if (!of_device_is_available(np)) {
- of_node_put(np);
- np = NULL;
- }
-
- if (of_property_present(np, "compatible"))
- break;
-
- np = of_get_next_parent(np);
- }
-
- return np;
-}
-
-static struct device_node *of_get_compat_node_parent(struct device_node *np)
-{
- struct device_node *parent, *node;
-
- parent = of_get_parent(np);
- node = of_get_compat_node(parent);
- of_node_put(parent);
-
- return node;
-}
-
static void of_link_to_phandle(struct device_node *con_np,
struct device_node *sup_np)
{
@@ -1222,10 +1194,10 @@ static struct device_node *parse_##fname(struct device_node *np, \
* parse_prop.prop_name: Name of property holding a phandle value
* parse_prop.index: For properties holding a list of phandles, this is the
* index into the list
+ * @get_con_dev: If the consumer node containing the property is never converted
+ * to a struct device, implement this op so fw_devlink can use it
+ * to find the true consumer.
* @optional: Describes whether a supplier is mandatory or not
- * @node_not_dev: The consumer node containing the property is never converted
- * to a struct device. Instead, parse ancestor nodes for the
- * compatible property to find a node corresponding to a device.
*
* Returns:
* parse_prop() return values are
@@ -1236,15 +1208,15 @@ static struct device_node *parse_##fname(struct device_node *np, \
struct supplier_bindings {
struct device_node *(*parse_prop)(struct device_node *np,
const char *prop_name, int index);
+ struct device_node *(*get_con_dev)(struct device_node *np);
bool optional;
- bool node_not_dev;
};
DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells")
DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
-DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells")
+DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells")
DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
@@ -1262,7 +1234,6 @@ DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL)
DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL)
DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
-DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL)
DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
DEFINE_SIMPLE_PROP(leds, "leds", NULL)
@@ -1328,6 +1299,17 @@ static struct device_node *parse_interrupts(struct device_node *np,
return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np;
}
+static struct device_node *parse_remote_endpoint(struct device_node *np,
+ const char *prop_name,
+ int index)
+{
+ /* Return NULL for index > 0 to signify end of remote-endpoints. */
+ if (index > 0 || strcmp(prop_name, "remote-endpoint"))
+ return NULL;
+
+ return of_graph_get_remote_port_parent(np);
+}
+
static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_clocks, },
{ .parse_prop = parse_interconnects, },
@@ -1352,7 +1334,10 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_pinctrl6, },
{ .parse_prop = parse_pinctrl7, },
{ .parse_prop = parse_pinctrl8, },
- { .parse_prop = parse_remote_endpoint, .node_not_dev = true, },
+ {
+ .parse_prop = parse_remote_endpoint,
+ .get_con_dev = of_graph_get_port_parent,
+ },
{ .parse_prop = parse_pwms, },
{ .parse_prop = parse_resets, },
{ .parse_prop = parse_leds, },
@@ -1403,8 +1388,8 @@ static int of_link_property(struct device_node *con_np, const char *prop_name)
while ((phandle = s->parse_prop(con_np, prop_name, i))) {
struct device_node *con_dev_np;
- con_dev_np = s->node_not_dev
- ? of_get_compat_node_parent(con_np)
+ con_dev_np = s->get_con_dev
+ ? s->get_con_dev(con_np)
: of_node_get(con_np);
matched = true;
i++;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index cfd60e35a899..d7593bde2d02 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -50,6 +50,12 @@ static struct unittest_results {
failed; \
})
+#ifdef CONFIG_OF_KOBJ
+#define OF_KREF_READ(NODE) kref_read(&(NODE)->kobj.kref)
+#else
+#define OF_KREF_READ(NODE) 1
+#endif
+
/*
* Expected message may have a message level other than KERN_INFO.
* Print the expected message only if the current loglevel will allow
@@ -570,7 +576,7 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
pr_err("missing testcase data\n");
return;
}
- prefs[i] = kref_read(&p[i]->kobj.kref);
+ prefs[i] = OF_KREF_READ(p[i]);
}
rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells");
@@ -693,9 +699,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
for (i = 0; i < ARRAY_SIZE(p); ++i) {
- unittest(prefs[i] == kref_read(&p[i]->kobj.kref),
+ unittest(prefs[i] == OF_KREF_READ(p[i]),
"provider%d: expected:%d got:%d\n",
- i, prefs[i], kref_read(&p[i]->kobj.kref));
+ i, prefs[i], OF_KREF_READ(p[i]));
of_node_put(p[i]);
}
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 5befed2dc02b..9a437cfce073 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -6,6 +6,7 @@
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
+#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -482,9 +483,10 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
reg = ep_func->msi_cap + PCI_MSI_DATA_32;
msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
}
- aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
- msg_addr = ((u64)msg_addr_upper) << 32 |
- (msg_addr_lower & ~aligned_offset);
+ msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
+
+ aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+ msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
@@ -551,7 +553,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
}
aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
- msg_addr &= ~aligned_offset;
+ msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
epc->mem->window.page_size);
if (ret)
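
Both hunks converge on the same arithmetic: split the MSI target address into a page-aligned base (mapped through the outbound window) and an offset within the window page. A standalone sketch of that split, with ALIGN_DOWN spelled out for a power-of-two alignment:

    #include <stdint.h>
    #include <stdio.h>

    /* power-of-two alignment only, as in the kernel macro */
    #define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

    int main(void)
    {
            uint64_t msg_addr = 0xfee003f8;     /* example MSI address */
            uint64_t page_size = 0x1000;        /* example window page size */

            uint64_t offset = msg_addr & (page_size - 1);
            uint64_t base = ALIGN_DOWN(msg_addr, page_size);

            /* the window maps 'base'; the write lands at base + offset */
            printf("base=%#llx offset=%#llx\n",
                   (unsigned long long)base, (unsigned long long)offset);
            return 0;
    }
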
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index a89b7de72dcf..7333b305f2a5 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -26,58 +26,79 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
hotplug_slot);
int rc;
- if (zdev->state != ZPCI_FN_STATE_STANDBY)
- return -EIO;
+ mutex_lock(&zdev->state_lock);
+ if (zdev->state != ZPCI_FN_STATE_STANDBY) {
+ rc = -EIO;
+ goto out;
+ }
rc = sclp_pci_configure(zdev->fid);
zpci_dbg(3, "conf fid:%x, rc:%d\n", zdev->fid, rc);
if (rc)
- return rc;
+ goto out;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
- return zpci_scan_configured_device(zdev, zdev->fh);
+ rc = zpci_scan_configured_device(zdev, zdev->fh);
+out:
+ mutex_unlock(&zdev->state_lock);
+ return rc;
}
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
- struct pci_dev *pdev;
+ struct pci_dev *pdev = NULL;
+ int rc;
- if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
- return -EIO;
+ mutex_lock(&zdev->state_lock);
+ if (zdev->state != ZPCI_FN_STATE_CONFIGURED) {
+ rc = -EIO;
+ goto out;
+ }
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
if (pdev && pci_num_vf(pdev)) {
pci_dev_put(pdev);
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
- pci_dev_put(pdev);
- return zpci_deconfigure_device(zdev);
+ rc = zpci_deconfigure_device(zdev);
+out:
+ mutex_unlock(&zdev->state_lock);
+ if (pdev)
+ pci_dev_put(pdev);
+ return rc;
}
static int reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
+ int rc = -EIO;
- if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
- return -EIO;
/*
- * We can't take the zdev->lock as reset_slot may be called during
- * probing and/or device removal which already happens under the
- * zdev->lock. Instead the user should use the higher level
- * pci_reset_function() or pci_bus_reset() which hold the PCI device
- * lock preventing concurrent removal. If not using these functions
- * holding the PCI device lock is required.
+ * If we can't get the zdev->state_lock, the device state is
+ * currently undergoing a transition and we bail out, just as
+ * if the device's state were not configured at all.
*/
+ if (!mutex_trylock(&zdev->state_lock))
+ return rc;
- /* As long as the function is configured we can reset */
- if (probe)
- return 0;
+ /* We can reset only if the function is configured */
+ if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
+ goto out;
+
+ if (probe) {
+ rc = 0;
+ goto out;
+ }
- return zpci_hot_reset_device(zdev);
+ rc = zpci_hot_reset_device(zdev);
+out:
+ mutex_unlock(&zdev->state_lock);
+ return rc;
}
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index c8be056c248d..cfd84a899c82 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -61,7 +61,7 @@ static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
return (irq_hw_number_t)desc->msi_index |
pci_dev_id(dev) << 11 |
- (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+ ((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27;
}
static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
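
The added cast matters because the expression was previously evaluated in 32-bit arithmetic: shifting an unsigned int left by 27 discards any domain bits above bit 4 before the result widens to irq_hw_number_t. A standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int domain = 0x20;     /* any domain number >= 32 */

            /* old: the 32-bit shift wraps, domain information is lost */
            uint64_t bad = (domain & 0xFFFFFFFF) << 27;

            /* new: widen first, then shift; all domain bits survive */
            uint64_t good = ((uint64_t)(domain & 0xFFFFFFFF)) << 27;

            printf("bad=%#llx good=%#llx\n",
                   (unsigned long long)bad, (unsigned long long)good);
            return 0;
    }

With domain 0x20 the old expression yields 0 while the fixed one yields 0x100000000.
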
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 9ab9b1008d8b..ccee56615f78 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2522,29 +2522,36 @@ static void pci_pme_list_scan(struct work_struct *work)
if (pdev->pme_poll) {
struct pci_dev *bridge = pdev->bus->self;
struct device *dev = &pdev->dev;
- int pm_status;
+ struct device *bdev = bridge ? &bridge->dev : NULL;
+ int bref = 0;
/*
- * If bridge is in low power state, the
- * configuration space of subordinate devices
- * may be not accessible
+ * If we have a bridge, it should be in an active/D0
+ * state or the configuration space of subordinate
+ * devices may not be accessible or stable over the
+ * course of the call.
*/
- if (bridge && bridge->current_state != PCI_D0)
- continue;
+ if (bdev) {
+ bref = pm_runtime_get_if_active(bdev, true);
+ if (!bref)
+ continue;
+
+ if (bridge->current_state != PCI_D0)
+ goto put_bridge;
+ }
/*
- * If the device is in a low power state it
- * should not be polled either.
+ * The device itself should be suspended, but its config
+ * space must still be accessible, so it cannot be in
+ * D3cold.
*/
- pm_status = pm_runtime_get_if_active(dev, true);
- if (!pm_status)
- continue;
-
- if (pdev->current_state != PCI_D3cold)
+ if (pm_runtime_suspended(dev) &&
+ pdev->current_state != PCI_D3cold)
pci_pme_wakeup(pdev, NULL);
- if (pm_status > 0)
- pm_runtime_put(dev);
+put_bridge:
+ if (bref > 0)
+ pm_runtime_put(bdev);
} else {
list_del(&pme_dev->list);
kfree(pme_dev);
@@ -4346,8 +4353,8 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
if (res->end > IO_SPACE_LIMIT)
return -EINVAL;
- return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
- pgprot_device(PAGE_KERNEL));
+ return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+ pgprot_device(PAGE_KERNEL));
#else
/*
* This architecture does not have memory mapped I/O space,
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index e9750b1b19ba..bfc56f7bee1c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -368,11 +368,6 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
return 0;
}
-static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
-{
- return dev->error_state == pci_channel_io_perm_failure;
-}
-
/* pci_dev priv_flags */
#define PCI_DEV_ADDED 0
#define PCI_DPC_RECOVERED 1
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index c584165b13ba..7e3aa7e2345f 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -2305,6 +2305,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
continue;
}
+ /*
+ * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus
+ * child count larger than the number of valid child pointers.
+ * A child offset of 0 can only occur on CMN-600; otherwise it
+ * would imply the root node being its own grandchild, which
+ * we can safely dismiss in general.
+ */
+ if (reg == 0 && cmn->part != PART_CMN600) {
+ dev_dbg(cmn->dev, "bogus child pointer?\n");
+ continue;
+ }
arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 6303b82566f9..9e5d7fa647b6 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -716,7 +716,7 @@ static void smmu_pmu_free_msis(void *data)
{
struct device *dev = data;
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
}
static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
@@ -746,7 +746,7 @@ static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
return;
- ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
if (ret) {
dev_warn(dev, "failed to allocate MSIs\n");
return;
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
index 365d964b0f6a..308c9969642e 100644
--- a/drivers/perf/cxl_pmu.c
+++ b/drivers/perf/cxl_pmu.c
@@ -59,7 +59,7 @@
#define CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK GENMASK_ULL(63, 59)
#define CXL_PMU_FILTER_CFG_REG(n, f) (0x400 + 4 * ((f) + (n) * 8))
-#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(15, 0)
+#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(31, 0)
#define CXL_PMU_COUNTER_REG(n) (0xc00 + 8 * (n))
@@ -314,9 +314,9 @@ static bool cxl_pmu_config1_get_edge(struct perf_event *event)
}
/*
- * CPMU specification allows for 8 filters, each with a 16 bit value...
- * So we need to find 8x16bits to store it in.
- * As the value used for disable is 0xffff, a separate enable switch
+ * The CPMU specification allows for 8 filters, each with a 32 bit value,
+ * so we need 8x32 bits to store them.
+ * As the value used for disable is 0xffff_ffff, a separate enable switch
* is needed.
*/
@@ -419,7 +419,7 @@ static struct attribute *cxl_pmu_event_attrs[] = {
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmp, CXL_PMU_GID_S2M_NDR, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmps, CXL_PMU_GID_S2M_NDR, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpe, CXL_PMU_GID_S2M_NDR, BIT(2)),
- CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(3)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(4)),
/* CXL rev 3.0 Table 3-46 S2M DRS opcodes */
CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdata, CXL_PMU_GID_S2M_DRS, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdatanxm, CXL_PMU_GID_S2M_DRS, BIT(1)),
@@ -642,7 +642,7 @@ static void cxl_pmu_event_start(struct perf_event *event, int flags)
if (cxl_pmu_config1_hdm_filter_en(event))
cfg = cxl_pmu_config2_get_hdm_decoder(event);
else
- cfg = GENMASK(15, 0); /* No filtering if 0xFFFF_FFFF */
+ cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */
writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0));
}
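
The 16-to-32-bit widening shows up in three places that must stay consistent: the register mask, the comment, and the all-ones "no filtering" value. A quick check of the two masks (32-bit GENMASK expansion matching the kernel's definition):

    #include <stdio.h>

    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
            printf("old: %#x\n", GENMASK(15, 0));   /* 0xffff */
            printf("new: %#x\n", GENMASK(31, 0));   /* 0xffffffff */
            return 0;
    }
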
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 0dda70e1ef90..c78a6fd6c57f 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -150,19 +150,11 @@ u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- if (!rvpmu->ctr_get_width)
- /**
- * If the pmu driver doesn't support counter width, set it to default
- * maximum allowed by the specification.
- */
- cwidth = 63;
- else {
- if (hwc->idx == -1)
- /* Handle init case where idx is not initialized yet */
- cwidth = rvpmu->ctr_get_width(0);
- else
- cwidth = rvpmu->ctr_get_width(hwc->idx);
- }
+ if (hwc->idx == -1)
+ /* Handle init case where idx is not initialized yet */
+ cwidth = rvpmu->ctr_get_width(0);
+ else
+ cwidth = rvpmu->ctr_get_width(hwc->idx);
return GENMASK_ULL(cwidth, 0);
}
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index 79fdd667922e..fa0bccf4edf2 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -37,6 +37,12 @@ static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
return pmu_legacy_ctr_get_idx(event);
}
+/* cycle & instret are always 64 bit; the SBI spec reports the width as one less (63) */
+static int pmu_legacy_ctr_get_width(int idx)
+{
+ return 63;
+}
+
static u64 pmu_legacy_read_ctr(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -111,12 +117,14 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
pmu->ctr_stop = NULL;
pmu->event_map = pmu_legacy_event_map;
pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
- pmu->ctr_get_width = NULL;
+ pmu->ctr_get_width = pmu_legacy_ctr_get_width;
pmu->ctr_clear_idx = NULL;
pmu->ctr_read = pmu_legacy_read_ctr;
pmu->event_mapped = pmu_legacy_event_mapped;
pmu->event_unmapped = pmu_legacy_event_unmapped;
pmu->csr_index = pmu_legacy_csr_index;
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
}
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 16acd4dcdb96..452aab49db1e 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -512,7 +512,7 @@ static void pmu_sbi_set_scounteren(void *arg)
if (event->hw.idx != -1)
csr_write(CSR_SCOUNTEREN,
- csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
+ csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
}
static void pmu_sbi_reset_scounteren(void *arg)
@@ -521,7 +521,7 @@ static void pmu_sbi_reset_scounteren(void *arg)
if (event->hw.idx != -1)
csr_write(CSR_SCOUNTEREN,
- csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
+ csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event)));
}
static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
@@ -731,14 +731,14 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
/* compute hardware counter index */
hidx = info->csr - CSR_CYCLE;
/* check if the corresponding bit is set in sscountovf */
- if (!(overflow & (1 << hidx)))
+ if (!(overflow & BIT(hidx)))
continue;
/*
* Keep a track of overflowed counters so that they can be started
* with updated initial value.
*/
- overflowed_ctrs |= 1 << lidx;
+ overflowed_ctrs |= BIT(lidx);
hw_evt = &event->hw;
riscv_pmu_event_update(event);
perf_sample_data_init(&data, 0, hw_evt->last_period);
diff --git a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
index e625b32889bf..0928a526e2ab 100644
--- a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
+++ b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c
@@ -706,7 +706,7 @@ static int mixel_dphy_probe(struct platform_device *pdev)
return ret;
}
- priv->id = of_alias_get_id(np, "mipi_dphy");
+ priv->id = of_alias_get_id(np, "mipi-dphy");
if (priv->id < 0) {
dev_err(dev, "Failed to get phy node alias id: %d\n",
priv->id);
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
index a623f092b11f..a43e20abb10d 100644
--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -37,56 +37,28 @@
#define EUSB2_TUNE_EUSB_EQU 0x5A
#define EUSB2_TUNE_EUSB_HS_COMP_CUR 0x5B
-#define QCOM_EUSB2_REPEATER_INIT_CFG(r, v) \
- { \
- .reg = r, \
- .val = v, \
- }
-
-enum reg_fields {
- F_TUNE_EUSB_HS_COMP_CUR,
- F_TUNE_EUSB_EQU,
- F_TUNE_EUSB_SLEW,
- F_TUNE_USB2_HS_COMP_CUR,
- F_TUNE_USB2_PREEM,
- F_TUNE_USB2_EQU,
- F_TUNE_USB2_SLEW,
- F_TUNE_SQUELCH_U,
- F_TUNE_HSDISC,
- F_TUNE_RES_FSDIF,
- F_TUNE_IUSB2,
- F_TUNE_USB2_CROSSOVER,
- F_NUM_TUNE_FIELDS,
-
- F_FORCE_VAL_5 = F_NUM_TUNE_FIELDS,
- F_FORCE_EN_5,
-
- F_EN_CTL1,
-
- F_RPTR_STATUS,
- F_NUM_FIELDS,
-};
-
-static struct reg_field eusb2_repeater_tune_reg_fields[F_NUM_FIELDS] = {
- [F_TUNE_EUSB_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_EUSB_HS_COMP_CUR, 0, 1),
- [F_TUNE_EUSB_EQU] = REG_FIELD(EUSB2_TUNE_EUSB_EQU, 0, 1),
- [F_TUNE_EUSB_SLEW] = REG_FIELD(EUSB2_TUNE_EUSB_SLEW, 0, 1),
- [F_TUNE_USB2_HS_COMP_CUR] = REG_FIELD(EUSB2_TUNE_USB2_HS_COMP_CUR, 0, 1),
- [F_TUNE_USB2_PREEM] = REG_FIELD(EUSB2_TUNE_USB2_PREEM, 0, 2),
- [F_TUNE_USB2_EQU] = REG_FIELD(EUSB2_TUNE_USB2_EQU, 0, 1),
- [F_TUNE_USB2_SLEW] = REG_FIELD(EUSB2_TUNE_USB2_SLEW, 0, 1),
- [F_TUNE_SQUELCH_U] = REG_FIELD(EUSB2_TUNE_SQUELCH_U, 0, 2),
- [F_TUNE_HSDISC] = REG_FIELD(EUSB2_TUNE_HSDISC, 0, 2),
- [F_TUNE_RES_FSDIF] = REG_FIELD(EUSB2_TUNE_RES_FSDIF, 0, 2),
- [F_TUNE_IUSB2] = REG_FIELD(EUSB2_TUNE_IUSB2, 0, 3),
- [F_TUNE_USB2_CROSSOVER] = REG_FIELD(EUSB2_TUNE_USB2_CROSSOVER, 0, 2),
-
- [F_FORCE_VAL_5] = REG_FIELD(EUSB2_FORCE_VAL_5, 0, 7),
- [F_FORCE_EN_5] = REG_FIELD(EUSB2_FORCE_EN_5, 0, 7),
-
- [F_EN_CTL1] = REG_FIELD(EUSB2_EN_CTL1, 0, 7),
-
- [F_RPTR_STATUS] = REG_FIELD(EUSB2_RPTR_STATUS, 0, 7),
+enum eusb2_reg_layout {
+ TUNE_EUSB_HS_COMP_CUR,
+ TUNE_EUSB_EQU,
+ TUNE_EUSB_SLEW,
+ TUNE_USB2_HS_COMP_CUR,
+ TUNE_USB2_PREEM,
+ TUNE_USB2_EQU,
+ TUNE_USB2_SLEW,
+ TUNE_SQUELCH_U,
+ TUNE_HSDISC,
+ TUNE_RES_FSDIF,
+ TUNE_IUSB2,
+ TUNE_USB2_CROSSOVER,
+ NUM_TUNE_FIELDS,
+
+ FORCE_VAL_5 = NUM_TUNE_FIELDS,
+ FORCE_EN_5,
+
+ EN_CTL1,
+
+ RPTR_STATUS,
+ LAYOUT_SIZE,
};
struct eusb2_repeater_cfg {
@@ -98,10 +70,11 @@ struct eusb2_repeater_cfg {
struct eusb2_repeater {
struct device *dev;
- struct regmap_field *regs[F_NUM_FIELDS];
+ struct regmap *regmap;
struct phy *phy;
struct regulator_bulk_data *vregs;
const struct eusb2_repeater_cfg *cfg;
+ u32 base;
enum phy_mode mode;
};
@@ -109,10 +82,10 @@ static const char * const pm8550b_vreg_l[] = {
"vdd18", "vdd3",
};
-static const u32 pm8550b_init_tbl[F_NUM_TUNE_FIELDS] = {
- [F_TUNE_IUSB2] = 0x8,
- [F_TUNE_SQUELCH_U] = 0x3,
- [F_TUNE_USB2_PREEM] = 0x5,
+static const u32 pm8550b_init_tbl[NUM_TUNE_FIELDS] = {
+ [TUNE_IUSB2] = 0x8,
+ [TUNE_SQUELCH_U] = 0x3,
+ [TUNE_USB2_PREEM] = 0x5,
};
static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
@@ -140,47 +113,42 @@ static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
static int eusb2_repeater_init(struct phy *phy)
{
- struct reg_field *regfields = eusb2_repeater_tune_reg_fields;
struct eusb2_repeater *rptr = phy_get_drvdata(phy);
struct device_node *np = rptr->dev->of_node;
- u32 init_tbl[F_NUM_TUNE_FIELDS] = { 0 };
- u8 override;
+ struct regmap *regmap = rptr->regmap;
+ const u32 *init_tbl = rptr->cfg->init_tbl;
+ u8 tune_usb2_preem = init_tbl[TUNE_USB2_PREEM];
+ u8 tune_hsdisc = init_tbl[TUNE_HSDISC];
+ u8 tune_iusb2 = init_tbl[TUNE_IUSB2];
+ u32 base = rptr->base;
u32 val;
int ret;
- int i;
+
+ of_property_read_u8(np, "qcom,tune-usb2-amplitude", &tune_iusb2);
+ of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &tune_hsdisc);
+ of_property_read_u8(np, "qcom,tune-usb2-preem", &tune_usb2_preem);
ret = regulator_bulk_enable(rptr->cfg->num_vregs, rptr->vregs);
if (ret)
return ret;
- regmap_field_update_bits(rptr->regs[F_EN_CTL1], EUSB2_RPTR_EN, EUSB2_RPTR_EN);
+ regmap_write(regmap, base + EUSB2_EN_CTL1, EUSB2_RPTR_EN);
- for (i = 0; i < F_NUM_TUNE_FIELDS; i++) {
- if (init_tbl[i]) {
- regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
- } else {
- /* Write 0 if there's no value set */
- u32 mask = GENMASK(regfields[i].msb, regfields[i].lsb);
-
- regmap_field_update_bits(rptr->regs[i], mask, 0);
- }
- }
- memcpy(init_tbl, rptr->cfg->init_tbl, sizeof(init_tbl));
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_HS_COMP_CUR, init_tbl[TUNE_EUSB_HS_COMP_CUR]);
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_EQU, init_tbl[TUNE_EUSB_EQU]);
+ regmap_write(regmap, base + EUSB2_TUNE_EUSB_SLEW, init_tbl[TUNE_EUSB_SLEW]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_HS_COMP_CUR, init_tbl[TUNE_USB2_HS_COMP_CUR]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_EQU, init_tbl[TUNE_USB2_EQU]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_SLEW, init_tbl[TUNE_USB2_SLEW]);
+ regmap_write(regmap, base + EUSB2_TUNE_SQUELCH_U, init_tbl[TUNE_SQUELCH_U]);
+ regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, init_tbl[TUNE_RES_FSDIF]);
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_CROSSOVER, init_tbl[TUNE_USB2_CROSSOVER]);
- if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &override))
- init_tbl[F_TUNE_IUSB2] = override;
+ regmap_write(regmap, base + EUSB2_TUNE_USB2_PREEM, tune_usb2_preem);
+ regmap_write(regmap, base + EUSB2_TUNE_HSDISC, tune_hsdisc);
+ regmap_write(regmap, base + EUSB2_TUNE_IUSB2, tune_iusb2);
- if (!of_property_read_u8(np, "qcom,tune-usb2-disc-thres", &override))
- init_tbl[F_TUNE_HSDISC] = override;
-
- if (!of_property_read_u8(np, "qcom,tune-usb2-preem", &override))
- init_tbl[F_TUNE_USB2_PREEM] = override;
-
- for (i = 0; i < F_NUM_TUNE_FIELDS; i++)
- regmap_field_update_bits(rptr->regs[i], init_tbl[i], init_tbl[i]);
-
- ret = regmap_field_read_poll_timeout(rptr->regs[F_RPTR_STATUS],
- val, val & RPTR_OK, 10, 5);
+ ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, val, val & RPTR_OK, 10, 5);
if (ret)
dev_err(rptr->dev, "initialization timed-out\n");
@@ -191,6 +159,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
enum phy_mode mode, int submode)
{
struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+ struct regmap *regmap = rptr->regmap;
+ u32 base = rptr->base;
switch (mode) {
case PHY_MODE_USB_HOST:
@@ -199,10 +169,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
* per the eUSB 1.2 spec. Implement a software workaround below until
* the PHY and controller fix the observed behaviour.
*/
- regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
- F_CLK_19P2M_EN, F_CLK_19P2M_EN);
- regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
- V_CLK_19P2M_EN, V_CLK_19P2M_EN);
+ regmap_write(regmap, base + EUSB2_FORCE_EN_5, F_CLK_19P2M_EN);
+ regmap_write(regmap, base + EUSB2_FORCE_VAL_5, V_CLK_19P2M_EN);
break;
case PHY_MODE_USB_DEVICE:
/*
@@ -211,10 +179,8 @@ static int eusb2_repeater_set_mode(struct phy *phy,
* repeater doesn't clear previous value due to shared
* regulators (say host <-> device mode switch).
*/
- regmap_field_update_bits(rptr->regs[F_FORCE_EN_5],
- F_CLK_19P2M_EN, 0);
- regmap_field_update_bits(rptr->regs[F_FORCE_VAL_5],
- V_CLK_19P2M_EN, 0);
+ regmap_write(regmap, base + EUSB2_FORCE_EN_5, 0);
+ regmap_write(regmap, base + EUSB2_FORCE_VAL_5, 0);
break;
default:
return -EINVAL;
@@ -243,9 +209,8 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
struct device_node *np = dev->of_node;
- struct regmap *regmap;
- int i, ret;
u32 res;
+ int ret;
rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL);
if (!rptr)
@@ -258,22 +223,15 @@ static int eusb2_repeater_probe(struct platform_device *pdev)
if (!rptr->cfg)
return -EINVAL;
- regmap = dev_get_regmap(dev->parent, NULL);
- if (!regmap)
+ rptr->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!rptr->regmap)
return -ENODEV;
ret = of_property_read_u32(np, "reg", &res);
if (ret < 0)
return ret;
- for (i = 0; i < F_NUM_FIELDS; i++)
- eusb2_repeater_tune_reg_fields[i].reg += res;
-
- ret = devm_regmap_field_bulk_alloc(dev, regmap, rptr->regs,
- eusb2_repeater_tune_reg_fields,
- F_NUM_FIELDS);
- if (ret)
- return ret;
+ rptr->base = res;
ret = eusb2_repeater_init_vregs(rptr);
if (ret < 0) {
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index c2590579190a..03fb0d4b75d7 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -299,7 +299,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
qphy->vreg = devm_regulator_get(dev, "vdda-phy");
if (IS_ERR(qphy->vreg))
- return dev_err_probe(dev, PTR_ERR(qphy->phy),
+ return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
phy_set_drvdata(qphy->phy, qphy);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 1ad10110dd25..17c4ad7553a5 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -3562,14 +3562,6 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = qmp_combo_typec_switch_register(qmp);
- if (ret)
- return ret;
-
- ret = drm_aux_bridge_register(dev);
- if (ret)
- return ret;
-
/* Check for legacy binding with child nodes. */
usb_np = of_get_child_by_name(dev->of_node, "usb3-phy");
if (usb_np) {
@@ -3589,6 +3581,14 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
goto err_node_put;
+ ret = qmp_combo_typec_switch_register(qmp);
+ if (ret)
+ goto err_node_put;
+
+ ret = drm_aux_bridge_register(dev);
+ if (ret)
+ goto err_node_put;
+
pm_runtime_set_active(dev);
ret = devm_pm_runtime_enable(dev);
if (ret)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 6621246e4ddf..5c003988c35d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -1556,7 +1556,7 @@ static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
};
-static const struct qmp_usb_offsets qmp_usb_offsets_ipq8074 = {
+static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
.serdes = 0,
.pcs = 0x800,
.pcs_misc = 0x600,
@@ -1572,7 +1572,7 @@ static const struct qmp_usb_offsets qmp_usb_offsets_ipq9574 = {
.rx = 0x400,
};
-static const struct qmp_usb_offsets qmp_usb_offsets_v3 = {
+static const struct qmp_usb_offsets qmp_usb_offsets_v3_msm8996 = {
.serdes = 0,
.pcs = 0x600,
.tx = 0x200,
@@ -1624,7 +1624,7 @@ static const struct qmp_usb_offsets qmp_usb_offsets_v7 = {
static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = {
.lanes = 1,
- .offsets = &qmp_usb_offsets_ipq8074,
+ .offsets = &qmp_usb_offsets_v3,
.serdes_tbl = ipq9574_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq9574_usb3_serdes_tbl),
@@ -1642,7 +1642,7 @@ static const struct qmp_phy_cfg ipq6018_usb3phy_cfg = {
static const struct qmp_phy_cfg ipq8074_usb3phy_cfg = {
.lanes = 1,
- .offsets = &qmp_usb_offsets_ipq8074,
+ .offsets = &qmp_usb_offsets_v3,
.serdes_tbl = ipq8074_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(ipq8074_usb3_serdes_tbl),
@@ -1678,7 +1678,7 @@ static const struct qmp_phy_cfg ipq9574_usb3phy_cfg = {
static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.lanes = 1,
- .offsets = &qmp_usb_offsets_v3,
+ .offsets = &qmp_usb_offsets_v3_msm8996,
.serdes_tbl = msm8996_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(msm8996_usb3_serdes_tbl),
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index ee56856cb80c..bbcdece83bf4 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1644,7 +1644,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
unsigned int i, pin;
#ifdef CONFIG_GPIOLIB
- struct gpio_device *gdev __free(gpio_device_put) = NULL;
+ struct gpio_device *gdev = NULL;
struct pinctrl_gpio_range *range;
int gpio_num;
#endif
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index d6f29e6faab7..89bd7ce6711a 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1492,7 +1492,7 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl,
.base_unit_bits = 22,
.bypass = true,
};
- struct pwm_lpss_chip *pwm;
+ struct pwm_chip *chip;
if (!(community->features & PINCTRL_FEATURE_PWM))
return 0;
@@ -1500,8 +1500,8 @@ static int intel_pinctrl_probe_pwm(struct intel_pinctrl *pctrl,
if (!IS_REACHABLE(CONFIG_PWM_LPSS))
return 0;
- pwm = devm_pwm_lpss_probe(pctrl->dev, community->regs + PWMC, &info);
- return PTR_ERR_OR_ZERO(pwm);
+ chip = devm_pwm_lpss_probe(pctrl->dev, community->regs + PWMC, &info);
+ return PTR_ERR_OR_ZERO(chip);
}
int intel_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 03ecb3d1aaf6..49f89b70dcec 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -1159,7 +1159,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
}
ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
+ IRQF_SHARED | IRQF_ONESHOT, KBUILD_MODNAME, gpio_dev);
if (ret)
goto out2;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32mp257.c b/drivers/pinctrl/stm32/pinctrl-stm32mp257.c
index 73f091cd827e..23aebd4695e9 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32mp257.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32mp257.c
@@ -2562,7 +2562,7 @@ static const struct of_device_id stm32mp257_pctrl_match[] = {
};
static const struct dev_pm_ops stm32_pinctrl_dev_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, stm32_pinctrl_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(stm32_pinctrl_suspend, stm32_pinctrl_resume)
};
static struct platform_driver stm32mp257_pinctrl_driver = {
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index feaa09f5b35a..4f734e049f4a 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -296,7 +296,8 @@ static int amd_pmf_suspend_handler(struct device *dev)
{
struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
- kfree(pdev->buf);
+ if (pdev->smart_pc_enabled)
+ cancel_delayed_work_sync(&pdev->pb_work);
return 0;
}
@@ -312,6 +313,9 @@ static int amd_pmf_resume_handler(struct device *dev)
return ret;
}
+ if (pdev->smart_pc_enabled)
+ schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));
+
return 0;
}
@@ -330,9 +334,14 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
}
- if (!amd_pmf_init_smart_pc(dev)) {
+ amd_pmf_init_smart_pc(dev);
+ if (dev->smart_pc_enabled) {
dev_dbg(dev->dev, "Smart PC Solution Enabled\n");
- } else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
+ /* If Smart PC is enabled, no need to check for other features */
+ return;
+ }
+
+ if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_init_auto_mode(dev);
dev_dbg(dev->dev, "Auto Mode Init done\n");
} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
@@ -351,7 +360,7 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
amd_pmf_deinit_sps(dev);
}
- if (!dev->smart_pc_enabled) {
+ if (dev->smart_pc_enabled) {
amd_pmf_deinit_smart_pc(dev);
} else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
amd_pmf_deinit_auto_mode(dev);
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 16999c5b334f..66cae1cca73c 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -441,11 +441,6 @@ struct apmf_dyn_slider_output {
struct apmf_cnqf_power_set ps[APMF_CNQF_MAX];
} __packed;
-enum smart_pc_status {
- PMF_SMART_PC_ENABLED,
- PMF_SMART_PC_DISABLED,
-};
-
/* Smart PC - TA internals */
enum system_state {
SYSTEM_STATE_S0i3,
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index f8c0177afb0d..dcbe8f85e122 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -252,15 +252,17 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
cookie = readl(dev->policy_buf + POLICY_COOKIE_OFFSET);
length = readl(dev->policy_buf + POLICY_COOKIE_LEN);
- if (cookie != POLICY_SIGN_COOKIE || !length)
+ if (cookie != POLICY_SIGN_COOKIE || !length) {
+ dev_dbg(dev->dev, "cookie doesn't match\n");
return -EINVAL;
+ }
/* Update the actual length */
dev->policy_sz = length + 512;
res = amd_pmf_invoke_cmd_init(dev);
if (res == TA_PMF_TYPE_SUCCESS) {
/* Now it's safe to announce that smart pc is enabled */
- dev->smart_pc_enabled = PMF_SMART_PC_ENABLED;
+ dev->smart_pc_enabled = true;
/*
* Start collecting the data from TA FW after a small delay
* or else, we might end up getting stale values.
@@ -268,7 +270,7 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3));
} else {
dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
- dev->smart_pc_enabled = PMF_SMART_PC_DISABLED;
+ dev->smart_pc_enabled = false;
return res;
}
@@ -336,25 +338,6 @@ static void amd_pmf_remove_pb(struct amd_pmf_dev *dev) {}
static void amd_pmf_hex_dump_pb(struct amd_pmf_dev *dev) {}
#endif
-static int amd_pmf_get_bios_buffer(struct amd_pmf_dev *dev)
-{
- dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
- if (!dev->policy_buf)
- return -ENOMEM;
-
- dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz);
- if (!dev->policy_base)
- return -ENOMEM;
-
- memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz);
-
- amd_pmf_hex_dump_pb(dev);
- if (pb_side_load)
- amd_pmf_open_pb(dev, dev->dbgfs_dir);
-
- return amd_pmf_start_policy_engine(dev);
-}
-
static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const void *data)
{
return ver->impl_id == TEE_IMPL_ID_AMDTEE;
@@ -453,22 +436,59 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
return ret;
INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd);
- amd_pmf_set_dram_addr(dev, true);
- amd_pmf_get_bios_buffer(dev);
+
+ ret = amd_pmf_set_dram_addr(dev, true);
+ if (ret)
+ goto error;
+
+ dev->policy_base = devm_ioremap(dev->dev, dev->policy_addr, dev->policy_sz);
+ if (!dev->policy_base) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL);
+ if (!dev->policy_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memcpy(dev->policy_buf, dev->policy_base, dev->policy_sz);
+
+ amd_pmf_hex_dump_pb(dev);
+
dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL);
- if (!dev->prev_data)
- return -ENOMEM;
+ if (!dev->prev_data) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = amd_pmf_start_policy_engine(dev);
+ if (ret)
+ goto error;
+
+ if (pb_side_load)
+ amd_pmf_open_pb(dev, dev->dbgfs_dir);
+
+ return 0;
- return dev->smart_pc_enabled;
+error:
+ amd_pmf_deinit_smart_pc(dev);
+
+ return ret;
}
void amd_pmf_deinit_smart_pc(struct amd_pmf_dev *dev)
{
- if (pb_side_load)
+ if (pb_side_load && dev->esbin)
amd_pmf_remove_pb(dev);
+ cancel_delayed_work_sync(&dev->pb_work);
kfree(dev->prev_data);
+ dev->prev_data = NULL;
kfree(dev->policy_buf);
- cancel_delayed_work_sync(&dev->pb_work);
+ dev->policy_buf = NULL;
+ kfree(dev->buf);
+ dev->buf = NULL;
amd_pmf_tee_deinit(dev);
}
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index b6708bab7c53..527d8fbc7cc1 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -196,7 +196,7 @@ static int int0002_probe(struct platform_device *pdev)
* IRQs into gpiolib.
*/
ret = devm_request_irq(dev, irq, int0002_irq,
- IRQF_SHARED, "INT0002", chip);
+ IRQF_ONESHOT | IRQF_SHARED, "INT0002", chip);
if (ret) {
dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret);
return ret;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
index a5e0f5c22179..b89c0dda9e5d 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
@@ -242,7 +242,7 @@ static int __init intel_uncore_init(void)
return -ENODEV;
uncore_max_entries = topology_max_packages() *
- topology_max_die_per_package();
+ topology_max_dies_per_package();
uncore_instances = kcalloc(uncore_max_entries,
sizeof(*uncore_instances), GFP_KERNEL);
if (!uncore_instances)
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index 210b0a81b7ec..084c355c86f5 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -200,9 +200,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE);
sparse_keymap_report_event(input_dev, event, val, autorelease);
-
- /* Some devices need this to report further events */
- acpi_evaluate_object(handle, "VBDL", NULL, NULL);
}
/*
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index 6bd14d0132db..3d66e1d4eb1f 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -20,9 +20,11 @@
#define P2SBC_HIDE BIT(8)
#define P2SB_DEVFN_DEFAULT PCI_DEVFN(31, 1)
+#define P2SB_DEVFN_GOLDMONT PCI_DEVFN(13, 0)
+#define SPI_DEVFN_GOLDMONT PCI_DEVFN(13, 2)
static const struct x86_cpu_id p2sb_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT),
{}
};
@@ -98,21 +100,12 @@ static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
{
- unsigned int slot, fn;
-
- if (PCI_FUNC(devfn) == 0) {
- /*
- * When function number of the P2SB device is zero, scan it and
- * other function numbers, and if devices are available, cache
- * their BAR0s.
- */
- slot = PCI_SLOT(devfn);
- for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
- p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
- } else {
- /* Scan the P2SB device and cache its BAR0 */
- p2sb_scan_and_cache_devfn(bus, devfn);
- }
+ /* Scan the P2SB device and cache its BAR0 */
+ p2sb_scan_and_cache_devfn(bus, devfn);
+
+ /* On Goldmont p2sb_bar() also gets called for the SPI controller */
+ if (devfn == P2SB_DEVFN_GOLDMONT)
+ p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT);
if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
return -ENOENT;
diff --git a/drivers/platform/x86/serdev_helpers.h b/drivers/platform/x86/serdev_helpers.h
new file mode 100644
index 000000000000..bcf3a0c356ea
--- /dev/null
+++ b/drivers/platform/x86/serdev_helpers.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * In some cases UART attached devices which require an in kernel driver,
+ * e.g. UART attached Bluetooth HCIs are described in the ACPI tables
+ * by an ACPI device with a broken or missing UartSerialBusV2() resource.
+ *
+ * This causes the kernel to create a /dev/ttyS# char-device for the UART
+ * instead of creating an in kernel serdev-controller + serdev-device pair
+ * for the in kernel driver.
+ *
+ * The quirk handling in acpi_quirk_skip_serdev_enumeration() makes the kernel
+ * create a serdev-controller device for these UARTs instead of a /dev/ttyS#.
+ *
+ * Instantiating the actual serdev-device to bind to is up to pdx86 code;
+ * this header provides a helper for getting the serdev-controller device.
+ */
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/printk.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+
+static inline struct device *
+get_serdev_controller(const char *serial_ctrl_hid,
+ const char *serial_ctrl_uid,
+ int serial_ctrl_port,
+ const char *serdev_ctrl_name)
+{
+ struct device *ctrl_dev, *child;
+ struct acpi_device *ctrl_adev;
+ char name[32];
+ int i;
+
+ ctrl_adev = acpi_dev_get_first_match_dev(serial_ctrl_hid, serial_ctrl_uid, -1);
+ if (!ctrl_adev) {
+ pr_err("error could not get %s/%s serial-ctrl adev\n",
+ serial_ctrl_hid, serial_ctrl_uid);
+ return ERR_PTR(-ENODEV);
+ }
+
+ /* get_first_physical_node() returns a weak ref */
+ ctrl_dev = get_device(acpi_get_first_physical_node(ctrl_adev));
+ if (!ctrl_dev) {
+ pr_err("error could not get %s/%s serial-ctrl physical node\n",
+ serial_ctrl_hid, serial_ctrl_uid);
+ ctrl_dev = ERR_PTR(-ENODEV);
+ goto put_ctrl_adev;
+ }
+
+ /* Walk host -> uart-ctrl -> port -> serdev-ctrl */
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ snprintf(name, sizeof(name), "%s:0", dev_name(ctrl_dev));
+ break;
+ case 1:
+ snprintf(name, sizeof(name), "%s.%d",
+ dev_name(ctrl_dev), serial_ctrl_port);
+ break;
+ case 2:
+ strscpy(name, serdev_ctrl_name, sizeof(name));
+ break;
+ }
+
+ child = device_find_child_by_name(ctrl_dev, name);
+ put_device(ctrl_dev);
+ if (!child) {
+ pr_err("error could not find '%s' device\n", name);
+ ctrl_dev = ERR_PTR(-ENODEV);
+ goto put_ctrl_adev;
+ }
+
+ ctrl_dev = child;
+ }
+
+put_ctrl_adev:
+ acpi_dev_put(ctrl_adev);
+ return ctrl_dev;
+}
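
A hypothetical caller sketch for the helper above; the HID/UID, port number, and controller name are made-up illustration values, not taken from real board data:

    struct device *serdev_ctrl;

    serdev_ctrl = get_serdev_controller("80860F0A", "1", 0, "serial0");
    if (IS_ERR(serdev_ctrl))
            return PTR_ERR(serdev_ctrl);

    /* ... find or create the serdev device on this controller ... */

    put_device(serdev_ctrl);   /* the helper returned a reference */
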
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 3a396b763c49..ce3e08815a8e 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -1009,7 +1009,16 @@ static ssize_t current_value_store(struct kobject *kobj,
* Note - this sets the variable and then the password as separate
* WMI calls. Function tlmi_save_bios_settings will error if the
* password is incorrect.
+	 * Workstations require the opcode to be set before changing the
+ * attribute.
*/
+ if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
+ ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
+ tlmi_priv.pwd_admin->password);
+ if (ret)
+ goto out;
+ }
+
set_str = kasprintf(GFP_KERNEL, "%s,%s;", setting->display_name,
new_setting);
if (!set_str) {
@@ -1021,17 +1030,10 @@ static ssize_t current_value_store(struct kobject *kobj,
if (ret)
goto out;
- if (tlmi_priv.save_mode == TLMI_SAVE_BULK) {
+ if (tlmi_priv.save_mode == TLMI_SAVE_BULK)
tlmi_priv.save_required = true;
- } else {
- if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
- ret = tlmi_opcode_setting("WmiOpcodePasswordAdmin",
- tlmi_priv.pwd_admin->password);
- if (ret)
- goto out;
- }
+ else
ret = tlmi_save_bios_settings("");
- }
} else { /* old non-opcode based authentication method (deprecated) */
if (tlmi_priv.pwd_admin->valid && tlmi_priv.pwd_admin->password[0]) {
auth_str = kasprintf(GFP_KERNEL, "%s,%s,%s;",
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index c4895e9bc714..5ecd9d33250d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -10308,6 +10308,7 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
return 0;
default:
/* Unknown function */
+ pr_debug("unknown function 0x%x\n", funcmode);
return -EOPNOTSUPP;
}
return 0;
@@ -10493,8 +10494,8 @@ static void dytc_profile_refresh(void)
return;
perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
- convert_dytc_to_profile(funcmode, perfmode, &profile);
- if (profile != dytc_current_profile) {
+ err = convert_dytc_to_profile(funcmode, perfmode, &profile);
+ if (!err && profile != dytc_current_profile) {
dytc_current_profile = profile;
platform_profile_notify();
}
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 7aee5e9ff2b8..975cf24ae359 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -81,7 +81,7 @@ static const struct property_entry chuwi_hi8_air_props[] = {
};
static const struct ts_dmi_data chuwi_hi8_air_data = {
- .acpi_name = "MSSL1680:00",
+ .acpi_name = "MSSL1680",
.properties = chuwi_hi8_air_props,
};
@@ -415,18 +415,13 @@ static const struct property_entry gdix1001_upside_down_props[] = {
{ }
};
-static const struct ts_dmi_data gdix1001_00_upside_down_data = {
- .acpi_name = "GDIX1001:00",
- .properties = gdix1001_upside_down_props,
-};
-
-static const struct ts_dmi_data gdix1001_01_upside_down_data = {
- .acpi_name = "GDIX1001:01",
+static const struct ts_dmi_data gdix1001_upside_down_data = {
+ .acpi_name = "GDIX1001",
.properties = gdix1001_upside_down_props,
};
-static const struct ts_dmi_data gdix1002_00_upside_down_data = {
- .acpi_name = "GDIX1002:00",
+static const struct ts_dmi_data gdix1002_upside_down_data = {
+ .acpi_name = "GDIX1002",
.properties = gdix1001_upside_down_props,
};
@@ -1412,7 +1407,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Juno Tablet */
- .driver_data = (void *)&gdix1002_00_upside_down_data,
+ .driver_data = (void *)&gdix1002_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
/* Both product- and board-name being "Default string" is somewhat rare */
@@ -1658,7 +1653,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X89 (Android version / BIOS) */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "WISKY"),
DMI_MATCH(DMI_BOARD_NAME, "3G062i"),
@@ -1666,7 +1661,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X89 (Windows version / BIOS) */
- .driver_data = (void *)&gdix1001_01_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
/* tPAD is too generic, also match on bios date */
DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
@@ -1684,7 +1679,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* Teclast X98 Pro */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
/*
* Only match BIOS date, because the manufacturers
@@ -1788,7 +1783,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* "WinBook TW100" */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
@@ -1796,7 +1791,7 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
{
/* WinBook TW700 */
- .driver_data = (void *)&gdix1001_00_upside_down_data,
+ .driver_data = (void *)&gdix1001_upside_down_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
@@ -1821,7 +1816,7 @@ static void ts_dmi_add_props(struct i2c_client *client)
int error;
if (has_acpi_companion(dev) &&
- !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) {
+ strstarts(client->name, ts_data->acpi_name)) {
error = device_create_managed_software_node(dev, ts_data->properties, NULL);
if (error)
dev_err(dev, "failed to add properties: %d\n", error);
diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
index f8221a15575b..a3415f1c0b5f 100644
--- a/drivers/platform/x86/x86-android-tablets/core.c
+++ b/drivers/platform/x86/x86-android-tablets/core.c
@@ -21,6 +21,7 @@
#include <linux/string.h>
#include "x86-android-tablets.h"
+#include "../serdev_helpers.h"
static struct platform_device *x86_android_tablet_device;
@@ -113,6 +114,9 @@ int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data)
if (irq_type != IRQ_TYPE_NONE && irq_type != irq_get_trigger_type(irq))
irq_set_irq_type(irq, irq_type);
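+	/* The GPIO was only needed to look up the IRQ, release it again */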
+ if (data->free_gpio)
+ devm_gpiod_put(&x86_android_tablet_device->dev, gpiod);
+
return irq;
case X86_ACPI_IRQ_TYPE_PMIC:
status = acpi_get_handle(NULL, data->chip, &handle);
@@ -229,38 +233,20 @@ static __init int x86_instantiate_spi_dev(const struct x86_dev_info *dev_info, i
static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int idx)
{
- struct acpi_device *ctrl_adev, *serdev_adev;
+ struct acpi_device *serdev_adev;
struct serdev_device *serdev;
struct device *ctrl_dev;
int ret = -ENODEV;
- ctrl_adev = acpi_dev_get_first_match_dev(info->ctrl_hid, info->ctrl_uid, -1);
- if (!ctrl_adev) {
- pr_err("error could not get %s/%s ctrl adev\n",
- info->ctrl_hid, info->ctrl_uid);
- return -ENODEV;
- }
+ ctrl_dev = get_serdev_controller(info->ctrl_hid, info->ctrl_uid, 0,
+ info->ctrl_devname);
+ if (IS_ERR(ctrl_dev))
+ return PTR_ERR(ctrl_dev);
serdev_adev = acpi_dev_get_first_match_dev(info->serdev_hid, NULL, -1);
if (!serdev_adev) {
pr_err("error could not get %s serdev adev\n", info->serdev_hid);
- goto put_ctrl_adev;
- }
-
- /* get_first_physical_node() returns a weak ref, no need to put() it */
- ctrl_dev = acpi_get_first_physical_node(ctrl_adev);
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s ctrl physical dev\n",
- info->ctrl_hid, info->ctrl_uid);
- goto put_serdev_adev;
- }
-
- /* ctrl_dev now points to the controller's parent, get the controller */
- ctrl_dev = device_find_child_by_name(ctrl_dev, info->ctrl_devname);
- if (!ctrl_dev) {
- pr_err("error could not get %s/%s %s ctrl dev\n",
- info->ctrl_hid, info->ctrl_uid, info->ctrl_devname);
- goto put_serdev_adev;
+ goto put_ctrl_dev;
}
serdev = serdev_device_alloc(to_serdev_controller(ctrl_dev));
@@ -283,8 +269,8 @@ static __init int x86_instantiate_serdev(const struct x86_serdev_info *info, int
put_serdev_adev:
acpi_dev_put(serdev_adev);
-put_ctrl_adev:
- acpi_dev_put(ctrl_adev);
+put_ctrl_dev:
+ put_device(ctrl_dev);
return ret;
}
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index f1c66a61bfc5..c297391955ad 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -116,6 +116,7 @@ static const struct x86_i2c_client_info lenovo_yb1_x90_i2c_clients[] __initconst
.trigger = ACPI_EDGE_SENSITIVE,
.polarity = ACPI_ACTIVE_LOW,
.con_id = "goodix_ts_irq",
+ .free_gpio = true,
},
}, {
/* Wacom Digitizer in keyboard half */
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index bc6bbf7ec6ea..278402dcb808 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -68,7 +68,7 @@ static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst =
},
};
-static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
+static struct gpiod_lookup_table acer_b1_750_nvt_ts_gpios = {
.dev_id = "i2c-NVT-ts",
.table = {
GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
@@ -77,7 +77,7 @@ static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
};
static struct gpiod_lookup_table * const acer_b1_750_gpios[] = {
- &acer_b1_750_goodix_gpios,
+ &acer_b1_750_nvt_ts_gpios,
&int3496_reference_gpios,
NULL
};
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 49fed9410adb..468993edfeee 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -39,6 +39,7 @@ struct x86_acpi_irq_data {
int index;
int trigger; /* ACPI_EDGE_SENSITIVE / ACPI_LEVEL_SENSITIVE */
int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */
+ bool free_gpio; /* Release GPIO after getting IRQ (for TYPE_GPIOINT) */
const char *con_id;
};
diff --git a/drivers/pmdomain/arm/scmi_perf_domain.c b/drivers/pmdomain/arm/scmi_perf_domain.c
index 709bbc448fad..d7ef46ccd9b8 100644
--- a/drivers/pmdomain/arm/scmi_perf_domain.c
+++ b/drivers/pmdomain/arm/scmi_perf_domain.c
@@ -159,6 +159,9 @@ static void scmi_perf_domain_remove(struct scmi_device *sdev)
struct genpd_onecell_data *scmi_pd_data = dev_get_drvdata(dev);
int i;
+ if (!scmi_pd_data)
+ return;
+
of_genpd_del_provider(dev->of_node);
for (i = 0; i < scmi_pd_data->num_domains; i++)
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index a1f6cba3ae6c..18e232b5ed53 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -1109,7 +1109,7 @@ static int __init genpd_power_off_unused(void)
return 0;
}
-late_initcall(genpd_power_off_unused);
+late_initcall_sync(genpd_power_off_unused);
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pmdomain/mediatek/mtk-pm-domains.c b/drivers/pmdomain/mediatek/mtk-pm-domains.c
index e26dc17d07ad..e274e3315fe7 100644
--- a/drivers/pmdomain/mediatek/mtk-pm-domains.c
+++ b/drivers/pmdomain/mediatek/mtk-pm-domains.c
@@ -561,6 +561,11 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
goto err_put_node;
}
+ /* recursive call to add all subdomains */
+ ret = scpsys_add_subdomain(scpsys, child);
+ if (ret)
+ goto err_put_node;
+
ret = pm_genpd_add_subdomain(parent_pd, child_pd);
if (ret) {
dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
@@ -570,11 +575,6 @@ static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *paren
dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
child_pd->name);
}
-
- /* recursive call to add all subdomains */
- ret = scpsys_add_subdomain(scpsys, child);
- if (ret)
- goto err_put_node;
}
return 0;
@@ -588,9 +588,6 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
{
int ret;
- if (scpsys_domain_is_on(pd))
- scpsys_power_off(&pd->genpd);
-
/*
* We're in the error cleanup already, so we only complain,
* but won't emit another error on top of the original one.
@@ -600,6 +597,8 @@ static void scpsys_remove_one_domain(struct scpsys_domain *pd)
dev_err(pd->scpsys->dev,
"failed to remove domain '%s' : %d - state may be inconsistent\n",
pd->genpd.name, ret);
+ if (scpsys_domain_is_on(pd))
+ scpsys_power_off(&pd->genpd);
clk_bulk_put(pd->num_clks, pd->clks);
clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
index 3078896b1300..de9121ef4216 100644
--- a/drivers/pmdomain/qcom/rpmhpd.c
+++ b/drivers/pmdomain/qcom/rpmhpd.c
@@ -217,7 +217,6 @@ static struct rpmhpd *sa8540p_rpmhpds[] = {
[SC8280XP_CX] = &cx,
[SC8280XP_CX_AO] = &cx_ao,
[SC8280XP_EBI] = &ebi,
- [SC8280XP_GFX] = &gfx,
[SC8280XP_LCX] = &lcx,
[SC8280XP_LMX] = &lmx,
[SC8280XP_MMCX] = &mmcx,
@@ -692,6 +691,7 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
unsigned int active_corner, sleep_corner;
unsigned int this_active_corner = 0, this_sleep_corner = 0;
unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+ unsigned int peer_enabled_corner;
if (pd->state_synced) {
to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
@@ -701,9 +701,11 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
this_sleep_corner = pd->level_count - 1;
}
- if (peer && peer->enabled)
- to_active_sleep(peer, peer->corner, &peer_active_corner,
+ if (peer && peer->enabled) {
+ peer_enabled_corner = max(peer->corner, peer->enable_corner);
+ to_active_sleep(peer, peer_enabled_corner, &peer_active_corner,
&peer_sleep_corner);
+ }
active_corner = max(this_active_corner, peer_active_corner);
diff --git a/drivers/pmdomain/renesas/r8a77980-sysc.c b/drivers/pmdomain/renesas/r8a77980-sysc.c
index 39ca84a67daa..621e411fc999 100644
--- a/drivers/pmdomain/renesas/r8a77980-sysc.c
+++ b/drivers/pmdomain/renesas/r8a77980-sysc.c
@@ -25,7 +25,8 @@ static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
PD_CPU_NOCR },
{ "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
PD_CPU_NOCR },
- { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON },
+ { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON,
+ PD_CPU_NOCR },
{ "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON },
{ "a2ir0", 0x400, 0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR },
{ "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR },
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index f21cb05815ec..3e31375491d5 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -978,6 +978,7 @@ config CHARGER_QCOM_SMB2
config FUEL_GAUGE_MM8013
tristate "Mitsumi MM8013 fuel gauge driver"
depends on I2C
+ select REGMAP_I2C
help
Say Y here to enable the Mitsumi MM8013 fuel gauge driver.
It enables the monitoring of many battery parameters, including
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 3a1798b0c1a7..9910c600743e 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -209,7 +209,9 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
{
struct bq27xxx_device_info *di = i2c_get_clientdata(client);
- free_irq(client->irq, di);
+ if (client->irq)
+ free_irq(client->irq, di);
+
bq27xxx_battery_teardown(di);
mutex_lock(&battery_mutex);
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 2feed036c1cd..00c861899a47 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1564,7 +1564,7 @@ struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id
if (id_is_cpu) {
rp->id = topology_logical_die_id(id);
rp->lead_cpu = id;
- if (topology_max_die_per_package() > 1)
+ if (topology_max_dies_per_package() > 1)
snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d-die-%d",
topology_physical_package_id(id), topology_die_id(id));
else
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 5dd5f188e14f..604541dcb320 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -155,6 +155,18 @@ config PTP_1588_CLOCK_IDTCM
To compile this driver as a module, choose M here: the module
will be called ptp_clockmatrix.
+config PTP_1588_CLOCK_FC3W
+	tristate "Renesas FemtoClock3 Wireless as PTP clock"
+ depends on PTP_1588_CLOCK && I2C
+ default n
+ help
+ This driver adds support for using Renesas FemtoClock3 Wireless
+	as a PTP clock. This clock is only useful if your timestamping
+	MAC is connected to the Renesas chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ptp_fc3.
+
config PTP_1588_CLOCK_MOCK
tristate "Mock-up PTP clock"
depends on PTP_1588_CLOCK
diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile
index dea0cebd2303..68bf02078053 100644
--- a/drivers/ptp/Makefile
+++ b/drivers/ptp/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o
ptp-qoriq-y += ptp_qoriq.o
ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o
obj-$(CONFIG_PTP_1588_CLOCK_IDTCM) += ptp_clockmatrix.o
+obj-$(CONFIG_PTP_1588_CLOCK_FC3W) += ptp_fc3.o
obj-$(CONFIG_PTP_1588_CLOCK_IDT82P33) += ptp_idt82p33.o
obj-$(CONFIG_PTP_1588_CLOCK_MOCK) += ptp_mock.o
obj-$(CONFIG_PTP_1588_CLOCK_VMW) += ptp_vmw.o
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 15b804ba4868..c56cd0f63909 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -4,7 +4,6 @@
*
* Copyright (C) 2010 OMICRON electronics GmbH
*/
-#include <linux/idr.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -16,6 +15,7 @@
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
+#include <linux/xarray.h>
#include <uapi/linux/sched/types.h>
#include "ptp_private.h"
@@ -25,13 +25,16 @@
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
-struct class *ptp_class;
+const struct class ptp_class = {
+ .name = "ptp",
+ .dev_groups = ptp_groups
+};
/* private globals */
static dev_t ptp_devt;
-static DEFINE_IDA(ptp_clocks_map);
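+/* All registered PTP clocks, keyed by clock index */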
+static DEFINE_XARRAY_ALLOC(ptp_clocks_map);
/* time stamp event queue operations */
@@ -44,18 +47,31 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
struct ptp_clock_event *src)
{
struct ptp_extts_event *dst;
+ struct timespec64 offset_ts;
unsigned long flags;
s64 seconds;
u32 remainder;
- seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
+ if (src->type == PTP_CLOCK_EXTTS) {
+ seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
+ } else if (src->type == PTP_CLOCK_EXTOFF) {
+ offset_ts = ns_to_timespec64(src->offset);
+ seconds = offset_ts.tv_sec;
+ remainder = offset_ts.tv_nsec;
+ } else {
+ WARN(1, "%s: unknown type %d\n", __func__, src->type);
+ return;
+ }
spin_lock_irqsave(&queue->lock, flags);
dst = &queue->buf[queue->tail];
dst->index = src->index;
+ dst->flags = PTP_EXTTS_EVENT_VALID;
dst->t.sec = seconds;
dst->t.nsec = remainder;
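+	/* Offset events reuse the extts record; PTP_EXT_OFFSET marks them */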
+ if (src->type == PTP_CLOCK_EXTOFF)
+ dst->flags |= PTP_EXT_OFFSET;
/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
if (!queue_free(queue))
@@ -188,7 +204,7 @@ static void ptp_clock_release(struct device *dev)
bitmap_free(tsevq->mask);
kfree(tsevq);
debugfs_remove(ptp->debugfs_root);
- ida_free(&ptp_clocks_map, ptp->index);
+ xa_erase(&ptp_clocks_map, ptp->index);
kfree(ptp);
}
@@ -220,7 +236,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
{
struct ptp_clock *ptp;
struct timestamp_event_queue *queue = NULL;
- int err = 0, index, major = MAJOR(ptp_devt);
+ int err, index, major = MAJOR(ptp_devt);
char debugfsname[16];
size_t size;
@@ -228,16 +244,16 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
return ERR_PTR(-EINVAL);
/* Initialize a clock structure. */
- err = -ENOMEM;
ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
- if (ptp == NULL)
+ if (!ptp) {
+ err = -ENOMEM;
goto no_memory;
+ }
- index = ida_alloc_max(&ptp_clocks_map, MINORMASK, GFP_KERNEL);
- if (index < 0) {
- err = index;
+ err = xa_alloc(&ptp_clocks_map, &index, ptp, xa_limit_31b,
+ GFP_KERNEL);
+ if (err)
goto no_slot;
- }
ptp->clock.ops = ptp_clock_ops;
ptp->info = info;
@@ -245,13 +261,17 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
ptp->index = index;
INIT_LIST_HEAD(&ptp->tsevqs);
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
- if (!queue)
+ if (!queue) {
+ err = -ENOMEM;
goto no_memory_queue;
+ }
list_add_tail(&queue->qlist, &ptp->tsevqs);
spin_lock_init(&ptp->tsevqs_lock);
queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
- if (!queue->mask)
+ if (!queue->mask) {
+ err = -ENOMEM;
goto no_memory_bitmap;
+ }
bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
spin_lock_init(&queue->lock);
mutex_init(&ptp->pincfg_mux);
@@ -322,7 +342,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
/* Initialize a new device of our class in our clock structure. */
device_initialize(&ptp->dev);
ptp->dev.devt = ptp->devid;
- ptp->dev.class = ptp_class;
+ ptp->dev.class = &ptp_class;
ptp->dev.parent = parent;
ptp->dev.groups = ptp->pin_attr_groups;
ptp->dev.release = ptp_clock_release;
@@ -365,7 +385,7 @@ no_memory_bitmap:
list_del(&queue->qlist);
kfree(queue);
no_memory_queue:
- ida_free(&ptp_clocks_map, index);
+ xa_erase(&ptp_clocks_map, index);
no_slot:
kfree(ptp);
no_memory:
@@ -417,6 +437,7 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
break;
case PTP_CLOCK_EXTTS:
+ case PTP_CLOCK_EXTOFF:
/* Enqueue timestamp on selected queues */
spin_lock_irqsave(&ptp->tsevqs_lock, flags);
list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
@@ -495,19 +516,19 @@ EXPORT_SYMBOL(ptp_cancel_worker_sync);
static void __exit ptp_exit(void)
{
- class_destroy(ptp_class);
+ class_unregister(&ptp_class);
unregister_chrdev_region(ptp_devt, MINORMASK + 1);
- ida_destroy(&ptp_clocks_map);
+ xa_destroy(&ptp_clocks_map);
}
static int __init ptp_init(void)
{
int err;
- ptp_class = class_create("ptp");
- if (IS_ERR(ptp_class)) {
+ err = class_register(&ptp_class);
+ if (err) {
pr_err("ptp: failed to allocate class\n");
- return PTR_ERR(ptp_class);
+ return err;
}
err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
@@ -516,12 +537,11 @@ static int __init ptp_init(void)
goto no_region;
}
- ptp_class->dev_groups = ptp_groups;
pr_info("PTP clock support registered\n");
return 0;
no_region:
- class_destroy(ptp_class);
+ class_unregister(&ptp_class);
return err;
}
diff --git a/drivers/ptp/ptp_fc3.c b/drivers/ptp/ptp_fc3.c
new file mode 100644
index 000000000000..6ef982862e27
--- /dev/null
+++ b/drivers/ptp/ptp_fc3.c
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PTP hardware clock driver for the FemtoClock3 family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/timekeeping.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <linux/mfd/rsmu.h>
+#include <linux/mfd/idtRC38xxx_reg.h>
+#include <asm/unaligned.h>
+
+#include "ptp_private.h"
+#include "ptp_fc3.h"
+
+MODULE_DESCRIPTION("Driver for IDT FemtoClock3(TM) family");
+MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+
+/*
+ * The name of the firmware file to be loaded; it
+ * overrides any automatic selection.
+ */
+static char *firmware;
+module_param(firmware, charp, 0);
+
+static s64 ns2counters(struct idtfc3 *idtfc3, s64 nsec, u32 *sub_ns)
+{
+ s64 sync;
+ s32 rem;
+
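+	/*
+	 * Floor-divide nsec into whole sync periods plus a non-negative
+	 * remainder, e.g. with ns_per_sync = 10^9, nsec = -1 yields
+	 * sync = -1 and *sub_ns = 999999999, so that
+	 * nsec == sync * ns_per_sync + *sub_ns always holds.
+	 */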
+ if (likely(nsec >= 0)) {
+ sync = div_u64_rem(nsec, idtfc3->ns_per_sync, &rem);
+ *sub_ns = rem;
+ } else {
+ sync = -div_u64_rem(-nsec - 1, idtfc3->ns_per_sync, &rem) - 1;
+ *sub_ns = idtfc3->ns_per_sync - rem - 1;
+ }
+
+ return sync * idtfc3->ns_per_sync;
+}
+
+static s64 tdc_meas2offset(struct idtfc3 *idtfc3, u64 meas_read)
+{
+ s64 coarse, fine;
+
+ fine = sign_extend64(FIELD_GET(FINE_MEAS_MASK, meas_read), 12);
+ coarse = sign_extend64(FIELD_GET(COARSE_MEAS_MASK, meas_read), (39 - 13));
+
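+	/*
+	 * One fine count is 1/62 of a TDC APLL period and one coarse count
+	 * is a full TIME_REF period; convert both to nanoseconds.
+	 */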
+ fine = div64_s64(fine * NSEC_PER_SEC, idtfc3->tdc_apll_freq * 62LL);
+ coarse = div64_s64(coarse * NSEC_PER_SEC, idtfc3->time_ref_freq);
+
+ return coarse + fine;
+}
+
+static s64 tdc_offset2phase(struct idtfc3 *idtfc3, s64 offset_ns)
+{
+ if (offset_ns > idtfc3->ns_per_sync / 2)
+ offset_ns -= idtfc3->ns_per_sync;
+
+ return offset_ns * idtfc3->tdc_offset_sign;
+}
+
+static int idtfc3_set_lpf_mode(struct idtfc3 *idtfc3, u8 mode)
+{
+ int err;
+
+ if (mode >= LPF_INVALID)
+ return -EINVAL;
+
+ if (idtfc3->lpf_mode == mode)
+ return 0;
+
+ err = regmap_bulk_write(idtfc3->regmap, LPF_MODE_CNFG, &mode, sizeof(mode));
+ if (err)
+ return err;
+
+ idtfc3->lpf_mode = mode;
+
+ return 0;
+}
+
+static int idtfc3_enable_lpf(struct idtfc3 *idtfc3, bool enable)
+{
+ u8 val;
+ int err;
+
+ err = regmap_bulk_read(idtfc3->regmap, LPF_CTRL, &val, sizeof(val));
+ if (err)
+ return err;
+
+	if (enable)
+ val |= LPF_EN;
+ else
+ val &= ~LPF_EN;
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_CTRL, &val, sizeof(val));
+}
+
+static int idtfc3_get_time_ref_freq(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 buf[4];
+ u8 time_ref_div;
+ u8 time_clk_div;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_MEAS_DIV_CNFG, buf, sizeof(buf));
+ if (err)
+ return err;
+ time_ref_div = FIELD_GET(TIME_REF_DIV_MASK, get_unaligned_le32(buf)) + 1;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_COUNT, buf, 1);
+ if (err)
+ return err;
+ time_clk_div = (buf[0] & TIME_CLOCK_COUNT_MASK) + 1;
+ idtfc3->time_ref_freq = idtfc3->hw_param.time_clk_freq *
+ time_clk_div / time_ref_div;
+
+ return 0;
+}
+
+static int idtfc3_get_tdc_offset_sign(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 buf[4];
+ u32 val;
+ u8 sig1, sig2;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_TDC_FANOUT_CNFG, buf, sizeof(buf));
+ if (err)
+ return err;
+
+ val = get_unaligned_le32(buf);
+ if ((val & TIME_SYNC_TO_TDC_EN) != TIME_SYNC_TO_TDC_EN) {
+		dev_err(idtfc3->dev, "TIME_SYNC_TO_TDC_EN is off!");
+ return -EINVAL;
+ }
+
+ sig1 = FIELD_GET(SIG1_MUX_SEL_MASK, val);
+ sig2 = FIELD_GET(SIG2_MUX_SEL_MASK, val);
+
+ if ((sig1 == sig2) || ((sig1 != TIME_SYNC) && (sig2 != TIME_SYNC))) {
+ dev_err(idtfc3->dev, "Invalid tdc_mux_sel sig1=%d sig2=%d", sig1, sig2);
+ return -EINVAL;
+ } else if (sig1 == TIME_SYNC) {
+ idtfc3->tdc_offset_sign = 1;
+ } else if (sig2 == TIME_SYNC) {
+ idtfc3->tdc_offset_sign = -1;
+ }
+
+ return 0;
+}
+
+static int idtfc3_lpf_bw(struct idtfc3 *idtfc3, u8 shift, u8 mult)
+{
+ u8 val = FIELD_PREP(LPF_BW_SHIFT, shift) | FIELD_PREP(LPF_BW_MULT, mult);
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_BW_CNFG, &val, sizeof(val));
+}
+
+static int idtfc3_enable_tdc(struct idtfc3 *idtfc3, bool enable, u8 meas_mode)
+{
+ int err;
+ u8 val = 0;
+
+ /* Disable TDC first */
+ err = regmap_bulk_write(idtfc3->regmap, TIME_CLOCK_MEAS_CTRL, &val, sizeof(val));
+ if (err)
+ return err;
+
+	if (!enable)
+ return idtfc3_lpf_bw(idtfc3, LPF_BW_SHIFT_DEFAULT, LPF_BW_MULT_DEFAULT);
+
+ if (meas_mode >= MEAS_MODE_INVALID)
+ return -EINVAL;
+
+ /* Change TDC meas mode */
+ err = regmap_bulk_write(idtfc3->regmap, TIME_CLOCK_MEAS_CNFG,
+ &meas_mode, sizeof(meas_mode));
+ if (err)
+ return err;
+
+ /* Enable TDC */
+ val = TDC_MEAS_EN;
+ if (meas_mode == CONTINUOUS)
+ val |= TDC_MEAS_START;
+ err = regmap_bulk_write(idtfc3->regmap, TIME_CLOCK_MEAS_CTRL, &val, sizeof(val));
+ if (err)
+ return err;
+
+ return idtfc3_lpf_bw(idtfc3, LPF_BW_SHIFT_1PPS, LPF_BW_MULT_DEFAULT);
+}
+
+static bool get_tdc_meas(struct idtfc3 *idtfc3, s64 *offset_ns)
+{
+ bool valid = false;
+ u8 buf[9];
+ u8 val;
+ int err;
+
+ while (true) {
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FIFO_STS,
+ &val, sizeof(val));
+ if (err)
+ return false;
+
+ if (val & FIFO_EMPTY)
+ break;
+
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FIFO_READ_REQ,
+ &buf, sizeof(buf));
+ if (err)
+ return false;
+
+ valid = true;
+ }
+
+ if (valid)
+ *offset_ns = tdc_meas2offset(idtfc3, get_unaligned_le64(&buf[1]));
+
+ return valid;
+}
+
+static int check_tdc_fifo_overrun(struct idtfc3 *idtfc3)
+{
+ u8 val;
+ int err;
+
+ /* Check if FIFO is overrun */
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FIFO_STS, &val, sizeof(val));
+ if (err)
+ return err;
+
+ if (!(val & FIFO_FULL))
+ return 0;
+
+	dev_warn(idtfc3->dev, "TDC FIFO overrun!");
+
+ err = idtfc3_enable_tdc(idtfc3, true, CONTINUOUS);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int get_tdc_meas_continuous(struct idtfc3 *idtfc3)
+{
+ int err;
+ s64 offset_ns;
+ struct ptp_clock_event event;
+
+ err = check_tdc_fifo_overrun(idtfc3);
+ if (err)
+ return err;
+
+ if (get_tdc_meas(idtfc3, &offset_ns) && offset_ns >= 0) {
+ event.index = 0;
+ event.offset = tdc_offset2phase(idtfc3, offset_ns);
+ event.type = PTP_CLOCK_EXTOFF;
+ ptp_clock_event(idtfc3->ptp_clock, &event);
+ }
+
+ return 0;
+}
+
+static int idtfc3_read_subcounter(struct idtfc3 *idtfc3)
+{
+ u8 buf[5] = {0};
+ int err;
+
+ err = regmap_bulk_read(idtfc3->regmap, TOD_COUNTER_READ_REQ,
+ &buf, sizeof(buf));
+ if (err)
+ return err;
+
+	/* sync_counter_value is bits [82:31], sub_sync_counter_value is bits [30:0] */
+ return get_unaligned_le32(&buf[1]) & SUB_SYNC_COUNTER_MASK;
+}
+
+static int idtfc3_tod_update_is_done(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 req;
+
+ err = read_poll_timeout_atomic(regmap_bulk_read, err, !req, USEC_PER_MSEC,
+ idtfc3->tc_write_timeout, true, idtfc3->regmap,
+ TOD_SYNC_LOAD_REQ_CTRL, &req, 1);
+ if (err)
+		dev_err(idtfc3->dev, "TOD counter write timeout!");
+
+ return err;
+}
+
+static int idtfc3_write_subcounter(struct idtfc3 *idtfc3, u32 counter)
+{
+ u8 buf[18] = {0};
+ int err;
+
+	/* sync_counter_value is bits [82:31], sub_sync_counter_value is bits [30:0] */
+ put_unaligned_le32(counter & SUB_SYNC_COUNTER_MASK, &buf[0]);
+
+ buf[16] = SUB_SYNC_LOAD_ENABLE | SYNC_LOAD_ENABLE;
+ buf[17] = SYNC_LOAD_REQ;
+
+ err = regmap_bulk_write(idtfc3->regmap, TOD_SYNC_LOAD_VAL_CTRL,
+ &buf, sizeof(buf));
+ if (err)
+ return err;
+
+ return idtfc3_tod_update_is_done(idtfc3);
+}
+
+static int idtfc3_timecounter_update(struct idtfc3 *idtfc3, u32 counter, s64 ns)
+{
+ int err;
+
+ err = idtfc3_write_subcounter(idtfc3, counter);
+ if (err)
+ return err;
+
+ /* Update time counter */
+ idtfc3->ns = ns;
+ idtfc3->last_counter = counter;
+
+ return 0;
+}
+
+static int idtfc3_timecounter_read(struct idtfc3 *idtfc3)
+{
+ int now, delta;
+
+ now = idtfc3_read_subcounter(idtfc3);
+ if (now < 0)
+ return now;
+
+ /* calculate the delta since the last idtfc3_timecounter_read(): */
+ if (now >= idtfc3->last_counter)
+ delta = now - idtfc3->last_counter;
+ else
+ delta = idtfc3->sub_sync_count - idtfc3->last_counter + now;
+
+ /* Update time counter */
+ idtfc3->ns += delta * idtfc3->ns_per_counter;
+ idtfc3->last_counter = now;
+
+ return 0;
+}
+
+static int _idtfc3_gettime(struct idtfc3 *idtfc3, struct timespec64 *ts)
+{
+ int err;
+
+ err = idtfc3_timecounter_read(idtfc3);
+ if (err)
+ return err;
+
+ *ts = ns_to_timespec64(idtfc3->ns);
+
+ return 0;
+}
+
+static int idtfc3_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_gettime(idtfc3, ts);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_settime(struct idtfc3 *idtfc3, const struct timespec64 *ts)
+{
+ s64 offset_ns, now_ns;
+ u32 counter, sub_ns;
+ int now;
+
+	if (!timespec64_valid(ts)) {
+ dev_err(idtfc3->dev, "%s: invalid timespec", __func__);
+ return -EINVAL;
+ }
+
+ now = idtfc3_read_subcounter(idtfc3);
+ if (now < 0)
+ return now;
+
+ offset_ns = (idtfc3->sub_sync_count - now) * idtfc3->ns_per_counter;
+ now_ns = timespec64_to_ns(ts);
+ (void)ns2counters(idtfc3, offset_ns + now_ns, &sub_ns);
+
+ counter = sub_ns / idtfc3->ns_per_counter;
+ return idtfc3_timecounter_update(idtfc3, counter, now_ns);
+}
+
+static int idtfc3_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_settime(idtfc3, ts);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_adjtime(struct idtfc3 *idtfc3, s64 delta)
+{
+ /*
+	 * The TOD counter can be synchronously loaded with any value;
+	 * the load takes effect on the next Time Sync pulse.
+ */
+ s64 sync_ns;
+ u32 sub_ns;
+ u32 counter;
+
+ if (idtfc3->ns + delta < 0) {
+ dev_err(idtfc3->dev, "%lld ns adj is too large", delta);
+ return -EINVAL;
+ }
+
+ sync_ns = ns2counters(idtfc3, delta + idtfc3->ns_per_sync, &sub_ns);
+
+ counter = sub_ns / idtfc3->ns_per_counter;
+ return idtfc3_timecounter_update(idtfc3, counter, idtfc3->ns + sync_ns +
+ counter * idtfc3->ns_per_counter);
+}
+
+static int idtfc3_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_adjtime(idtfc3, delta);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_adjphase(struct idtfc3 *idtfc3, s32 delta)
+{
+ u8 buf[8] = {0};
+ int err;
+ s64 pcw;
+
+ err = idtfc3_set_lpf_mode(idtfc3, LPF_WP);
+ if (err)
+ return err;
+
+ /*
+ * Phase Control Word unit is: 10^9 / (TDC_APLL_FREQ * 124)
+ *
+ * delta * TDC_APLL_FREQ * 124
+ * PCW = ---------------------------
+ * 10^9
+ *
+ */
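+	/* e.g. a (hypothetical) 1 GHz TDC APLL maps delta = 1 ns to PCW = 124 */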
+ pcw = div_s64((s64)delta * idtfc3->tdc_apll_freq * 124, NSEC_PER_SEC);
+
+ put_unaligned_le64(pcw, buf);
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_WR_PHASE_CTRL, buf, sizeof(buf));
+}
+
+static int idtfc3_adjphase(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_adjphase(idtfc3, delta);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int _idtfc3_adjfine(struct idtfc3 *idtfc3, long scaled_ppm)
+{
+ u8 buf[8] = {0};
+ int err;
+ s64 fcw;
+
+ err = idtfc3_set_lpf_mode(idtfc3, LPF_WF);
+ if (err)
+ return err;
+
+ /*
+ * Frequency Control Word unit is: 2^-44 * 10^6 ppm
+ *
+ * adjfreq:
+ * ppb * 2^44
+ * FCW = ----------
+ * 10^9
+ *
+ * adjfine:
+ * ppm_16 * 2^28
+ * FCW = -------------
+ * 10^6
+ */
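+	/* e.g. scaled_ppm = 65536 (1 ppm) gives FCW = 2^44 / 10^6 ~= 17592186 */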
+ fcw = scaled_ppm * BIT(28);
+ fcw = div_s64(fcw, 1000000);
+
+ put_unaligned_le64(fcw, buf);
+
+ return regmap_bulk_write(idtfc3->regmap, LPF_WR_FREQ_CTRL, buf, sizeof(buf));
+}
+
+static int idtfc3_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err;
+
+ mutex_lock(idtfc3->lock);
+ err = _idtfc3_adjfine(idtfc3, scaled_ppm);
+ mutex_unlock(idtfc3->lock);
+
+ return err;
+}
+
+static int idtfc3_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(idtfc3->lock);
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (!on)
+ err = 0;
+ /* Only accept a 1-PPS aligned to the second. */
+ else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec)
+ err = -ERANGE;
+ else
+ err = 0;
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ if (on) {
+ /* Only accept requests for external phase offset */
+ if ((rq->extts.flags & PTP_EXT_OFFSET) != (PTP_EXT_OFFSET))
+ err = -EOPNOTSUPP;
+ else
+ err = idtfc3_enable_tdc(idtfc3, true, CONTINUOUS);
+ } else {
+ err = idtfc3_enable_tdc(idtfc3, false, MEAS_MODE_INVALID);
+ }
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(idtfc3->lock);
+
+ if (err)
+ dev_err(idtfc3->dev, "Failed in %s with err %d!", __func__, err);
+
+ return err;
+}
+
+static long idtfc3_aux_work(struct ptp_clock_info *ptp)
+{
+ struct idtfc3 *idtfc3 = container_of(ptp, struct idtfc3, caps);
+ static int tdc_get;
+
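+	/*
+	 * Poll the TDC FIFO on every run, but only re-read the timecounter
+	 * twice per TDC_GET_PERIOD runs, which is enough to catch counter
+	 * wraps.
+	 */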
+ mutex_lock(idtfc3->lock);
+ tdc_get %= TDC_GET_PERIOD;
+ if ((tdc_get == 0) || (tdc_get == TDC_GET_PERIOD / 2))
+ idtfc3_timecounter_read(idtfc3);
+ get_tdc_meas_continuous(idtfc3);
+ tdc_get++;
+ mutex_unlock(idtfc3->lock);
+
+ return idtfc3->tc_update_period;
+}
+
+static const struct ptp_clock_info idtfc3_caps = {
+ .owner = THIS_MODULE,
+ .max_adj = MAX_FFO_PPB,
+ .n_per_out = 1,
+ .n_ext_ts = 1,
+ .adjphase = &idtfc3_adjphase,
+ .adjfine = &idtfc3_adjfine,
+ .adjtime = &idtfc3_adjtime,
+ .gettime64 = &idtfc3_gettime,
+ .settime64 = &idtfc3_settime,
+ .enable = &idtfc3_enable,
+ .do_aux_work = &idtfc3_aux_work,
+};
+
+static int idtfc3_hw_calibrate(struct idtfc3 *idtfc3)
+{
+ int err = 0;
+ u8 val;
+
+ mdelay(10);
+ /*
+ * Toggle TDC_DAC_RECAL_REQ:
+ * (1) set tdc_en to 1
+ * (2) set tdc_dac_recal_req to 0
+ * (3) set tdc_dac_recal_req to 1
+ */
+ val = TDC_EN;
+ err = regmap_bulk_write(idtfc3->regmap, TDC_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ val = TDC_EN | TDC_DAC_RECAL_REQ;
+ err = regmap_bulk_write(idtfc3->regmap, TDC_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ mdelay(10);
+
+ /*
+ * Toggle APLL_REINIT:
+ * (1) set apll_reinit to 0
+ * (2) set apll_reinit to 1
+ */
+ val = 0;
+ err = regmap_bulk_write(idtfc3->regmap, SOFT_RESET_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ val = APLL_REINIT;
+ err = regmap_bulk_write(idtfc3->regmap, SOFT_RESET_CTRL,
+ &val, sizeof(val));
+ if (err)
+ return err;
+ mdelay(10);
+
+ return err;
+}
+
+static int idtfc3_init_timecounter(struct idtfc3 *idtfc3)
+{
+ int err;
+ u32 period_ms;
+
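+	/*
+	 * One sync period in ms, e.g. a full 1 s sub-sync window gives
+	 * period_ms = 1000 and a 100 ms worker period (TDC_GET_PERIOD = 10).
+	 */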
+ period_ms = idtfc3->sub_sync_count * MSEC_PER_SEC /
+ idtfc3->hw_param.time_clk_freq;
+
+ idtfc3->tc_update_period = msecs_to_jiffies(period_ms / TDC_GET_PERIOD);
+ idtfc3->tc_write_timeout = period_ms * USEC_PER_MSEC;
+
+ err = idtfc3_timecounter_update(idtfc3, 0, 0);
+ if (err)
+ return err;
+
+ err = idtfc3_timecounter_read(idtfc3);
+ if (err)
+ return err;
+
+ ptp_schedule_worker(idtfc3->ptp_clock, idtfc3->tc_update_period);
+
+ return 0;
+}
+
+static int idtfc3_get_tdc_apll_freq(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 tdc_fb_div_int;
+ u8 tdc_ref_div;
+ struct idtfc3_hw_param *param = &idtfc3->hw_param;
+
+ err = regmap_bulk_read(idtfc3->regmap, TDC_REF_DIV_CNFG,
+ &tdc_ref_div, sizeof(tdc_ref_div));
+ if (err)
+ return err;
+
+ err = regmap_bulk_read(idtfc3->regmap, TDC_FB_DIV_INT_CNFG,
+ &tdc_fb_div_int, sizeof(tdc_fb_div_int));
+ if (err)
+ return err;
+
+ tdc_fb_div_int &= TDC_FB_DIV_INT_MASK;
+ tdc_ref_div &= TDC_REF_DIV_CONFIG_MASK;
+
+ idtfc3->tdc_apll_freq = div_u64(param->xtal_freq * (u64)tdc_fb_div_int,
+ 1 << tdc_ref_div);
+
+ return 0;
+}
+
+static int idtfc3_get_fod(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 fod;
+
+ err = regmap_bulk_read(idtfc3->regmap, TIME_CLOCK_SRC, &fod, sizeof(fod));
+ if (err)
+ return err;
+
+ switch (fod) {
+ case 0:
+ idtfc3->fod_n = FOD_0;
+ break;
+ case 1:
+ idtfc3->fod_n = FOD_1;
+ break;
+ case 2:
+ idtfc3->fod_n = FOD_2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int idtfc3_get_sync_count(struct idtfc3 *idtfc3)
+{
+ int err;
+ u8 buf[4];
+
+ err = regmap_bulk_read(idtfc3->regmap, SUB_SYNC_GEN_CNFG, buf, sizeof(buf));
+ if (err)
+ return err;
+
+ idtfc3->sub_sync_count = (get_unaligned_le32(buf) & SUB_SYNC_COUNTER_MASK) + 1;
+ idtfc3->ns_per_counter = NSEC_PER_SEC / idtfc3->hw_param.time_clk_freq;
+ idtfc3->ns_per_sync = idtfc3->sub_sync_count * idtfc3->ns_per_counter;
+
+ return 0;
+}
+
+static int idtfc3_setup_hw_param(struct idtfc3 *idtfc3)
+{
+ int err;
+
+ err = idtfc3_get_fod(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_get_sync_count(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_get_time_ref_freq(idtfc3);
+ if (err)
+ return err;
+
+ return idtfc3_get_tdc_apll_freq(idtfc3);
+}
+
+static int idtfc3_configure_hw(struct idtfc3 *idtfc3)
+{
+ int err = 0;
+
+ err = idtfc3_hw_calibrate(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_enable_lpf(idtfc3, true);
+ if (err)
+ return err;
+
+ err = idtfc3_enable_tdc(idtfc3, false, MEAS_MODE_INVALID);
+ if (err)
+ return err;
+
+ err = idtfc3_get_tdc_offset_sign(idtfc3);
+ if (err)
+ return err;
+
+ return idtfc3_setup_hw_param(idtfc3);
+}
+
+static int idtfc3_set_overhead(struct idtfc3 *idtfc3)
+{
+ s64 current_ns = 0;
+ s64 lowest_ns = 0;
+ int err;
+ u8 i;
+ ktime_t start;
+ ktime_t stop;
+ ktime_t diff;
+
+ char buf[18] = {0};
+
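+	/* use the fastest of five dummy TOD writes as the write overhead */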
+ for (i = 0; i < 5; i++) {
+ start = ktime_get_raw();
+
+ err = regmap_bulk_write(idtfc3->regmap, TOD_SYNC_LOAD_VAL_CTRL,
+ &buf, sizeof(buf));
+ if (err)
+ return err;
+
+ stop = ktime_get_raw();
+
+ diff = ktime_sub(stop, start);
+
+ current_ns = ktime_to_ns(diff);
+
+ if (i == 0) {
+ lowest_ns = current_ns;
+ } else {
+ if (current_ns < lowest_ns)
+ lowest_ns = current_ns;
+ }
+ }
+
+ idtfc3->tod_write_overhead = lowest_ns;
+
+ return err;
+}
+
+static int idtfc3_enable_ptp(struct idtfc3 *idtfc3)
+{
+ int err;
+
+ idtfc3->caps = idtfc3_caps;
+ snprintf(idtfc3->caps.name, sizeof(idtfc3->caps.name), "IDT FC3W");
+ idtfc3->ptp_clock = ptp_clock_register(&idtfc3->caps, NULL);
+
+ if (IS_ERR(idtfc3->ptp_clock)) {
+ err = PTR_ERR(idtfc3->ptp_clock);
+ idtfc3->ptp_clock = NULL;
+ return err;
+ }
+
+ err = idtfc3_set_overhead(idtfc3);
+ if (err)
+ return err;
+
+ err = idtfc3_init_timecounter(idtfc3);
+ if (err)
+ return err;
+
+ dev_info(idtfc3->dev, "TIME_SYNC_CHANNEL registered as ptp%d",
+ idtfc3->ptp_clock->index);
+
+ return 0;
+}
+
+static int idtfc3_load_firmware(struct idtfc3 *idtfc3)
+{
+ char fname[128] = FW_FILENAME;
+ const struct firmware *fw;
+ struct idtfc3_fwrc *rec;
+ u16 addr;
+ u8 val;
+ int err;
+ s32 len;
+
+ idtfc3_default_hw_param(&idtfc3->hw_param);
+
+ if (firmware) /* module parameter */
+ snprintf(fname, sizeof(fname), "%s", firmware);
+
+ dev_info(idtfc3->dev, "requesting firmware '%s'\n", fname);
+
+ err = request_firmware(&fw, fname, idtfc3->dev);
+
+ if (err) {
+ dev_err(idtfc3->dev,
+ "requesting firmware failed with err %d!\n", err);
+ return err;
+ }
+
+ dev_dbg(idtfc3->dev, "firmware size %zu bytes\n", fw->size);
+
+ rec = (struct idtfc3_fwrc *)fw->data;
+
+ for (len = fw->size; len > 0; len -= sizeof(*rec)) {
+ if (rec->reserved) {
+ dev_err(idtfc3->dev,
+ "bad firmware, reserved field non-zero\n");
+ err = -EINVAL;
+ } else {
+ val = rec->value;
+ addr = rec->hiaddr << 8 | rec->loaddr;
+
+ rec++;
+
+ err = idtfc3_set_hw_param(&idtfc3->hw_param, addr, val);
+ }
+
+ if (err != -EINVAL) {
+ err = 0;
+
+ /* Max register */
+ if (addr >= 0xE88)
+ continue;
+
+ err = regmap_bulk_write(idtfc3->regmap, addr,
+ &val, sizeof(val));
+ }
+
+ if (err)
+ goto out;
+ }
+
+ err = idtfc3_configure_hw(idtfc3);
+out:
+ release_firmware(fw);
+ return err;
+}
+
+static int idtfc3_read_device_id(struct idtfc3 *idtfc3, u16 *device_id)
+{
+ int err;
+ u8 buf[2] = {0};
+
+ err = regmap_bulk_read(idtfc3->regmap, DEVICE_ID,
+ &buf, sizeof(buf));
+ if (err) {
+ dev_err(idtfc3->dev, "%s failed with %d", __func__, err);
+ return err;
+ }
+
+ *device_id = get_unaligned_le16(buf);
+
+ return 0;
+}
+
+static int idtfc3_check_device_compatibility(struct idtfc3 *idtfc3)
+{
+ int err;
+ u16 device_id;
+
+ err = idtfc3_read_device_id(idtfc3, &device_id);
+ if (err)
+ return err;
+
+ if ((device_id & DEVICE_ID_MASK) == 0) {
+ dev_err(idtfc3->dev, "invalid device");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int idtfc3_probe(struct platform_device *pdev)
+{
+ struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct idtfc3 *idtfc3;
+ int err;
+
+ idtfc3 = devm_kzalloc(&pdev->dev, sizeof(struct idtfc3), GFP_KERNEL);
+
+ if (!idtfc3)
+ return -ENOMEM;
+
+ idtfc3->dev = &pdev->dev;
+ idtfc3->mfd = pdev->dev.parent;
+ idtfc3->lock = &ddata->lock;
+ idtfc3->regmap = ddata->regmap;
+
+ mutex_lock(idtfc3->lock);
+
+ err = idtfc3_check_device_compatibility(idtfc3);
+ if (err) {
+ mutex_unlock(idtfc3->lock);
+ return err;
+ }
+
+ err = idtfc3_load_firmware(idtfc3);
+ if (err) {
+ if (err == -ENOENT) {
+ mutex_unlock(idtfc3->lock);
+ return -EPROBE_DEFER;
+ }
+ dev_warn(idtfc3->dev, "loading firmware failed with %d", err);
+ }
+
+ err = idtfc3_enable_ptp(idtfc3);
+ if (err) {
+ dev_err(idtfc3->dev, "idtfc3_enable_ptp failed with %d", err);
+ mutex_unlock(idtfc3->lock);
+ return err;
+ }
+
+ mutex_unlock(idtfc3->lock);
+
+ platform_set_drvdata(pdev, idtfc3);
+
+ return 0;
+}
+
+static void idtfc3_remove(struct platform_device *pdev)
+{
+ struct idtfc3 *idtfc3 = platform_get_drvdata(pdev);
+
+ ptp_clock_unregister(idtfc3->ptp_clock);
+}
+
+static struct platform_driver idtfc3_driver = {
+ .driver = {
+ .name = "rc38xxx-phc",
+ },
+ .probe = idtfc3_probe,
+ .remove_new = idtfc3_remove,
+};
+
+module_platform_driver(idtfc3_driver);
diff --git a/drivers/ptp/ptp_fc3.h b/drivers/ptp/ptp_fc3.h
new file mode 100644
index 000000000000..897101579207
--- /dev/null
+++ b/drivers/ptp/ptp_fc3.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * PTP hardware clock driver for the FemtoClock3 family of timing and
+ * synchronization devices.
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef PTP_IDTFC3_H
+#define PTP_IDTFC3_H
+
+#include <linux/ktime.h>
+#include <linux/ptp_clock.h>
+#include <linux/regmap.h>
+
+#define FW_FILENAME "idtfc3.bin"
+
+#define MAX_FFO_PPB (244000)
+#define TDC_GET_PERIOD (10)
+
+struct idtfc3 {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ struct device *dev;
+ /* Mutex to protect operations from being interrupted */
+ struct mutex *lock;
+ struct device *mfd;
+ struct regmap *regmap;
+ struct idtfc3_hw_param hw_param;
+ u32 sub_sync_count;
+ u32 ns_per_sync;
+ int tdc_offset_sign;
+ u64 tdc_apll_freq;
+ u32 time_ref_freq;
+ u16 fod_n;
+ u8 lpf_mode;
+ /* Time counter */
+ u32 last_counter;
+ s64 ns;
+ u32 ns_per_counter;
+ u32 tc_update_period;
+ u32 tc_write_timeout;
+ s64 tod_write_overhead;
+};
+
+#endif /* PTP_IDTFC3_H */
diff --git a/drivers/ptp/ptp_kvm_common.c b/drivers/ptp/ptp_kvm_common.c
index 2418977989be..15ccb7dd2ed0 100644
--- a/drivers/ptp/ptp_kvm_common.c
+++ b/drivers/ptp/ptp_kvm_common.c
@@ -28,15 +28,15 @@ static int ptp_kvm_get_time_fn(ktime_t *device_time,
struct system_counterval_t *system_counter,
void *ctx)
{
- long ret;
- u64 cycle;
+ enum clocksource_ids cs_id;
struct timespec64 tspec;
- struct clocksource *cs;
+ u64 cycle;
+ int ret;
spin_lock(&kvm_ptp_lock);
preempt_disable_notrace();
- ret = kvm_arch_ptp_get_crosststamp(&cycle, &tspec, &cs);
+ ret = kvm_arch_ptp_get_crosststamp(&cycle, &tspec, &cs_id);
if (ret) {
spin_unlock(&kvm_ptp_lock);
preempt_enable_notrace();
@@ -46,7 +46,7 @@ static int ptp_kvm_get_time_fn(ktime_t *device_time,
preempt_enable_notrace();
system_counter->cycles = cycle;
- system_counter->cs = cs;
+ system_counter->cs_id = cs_id;
*device_time = timespec64_to_ktime(tspec);
diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
index 902844cc1a17..617c8d6706d3 100644
--- a/drivers/ptp/ptp_kvm_x86.c
+++ b/drivers/ptp/ptp_kvm_x86.c
@@ -93,7 +93,7 @@ int kvm_arch_ptp_get_clock(struct timespec64 *ts)
}
int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
- struct clocksource **cs)
+ enum clocksource_ids *cs_id)
{
struct pvclock_vcpu_time_info *src;
unsigned int version;
@@ -123,7 +123,7 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
*cycle = __pvclock_read_cycles(src, clock_pair->tsc);
} while (pvclock_read_retry(src, version));
- *cs = &kvm_clock;
+ *cs_id = CSID_X86_KVM_CLK;
return 0;
}
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 5f858e426bbd..6506cfb89aa9 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -34,6 +34,9 @@
#define PCI_VENDOR_ID_OROLIA 0x1ad7
#define PCI_DEVICE_ID_OROLIA_ARTCARD 0xa000
+#define PCI_VENDOR_ID_ADVA 0xad5a
+#define PCI_DEVICE_ID_ADVA_TIMECARD 0x0400
+
static struct class timecard_class = {
.name = "timecard",
};
@@ -63,6 +66,13 @@ struct ocp_reg {
u32 status_drift;
};
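+
+/* Per-board PI servo coefficients; 2^-16 fixed point (0x2000 = 1/8) */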
+struct ptp_ocp_servo_conf {
+ u32 servo_offset_p;
+ u32 servo_offset_i;
+ u32 servo_drift_p;
+ u32 servo_drift_i;
+};
+
#define OCP_CTRL_ENABLE BIT(0)
#define OCP_CTRL_ADJUST_TIME BIT(1)
#define OCP_CTRL_ADJUST_OFFSET BIT(2)
@@ -397,10 +407,14 @@ static int ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr);
static int ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
+static int ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
+
static const struct ocp_attr_group fb_timecard_groups[];
static const struct ocp_attr_group art_timecard_groups[];
+static const struct ocp_attr_group adva_timecard_groups[];
+
struct ptp_ocp_eeprom_map {
u16 off;
u16 len;
@@ -700,6 +714,12 @@ static struct ocp_resource ocp_fb_resource[] = {
},
{
.setup = ptp_ocp_fb_board_init,
+ .extra = &(struct ptp_ocp_servo_conf) {
+ .servo_offset_p = 0x2000,
+ .servo_offset_i = 0x1000,
+ .servo_drift_p = 0,
+ .servo_drift_i = 0,
+ },
},
{ }
};
@@ -831,6 +851,170 @@ static struct ocp_resource ocp_art_resource[] = {
},
{
.setup = ptp_ocp_art_board_init,
+ .extra = &(struct ptp_ocp_servo_conf) {
+ .servo_offset_p = 0x2000,
+ .servo_offset_i = 0x1000,
+ .servo_drift_p = 0,
+ .servo_drift_i = 0,
+ },
+ },
+ { }
+};
+
+static struct ocp_resource ocp_adva_resource[] = {
+ {
+ OCP_MEM_RESOURCE(reg),
+ .offset = 0x01000000, .size = 0x10000,
+ },
+ {
+ OCP_EXT_RESOURCE(ts0),
+ .offset = 0x01010000, .size = 0x10000, .irq_vec = 1,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 0,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts1),
+ .offset = 0x01020000, .size = 0x10000, .irq_vec = 2,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 1,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts2),
+ .offset = 0x01060000, .size = 0x10000, .irq_vec = 6,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 2,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ /* Timestamp for PHC and/or PPS generator */
+ {
+ OCP_EXT_RESOURCE(pps),
+ .offset = 0x010C0000, .size = 0x10000, .irq_vec = 0,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 5,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(signal_out[0]),
+ .offset = 0x010D0000, .size = 0x10000, .irq_vec = 11,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 1,
+ .irq_fcn = ptp_ocp_signal_irq,
+ .enable = ptp_ocp_signal_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(signal_out[1]),
+ .offset = 0x010E0000, .size = 0x10000, .irq_vec = 12,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 2,
+ .irq_fcn = ptp_ocp_signal_irq,
+ .enable = ptp_ocp_signal_enable,
+ },
+ },
+ {
+ OCP_MEM_RESOURCE(pps_to_ext),
+ .offset = 0x01030000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(pps_to_clk),
+ .offset = 0x01040000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(tod),
+ .offset = 0x01050000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(image),
+ .offset = 0x00020000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(pps_select),
+ .offset = 0x00130000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(sma_map1),
+ .offset = 0x00140000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(sma_map2),
+ .offset = 0x00220000, .size = 0x1000,
+ },
+ {
+ OCP_SERIAL_RESOURCE(gnss_port),
+ .offset = 0x00160000 + 0x1000, .irq_vec = 3,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 9600,
+ },
+ },
+ {
+ OCP_SERIAL_RESOURCE(mac_port),
+ .offset = 0x00180000 + 0x1000, .irq_vec = 5,
+ .extra = &(struct ptp_ocp_serial_port) {
+ .baud = 115200,
+ },
+ },
+ {
+ OCP_MEM_RESOURCE(freq_in[0]),
+ .offset = 0x01200000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(freq_in[1]),
+ .offset = 0x01210000, .size = 0x10000,
+ },
+ {
+ OCP_SPI_RESOURCE(spi_flash),
+ .offset = 0x00310400, .size = 0x10000, .irq_vec = 9,
+ .extra = &(struct ptp_ocp_flash_info) {
+ .name = "spi_altera", .pci_offset = 0,
+ .data_size = sizeof(struct altera_spi_platform_data),
+ .data = &(struct altera_spi_platform_data) {
+ .num_chipselect = 1,
+ .num_devices = 1,
+ .devices = &(struct spi_board_info) {
+ .modalias = "spi-nor",
+ },
+ },
+ },
+ },
+ {
+ OCP_I2C_RESOURCE(i2c_ctrl),
+ .offset = 0x150000, .size = 0x100, .irq_vec = 7,
+ .extra = &(struct ptp_ocp_i2c_info) {
+ .name = "ocores-i2c",
+ .fixed_rate = 50000000,
+ .data_size = sizeof(struct ocores_i2c_platform_data),
+ .data = &(struct ocores_i2c_platform_data) {
+ .clock_khz = 50000,
+ .bus_khz = 100,
+ .reg_io_width = 4, // 32-bit/4-byte
+ .reg_shift = 2, // 32-bit addressing
+ .num_devices = 2,
+ .devices = (struct i2c_board_info[]) {
+ { I2C_BOARD_INFO("24c02", 0x50) },
+ { I2C_BOARD_INFO("24mac402", 0x58),
+ .platform_data = "mac" },
+ },
+ },
+ },
+ },
+ {
+ .setup = ptp_ocp_adva_board_init,
+ .extra = &(struct ptp_ocp_servo_conf) {
+ .servo_offset_p = 0xc000,
+ .servo_offset_i = 0x1000,
+ .servo_drift_p = 0,
+ .servo_drift_i = 0,
+ },
},
{ }
};
@@ -839,6 +1023,7 @@ static const struct pci_device_id ptp_ocp_pcidev_id[] = {
{ PCI_DEVICE_DATA(FACEBOOK, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(CELESTICA, TIMECARD, &ocp_fb_resource) },
{ PCI_DEVICE_DATA(OROLIA, ARTCARD, &ocp_art_resource) },
+ { PCI_DEVICE_DATA(ADVA, TIMECARD, &ocp_adva_resource) },
{ }
};
MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id);
@@ -917,6 +1102,30 @@ static const struct ocp_selector ptp_ocp_art_sma_out[] = {
{ }
};
+static const struct ocp_selector ptp_ocp_adva_sma_in[] = {
+ { .name = "10Mhz", .value = 0x0000, .frequency = 10000000},
+ { .name = "PPS1", .value = 0x0001, .frequency = 1 },
+ { .name = "PPS2", .value = 0x0002, .frequency = 1 },
+ { .name = "TS1", .value = 0x0004, .frequency = 0 },
+ { .name = "TS2", .value = 0x0008, .frequency = 0 },
+ { .name = "FREQ1", .value = 0x0100, .frequency = 0 },
+ { .name = "FREQ2", .value = 0x0200, .frequency = 0 },
+ { .name = "None", .value = SMA_DISABLE, .frequency = 0 },
+ { }
+};
+
+static const struct ocp_selector ptp_ocp_adva_sma_out[] = {
+ { .name = "10Mhz", .value = 0x0000, .frequency = 10000000},
+ { .name = "PHC", .value = 0x0001, .frequency = 1 },
+ { .name = "MAC", .value = 0x0002, .frequency = 1 },
+ { .name = "GNSS1", .value = 0x0004, .frequency = 1 },
+ { .name = "GEN1", .value = 0x0040 },
+ { .name = "GEN2", .value = 0x0080 },
+ { .name = "GND", .value = 0x2000 },
+ { .name = "VCC", .value = 0x4000 },
+ { }
+};
+
struct ocp_sma_op {
const struct ocp_selector *tbl[2];
void (*init)(struct ptp_ocp *bp);
@@ -1363,7 +1572,7 @@ ptp_ocp_estimate_pci_timing(struct ptp_ocp *bp)
}
static int
-ptp_ocp_init_clock(struct ptp_ocp *bp)
+ptp_ocp_init_clock(struct ptp_ocp *bp, struct ptp_ocp_servo_conf *servo_conf)
{
struct timespec64 ts;
u32 ctrl;
@@ -1371,12 +1580,11 @@ ptp_ocp_init_clock(struct ptp_ocp *bp)
ctrl = OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
- /* NO DRIFT Correction */
- /* offset_p:i 1/8, offset_i: 1/16, drift_p: 0, drift_i: 0 */
- iowrite32(0x2000, &bp->reg->servo_offset_p);
- iowrite32(0x1000, &bp->reg->servo_offset_i);
- iowrite32(0, &bp->reg->servo_drift_p);
- iowrite32(0, &bp->reg->servo_drift_i);
+ /* servo configuration */
+ iowrite32(servo_conf->servo_offset_p, &bp->reg->servo_offset_p);
+ iowrite32(servo_conf->servo_offset_i, &bp->reg->servo_offset_i);
+ iowrite32(servo_conf->servo_drift_p, &bp->reg->servo_drift_p);
+	iowrite32(servo_conf->servo_drift_i, &bp->reg->servo_drift_i);
/* latch servo values */
ctrl |= OCP_CTRL_ADJUST_SERVO;
@@ -2348,6 +2556,14 @@ static const struct ocp_sma_op ocp_fb_sma_op = {
.set_output = ptp_ocp_sma_fb_set_output,
};
+static const struct ocp_sma_op ocp_adva_sma_op = {
+ .tbl = { ptp_ocp_adva_sma_in, ptp_ocp_adva_sma_out },
+ .init = ptp_ocp_sma_fb_init,
+ .get = ptp_ocp_sma_fb_get,
+ .set_inputs = ptp_ocp_sma_fb_set_inputs,
+ .set_output = ptp_ocp_sma_fb_set_output,
+};
+
static int
ptp_ocp_set_pins(struct ptp_ocp *bp)
{
@@ -2427,7 +2643,7 @@ ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
return err;
ptp_ocp_sma_init(bp);
- return ptp_ocp_init_clock(bp);
+ return ptp_ocp_init_clock(bp, r->extra);
}
static bool
@@ -2589,7 +2805,44 @@ ptp_ocp_art_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
if (err)
return err;
- return ptp_ocp_init_clock(bp);
+ return ptp_ocp_init_clock(bp, r->extra);
+}
+
+/* ADVA specific board initializer; last "resource" registered. */
+static int
+ptp_ocp_adva_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ int err;
+ u32 version;
+
+ bp->flash_start = 0xA00000;
+ bp->eeprom_map = fb_eeprom_map;
+ bp->sma_op = &ocp_adva_sma_op;
+
+ version = ioread32(&bp->image->version);
+ /* if lower 16 bits are empty, this is the fw loader. */
+ if ((version & 0xffff) == 0) {
+ version = version >> 16;
+ bp->fw_loader = true;
+ }
+ bp->fw_tag = 3;
+ bp->fw_version = version & 0xffff;
+ bp->fw_cap = OCP_CAP_BASIC | OCP_CAP_SIGNAL | OCP_CAP_FREQ;
+
+ ptp_ocp_tod_init(bp);
+ ptp_ocp_nmea_out_init(bp);
+ ptp_ocp_signal_init(bp);
+
+ err = ptp_ocp_attr_group_add(bp, adva_timecard_groups);
+ if (err)
+ return err;
+
+ err = ptp_ocp_set_pins(bp);
+ if (err)
+ return err;
+ ptp_ocp_sma_init(bp);
+
+ return ptp_ocp_init_clock(bp, r->extra);
}
static ssize_t
@@ -3564,6 +3817,37 @@ static const struct ocp_attr_group art_timecard_groups[] = {
{ },
};
+static struct attribute *adva_timecard_attrs[] = {
+ &dev_attr_serialnum.attr,
+ &dev_attr_gnss_sync.attr,
+ &dev_attr_clock_source.attr,
+ &dev_attr_available_clock_sources.attr,
+ &dev_attr_sma1.attr,
+ &dev_attr_sma2.attr,
+ &dev_attr_sma3.attr,
+ &dev_attr_sma4.attr,
+ &dev_attr_available_sma_inputs.attr,
+ &dev_attr_available_sma_outputs.attr,
+ &dev_attr_clock_status_drift.attr,
+ &dev_attr_clock_status_offset.attr,
+ &dev_attr_ts_window_adjust.attr,
+ &dev_attr_tod_correction.attr,
+ NULL,
+};
+
+static const struct attribute_group adva_timecard_group = {
+ .attrs = adva_timecard_attrs,
+};
+
+static const struct ocp_attr_group adva_timecard_groups[] = {
+ { .cap = OCP_CAP_BASIC, .group = &adva_timecard_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal0_group },
+ { .cap = OCP_CAP_SIGNAL, .group = &fb_timecard_signal1_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq0_group },
+ { .cap = OCP_CAP_FREQ, .group = &fb_timecard_freq1_group },
+ { },
+};
+
static void
gpio_input_map(char *buf, struct ptp_ocp *bp, u16 map[][2], u16 bit,
const char *def)
@@ -4209,10 +4493,11 @@ ptp_ocp_detach(struct ptp_ocp *bp)
device_unregister(&bp->dev);
}
-static int ptp_ocp_dpll_lock_status_get(const struct dpll_device *dpll,
- void *priv,
- enum dpll_lock_status *status,
- struct netlink_ext_ack *extack)
+static int
+ptp_ocp_dpll_lock_status_get(const struct dpll_device *dpll, void *priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack)
{
struct ptp_ocp *bp = priv;
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 45f9002a5dca..18934e28469e 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -120,7 +120,7 @@ static inline bool ptp_clock_freerun(struct ptp_clock *ptp)
return ptp_vclock_in_use(ptp);
}
-extern struct class *ptp_class;
+extern const struct class ptp_class;
/*
* see ptp_chardev.c
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index f7a499a1bd39..a15460aaa03b 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -24,8 +24,7 @@ static ssize_t max_phase_adjustment_show(struct device *dev,
{
struct ptp_clock *ptp = dev_get_drvdata(dev);
- return snprintf(page, PAGE_SIZE - 1, "%d\n",
- ptp->info->getmaxphase(ptp->info));
+ return sysfs_emit(page, "%d\n", ptp->info->getmaxphase(ptp->info));
}
static DEVICE_ATTR_RO(max_phase_adjustment);
@@ -34,7 +33,7 @@ static ssize_t var##_show(struct device *dev, \
struct device_attribute *attr, char *page) \
{ \
struct ptp_clock *ptp = dev_get_drvdata(dev); \
- return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->var); \
+ return sysfs_emit(page, "%d\n", ptp->info->var); \
} \
static DEVICE_ATTR(name, 0444, var##_show, NULL);
@@ -102,8 +101,8 @@ static ssize_t extts_fifo_show(struct device *dev,
if (!qcnt)
goto out;
- cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
- event.index, event.t.sec, event.t.nsec);
+ cnt = sysfs_emit(page, "%u %lld %u\n",
+ event.index, event.t.sec, event.t.nsec);
out:
return cnt;
}
@@ -194,7 +193,7 @@ static ssize_t n_vclocks_show(struct device *dev,
if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
return -ERESTARTSYS;
- size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks);
+ size = sysfs_emit(page, "%u\n", ptp->n_vclocks);
mutex_unlock(&ptp->n_vclocks_mux);
@@ -270,7 +269,7 @@ static ssize_t max_vclocks_show(struct device *dev,
struct ptp_clock *ptp = dev_get_drvdata(dev);
ssize_t size;
- size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->max_vclocks);
+ size = sysfs_emit(page, "%u\n", ptp->max_vclocks);
return size;
}
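All the hunks in this file apply one rule: sysfs_emit() already knows the buffer is a full page, so the hand-rolled PAGE_SIZE / PAGE_SIZE - 1 bookkeeping can go. A minimal sketch of the resulting shape, using a hypothetical attribute:

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *page)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);

	/* sysfs_emit() bounds the write to PAGE_SIZE internally. */
	return sysfs_emit(page, "%d\n", ptp->index);
}
static DEVICE_ATTR_RO(example);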
diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c
index dcf752c9e045..7febfdcbde8b 100644
--- a/drivers/ptp/ptp_vclock.c
+++ b/drivers/ptp/ptp_vclock.c
@@ -241,7 +241,7 @@ int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
return num;
snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index);
- dev = class_find_device_by_name(ptp_class, name);
+ dev = class_find_device_by_name(&ptp_class, name);
if (!dev)
return num;
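Taking the address here assumes ptp_class is now a static const object rather than a pointer returned by class_create(). Its definition lives outside this excerpt; presumably it is along these lines:

const struct class ptp_class = {
	.name = "ptp",
	/* .dev_groups etc. unchanged; see ptp_clock.c */
};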
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index f2728ee787d7..d70f793ce4b3 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -24,310 +24,11 @@
#define CREATE_TRACE_POINTS
#include <trace/events/pwm.h>
-static DEFINE_MUTEX(pwm_lookup_lock);
-static LIST_HEAD(pwm_lookup_list);
-
/* protects access to pwm_chips */
static DEFINE_MUTEX(pwm_lock);
static DEFINE_IDR(pwm_chips);
-static struct pwm_chip *pwmchip_find_by_name(const char *name)
-{
- struct pwm_chip *chip;
- unsigned long id, tmp;
-
- if (!name)
- return NULL;
-
- mutex_lock(&pwm_lock);
-
- idr_for_each_entry_ul(&pwm_chips, chip, tmp, id) {
- const char *chip_name = dev_name(chip->dev);
-
- if (chip_name && strcmp(chip_name, name) == 0) {
- mutex_unlock(&pwm_lock);
- return chip;
- }
- }
-
- mutex_unlock(&pwm_lock);
-
- return NULL;
-}
-
-static int pwm_device_request(struct pwm_device *pwm, const char *label)
-{
- int err;
- struct pwm_chip *chip = pwm->chip;
- const struct pwm_ops *ops = chip->ops;
-
- if (test_bit(PWMF_REQUESTED, &pwm->flags))
- return -EBUSY;
-
- if (!try_module_get(chip->owner))
- return -ENODEV;
-
- if (ops->request) {
- err = ops->request(chip, pwm);
- if (err) {
- module_put(chip->owner);
- return err;
- }
- }
-
- if (ops->get_state) {
- /*
- * Zero-initialize state because most drivers are unaware of
- * .usage_power. The other members of state are supposed to be
- * set by lowlevel drivers. We still initialize the whole
- * structure for simplicity even though this might paper over
- * faulty implementations of .get_state().
- */
- struct pwm_state state = { 0, };
-
- err = ops->get_state(chip, pwm, &state);
- trace_pwm_get(pwm, &state, err);
-
- if (!err)
- pwm->state = state;
-
- if (IS_ENABLED(CONFIG_PWM_DEBUG))
- pwm->last = pwm->state;
- }
-
- set_bit(PWMF_REQUESTED, &pwm->flags);
- pwm->label = label;
-
- return 0;
-}
-
-struct pwm_device *
-of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *args)
-{
- struct pwm_device *pwm;
-
- if (chip->of_pwm_n_cells < 2)
- return ERR_PTR(-EINVAL);
-
- /* flags in the third cell are optional */
- if (args->args_count < 2)
- return ERR_PTR(-EINVAL);
-
- if (args->args[0] >= chip->npwm)
- return ERR_PTR(-EINVAL);
-
- pwm = pwm_request_from_chip(chip, args->args[0], NULL);
- if (IS_ERR(pwm))
- return pwm;
-
- pwm->args.period = args->args[1];
- pwm->args.polarity = PWM_POLARITY_NORMAL;
-
- if (chip->of_pwm_n_cells >= 3) {
- if (args->args_count > 2 && args->args[2] & PWM_POLARITY_INVERTED)
- pwm->args.polarity = PWM_POLARITY_INVERSED;
- }
-
- return pwm;
-}
-EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
-
-struct pwm_device *
-of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
-{
- struct pwm_device *pwm;
-
- if (chip->of_pwm_n_cells < 1)
- return ERR_PTR(-EINVAL);
-
- /* validate that one cell is specified, optionally with flags */
- if (args->args_count != 1 && args->args_count != 2)
- return ERR_PTR(-EINVAL);
-
- pwm = pwm_request_from_chip(chip, 0, NULL);
- if (IS_ERR(pwm))
- return pwm;
-
- pwm->args.period = args->args[0];
- pwm->args.polarity = PWM_POLARITY_NORMAL;
-
- if (args->args_count == 2 && args->args[1] & PWM_POLARITY_INVERTED)
- pwm->args.polarity = PWM_POLARITY_INVERSED;
-
- return pwm;
-}
-EXPORT_SYMBOL_GPL(of_pwm_single_xlate);
-
-static void of_pwmchip_add(struct pwm_chip *chip)
-{
- if (!chip->dev || !chip->dev->of_node)
- return;
-
- if (!chip->of_xlate) {
- u32 pwm_cells;
-
- if (of_property_read_u32(chip->dev->of_node, "#pwm-cells",
- &pwm_cells))
- pwm_cells = 2;
-
- chip->of_xlate = of_pwm_xlate_with_flags;
- chip->of_pwm_n_cells = pwm_cells;
- }
-
- of_node_get(chip->dev->of_node);
-}
-
-static void of_pwmchip_remove(struct pwm_chip *chip)
-{
- if (chip->dev)
- of_node_put(chip->dev->of_node);
-}
-
-static bool pwm_ops_check(const struct pwm_chip *chip)
-{
- const struct pwm_ops *ops = chip->ops;
-
- if (!ops->apply)
- return false;
-
- if (IS_ENABLED(CONFIG_PWM_DEBUG) && !ops->get_state)
- dev_warn(chip->dev,
- "Please implement the .get_state() callback\n");
-
- return true;
-}
-
-/**
- * __pwmchip_add() - register a new PWM chip
- * @chip: the PWM chip to add
- * @owner: reference to the module providing the chip.
- *
- * Register a new PWM chip. @owner is supposed to be THIS_MODULE, use the
- * pwmchip_add wrapper to do this right.
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
-{
- unsigned int i;
- int ret;
-
- if (!chip || !chip->dev || !chip->ops || !chip->npwm)
- return -EINVAL;
-
- if (!pwm_ops_check(chip))
- return -EINVAL;
-
- chip->owner = owner;
-
- chip->pwms = kcalloc(chip->npwm, sizeof(*chip->pwms), GFP_KERNEL);
- if (!chip->pwms)
- return -ENOMEM;
-
- mutex_lock(&pwm_lock);
-
- ret = idr_alloc(&pwm_chips, chip, 0, 0, GFP_KERNEL);
- if (ret < 0) {
- mutex_unlock(&pwm_lock);
- kfree(chip->pwms);
- return ret;
- }
-
- chip->id = ret;
-
- for (i = 0; i < chip->npwm; i++) {
- struct pwm_device *pwm = &chip->pwms[i];
-
- pwm->chip = chip;
- pwm->hwpwm = i;
- }
-
- mutex_unlock(&pwm_lock);
-
- if (IS_ENABLED(CONFIG_OF))
- of_pwmchip_add(chip);
-
- pwmchip_sysfs_export(chip);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__pwmchip_add);
-
-/**
- * pwmchip_remove() - remove a PWM chip
- * @chip: the PWM chip to remove
- *
- * Removes a PWM chip.
- */
-void pwmchip_remove(struct pwm_chip *chip)
-{
- pwmchip_sysfs_unexport(chip);
-
- if (IS_ENABLED(CONFIG_OF))
- of_pwmchip_remove(chip);
-
- mutex_lock(&pwm_lock);
-
- idr_remove(&pwm_chips, chip->id);
-
- mutex_unlock(&pwm_lock);
-
- kfree(chip->pwms);
-}
-EXPORT_SYMBOL_GPL(pwmchip_remove);
-
-static void devm_pwmchip_remove(void *data)
-{
- struct pwm_chip *chip = data;
-
- pwmchip_remove(chip);
-}
-
-int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner)
-{
- int ret;
-
- ret = __pwmchip_add(chip, owner);
- if (ret)
- return ret;
-
- return devm_add_action_or_reset(dev, devm_pwmchip_remove, chip);
-}
-EXPORT_SYMBOL_GPL(__devm_pwmchip_add);
-
-/**
- * pwm_request_from_chip() - request a PWM device relative to a PWM chip
- * @chip: PWM chip
- * @index: per-chip index of the PWM to request
- * @label: a literal description string of this PWM
- *
- * Returns: A pointer to the PWM device at the given index of the given PWM
- * chip. A negative error code is returned if the index is not valid for the
- * specified PWM chip or if the PWM device cannot be requested.
- */
-struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
- unsigned int index,
- const char *label)
-{
- struct pwm_device *pwm;
- int err;
-
- if (!chip || index >= chip->npwm)
- return ERR_PTR(-EINVAL);
-
- mutex_lock(&pwm_lock);
- pwm = &chip->pwms[index];
-
- err = pwm_device_request(pwm, label);
- if (err < 0)
- pwm = ERR_PTR(err);
-
- mutex_unlock(&pwm_lock);
- return pwm;
-}
-EXPORT_SYMBOL_GPL(pwm_request_from_chip);
-
static void pwm_apply_debug(struct pwm_device *pwm,
const struct pwm_state *state)
{
@@ -370,18 +71,18 @@ static void pwm_apply_debug(struct pwm_device *pwm,
if (s2.polarity != state->polarity &&
state->duty_cycle < state->period)
- dev_warn(chip->dev, ".apply ignored .polarity\n");
+ dev_warn(pwmchip_parent(chip), ".apply ignored .polarity\n");
if (state->enabled &&
last->polarity == state->polarity &&
last->period > s2.period &&
last->period <= state->period)
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
".apply didn't pick the best available period (requested: %llu, applied: %llu, possible: %llu)\n",
state->period, s2.period, last->period);
if (state->enabled && state->period < s2.period)
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
".apply is supposed to round down period (requested: %llu, applied: %llu)\n",
state->period, s2.period);
@@ -390,20 +91,20 @@ static void pwm_apply_debug(struct pwm_device *pwm,
last->period == s2.period &&
last->duty_cycle > s2.duty_cycle &&
last->duty_cycle <= state->duty_cycle)
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
".apply didn't pick the best available duty cycle (requested: %llu/%llu, applied: %llu/%llu, possible: %llu/%llu)\n",
state->duty_cycle, state->period,
s2.duty_cycle, s2.period,
last->duty_cycle, last->period);
if (state->enabled && state->duty_cycle < s2.duty_cycle)
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
".apply is supposed to round down duty_cycle (requested: %llu/%llu, applied: %llu/%llu)\n",
state->duty_cycle, state->period,
s2.duty_cycle, s2.period);
if (!state->enabled && s2.enabled && s2.duty_cycle > 0)
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
"requested disabled, but yielded enabled with duty > 0\n");
/* reapply the state that the driver reported being configured. */
@@ -411,7 +112,7 @@ static void pwm_apply_debug(struct pwm_device *pwm,
trace_pwm_apply(pwm, &s1, err);
if (err) {
*last = s1;
- dev_err(chip->dev, "failed to reapply current setting\n");
+ dev_err(pwmchip_parent(chip), "failed to reapply current setting\n");
return;
}
@@ -426,7 +127,7 @@ static void pwm_apply_debug(struct pwm_device *pwm,
s1.polarity != last->polarity ||
(s1.enabled && s1.period != last->period) ||
(s1.enabled && s1.duty_cycle != last->duty_cycle)) {
- dev_err(chip->dev,
+ dev_err(pwmchip_parent(chip),
".apply is not idempotent (ena=%d pol=%d %llu/%llu) -> (ena=%d pol=%d %llu/%llu)\n",
s1.enabled, s1.polarity, s1.duty_cycle, s1.period,
last->enabled, last->polarity, last->duty_cycle,
@@ -524,33 +225,6 @@ int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state)
EXPORT_SYMBOL_GPL(pwm_apply_atomic);
/**
- * pwm_capture() - capture and report a PWM signal
- * @pwm: PWM device
- * @result: structure to fill with capture result
- * @timeout: time to wait, in milliseconds, before giving up on capture
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
- unsigned long timeout)
-{
- int err;
-
- if (!pwm || !pwm->chip->ops)
- return -EINVAL;
-
- if (!pwm->chip->ops->capture)
- return -ENOSYS;
-
- mutex_lock(&pwm_lock);
- err = pwm->chip->ops->capture(pwm->chip, pwm, result, timeout);
- mutex_unlock(&pwm_lock);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(pwm_capture);
-
-/**
* pwm_adjust_config() - adjust the current PWM config to the PWM arguments
* @pwm: PWM device
*
@@ -606,24 +280,367 @@ int pwm_adjust_config(struct pwm_device *pwm)
}
EXPORT_SYMBOL_GPL(pwm_adjust_config);
-static struct pwm_chip *fwnode_to_pwmchip(struct fwnode_handle *fwnode)
+/**
+ * pwm_capture() - capture and report a PWM signal
+ * @pwm: PWM device
+ * @result: structure to fill with capture result
+ * @timeout: time to wait, in milliseconds, before giving up on capture
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
+ unsigned long timeout)
+{
+ int err;
+
+ if (!pwm || !pwm->chip->ops)
+ return -EINVAL;
+
+ if (!pwm->chip->ops->capture)
+ return -ENOSYS;
+
+ mutex_lock(&pwm_lock);
+ err = pwm->chip->ops->capture(pwm->chip, pwm, result, timeout);
+ mutex_unlock(&pwm_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pwm_capture);
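The helper is moved verbatim; for orientation, a hedged consumer-side sketch of its use (variable names invented):

	struct pwm_capture cap;
	int err;

	/* Measure the waveform on an input-capable PWM, waiting up to 100 ms. */
	err = pwm_capture(pwm, &cap, 100);
	if (!err)
		pr_info("period %llu ns, duty %llu ns\n",
			cap.period, cap.duty_cycle);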
+
+static struct pwm_chip *pwmchip_find_by_name(const char *name)
{
struct pwm_chip *chip;
unsigned long id, tmp;
+ if (!name)
+ return NULL;
+
mutex_lock(&pwm_lock);
- idr_for_each_entry_ul(&pwm_chips, chip, tmp, id)
- if (chip->dev && device_match_fwnode(chip->dev, fwnode)) {
+ idr_for_each_entry_ul(&pwm_chips, chip, tmp, id) {
+ const char *chip_name = dev_name(pwmchip_parent(chip));
+
+ if (chip_name && strcmp(chip_name, name) == 0) {
mutex_unlock(&pwm_lock);
return chip;
}
+ }
mutex_unlock(&pwm_lock);
- return ERR_PTR(-EPROBE_DEFER);
+ return NULL;
+}
+
+static int pwm_device_request(struct pwm_device *pwm, const char *label)
+{
+ int err;
+ struct pwm_chip *chip = pwm->chip;
+ const struct pwm_ops *ops = chip->ops;
+
+ if (test_bit(PWMF_REQUESTED, &pwm->flags))
+ return -EBUSY;
+
+ if (!try_module_get(chip->owner))
+ return -ENODEV;
+
+ if (ops->request) {
+ err = ops->request(chip, pwm);
+ if (err) {
+ module_put(chip->owner);
+ return err;
+ }
+ }
+
+ if (ops->get_state) {
+ /*
+ * Zero-initialize state because most drivers are unaware of
+ * .usage_power. The other members of state are supposed to be
+ * set by lowlevel drivers. We still initialize the whole
+ * structure for simplicity even though this might paper over
+ * faulty implementations of .get_state().
+ */
+ struct pwm_state state = { 0, };
+
+ err = ops->get_state(chip, pwm, &state);
+ trace_pwm_get(pwm, &state, err);
+
+ if (!err)
+ pwm->state = state;
+
+ if (IS_ENABLED(CONFIG_PWM_DEBUG))
+ pwm->last = pwm->state;
+ }
+
+ set_bit(PWMF_REQUESTED, &pwm->flags);
+ pwm->label = label;
+
+ return 0;
+}
+
+/**
+ * pwm_request_from_chip() - request a PWM device relative to a PWM chip
+ * @chip: PWM chip
+ * @index: per-chip index of the PWM to request
+ * @label: a literal description string of this PWM
+ *
+ * Returns: A pointer to the PWM device at the given index of the given PWM
+ * chip. A negative error code is returned if the index is not valid for the
+ * specified PWM chip or if the PWM device cannot be requested.
+ */
+struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ unsigned int index,
+ const char *label)
+{
+ struct pwm_device *pwm;
+ int err;
+
+ if (!chip || index >= chip->npwm)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&pwm_lock);
+ pwm = &chip->pwms[index];
+
+ err = pwm_device_request(pwm, label);
+ if (err < 0)
+ pwm = ERR_PTR(err);
+
+ mutex_unlock(&pwm_lock);
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(pwm_request_from_chip);
+
+struct pwm_device *
+of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ /* period in the second cell and flags in the third cell are optional */
+ if (args->args_count < 1)
+ return ERR_PTR(-EINVAL);
+
+ pwm = pwm_request_from_chip(chip, args->args[0], NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ if (args->args_count > 1)
+ pwm->args.period = args->args[1];
+
+ pwm->args.polarity = PWM_POLARITY_NORMAL;
+ if (args->args_count > 2 && args->args[2] & PWM_POLARITY_INVERTED)
+ pwm->args.polarity = PWM_POLARITY_INVERSED;
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
+
+struct pwm_device *
+of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ pwm = pwm_request_from_chip(chip, 0, NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ if (args->args_count > 0)
+ pwm->args.period = args->args[0];
+
+ pwm->args.polarity = PWM_POLARITY_NORMAL;
+ if (args->args_count > 1 && args->args[1] & PWM_POLARITY_INVERTED)
+ pwm->args.polarity = PWM_POLARITY_INVERSED;
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(of_pwm_single_xlate);
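With the up-front cell-count checks dropped, both xlate helpers treat period and flags as optional. Sketched as device-tree consumer specifiers for of_pwm_xlate_with_flags() (provider label hypothetical):

/*
 *   pwms = <&pwm 2>;                              index only
 *   pwms = <&pwm 2 40000>;                        index + period (ns)
 *   pwms = <&pwm 2 40000 PWM_POLARITY_INVERTED>;  index + period + flags
 */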
+
+#define PWMCHIP_ALIGN ARCH_DMA_MINALIGN
+
+static void *pwmchip_priv(struct pwm_chip *chip)
+{
+ return (void *)chip + ALIGN(sizeof(*chip), PWMCHIP_ALIGN);
+}
+
+/* This is the counterpart to pwmchip_alloc() */
+void pwmchip_put(struct pwm_chip *chip)
+{
+ kfree(chip);
+}
+EXPORT_SYMBOL_GPL(pwmchip_put);
+
+struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv)
+{
+ struct pwm_chip *chip;
+ size_t alloc_size;
+
+ alloc_size = size_add(ALIGN(sizeof(*chip), PWMCHIP_ALIGN), sizeof_priv);
+
+ chip = kzalloc(alloc_size, GFP_KERNEL);
+ if (!chip)
+ return ERR_PTR(-ENOMEM);
+
+ chip->dev = parent;
+ chip->npwm = npwm;
+
+ pwmchip_set_drvdata(chip, pwmchip_priv(chip));
+
+ return chip;
+}
+EXPORT_SYMBOL_GPL(pwmchip_alloc);
+
+static void devm_pwmchip_put(void *data)
+{
+ struct pwm_chip *chip = data;
+
+ pwmchip_put(chip);
+}
+
+struct pwm_chip *devm_pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv)
+{
+ struct pwm_chip *chip;
+ int ret;
+
+ chip = pwmchip_alloc(parent, npwm, sizeof_priv);
+ if (IS_ERR(chip))
+ return chip;
+
+ ret = devm_add_action_or_reset(parent, devm_pwmchip_put, chip);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chip;
+}
+EXPORT_SYMBOL_GPL(devm_pwmchip_alloc);
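These allocators define the pattern every driver conversion below follows: the pwm_chip and its driver-private data live in one allocation (aligned to ARCH_DMA_MINALIGN, presumably so the private area keeps kmalloc-like DMA alignment), and the private part is reached via pwmchip_get_drvdata(). A minimal probe sketch under those assumptions (all my_* names hypothetical):

struct my_pwm {
	void __iomem *base;
};

static int my_pwm_probe(struct platform_device *pdev)
{
	struct pwm_chip *chip;
	struct my_pwm *priv;

	/* One allocation: chip header plus sizeof(*priv) of driver data. */
	chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*priv));
	if (IS_ERR(chip))
		return PTR_ERR(chip);
	priv = pwmchip_get_drvdata(chip);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	chip->ops = &my_pwm_ops;	/* hypothetical pwm_ops */

	return devm_pwmchip_add(&pdev->dev, chip);
}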
+
+static void of_pwmchip_add(struct pwm_chip *chip)
+{
+ if (!pwmchip_parent(chip) || !pwmchip_parent(chip)->of_node)
+ return;
+
+ if (!chip->of_xlate)
+ chip->of_xlate = of_pwm_xlate_with_flags;
+
+ of_node_get(pwmchip_parent(chip)->of_node);
+}
+
+static void of_pwmchip_remove(struct pwm_chip *chip)
+{
+ if (pwmchip_parent(chip))
+ of_node_put(pwmchip_parent(chip)->of_node);
+}
+
+static bool pwm_ops_check(const struct pwm_chip *chip)
+{
+ const struct pwm_ops *ops = chip->ops;
+
+ if (!ops->apply)
+ return false;
+
+ if (IS_ENABLED(CONFIG_PWM_DEBUG) && !ops->get_state)
+ dev_warn(pwmchip_parent(chip),
+ "Please implement the .get_state() callback\n");
+
+ return true;
}
+/**
+ * __pwmchip_add() - register a new PWM chip
+ * @chip: the PWM chip to add
+ * @owner: reference to the module providing the chip.
+ *
+ * Register a new PWM chip. @owner is supposed to be THIS_MODULE, use the
+ * pwmchip_add wrapper to do this right.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
+{
+ unsigned int i;
+ int ret;
+
+ if (!chip || !pwmchip_parent(chip) || !chip->ops || !chip->npwm)
+ return -EINVAL;
+
+ if (!pwm_ops_check(chip))
+ return -EINVAL;
+
+ chip->owner = owner;
+
+ chip->pwms = kcalloc(chip->npwm, sizeof(*chip->pwms), GFP_KERNEL);
+ if (!chip->pwms)
+ return -ENOMEM;
+
+ mutex_lock(&pwm_lock);
+
+ ret = idr_alloc(&pwm_chips, chip, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ mutex_unlock(&pwm_lock);
+ kfree(chip->pwms);
+ return ret;
+ }
+
+ chip->id = ret;
+
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+
+ pwm->chip = chip;
+ pwm->hwpwm = i;
+ }
+
+ mutex_unlock(&pwm_lock);
+
+ if (IS_ENABLED(CONFIG_OF))
+ of_pwmchip_add(chip);
+
+ pwmchip_sysfs_export(chip);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__pwmchip_add);
+
+/**
+ * pwmchip_remove() - remove a PWM chip
+ * @chip: the PWM chip to remove
+ *
+ * Removes a PWM chip.
+ */
+void pwmchip_remove(struct pwm_chip *chip)
+{
+ pwmchip_sysfs_unexport(chip);
+
+ if (IS_ENABLED(CONFIG_OF))
+ of_pwmchip_remove(chip);
+
+ mutex_lock(&pwm_lock);
+
+ idr_remove(&pwm_chips, chip->id);
+
+ mutex_unlock(&pwm_lock);
+
+ kfree(chip->pwms);
+}
+EXPORT_SYMBOL_GPL(pwmchip_remove);
+
+static void devm_pwmchip_remove(void *data)
+{
+ struct pwm_chip *chip = data;
+
+ pwmchip_remove(chip);
+}
+
+int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner)
+{
+ int ret;
+
+ ret = __pwmchip_add(chip, owner);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_pwmchip_remove, chip);
+}
+EXPORT_SYMBOL_GPL(__devm_pwmchip_add);
+
static struct device_link *pwm_device_link_add(struct device *dev,
struct pwm_device *pwm)
{
@@ -635,21 +652,39 @@ static struct device_link *pwm_device_link_add(struct device *dev,
* impact the PM sequence ordering: the PWM supplier may get
* suspended before the consumer.
*/
- dev_warn(pwm->chip->dev,
+ dev_warn(pwmchip_parent(pwm->chip),
"No consumer device specified to create a link to\n");
return NULL;
}
- dl = device_link_add(dev, pwm->chip->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+ dl = device_link_add(dev, pwmchip_parent(pwm->chip), DL_FLAG_AUTOREMOVE_CONSUMER);
if (!dl) {
dev_err(dev, "failed to create device link to %s\n",
- dev_name(pwm->chip->dev));
+ dev_name(pwmchip_parent(pwm->chip)));
return ERR_PTR(-EINVAL);
}
return dl;
}
+static struct pwm_chip *fwnode_to_pwmchip(struct fwnode_handle *fwnode)
+{
+ struct pwm_chip *chip;
+ unsigned long id, tmp;
+
+ mutex_lock(&pwm_lock);
+
+ idr_for_each_entry_ul(&pwm_chips, chip, tmp, id)
+ if (pwmchip_parent(chip) && device_match_fwnode(pwmchip_parent(chip), fwnode)) {
+ mutex_unlock(&pwm_lock);
+ return chip;
+ }
+
+ mutex_unlock(&pwm_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
/**
* of_pwm_get() - request a PWM via the PWM framework
* @dev: device for PWM consumer
@@ -784,6 +819,9 @@ static struct pwm_device *acpi_pwm_get(const struct fwnode_handle *fwnode)
return pwm;
}
+static DEFINE_MUTEX(pwm_lookup_lock);
+static LIST_HEAD(pwm_lookup_list);
+
/**
* pwm_add_table() - register PWM device consumers
* @table: array of consumers to register
@@ -1105,8 +1143,8 @@ static int pwm_seq_show(struct seq_file *s, void *v)
seq_printf(s, "%s%d: %s/%s, %d PWM device%s\n",
(char *)s->private, chip->id,
- chip->dev->bus ? chip->dev->bus->name : "no-bus",
- dev_name(chip->dev), chip->npwm,
+ pwmchip_parent(chip)->bus ? pwmchip_parent(chip)->bus->name : "no-bus",
+ dev_name(pwmchip_parent(chip)), chip->npwm,
(chip->npwm != 1) ? "s" : "");
pwm_dbg_show(chip, s);
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index 670d33daea84..f000adab85b0 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -24,13 +24,12 @@
#define AB8500_PWM_CLKRATE 9600000
struct ab8500_pwm_chip {
- struct pwm_chip chip;
unsigned int hwid;
};
static struct ab8500_pwm_chip *ab8500_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct ab8500_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -92,12 +91,12 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* when disabled.
*/
if (!state->enabled || duty_steps == 0) {
- ret = abx500_mask_and_set_register_interruptible(chip->dev,
+ ret = abx500_mask_and_set_register_interruptible(pwmchip_parent(chip),
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
1 << ab8500->hwid, 0);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM, Error %d\n",
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM, Error %d\n",
pwm->label, ret);
return ret;
}
@@ -115,22 +114,22 @@ static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
reg = AB8500_PWM_OUT_CTRL1_REG + (ab8500->hwid * 2);
- ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
+ ret = abx500_set_register_interruptible(pwmchip_parent(chip), AB8500_MISC,
reg, lower_val);
if (ret < 0)
return ret;
- ret = abx500_set_register_interruptible(chip->dev, AB8500_MISC,
+ ret = abx500_set_register_interruptible(pwmchip_parent(chip), AB8500_MISC,
(reg + 1), higher_val);
if (ret < 0)
return ret;
/* enable */
- ret = abx500_mask_and_set_register_interruptible(chip->dev,
+ ret = abx500_mask_and_set_register_interruptible(pwmchip_parent(chip),
AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG,
1 << ab8500->hwid, 1 << ab8500->hwid);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to enable PWM, Error %d\n",
+ dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM, Error %d\n",
pwm->label, ret);
return ret;
@@ -144,7 +143,7 @@ static int ab8500_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct ab8500_pwm_chip *ab8500 = ab8500_pwm_from_chip(chip);
unsigned int div, duty_steps;
- ret = abx500_get_register_interruptible(chip->dev, AB8500_MISC,
+ ret = abx500_get_register_interruptible(pwmchip_parent(chip), AB8500_MISC,
AB8500_PWM_OUT_CTRL7_REG,
&ctrl7);
if (ret)
@@ -157,13 +156,13 @@ static int ab8500_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
}
- ret = abx500_get_register_interruptible(chip->dev, AB8500_MISC,
+ ret = abx500_get_register_interruptible(pwmchip_parent(chip), AB8500_MISC,
AB8500_PWM_OUT_CTRL1_REG + (ab8500->hwid * 2),
&lower_val);
if (ret)
return ret;
- ret = abx500_get_register_interruptible(chip->dev, AB8500_MISC,
+ ret = abx500_get_register_interruptible(pwmchip_parent(chip), AB8500_MISC,
AB8500_PWM_OUT_CTRL2_REG + (ab8500->hwid * 2),
&higher_val);
if (ret)
@@ -185,6 +184,7 @@ static const struct pwm_ops ab8500_pwm_ops = {
static int ab8500_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct ab8500_pwm_chip *ab8500;
int err;
@@ -195,16 +195,16 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
* Nothing to be done in probe, this is required to get the
* device which is required for ab8500 read and write
*/
- ab8500 = devm_kzalloc(&pdev->dev, sizeof(*ab8500), GFP_KERNEL);
- if (ab8500 == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*ab8500));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
- ab8500->chip.dev = &pdev->dev;
- ab8500->chip.ops = &ab8500_pwm_ops;
- ab8500->chip.npwm = 1;
+ ab8500 = ab8500_pwm_from_chip(chip);
+
+ chip->ops = &ab8500_pwm_ops;
ab8500->hwid = pdev->id - 1;
- err = devm_pwmchip_add(&pdev->dev, &ab8500->chip);
+ err = devm_pwmchip_add(&pdev->dev, chip);
if (err < 0)
return dev_err_probe(&pdev->dev, err, "Failed to add pwm chip\n");
diff --git a/drivers/pwm/pwm-apple.c b/drivers/pwm/pwm-apple.c
index 4d755b628d9e..6e58aca2f13c 100644
--- a/drivers/pwm/pwm-apple.c
+++ b/drivers/pwm/pwm-apple.c
@@ -32,14 +32,13 @@
#define APPLE_PWM_CTRL_OUTPUT_ENABLE BIT(14)
struct apple_pwm {
- struct pwm_chip chip;
void __iomem *base;
u64 clkrate;
};
static inline struct apple_pwm *to_apple_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct apple_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int apple_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -103,13 +102,16 @@ static const struct pwm_ops apple_pwm_ops = {
static int apple_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct apple_pwm *fpwm;
struct clk *clk;
int ret;
- fpwm = devm_kzalloc(&pdev->dev, sizeof(*fpwm), GFP_KERNEL);
- if (!fpwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*fpwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ fpwm = to_apple_pwm(chip);
fpwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fpwm->base))
@@ -129,11 +131,9 @@ static int apple_pwm_probe(struct platform_device *pdev)
if (fpwm->clkrate > NSEC_PER_SEC)
return dev_err_probe(&pdev->dev, -EINVAL, "pwm clock out of range");
- fpwm->chip.dev = &pdev->dev;
- fpwm->chip.npwm = 1;
- fpwm->chip.ops = &apple_pwm_ops;
+ chip->ops = &apple_pwm_ops;
- ret = devm_pwmchip_add(&pdev->dev, &fpwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "unable to add pwm chip");
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 3f2c5031a3ba..2afb302be02c 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -28,7 +28,6 @@ struct atmel_hlcdc_pwm_errata {
};
struct atmel_hlcdc_pwm {
- struct pwm_chip chip;
struct atmel_hlcdc *hlcdc;
struct clk *cur_clk;
const struct atmel_hlcdc_pwm_errata *errata;
@@ -36,7 +35,7 @@ struct atmel_hlcdc_pwm {
static inline struct atmel_hlcdc_pwm *to_atmel_hlcdc_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct atmel_hlcdc_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int atmel_hlcdc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -182,10 +181,12 @@ static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
static int atmel_hlcdc_pwm_suspend(struct device *dev)
{
- struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct atmel_hlcdc_pwm *atmel = to_atmel_hlcdc_pwm(chip);
+ struct pwm_device *pwm = &chip->pwms[0];
/* Keep the periph clock enabled if the PWM is still running. */
- if (pwm_is_enabled(&atmel->chip.pwms[0]))
+ if (!pwm->state.enabled)
clk_disable_unprepare(atmel->hlcdc->periph_clk);
return 0;
@@ -193,21 +194,19 @@ static int atmel_hlcdc_pwm_suspend(struct device *dev)
static int atmel_hlcdc_pwm_resume(struct device *dev)
{
- struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
- struct pwm_state state;
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct atmel_hlcdc_pwm *atmel = to_atmel_hlcdc_pwm(chip);
+ struct pwm_device *pwm = &chip->pwms[0];
int ret;
- pwm_get_state(&atmel->chip.pwms[0], &state);
-
/* Re-enable the periph clock if it was stopped during suspend. */
- if (!state.enabled) {
+ if (!pwm->state.enabled) {
ret = clk_prepare_enable(atmel->hlcdc->periph_clk);
if (ret)
return ret;
}
- return atmel_hlcdc_pwm_apply(&atmel->chip, &atmel->chip.pwms[0],
- &state);
+ return atmel_hlcdc_pwm_apply(chip, pwm, &pwm->state);
}
static DEFINE_SIMPLE_DEV_PM_OPS(atmel_hlcdc_pwm_pm_ops,
@@ -243,15 +242,17 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct device *dev = &pdev->dev;
+ struct pwm_chip *chip;
struct atmel_hlcdc_pwm *atmel;
struct atmel_hlcdc *hlcdc;
int ret;
hlcdc = dev_get_drvdata(dev->parent);
- atmel = devm_kzalloc(dev, sizeof(*atmel), GFP_KERNEL);
- if (!atmel)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, 1, sizeof(*atmel));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ atmel = to_atmel_hlcdc_pwm(chip);
ret = clk_prepare_enable(hlcdc->periph_clk);
if (ret)
@@ -262,26 +263,25 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
atmel->errata = match->data;
atmel->hlcdc = hlcdc;
- atmel->chip.ops = &atmel_hlcdc_pwm_ops;
- atmel->chip.dev = dev;
- atmel->chip.npwm = 1;
+ chip->ops = &atmel_hlcdc_pwm_ops;
- ret = pwmchip_add(&atmel->chip);
+ ret = pwmchip_add(chip);
if (ret) {
clk_disable_unprepare(hlcdc->periph_clk);
return ret;
}
- platform_set_drvdata(pdev, atmel);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static void atmel_hlcdc_pwm_remove(struct platform_device *pdev)
{
- struct atmel_hlcdc_pwm *atmel = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct atmel_hlcdc_pwm *atmel = to_atmel_hlcdc_pwm(chip);
- pwmchip_remove(&atmel->chip);
+ pwmchip_remove(chip);
clk_disable_unprepare(atmel->hlcdc->periph_clk);
}
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index d42c897cb85e..528e54c5999d 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -47,7 +47,6 @@ struct atmel_tcb_channel {
};
struct atmel_tcb_pwm_chip {
- struct pwm_chip chip;
spinlock_t lock;
u8 channel;
u8 width;
@@ -63,7 +62,7 @@ static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128, 0, };
static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct atmel_tcb_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int atmel_tcb_pwm_request(struct pwm_chip *chip,
@@ -327,7 +326,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if ((atcbpwm && atcbpwm->duty > 0 &&
atcbpwm->duty != atcbpwm->period) &&
(atcbpwm->div != i || atcbpwm->period != period)) {
- dev_err(chip->dev,
+ dev_err(pwmchip_parent(chip),
"failed to configure period_ns: PWM group already configured with a different value\n");
return -EINVAL;
}
@@ -388,6 +387,7 @@ static const struct of_device_id atmel_tcb_of_match[] = {
static int atmel_tcb_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
const struct of_device_id *match;
struct atmel_tcb_pwm_chip *tcbpwm;
const struct atmel_tcb_config *config;
@@ -396,9 +396,10 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
int err;
int channel;
- tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
- if (tcbpwm == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, NPWM, sizeof(*tcbpwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ tcbpwm = to_tcb_chip(chip);
err = of_property_read_u32(np, "reg", &channel);
if (err < 0) {
@@ -436,9 +437,7 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
}
}
- tcbpwm->chip.dev = &pdev->dev;
- tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
- tcbpwm->chip.npwm = NPWM;
+ chip->ops = &atmel_tcb_pwm_ops;
tcbpwm->channel = channel;
tcbpwm->width = config->counter_width;
@@ -448,11 +447,11 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
spin_lock_init(&tcbpwm->lock);
- err = pwmchip_add(&tcbpwm->chip);
+ err = pwmchip_add(chip);
if (err < 0)
goto err_disable_clk;
- platform_set_drvdata(pdev, tcbpwm);
+ platform_set_drvdata(pdev, chip);
return 0;
@@ -473,9 +472,10 @@ err_slow_clk:
static void atmel_tcb_pwm_remove(struct platform_device *pdev)
{
- struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct atmel_tcb_pwm_chip *tcbpwm = to_tcb_chip(chip);
- pwmchip_remove(&tcbpwm->chip);
+ pwmchip_remove(chip);
clk_disable_unprepare(tcbpwm->slow_clk);
clk_put(tcbpwm->gclk);
@@ -491,7 +491,8 @@ MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
static int atmel_tcb_pwm_suspend(struct device *dev)
{
- struct atmel_tcb_pwm_chip *tcbpwm = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct atmel_tcb_pwm_chip *tcbpwm = to_tcb_chip(chip);
struct atmel_tcb_channel *chan = &tcbpwm->bkup;
unsigned int channel = tcbpwm->channel;
@@ -505,7 +506,8 @@ static int atmel_tcb_pwm_suspend(struct device *dev)
static int atmel_tcb_pwm_resume(struct device *dev)
{
- struct atmel_tcb_pwm_chip *tcbpwm = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct atmel_tcb_pwm_chip *tcbpwm = to_tcb_chip(chip);
struct atmel_tcb_channel *chan = &tcbpwm->bkup;
unsigned int channel = tcbpwm->channel;
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 47bcc8a3bf9d..b2f0abbbad63 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -77,7 +77,6 @@ struct atmel_pwm_data {
};
struct atmel_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
const struct atmel_pwm_data *data;
@@ -99,7 +98,7 @@ struct atmel_pwm_chip {
static inline struct atmel_pwm_chip *to_atmel_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct atmel_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline u32 atmel_pwm_readl(struct atmel_pwm_chip *chip,
@@ -210,7 +209,7 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip,
shift = fls(cycles) - atmel_pwm->data->cfg.period_bits;
if (shift > PWM_MAX_PRES) {
- dev_err(chip->dev, "pres exceeds the maximum value\n");
+ dev_err(pwmchip_parent(chip), "pres exceeds the maximum value\n");
return -EINVAL;
} else if (shift > 0) {
*pres = shift;
@@ -294,19 +293,16 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
- struct pwm_state cstate;
unsigned long cprd, cdty;
u32 pres, val;
int ret;
- pwm_get_state(pwm, &cstate);
-
if (state->enabled) {
unsigned long clkrate = clk_get_rate(atmel_pwm->clk);
- if (cstate.enabled &&
- cstate.polarity == state->polarity &&
- cstate.period == state->period) {
+ if (pwm->state.enabled &&
+ pwm->state.polarity == state->polarity &&
+ pwm->state.period == state->period) {
u32 cmr = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR);
cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm,
@@ -321,19 +317,19 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = atmel_pwm_calculate_cprd_and_pres(chip, clkrate, state, &cprd,
&pres);
if (ret) {
- dev_err(chip->dev,
+ dev_err(pwmchip_parent(chip),
"failed to calculate cprd and prescaler\n");
return ret;
}
atmel_pwm_calculate_cdty(state, clkrate, cprd, pres, &cdty);
- if (cstate.enabled) {
+ if (pwm->state.enabled) {
atmel_pwm_disable(chip, pwm, false);
} else {
ret = clk_enable(atmel_pwm->clk);
if (ret) {
- dev_err(chip->dev, "failed to enable clock\n");
+ dev_err(pwmchip_parent(chip), "failed to enable clock\n");
return ret;
}
}
@@ -348,7 +344,7 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val);
atmel_pwm_set_cprd_cdty(chip, pwm, cprd, cdty);
atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm);
- } else if (cstate.enabled) {
+ } else if (pwm->state.enabled) {
atmel_pwm_disable(chip, pwm, true);
}
@@ -462,8 +458,9 @@ static const struct of_device_id atmel_pwm_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
-static int atmel_pwm_enable_clk_if_on(struct atmel_pwm_chip *atmel_pwm, bool on)
+static int atmel_pwm_enable_clk_if_on(struct pwm_chip *chip, bool on)
{
+ struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
unsigned int i, cnt = 0;
unsigned long sr;
int ret = 0;
@@ -472,7 +469,7 @@ static int atmel_pwm_enable_clk_if_on(struct atmel_pwm_chip *atmel_pwm, bool on)
if (!sr)
return 0;
- cnt = bitmap_weight(&sr, atmel_pwm->chip.npwm);
+ cnt = bitmap_weight(&sr, chip->npwm);
if (!on)
goto disable_clk;
@@ -480,7 +477,7 @@ static int atmel_pwm_enable_clk_if_on(struct atmel_pwm_chip *atmel_pwm, bool on)
for (i = 0; i < cnt; i++) {
ret = clk_enable(atmel_pwm->clk);
if (ret) {
- dev_err(atmel_pwm->chip.dev,
+ dev_err(pwmchip_parent(chip),
"failed to enable clock for pwm %pe\n",
ERR_PTR(ret));
@@ -501,12 +498,14 @@ disable_clk:
static int atmel_pwm_probe(struct platform_device *pdev)
{
struct atmel_pwm_chip *atmel_pwm;
+ struct pwm_chip *chip;
int ret;
- atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL);
- if (!atmel_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 4, sizeof(*atmel_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ atmel_pwm = to_atmel_pwm_chip(chip);
atmel_pwm->data = of_device_get_match_data(&pdev->dev);
atmel_pwm->update_pending = 0;
@@ -521,15 +520,13 @@ static int atmel_pwm_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(atmel_pwm->clk),
"failed to get prepared PWM clock\n");
- atmel_pwm->chip.dev = &pdev->dev;
- atmel_pwm->chip.ops = &atmel_pwm_ops;
- atmel_pwm->chip.npwm = 4;
+ chip->ops = &atmel_pwm_ops;
- ret = atmel_pwm_enable_clk_if_on(atmel_pwm, true);
+ ret = atmel_pwm_enable_clk_if_on(chip, true);
if (ret < 0)
return ret;
- ret = devm_pwmchip_add(&pdev->dev, &atmel_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
goto disable_clk;
@@ -538,7 +535,7 @@ static int atmel_pwm_probe(struct platform_device *pdev)
return 0;
disable_clk:
- atmel_pwm_enable_clk_if_on(atmel_pwm, false);
+ atmel_pwm_enable_clk_if_on(chip, false);
return ret;
}
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index 758254025683..f4c9f10e490e 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -34,14 +34,13 @@
#define IPROC_PWM_PRESCALE_MAX 0x3f
struct iproc_pwmc {
- struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
static inline struct iproc_pwmc *to_iproc_pwmc(struct pwm_chip *chip)
{
- return container_of(chip, struct iproc_pwmc, chip);
+ return pwmchip_get_drvdata(chip);
}
static void iproc_pwmc_enable(struct iproc_pwmc *ip, unsigned int channel)
@@ -187,20 +186,20 @@ static const struct pwm_ops iproc_pwm_ops = {
static int iproc_pwmc_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct iproc_pwmc *ip;
unsigned int i;
u32 value;
int ret;
- ip = devm_kzalloc(&pdev->dev, sizeof(*ip), GFP_KERNEL);
- if (!ip)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 4, sizeof(*ip));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ ip = to_iproc_pwmc(chip);
platform_set_drvdata(pdev, ip);
- ip->chip.dev = &pdev->dev;
- ip->chip.ops = &iproc_pwm_ops;
- ip->chip.npwm = 4;
+ chip->ops = &iproc_pwm_ops;
ip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ip->base))
@@ -214,14 +213,14 @@ static int iproc_pwmc_probe(struct platform_device *pdev)
/* Set full drive and normal polarity for all channels */
value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
- for (i = 0; i < ip->chip.npwm; i++) {
+ for (i = 0; i < chip->npwm; i++) {
value &= ~(1 << IPROC_PWM_CTRL_TYPE_SHIFT(i));
value |= 1 << IPROC_PWM_CTRL_POLARITY_SHIFT(i);
}
writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
- ret = devm_pwmchip_add(&pdev->dev, &ip->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
"failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 45046a5c20a5..022c078aae84 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -56,14 +56,13 @@
#define DUTY_CYCLE_HIGH_MAX 0x00ffffff
struct kona_pwmc {
- struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
static inline struct kona_pwmc *to_kona_pwmc(struct pwm_chip *chip)
{
- return container_of(chip, struct kona_pwmc, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -164,7 +163,7 @@ static int kona_pwmc_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
ret = clk_prepare_enable(kp->clk);
if (ret < 0) {
- dev_err(chip->dev, "failed to enable clock: %d\n", ret);
+ dev_err(pwmchip_parent(chip), "failed to enable clock: %d\n", ret);
return ret;
}
@@ -193,7 +192,7 @@ static int kona_pwmc_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = clk_prepare_enable(kp->clk);
if (ret < 0) {
- dev_err(chip->dev, "failed to enable clock: %d\n", ret);
+ dev_err(pwmchip_parent(chip), "failed to enable clock: %d\n", ret);
return ret;
}
@@ -273,18 +272,18 @@ static const struct pwm_ops kona_pwm_ops = {
static int kona_pwmc_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct kona_pwmc *kp;
unsigned int chan;
unsigned int value = 0;
int ret = 0;
- kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
- if (kp == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 6, sizeof(*kp));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ kp = to_kona_pwmc(chip);
- kp->chip.dev = &pdev->dev;
- kp->chip.ops = &kona_pwm_ops;
- kp->chip.npwm = 6;
+ chip->ops = &kona_pwm_ops;
kp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kp->base))
@@ -304,14 +303,14 @@ static int kona_pwmc_probe(struct platform_device *pdev)
}
/* Set push/pull for all channels */
- for (chan = 0; chan < kp->chip.npwm; chan++)
+ for (chan = 0; chan < chip->npwm; chan++)
value |= (1 << PWM_CONTROL_TYPE_SHIFT(chan));
writel(value, kp->base + PWM_CONTROL_OFFSET);
clk_disable_unprepare(kp->clk);
- ret = devm_pwmchip_add(&pdev->dev, &kp->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index 283cf27f25ba..aa35acbb0cbc 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -24,8 +24,6 @@
#define PERIOD_MIN 0x2
struct bcm2835_pwm {
- struct pwm_chip chip;
- struct device *dev;
void __iomem *base;
struct clk *clk;
unsigned long rate;
@@ -33,7 +31,7 @@ struct bcm2835_pwm {
static inline struct bcm2835_pwm *to_bcm2835_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct bcm2835_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int bcm2835_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -135,14 +133,14 @@ static void devm_clk_rate_exclusive_put(void *data)
static int bcm2835_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct bcm2835_pwm *pc;
int ret;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
-
- pc->dev = &pdev->dev;
+ chip = devm_pwmchip_alloc(&pdev->dev, 2, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_bcm2835_pwm(chip);
pc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->base))
@@ -168,14 +166,12 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, -EINVAL,
"failed to get clock rate\n");
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &bcm2835_pwm_ops;
- pc->chip.atomic = true;
- pc->chip.npwm = 2;
+ chip->ops = &bcm2835_pwm_ops;
+ chip->atomic = true;
platform_set_drvdata(pdev, pc);
- ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
"failed to add pwmchip\n");
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 442913232dc0..831aed228caf 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -49,7 +49,6 @@ struct berlin_pwm_channel {
};
struct berlin_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
struct berlin_pwm_channel channel[BERLIN_PWM_NUMPWMS];
@@ -57,7 +56,7 @@ struct berlin_pwm_chip {
static inline struct berlin_pwm_chip *to_berlin_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct berlin_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline u32 berlin_pwm_readl(struct berlin_pwm_chip *bpc,
@@ -198,12 +197,14 @@ MODULE_DEVICE_TABLE(of, berlin_pwm_match);
static int berlin_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct berlin_pwm_chip *bpc;
int ret;
- bpc = devm_kzalloc(&pdev->dev, sizeof(*bpc), GFP_KERNEL);
- if (!bpc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, BERLIN_PWM_NUMPWMS, sizeof(*bpc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ bpc = to_berlin_pwm_chip(chip);
bpc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bpc->base))
@@ -213,25 +214,24 @@ static int berlin_pwm_probe(struct platform_device *pdev)
if (IS_ERR(bpc->clk))
return PTR_ERR(bpc->clk);
- bpc->chip.dev = &pdev->dev;
- bpc->chip.ops = &berlin_pwm_ops;
- bpc->chip.npwm = BERLIN_PWM_NUMPWMS;
+ chip->ops = &berlin_pwm_ops;
- ret = devm_pwmchip_add(&pdev->dev, &bpc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
- platform_set_drvdata(pdev, bpc);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static int berlin_pwm_suspend(struct device *dev)
{
- struct berlin_pwm_chip *bpc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct berlin_pwm_chip *bpc = to_berlin_pwm_chip(chip);
unsigned int i;
- for (i = 0; i < bpc->chip.npwm; i++) {
+ for (i = 0; i < chip->npwm; i++) {
struct berlin_pwm_channel *channel = &bpc->channel[i];
channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_ENABLE);
@@ -247,7 +247,8 @@ static int berlin_pwm_suspend(struct device *dev)
static int berlin_pwm_resume(struct device *dev)
{
- struct berlin_pwm_chip *bpc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct berlin_pwm_chip *bpc = to_berlin_pwm_chip(chip);
unsigned int i;
int ret;
@@ -255,7 +256,7 @@ static int berlin_pwm_resume(struct device *dev)
if (ret)
return ret;
- for (i = 0; i < bpc->chip.npwm; i++) {
+ for (i = 0; i < chip->npwm; i++) {
struct berlin_pwm_channel *channel = &bpc->channel[i];
berlin_pwm_writel(bpc, i, channel->ctrl, BERLIN_PWM_CONTROL);
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index 0fdeb0b2dbf3..82d27d07ba91 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -54,7 +54,6 @@
struct brcmstb_pwm {
void __iomem *base;
struct clk *clk;
- struct pwm_chip chip;
};
static inline u32 brcmstb_pwm_readl(struct brcmstb_pwm *p,
@@ -77,7 +76,7 @@ static inline void brcmstb_pwm_writel(struct brcmstb_pwm *p, u32 value,
static inline struct brcmstb_pwm *to_brcmstb_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct brcmstb_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -230,12 +229,14 @@ MODULE_DEVICE_TABLE(of, brcmstb_pwm_of_match);
static int brcmstb_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct brcmstb_pwm *p;
int ret;
- p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 2, sizeof(*p));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ p = to_brcmstb_pwm(chip);
p->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(p->clk))
@@ -244,15 +245,13 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, p);
- p->chip.dev = &pdev->dev;
- p->chip.ops = &brcmstb_pwm_ops;
- p->chip.npwm = 2;
+ chip->ops = &brcmstb_pwm_ops;
p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base))
return PTR_ERR(p->base);
- ret = devm_pwmchip_add(&pdev->dev, &p->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-clk.c b/drivers/pwm/pwm-clk.c
index 9dd88b386907..c19a482d7e28 100644
--- a/drivers/pwm/pwm-clk.c
+++ b/drivers/pwm/pwm-clk.c
@@ -28,12 +28,14 @@
#include <linux/pwm.h>
struct pwm_clk_chip {
- struct pwm_chip chip;
struct clk *clk;
bool clk_enabled;
};
-#define to_pwm_clk_chip(_chip) container_of(_chip, struct pwm_clk_chip, chip)
+static inline struct pwm_clk_chip *to_pwm_clk_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static int pwm_clk_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
@@ -81,35 +83,36 @@ static const struct pwm_ops pwm_clk_ops = {
static int pwm_clk_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct pwm_clk_chip *pcchip;
int ret;
- pcchip = devm_kzalloc(&pdev->dev, sizeof(*pcchip), GFP_KERNEL);
- if (!pcchip)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*pcchip));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pcchip = to_pwm_clk_chip(chip);
pcchip->clk = devm_clk_get_prepared(&pdev->dev, NULL);
if (IS_ERR(pcchip->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(pcchip->clk),
"Failed to get clock\n");
- pcchip->chip.dev = &pdev->dev;
- pcchip->chip.ops = &pwm_clk_ops;
- pcchip->chip.npwm = 1;
+ chip->ops = &pwm_clk_ops;
- ret = pwmchip_add(&pcchip->chip);
+ ret = pwmchip_add(chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to add pwm chip\n");
- platform_set_drvdata(pdev, pcchip);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static void pwm_clk_remove(struct platform_device *pdev)
{
- struct pwm_clk_chip *pcchip = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct pwm_clk_chip *pcchip = to_pwm_clk_chip(chip);
- pwmchip_remove(&pcchip->chip);
+ pwmchip_remove(chip);
if (pcchip->clk_enabled)
clk_disable(pcchip->clk);
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index 42179b3f7ec3..c950e1dbd2b8 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -12,7 +12,6 @@
#include <linux/pwm.h>
struct clps711x_chip {
- struct pwm_chip chip;
void __iomem *pmpcon;
struct clk *clk;
spinlock_t lock;
@@ -20,7 +19,7 @@ struct clps711x_chip {
static inline struct clps711x_chip *to_clps711x_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct clps711x_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -74,22 +73,15 @@ static const struct pwm_ops clps711x_pwm_ops = {
.apply = clps711x_pwm_apply,
};
-static struct pwm_device *clps711x_pwm_xlate(struct pwm_chip *chip,
- const struct of_phandle_args *args)
-{
- if (args->args[0] >= chip->npwm)
- return ERR_PTR(-EINVAL);
-
- return pwm_request_from_chip(chip, args->args[0], NULL);
-}
-
static int clps711x_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct clps711x_chip *priv;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 2, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = to_clps711x_chip(chip);
priv->pmpcon = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->pmpcon))
@@ -99,15 +91,11 @@ static int clps711x_pwm_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
- priv->chip.ops = &clps711x_pwm_ops;
- priv->chip.dev = &pdev->dev;
- priv->chip.npwm = 2;
- priv->chip.of_xlate = clps711x_pwm_xlate;
- priv->chip.of_pwm_n_cells = 1;
+ chip->ops = &clps711x_pwm_ops;
spin_lock_init(&priv->lock);
- return devm_pwmchip_add(&pdev->dev, &priv->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
static const struct of_device_id __maybe_unused clps711x_pwm_dt_ids[] = {
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index e09358901ab5..98ee5cdbd0ba 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -26,17 +26,15 @@
/**
* struct crystalcove_pwm - Crystal Cove PWM controller
- * @chip: the abstract pwm_chip structure.
* @regmap: the regmap from the parent device.
*/
struct crystalcove_pwm {
- struct pwm_chip chip;
struct regmap *regmap;
};
static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct crystalcove_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int crc_pwm_calc_clk_div(int period_ns)
@@ -55,7 +53,7 @@ static int crc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct crystalcove_pwm *crc_pwm = to_crc_pwm(chip);
- struct device *dev = crc_pwm->chip.dev;
+ struct device *dev = pwmchip_parent(chip);
int err;
if (state->period > PWM_MAX_PERIOD_NS) {
@@ -125,7 +123,7 @@ static int crc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
struct crystalcove_pwm *crc_pwm = to_crc_pwm(chip);
- struct device *dev = crc_pwm->chip.dev;
+ struct device *dev = pwmchip_parent(chip);
unsigned int clk_div, clk_div_reg, duty_cycle_reg;
int error;
@@ -160,22 +158,22 @@ static const struct pwm_ops crc_pwm_ops = {
static int crystalcove_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct crystalcove_pwm *crc_pwm;
struct device *dev = pdev->dev.parent;
struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
- crc_pwm = devm_kzalloc(&pdev->dev, sizeof(*crc_pwm), GFP_KERNEL);
- if (!crc_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*crc_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ crc_pwm = to_crc_pwm(chip);
- crc_pwm->chip.dev = &pdev->dev;
- crc_pwm->chip.ops = &crc_pwm_ops;
- crc_pwm->chip.npwm = 1;
+ chip->ops = &crc_pwm_ops;
/* get the PMIC regmap */
crc_pwm->regmap = pmic->regmap;
- return devm_pwmchip_add(&pdev->dev, &crc_pwm->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
static struct platform_driver crystalcove_pwm_driver = {
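
pwm-crc also swaps chip->dev for the pwmchip_parent() accessor, the other half of the rework: the device that registered the chip is reached through a helper rather than a public struct member. A hedged sketch of a callback logging against the parent (the foo names and the 100 ns limit are made up):

#include <linux/device.h>
#include <linux/pwm.h>

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	/* the device that registered the chip, formerly chip->dev */
	struct device *dev = pwmchip_parent(chip);

	if (state->period < 100) {	/* ns; invented hardware limit */
		dev_err(dev, "period %llu ns too short\n", state->period);
		return -EINVAL;
	}

	return 0;	/* register programming elided */
}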
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 5fe303b8656d..606ccfdaf4cc 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -19,13 +19,11 @@
* struct cros_ec_pwm_device - Driver data for EC PWM
*
* @ec: Pointer to EC device
- * @chip: PWM controller chip
* @use_pwm_type: Use PWM types instead of generic channels
* @channel: array with per-channel data
*/
struct cros_ec_pwm_device {
struct cros_ec_device *ec;
- struct pwm_chip chip;
bool use_pwm_type;
struct cros_ec_pwm *channel;
};
@@ -40,7 +38,7 @@ struct cros_ec_pwm {
static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct cros_ec_pwm_device, chip);
+ return pwmchip_get_drvdata(chip);
}
static int cros_ec_dt_type_to_pwm_type(u8 dt_index, u8 *pwm_type)
@@ -93,9 +91,8 @@ static int cros_ec_pwm_set_duty(struct cros_ec_pwm_device *ec_pwm, u8 index,
return cros_ec_cmd_xfer_status(ec, msg);
}
-static int cros_ec_pwm_get_duty(struct cros_ec_pwm_device *ec_pwm, u8 index)
+static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, bool use_pwm_type, u8 index)
{
- struct cros_ec_device *ec = ec_pwm->ec;
struct {
struct cros_ec_command msg;
union {
@@ -115,7 +112,7 @@ static int cros_ec_pwm_get_duty(struct cros_ec_pwm_device *ec_pwm, u8 index)
msg->insize = sizeof(*resp);
msg->outsize = sizeof(*params);
- if (ec_pwm->use_pwm_type) {
+ if (use_pwm_type) {
ret = cros_ec_dt_type_to_pwm_type(index, &params->pwm_type);
if (ret) {
dev_err(ec->dev, "Invalid PWM type index: %d\n", index);
@@ -171,9 +168,9 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct cros_ec_pwm *channel = &ec_pwm->channel[pwm->hwpwm];
int ret;
- ret = cros_ec_pwm_get_duty(ec_pwm, pwm->hwpwm);
+ ret = cros_ec_pwm_get_duty(ec_pwm->ec, ec_pwm->use_pwm_type, pwm->hwpwm);
if (ret < 0) {
- dev_err(chip->dev, "error getting initial duty: %d\n", ret);
+ dev_err(pwmchip_parent(chip), "error getting initial duty: %d\n", ret);
return ret;
}
@@ -226,13 +223,17 @@ static const struct pwm_ops cros_ec_pwm_ops = {
* of PWMs it supports directly, so we have to read the pwm duty cycle for
* subsequent channels until we get an error.
*/
-static int cros_ec_num_pwms(struct cros_ec_pwm_device *ec_pwm)
+static int cros_ec_num_pwms(struct cros_ec_device *ec)
{
int i, ret;
/* The index field is only 8 bits */
for (i = 0; i <= U8_MAX; i++) {
- ret = cros_ec_pwm_get_duty(ec_pwm, i);
+ /*
+ * Note that this function is only called when use_pwm_type is
+ * false. With use_pwm_type == true the number of PWMs is fixed.
+ */
+ ret = cros_ec_pwm_get_duty(ec, false, i);
/*
* We look for SUCCESS, INVALID_COMMAND, or INVALID_PARAM
* responses; everything else is treated as an error.
@@ -261,34 +262,34 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct cros_ec_pwm_device *ec_pwm;
struct pwm_chip *chip;
+ bool use_pwm_type = false;
+ unsigned int npwm;
int ret;
if (!ec)
return dev_err_probe(dev, -EINVAL, "no parent EC device\n");
- ec_pwm = devm_kzalloc(dev, sizeof(*ec_pwm), GFP_KERNEL);
- if (!ec_pwm)
- return -ENOMEM;
- chip = &ec_pwm->chip;
- ec_pwm->ec = ec;
+ if (of_device_is_compatible(np, "google,cros-ec-pwm-type")) {
+ use_pwm_type = true;
+ npwm = CROS_EC_PWM_DT_COUNT;
+ } else {
+ ret = cros_ec_num_pwms(ec);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Couldn't find PWMs\n");
+ npwm = ret;
+ }
+
+ chip = devm_pwmchip_alloc(dev, npwm, sizeof(*ec_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
- if (of_device_is_compatible(np, "google,cros-ec-pwm-type"))
- ec_pwm->use_pwm_type = true;
+ ec_pwm = pwm_to_cros_ec_pwm(chip);
+ ec_pwm->use_pwm_type = use_pwm_type;
+ ec_pwm->ec = ec;
/* PWM chip */
- chip->dev = dev;
chip->ops = &cros_ec_pwm_ops;
chip->of_xlate = cros_ec_pwm_xlate;
- chip->of_pwm_n_cells = 1;
-
- if (ec_pwm->use_pwm_type) {
- chip->npwm = CROS_EC_PWM_DT_COUNT;
- } else {
- ret = cros_ec_num_pwms(ec_pwm);
- if (ret < 0)
- return dev_err_probe(dev, ret, "Couldn't find PWMs\n");
- chip->npwm = ret;
- }
ec_pwm->channel = devm_kcalloc(dev, chip->npwm, sizeof(*ec_pwm->channel),
GFP_KERNEL);
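
Because devm_pwmchip_alloc() takes npwm up front, cros-ec must count its channels before the chip (and hence the driver data) exists; that is why cros_ec_pwm_get_duty() now takes the EC device and the use_pwm_type flag directly instead of the driver struct. A reduced sketch of the probe ordering (foo_count_channels() is a hypothetical stand-in for the EC query loop):

#include <linux/err.h>
#include <linux/pwm.h>

/* hypothetical: ask the firmware how many channels it exposes */
static int foo_count_channels(struct device *dev)
{
	return 4;	/* placeholder result */
}

static int foo_pwm_probe(struct device *dev)
{
	struct pwm_chip *chip;
	int ret;

	/* must run before the allocation: no chip/drvdata exists yet */
	ret = foo_count_channels(dev);
	if (ret < 0)
		return ret;

	chip = devm_pwmchip_alloc(dev, ret, 0);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	/* ops setup and devm_pwmchip_add() follow as in the sketch above */
	return 0;
}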
diff --git a/drivers/pwm/pwm-dwc-core.c b/drivers/pwm/pwm-dwc-core.c
index ea63dd741f5c..043736972cb9 100644
--- a/drivers/pwm/pwm-dwc-core.c
+++ b/drivers/pwm/pwm-dwc-core.c
@@ -105,12 +105,12 @@ static int dwc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->enabled) {
if (!pwm->state.enabled)
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
return __dwc_pwm_configure_timer(dwc, pwm, state);
} else {
if (pwm->state.enabled) {
__dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
- pm_runtime_put_sync(chip->dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
}
@@ -124,7 +124,7 @@ static int dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
u64 duty, period;
u32 ctrl, ld, ld2;
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(pwm->hwpwm));
ld = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(pwm->hwpwm));
@@ -149,7 +149,7 @@ static int dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
state->period = period;
state->duty_cycle = duty;
- pm_runtime_put_sync(chip->dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
return 0;
}
@@ -159,21 +159,21 @@ static const struct pwm_ops dwc_pwm_ops = {
.get_state = dwc_pwm_get_state,
};
-struct dwc_pwm *dwc_pwm_alloc(struct device *dev)
+struct pwm_chip *dwc_pwm_alloc(struct device *dev)
{
+ struct pwm_chip *chip;
struct dwc_pwm *dwc;
- dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
- if (!dwc)
- return NULL;
+ chip = devm_pwmchip_alloc(dev, DWC_TIMERS_TOTAL, sizeof(*dwc));
+ if (IS_ERR(chip))
+ return chip;
+ dwc = to_dwc_pwm(chip);
dwc->clk_ns = 10;
- dwc->chip.dev = dev;
- dwc->chip.ops = &dwc_pwm_ops;
- dwc->chip.npwm = DWC_TIMERS_TOTAL;
+ chip->ops = &dwc_pwm_ops;
- dev_set_drvdata(dev, dwc);
- return dwc;
+ dev_set_drvdata(dev, chip);
+ return chip;
}
EXPORT_SYMBOL_GPL(dwc_pwm_alloc);
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
index 4929354f8cd9..676eaf8d7a53 100644
--- a/drivers/pwm/pwm-dwc.c
+++ b/drivers/pwm/pwm-dwc.c
@@ -25,39 +25,54 @@
#include "pwm-dwc.h"
-static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
+/* Elkhart Lake */
+static const struct dwc_pwm_info ehl_pwm_info = {
+ .nr = 2,
+ .size = 0x1000,
+};
+
+static int dwc_pwm_init_one(struct device *dev, void __iomem *base, unsigned int offset)
{
- struct device *dev = &pci->dev;
+ struct pwm_chip *chip;
struct dwc_pwm *dwc;
- int ret;
- dwc = dwc_pwm_alloc(dev);
- if (!dwc)
- return -ENOMEM;
+ chip = dwc_pwm_alloc(dev);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+
+ dwc = to_dwc_pwm(chip);
+ dwc->base = base + offset;
+
+ return devm_pwmchip_add(dev, chip);
+}
+
+static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ const struct dwc_pwm_info *info;
+ struct device *dev = &pci->dev;
+ int i, ret;
ret = pcim_enable_device(pci);
- if (ret) {
- dev_err(dev, "Failed to enable device (%pe)\n", ERR_PTR(ret));
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable device\n");
pci_set_master(pci);
ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
- if (ret) {
- dev_err(dev, "Failed to iomap PCI BAR (%pe)\n", ERR_PTR(ret));
- return ret;
- }
-
- dwc->base = pcim_iomap_table(pci)[0];
- if (!dwc->base) {
- dev_err(dev, "Base address missing\n");
- return -ENOMEM;
- }
-
- ret = devm_pwmchip_add(dev, &dwc->chip);
if (ret)
- return ret;
+ return dev_err_probe(dev, ret, "Failed to iomap PCI BAR\n");
+
+ info = (const struct dwc_pwm_info *)id->driver_data;
+
+ for (i = 0; i < info->nr; i++) {
+ /*
+	 * No need to check for pcim_iomap_table() failure:
+	 * pcim_iomap_regions() already does it for us.
+ */
+ ret = dwc_pwm_init_one(dev, pcim_iomap_table(pci)[0], i * info->size);
+ if (ret)
+ return ret;
+ }
pm_runtime_put(dev);
pm_runtime_allow(dev);
@@ -73,14 +88,14 @@ static void dwc_pwm_remove(struct pci_dev *pci)
static int dwc_pwm_suspend(struct device *dev)
{
- struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
- struct dwc_pwm *dwc = pci_get_drvdata(pdev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
int i;
for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
- if (dwc->chip.pwms[i].state.enabled) {
+ if (chip->pwms[i].state.enabled) {
dev_err(dev, "PWM %u in use by consumer (%s)\n",
- i, dwc->chip.pwms[i].label);
+ i, chip->pwms[i].label);
return -EBUSY;
}
dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
@@ -93,8 +108,8 @@ static int dwc_pwm_suspend(struct device *dev)
static int dwc_pwm_resume(struct device *dev)
{
- struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
- struct dwc_pwm *dwc = pci_get_drvdata(pdev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
int i;
for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
@@ -109,7 +124,7 @@ static int dwc_pwm_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(dwc_pwm_pm_ops, dwc_pwm_suspend, dwc_pwm_resume);
static const struct pci_device_id dwc_pwm_id_table[] = {
- { PCI_VDEVICE(INTEL, 0x4bb7) }, /* Elkhart Lake */
+ { PCI_VDEVICE(INTEL, 0x4bb7), (kernel_ulong_t)&ehl_pwm_info },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, dwc_pwm_id_table);
@@ -120,7 +135,7 @@ static struct pci_driver dwc_pwm_driver = {
.remove = dwc_pwm_remove,
.id_table = dwc_pwm_id_table,
.driver = {
- .pm = pm_ptr(&dwc_pwm_pm_ops),
+ .pm = pm_sleep_ptr(&dwc_pwm_pm_ops),
},
};
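
The Elkhart Lake entry now carries per-device parameters through the PCI id table's driver_data, the stock idiom for instantiating several sub-chips behind one BAR; note also that pm_ptr() becomes pm_sleep_ptr(), since these are system-sleep callbacks and can be discarded when CONFIG_PM_SLEEP is off. A generic sketch of the driver_data idiom (the bar_info type and driver names are illustrative; the PCI ID is the one from the table above):

#include <linux/module.h>
#include <linux/pci.h>

struct bar_info {		/* illustrative per-device parameters */
	unsigned int nr;	/* number of sub-chips behind the BAR */
	unsigned int stride;	/* register window size per sub-chip */
};

static const struct bar_info example_info = { .nr = 2, .stride = 0x1000 };

static const struct pci_device_id example_ids[] = {
	/* driver_data smuggles the pointer through a kernel_ulong_t */
	{ PCI_VDEVICE(INTEL, 0x4bb7), (kernel_ulong_t)&example_info },
	{ }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	const struct bar_info *info = (const struct bar_info *)id->driver_data;
	unsigned int i;

	for (i = 0; i < info->nr; i++) {
		/* instantiate one logical device per info->stride window */
	}

	return 0;
}

static struct pci_driver example_driver = {
	.name = "example",
	.id_table = example_ids,
	.probe = example_probe,
};
module_pci_driver(example_driver);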
diff --git a/drivers/pwm/pwm-dwc.h b/drivers/pwm/pwm-dwc.h
index 64795247c54c..a8b074841ae8 100644
--- a/drivers/pwm/pwm-dwc.h
+++ b/drivers/pwm/pwm-dwc.h
@@ -33,6 +33,11 @@ MODULE_IMPORT_NS(dwc_pwm);
#define DWC_TIM_CTRL_INT_MASK BIT(2)
#define DWC_TIM_CTRL_PWM BIT(3)
+struct dwc_pwm_info {
+ unsigned int nr;
+ unsigned int size;
+};
+
struct dwc_pwm_ctx {
u32 cnt;
u32 cnt2;
@@ -40,12 +45,15 @@ struct dwc_pwm_ctx {
};
struct dwc_pwm {
- struct pwm_chip chip;
void __iomem *base;
unsigned int clk_ns;
struct dwc_pwm_ctx ctx[DWC_TIMERS_TOTAL];
};
-#define to_dwc_pwm(p) (container_of((p), struct dwc_pwm, chip))
+
+static inline struct dwc_pwm *to_dwc_pwm(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static inline u32 dwc_pwm_readl(struct dwc_pwm *dwc, u32 offset)
{
@@ -57,4 +65,4 @@ static inline void dwc_pwm_writel(struct dwc_pwm *dwc, u32 value, u32 offset)
writel(value, dwc->base + offset);
}
-extern struct dwc_pwm *dwc_pwm_alloc(struct device *dev);
+extern struct pwm_chip *dwc_pwm_alloc(struct device *dev);
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index 51e072572a87..666f2954133c 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -36,24 +36,23 @@
struct ep93xx_pwm {
void __iomem *base;
struct clk *clk;
- struct pwm_chip chip;
};
static inline struct ep93xx_pwm *to_ep93xx_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct ep93xx_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int ep93xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct platform_device *pdev = to_platform_device(chip->dev);
+ struct platform_device *pdev = to_platform_device(pwmchip_parent(chip));
return ep93xx_pwm_acquire_gpio(pdev);
}
static void ep93xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct platform_device *pdev = to_platform_device(chip->dev);
+ struct platform_device *pdev = to_platform_device(pwmchip_parent(chip));
ep93xx_pwm_release_gpio(pdev);
}
@@ -163,12 +162,14 @@ static const struct pwm_ops ep93xx_pwm_ops = {
static int ep93xx_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct ep93xx_pwm *ep93xx_pwm;
int ret;
- ep93xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_pwm), GFP_KERNEL);
- if (!ep93xx_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*ep93xx_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ ep93xx_pwm = to_ep93xx_pwm(chip);
ep93xx_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ep93xx_pwm->base))
@@ -178,11 +179,9 @@ static int ep93xx_pwm_probe(struct platform_device *pdev)
if (IS_ERR(ep93xx_pwm->clk))
return PTR_ERR(ep93xx_pwm->clk);
- ep93xx_pwm->chip.dev = &pdev->dev;
- ep93xx_pwm->chip.ops = &ep93xx_pwm_ops;
- ep93xx_pwm->chip.npwm = 1;
+ chip->ops = &ep93xx_pwm_ops;
- ret = devm_pwmchip_add(&pdev->dev, &ep93xx_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return ret;
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index d1b6d1aa4773..2510c10ca473 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -40,7 +40,6 @@ struct fsl_pwm_periodcfg {
};
struct fsl_pwm_chip {
- struct pwm_chip chip;
struct mutex lock;
struct regmap *regmap;
@@ -55,7 +54,7 @@ struct fsl_pwm_chip {
static inline struct fsl_pwm_chip *to_fsl_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct fsl_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static void ftm_clear_write_protection(struct fsl_pwm_chip *fpc)
@@ -221,10 +220,11 @@ static bool fsl_pwm_is_other_pwm_enabled(struct fsl_pwm_chip *fpc,
return false;
}
-static int fsl_pwm_apply_config(struct fsl_pwm_chip *fpc,
+static int fsl_pwm_apply_config(struct pwm_chip *chip,
struct pwm_device *pwm,
const struct pwm_state *newstate)
{
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
unsigned int duty;
u32 reg_polarity;
@@ -232,7 +232,7 @@ static int fsl_pwm_apply_config(struct fsl_pwm_chip *fpc,
bool do_write_period = false;
if (!fsl_pwm_calculate_period(fpc, newstate->period, &periodcfg)) {
- dev_err(fpc->chip.dev, "failed to calculate new period\n");
+ dev_err(pwmchip_parent(chip), "failed to calculate new period\n");
return -EINVAL;
}
@@ -246,7 +246,7 @@ static int fsl_pwm_apply_config(struct fsl_pwm_chip *fpc,
*/
else if (!fsl_pwm_periodcfg_are_equal(&fpc->period, &periodcfg)) {
if (fsl_pwm_is_other_pwm_enabled(fpc, pwm)) {
- dev_err(fpc->chip.dev,
+ dev_err(pwmchip_parent(chip),
"Cannot change period for PWM %u, disable other PWMs first\n",
pwm->hwpwm);
return -EBUSY;
@@ -322,7 +322,7 @@ static int fsl_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
goto end_mutex;
}
- ret = fsl_pwm_apply_config(fpc, pwm, newstate);
+ ret = fsl_pwm_apply_config(chip, pwm, newstate);
if (ret)
goto end_mutex;
@@ -392,18 +392,19 @@ static const struct regmap_config fsl_pwm_regmap_config = {
static int fsl_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct fsl_pwm_chip *fpc;
void __iomem *base;
int ret;
- fpc = devm_kzalloc(&pdev->dev, sizeof(*fpc), GFP_KERNEL);
- if (!fpc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 8, sizeof(*fpc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ fpc = to_fsl_chip(chip);
mutex_init(&fpc->lock);
fpc->soc = of_device_get_match_data(&pdev->dev);
- fpc->chip.dev = &pdev->dev;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -422,16 +423,16 @@ static int fsl_pwm_probe(struct platform_device *pdev)
return PTR_ERR(fpc->clk[FSL_PWM_CLK_SYS]);
}
- fpc->clk[FSL_PWM_CLK_FIX] = devm_clk_get(fpc->chip.dev, "ftm_fix");
+ fpc->clk[FSL_PWM_CLK_FIX] = devm_clk_get(&pdev->dev, "ftm_fix");
if (IS_ERR(fpc->clk[FSL_PWM_CLK_FIX]))
return PTR_ERR(fpc->clk[FSL_PWM_CLK_FIX]);
- fpc->clk[FSL_PWM_CLK_EXT] = devm_clk_get(fpc->chip.dev, "ftm_ext");
+ fpc->clk[FSL_PWM_CLK_EXT] = devm_clk_get(&pdev->dev, "ftm_ext");
if (IS_ERR(fpc->clk[FSL_PWM_CLK_EXT]))
return PTR_ERR(fpc->clk[FSL_PWM_CLK_EXT]);
fpc->clk[FSL_PWM_CLK_CNTEN] =
- devm_clk_get(fpc->chip.dev, "ftm_cnt_clk_en");
+ devm_clk_get(&pdev->dev, "ftm_cnt_clk_en");
if (IS_ERR(fpc->clk[FSL_PWM_CLK_CNTEN]))
return PTR_ERR(fpc->clk[FSL_PWM_CLK_CNTEN]);
@@ -443,17 +444,15 @@ static int fsl_pwm_probe(struct platform_device *pdev)
if (IS_ERR(fpc->ipg_clk))
fpc->ipg_clk = fpc->clk[FSL_PWM_CLK_SYS];
+ chip->ops = &fsl_pwm_ops;
- fpc->chip.ops = &fsl_pwm_ops;
- fpc->chip.npwm = 8;
-
- ret = devm_pwmchip_add(&pdev->dev, &fpc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, fpc);
+ platform_set_drvdata(pdev, chip);
return fsl_pwm_init(fpc);
}
@@ -461,14 +460,15 @@ static int fsl_pwm_probe(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int fsl_pwm_suspend(struct device *dev)
{
- struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
int i;
regcache_cache_only(fpc->regmap, true);
regcache_mark_dirty(fpc->regmap);
- for (i = 0; i < fpc->chip.npwm; i++) {
- struct pwm_device *pwm = &fpc->chip.pwms[i];
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
if (!test_bit(PWMF_REQUESTED, &pwm->flags))
continue;
@@ -487,11 +487,12 @@ static int fsl_pwm_suspend(struct device *dev)
static int fsl_pwm_resume(struct device *dev)
{
- struct fsl_pwm_chip *fpc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
int i;
- for (i = 0; i < fpc->chip.npwm; i++) {
- struct pwm_device *pwm = &fpc->chip.pwms[i];
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
if (!test_bit(PWMF_REQUESTED, &pwm->flags))
continue;
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index c435776e2f78..2eb0b13d4e10 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -33,7 +33,6 @@
#define PWM_DUTY_MASK GENMASK(31, 0)
struct hibvt_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
struct reset_control *rstc;
@@ -65,7 +64,7 @@ static const struct hibvt_pwm_soc hi3559v100_soc_info = {
static inline struct hibvt_pwm_chip *to_hibvt_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct hibvt_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static void hibvt_pwm_set_bits(void __iomem *base, u32 offset,
@@ -191,72 +190,71 @@ static int hibvt_pwm_probe(struct platform_device *pdev)
{
const struct hibvt_pwm_soc *soc =
of_device_get_match_data(&pdev->dev);
- struct hibvt_pwm_chip *pwm_chip;
+ struct pwm_chip *chip;
+ struct hibvt_pwm_chip *hi_pwm_chip;
int ret, i;
- pwm_chip = devm_kzalloc(&pdev->dev, sizeof(*pwm_chip), GFP_KERNEL);
- if (pwm_chip == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, soc->num_pwms, sizeof(*hi_pwm_chip));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ hi_pwm_chip = to_hibvt_pwm_chip(chip);
- pwm_chip->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pwm_chip->clk)) {
+ hi_pwm_chip->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hi_pwm_chip->clk)) {
dev_err(&pdev->dev, "getting clock failed with %ld\n",
- PTR_ERR(pwm_chip->clk));
- return PTR_ERR(pwm_chip->clk);
+ PTR_ERR(hi_pwm_chip->clk));
+ return PTR_ERR(hi_pwm_chip->clk);
}
- pwm_chip->chip.ops = &hibvt_pwm_ops;
- pwm_chip->chip.dev = &pdev->dev;
- pwm_chip->chip.npwm = soc->num_pwms;
- pwm_chip->soc = soc;
+ chip->ops = &hibvt_pwm_ops;
+ hi_pwm_chip->soc = soc;
- pwm_chip->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(pwm_chip->base))
- return PTR_ERR(pwm_chip->base);
+ hi_pwm_chip->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hi_pwm_chip->base))
+ return PTR_ERR(hi_pwm_chip->base);
- ret = clk_prepare_enable(pwm_chip->clk);
+ ret = clk_prepare_enable(hi_pwm_chip->clk);
if (ret < 0)
return ret;
- pwm_chip->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
- if (IS_ERR(pwm_chip->rstc)) {
- clk_disable_unprepare(pwm_chip->clk);
- return PTR_ERR(pwm_chip->rstc);
+ hi_pwm_chip->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(hi_pwm_chip->rstc)) {
+ clk_disable_unprepare(hi_pwm_chip->clk);
+ return PTR_ERR(hi_pwm_chip->rstc);
}
- reset_control_assert(pwm_chip->rstc);
+ reset_control_assert(hi_pwm_chip->rstc);
msleep(30);
- reset_control_deassert(pwm_chip->rstc);
+ reset_control_deassert(hi_pwm_chip->rstc);
- ret = pwmchip_add(&pwm_chip->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
- clk_disable_unprepare(pwm_chip->clk);
+ clk_disable_unprepare(hi_pwm_chip->clk);
return ret;
}
- for (i = 0; i < pwm_chip->chip.npwm; i++) {
- hibvt_pwm_set_bits(pwm_chip->base, PWM_CTRL_ADDR(i),
+ for (i = 0; i < chip->npwm; i++) {
+ hibvt_pwm_set_bits(hi_pwm_chip->base, PWM_CTRL_ADDR(i),
PWM_KEEP_MASK, (0x1 << PWM_KEEP_SHIFT));
}
- platform_set_drvdata(pdev, pwm_chip);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static void hibvt_pwm_remove(struct platform_device *pdev)
{
- struct hibvt_pwm_chip *pwm_chip;
-
- pwm_chip = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip);
- pwmchip_remove(&pwm_chip->chip);
+ pwmchip_remove(chip);
- reset_control_assert(pwm_chip->rstc);
+ reset_control_assert(hi_pwm_chip->rstc);
msleep(30);
- reset_control_deassert(pwm_chip->rstc);
+ reset_control_deassert(hi_pwm_chip->rstc);
- clk_disable_unprepare(pwm_chip->clk);
+ clk_disable_unprepare(hi_pwm_chip->clk);
}
static const struct of_device_id hibvt_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 5965ac35b32e..d79a96679a26 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -59,8 +59,6 @@ struct img_pwm_soc_data {
};
struct img_pwm_chip {
- struct device *dev;
- struct pwm_chip chip;
struct clk *pwm_clk;
struct clk *sys_clk;
void __iomem *base;
@@ -74,7 +72,7 @@ struct img_pwm_chip {
static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct img_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline void img_pwm_writel(struct img_pwm_chip *imgchip,
@@ -99,7 +97,7 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (period_ns < imgchip->min_period_ns ||
period_ns > imgchip->max_period_ns) {
- dev_err(chip->dev, "configured period not in range\n");
+ dev_err(pwmchip_parent(chip), "configured period not in range\n");
return -ERANGE;
}
@@ -120,14 +118,14 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
timebase = DIV_ROUND_UP(mul, 512);
} else {
- dev_err(chip->dev,
+ dev_err(pwmchip_parent(chip),
"failed to configure timebase steps/divider value\n");
return -EINVAL;
}
duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
- ret = pm_runtime_resume_and_get(chip->dev);
+ ret = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (ret < 0)
return ret;
@@ -141,8 +139,8 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
(timebase << PWM_CH_CFG_TMBASE_SHIFT);
img_pwm_writel(imgchip, PWM_CH_CFG(pwm->hwpwm), val);
- pm_runtime_mark_last_busy(chip->dev);
- pm_runtime_put_autosuspend(chip->dev);
+ pm_runtime_mark_last_busy(pwmchip_parent(chip));
+ pm_runtime_put_autosuspend(pwmchip_parent(chip));
return 0;
}
@@ -153,7 +151,7 @@ static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
int ret;
- ret = pm_runtime_resume_and_get(chip->dev);
+ ret = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (ret < 0)
return ret;
@@ -177,8 +175,8 @@ static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val &= ~BIT(pwm->hwpwm);
img_pwm_writel(imgchip, PWM_CTRL_CFG, val);
- pm_runtime_mark_last_busy(chip->dev);
- pm_runtime_put_autosuspend(chip->dev);
+ pm_runtime_mark_last_busy(pwmchip_parent(chip));
+ pm_runtime_put_autosuspend(pwmchip_parent(chip));
}
static int img_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -225,7 +223,8 @@ MODULE_DEVICE_TABLE(of, img_pwm_of_match);
static int img_pwm_runtime_suspend(struct device *dev)
{
- struct img_pwm_chip *imgchip = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
clk_disable_unprepare(imgchip->pwm_clk);
clk_disable_unprepare(imgchip->sys_clk);
@@ -235,7 +234,8 @@ static int img_pwm_runtime_suspend(struct device *dev)
static int img_pwm_runtime_resume(struct device *dev)
{
- struct img_pwm_chip *imgchip = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
int ret;
ret = clk_prepare_enable(imgchip->sys_clk);
@@ -259,13 +259,13 @@ static int img_pwm_probe(struct platform_device *pdev)
int ret;
u64 val;
unsigned long clk_rate;
+ struct pwm_chip *chip;
struct img_pwm_chip *imgchip;
- imgchip = devm_kzalloc(&pdev->dev, sizeof(*imgchip), GFP_KERNEL);
- if (!imgchip)
- return -ENOMEM;
-
- imgchip->dev = &pdev->dev;
+ chip = devm_pwmchip_alloc(&pdev->dev, IMG_PWM_NPWM, sizeof(*imgchip));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ imgchip = to_img_pwm_chip(chip);
imgchip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imgchip->base))
@@ -290,7 +290,7 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(imgchip->pwm_clk);
}
- platform_set_drvdata(pdev, imgchip);
+ platform_set_drvdata(pdev, chip);
pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -317,11 +317,9 @@ static int img_pwm_probe(struct platform_device *pdev)
do_div(val, clk_rate);
imgchip->min_period_ns = val;
- imgchip->chip.dev = &pdev->dev;
- imgchip->chip.ops = &img_pwm_ops;
- imgchip->chip.npwm = IMG_PWM_NPWM;
+ chip->ops = &img_pwm_ops;
- ret = pwmchip_add(&imgchip->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
goto err_suspend;
@@ -340,19 +338,20 @@ err_pm_disable:
static void img_pwm_remove(struct platform_device *pdev)
{
- struct img_pwm_chip *imgchip = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
img_pwm_runtime_suspend(&pdev->dev);
- pwmchip_remove(&imgchip->chip);
+ pwmchip_remove(chip);
}
#ifdef CONFIG_PM_SLEEP
static int img_pwm_suspend(struct device *dev)
{
- struct img_pwm_chip *imgchip = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
int i, ret;
if (pm_runtime_status_suspended(dev)) {
@@ -361,7 +360,7 @@ static int img_pwm_suspend(struct device *dev)
return ret;
}
- for (i = 0; i < imgchip->chip.npwm; i++)
+ for (i = 0; i < chip->npwm; i++)
imgchip->suspend_ch_cfg[i] = img_pwm_readl(imgchip,
PWM_CH_CFG(i));
@@ -374,7 +373,8 @@ static int img_pwm_suspend(struct device *dev)
static int img_pwm_resume(struct device *dev)
{
- struct img_pwm_chip *imgchip = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct img_pwm_chip *imgchip = to_img_pwm_chip(chip);
int ret;
int i;
@@ -382,13 +382,13 @@ static int img_pwm_resume(struct device *dev)
if (ret)
return ret;
- for (i = 0; i < imgchip->chip.npwm; i++)
+ for (i = 0; i < chip->npwm; i++)
img_pwm_writel(imgchip, PWM_CH_CFG(i),
imgchip->suspend_ch_cfg[i]);
img_pwm_writel(imgchip, PWM_CTRL_CFG, imgchip->suspend_ctrl_cfg);
- for (i = 0; i < imgchip->chip.npwm; i++)
+ for (i = 0; i < chip->npwm; i++)
if (imgchip->suspend_ctrl_cfg & BIT(i))
regmap_clear_bits(imgchip->periph_regs,
PERIP_PWM_PDM_CONTROL,
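
img (like fsl-ftm above) now stores the pwm_chip itself as driver data, so every PM callback first fetches the chip and then derives the private struct from it. A minimal sketch of a suspend handler under that scheme (foo naming assumed):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pwm.h>

struct foo_pwm {
	struct clk *clk;
};

static int foo_pwm_suspend(struct device *dev)
{
	/* drvdata now points at the chip, not at the private struct ... */
	struct pwm_chip *chip = dev_get_drvdata(dev);
	/* ... and the private struct hangs off the chip */
	struct foo_pwm *foo = pwmchip_get_drvdata(chip);

	clk_disable_unprepare(foo->clk);

	return 0;
}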
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index 9fc290e647e1..c50ddbac43c8 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -57,7 +57,6 @@
#define PWM_IMX_TPM_MOD_MOD GENMASK(PWM_IMX_TPM_MOD_WIDTH - 1, 0)
struct imx_tpm_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
struct mutex lock;
@@ -75,7 +74,7 @@ struct imx_tpm_pwm_param {
static inline struct imx_tpm_pwm_chip *
to_imx_tpm_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct imx_tpm_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -336,35 +335,42 @@ static const struct pwm_ops imx_tpm_pwm_ops = {
static int pwm_imx_tpm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct imx_tpm_pwm_chip *tpm;
+ struct clk *clk;
+ void __iomem *base;
int ret;
+ unsigned int npwm;
u32 val;
- tpm = devm_kzalloc(&pdev->dev, sizeof(*tpm), GFP_KERNEL);
- if (!tpm)
- return -ENOMEM;
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- platform_set_drvdata(pdev, tpm);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "failed to get PWM clock\n");
- tpm->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(tpm->base))
- return PTR_ERR(tpm->base);
+ /* get number of channels */
+ val = readl(base + PWM_IMX_TPM_PARAM);
+ npwm = FIELD_GET(PWM_IMX_TPM_PARAM_CHAN, val);
- tpm->clk = devm_clk_get_enabled(&pdev->dev, NULL);
- if (IS_ERR(tpm->clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(tpm->clk),
- "failed to get PWM clock\n");
+ chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*tpm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ tpm = to_imx_tpm_pwm_chip(chip);
+
+ platform_set_drvdata(pdev, tpm);
- tpm->chip.dev = &pdev->dev;
- tpm->chip.ops = &imx_tpm_pwm_ops;
+ tpm->base = base;
+ tpm->clk = clk;
- /* get number of channels */
- val = readl(tpm->base + PWM_IMX_TPM_PARAM);
- tpm->chip.npwm = FIELD_GET(PWM_IMX_TPM_PARAM_CHAN, val);
+ chip->ops = &imx_tpm_pwm_ops;
mutex_init(&tpm->lock);
- ret = devm_pwmchip_add(&pdev->dev, &tpm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
index d175d895f22a..1d2aae2d278f 100644
--- a/drivers/pwm/pwm-imx1.c
+++ b/drivers/pwm/pwm-imx1.c
@@ -28,10 +28,12 @@ struct pwm_imx1_chip {
struct clk *clk_ipg;
struct clk *clk_per;
void __iomem *mmio_base;
- struct pwm_chip chip;
};
-#define to_pwm_imx1_chip(chip) container_of(chip, struct pwm_imx1_chip, chip)
+static inline struct pwm_imx1_chip *to_pwm_imx1_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static int pwm_imx1_clk_prepare_enable(struct pwm_chip *chip)
{
@@ -156,11 +158,13 @@ MODULE_DEVICE_TABLE(of, pwm_imx1_dt_ids);
static int pwm_imx1_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct pwm_imx1_chip *imx;
- imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
- if (!imx)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*imx));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ imx = to_pwm_imx1_chip(chip);
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx->clk_ipg))
@@ -172,15 +176,13 @@ static int pwm_imx1_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_per),
"failed to get peripheral clock\n");
- imx->chip.ops = &pwm_imx1_ops;
- imx->chip.dev = &pdev->dev;
- imx->chip.npwm = 1;
+ chip->ops = &pwm_imx1_ops;
imx->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
- return devm_pwmchip_add(&pdev->dev, &imx->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
static struct platform_driver pwm_imx1_driver = {
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index 7d9bc43f12b0..e1412116ef65 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -83,7 +83,6 @@ struct pwm_imx27_chip {
struct clk *clk_ipg;
struct clk *clk_per;
void __iomem *mmio_base;
- struct pwm_chip chip;
/*
* The driver cannot read the current duty cycle from the hardware if
@@ -93,7 +92,10 @@ struct pwm_imx27_chip {
unsigned int duty_cycle;
};
-#define to_pwm_imx27_chip(chip) container_of(chip, struct pwm_imx27_chip, chip)
+static inline struct pwm_imx27_chip *to_pwm_imx27_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static int pwm_imx27_clk_prepare_enable(struct pwm_imx27_chip *imx)
{
@@ -145,7 +147,7 @@ static int pwm_imx27_get_state(struct pwm_chip *chip,
state->polarity = PWM_POLARITY_INVERSED;
break;
default:
- dev_warn(chip->dev, "can't set polarity, output disconnected");
+ dev_warn(pwmchip_parent(chip), "can't set polarity, output disconnected");
}
prescaler = MX3_PWMCR_PRESCALER_GET(val);
@@ -177,7 +179,7 @@ static int pwm_imx27_get_state(struct pwm_chip *chip,
static void pwm_imx27_sw_reset(struct pwm_chip *chip)
{
struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
- struct device *dev = chip->dev;
+ struct device *dev = pwmchip_parent(chip);
int wait_count = 0;
u32 cr;
@@ -196,7 +198,7 @@ static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip,
struct pwm_device *pwm)
{
struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
- struct device *dev = chip->dev;
+ struct device *dev = pwmchip_parent(chip);
unsigned int period_ms;
int fifoav;
u32 sr;
@@ -204,8 +206,8 @@ static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip,
sr = readl(imx->mmio_base + MX3_PWMSR);
fifoav = FIELD_GET(MX3_PWMSR_FIFOAV, sr);
if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) {
- period_ms = DIV_ROUND_UP_ULL(pwm_get_period(pwm),
- NSEC_PER_MSEC);
+ period_ms = DIV_ROUND_UP_ULL(pwm->state.period,
+ NSEC_PER_MSEC);
msleep(period_ms);
sr = readl(imx->mmio_base + MX3_PWMSR);
@@ -219,14 +221,11 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
{
unsigned long period_cycles, duty_cycles, prescale;
struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
- struct pwm_state cstate;
unsigned long long c;
unsigned long long clkrate;
int ret;
u32 cr;
- pwm_get_state(pwm, &cstate);
-
clkrate = clk_get_rate(imx->clk_per);
c = clkrate * state->period;
@@ -254,7 +253,7 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* Wait for a free FIFO slot if the PWM is already enabled, and flush
* the FIFO if the PWM was disabled and is about to be enabled.
*/
- if (cstate.enabled) {
+ if (pwm->state.enabled) {
pwm_imx27_wait_fifo_slot(chip, pwm);
} else {
ret = pwm_imx27_clk_prepare_enable(imx);
@@ -306,13 +305,15 @@ MODULE_DEVICE_TABLE(of, pwm_imx27_dt_ids);
static int pwm_imx27_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct pwm_imx27_chip *imx;
int ret;
u32 pwmcr;
- imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
- if (imx == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*imx));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ imx = to_pwm_imx27_chip(chip);
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(imx->clk_ipg))
@@ -324,9 +325,7 @@ static int pwm_imx27_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_per),
"failed to get peripheral clock\n");
- imx->chip.ops = &pwm_imx27_ops;
- imx->chip.dev = &pdev->dev;
- imx->chip.npwm = 1;
+ chip->ops = &pwm_imx27_ops;
imx->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx->mmio_base))
@@ -341,7 +340,7 @@ static int pwm_imx27_probe(struct platform_device *pdev)
if (!(pwmcr & MX3_PWMCR_EN))
pwm_imx27_clk_disable_unprepare(imx);
- return devm_pwmchip_add(&pdev->dev, &imx->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
static struct platform_driver imx_pwm_driver = {
diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c
index 54ecae7f937e..f9cc7c17c8f0 100644
--- a/drivers/pwm/pwm-intel-lgm.c
+++ b/drivers/pwm/pwm-intel-lgm.c
@@ -42,14 +42,13 @@
#define LGM_PWM_PERIOD_2WIRE_NS (40 * NSEC_PER_MSEC)
struct lgm_pwm_chip {
- struct pwm_chip chip;
struct regmap *regmap;
u32 period;
};
static inline struct lgm_pwm_chip *to_lgm_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct lgm_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int lgm_pwm_enable(struct pwm_chip *chip, bool enable)
@@ -168,14 +167,16 @@ static int lgm_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct reset_control *rst;
+ struct pwm_chip *chip;
struct lgm_pwm_chip *pc;
void __iomem *io_base;
struct clk *clk;
int ret;
- pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, 1, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_lgm_pwm_chip(chip);
io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
@@ -203,13 +204,11 @@ static int lgm_pwm_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "cannot deassert reset control\n");
- pc->chip.dev = dev;
- pc->chip.ops = &lgm_pwm_ops;
- pc->chip.npwm = 1;
+ chip->ops = &lgm_pwm_ops;
lgm_pwm_init(pc);
- ret = devm_pwmchip_add(dev, &pc->chip);
+ ret = devm_pwmchip_add(dev, chip);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 378ab036edfe..13e5e138c8e9 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -34,12 +34,17 @@
struct iqs620_pwm_private {
struct iqs62x_core *iqs62x;
- struct pwm_chip chip;
+ struct device *dev;
struct notifier_block notifier;
struct mutex lock;
unsigned int duty_scale;
};
+static inline struct iqs620_pwm_private *iqs620_pwm_from_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
+
static int iqs620_pwm_init(struct iqs620_pwm_private *iqs620_pwm,
unsigned int duty_scale)
{
@@ -73,7 +78,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->period < IQS620_PWM_PERIOD_NS)
return -EINVAL;
- iqs620_pwm = container_of(chip, struct iqs620_pwm_private, chip);
+ iqs620_pwm = iqs620_pwm_from_chip(chip);
/*
* The duty cycle generated by the device is calculated as follows:
@@ -109,7 +114,7 @@ static int iqs620_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct iqs620_pwm_private *iqs620_pwm;
- iqs620_pwm = container_of(chip, struct iqs620_pwm_private, chip);
+ iqs620_pwm = iqs620_pwm_from_chip(chip);
mutex_lock(&iqs620_pwm->lock);
@@ -155,7 +160,7 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier,
mutex_unlock(&iqs620_pwm->lock);
if (ret) {
- dev_err(iqs620_pwm->chip.dev,
+ dev_err(iqs620_pwm->dev,
"Failed to re-initialize device: %d\n", ret);
return NOTIFY_BAD;
}
@@ -176,21 +181,24 @@ static void iqs620_pwm_notifier_unregister(void *context)
ret = blocking_notifier_chain_unregister(&iqs620_pwm->iqs62x->nh,
&iqs620_pwm->notifier);
if (ret)
- dev_err(iqs620_pwm->chip.dev,
+ dev_err(iqs620_pwm->dev,
"Failed to unregister notifier: %d\n", ret);
}
static int iqs620_pwm_probe(struct platform_device *pdev)
{
struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent);
+ struct pwm_chip *chip;
struct iqs620_pwm_private *iqs620_pwm;
unsigned int val;
int ret;
- iqs620_pwm = devm_kzalloc(&pdev->dev, sizeof(*iqs620_pwm), GFP_KERNEL);
- if (!iqs620_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*iqs620_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ iqs620_pwm = iqs620_pwm_from_chip(chip);
+ iqs620_pwm->dev = &pdev->dev;
iqs620_pwm->iqs62x = iqs62x;
ret = regmap_read(iqs62x->regmap, IQS620_PWR_SETTINGS, &val);
@@ -205,9 +213,7 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
iqs620_pwm->duty_scale = val + 1;
}
- iqs620_pwm->chip.dev = &pdev->dev;
- iqs620_pwm->chip.ops = &iqs620_pwm_ops;
- iqs620_pwm->chip.npwm = 1;
+ chip->ops = &iqs620_pwm_ops;
mutex_init(&iqs620_pwm->lock);
@@ -225,7 +231,7 @@ static int iqs620_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_pwmchip_add(&pdev->dev, &iqs620_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
dev_err(&pdev->dev, "Failed to add device: %d\n", ret);
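
iqs620a gains an explicit struct device pointer because its notifier runs outside any pwm_ops callback, where no chip pointer is at hand; stashing the parent at probe time replaces the old chip.dev access. A sketch of that stash (foo naming assumed):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

struct foo_pwm {
	struct device *dev;	/* for logging outside pwm_ops callbacks */
};

static int foo_pwm_probe(struct device *dev)
{
	struct pwm_chip *chip;
	struct foo_pwm *foo;

	chip = devm_pwmchip_alloc(dev, 1, sizeof(*foo));
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	foo = pwmchip_get_drvdata(chip);
	foo->dev = dev;		/* usable later, e.g. from a notifier */

	/* ops setup and registration elided */
	return 0;
}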
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 3933418e551b..da4bf543d357 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -25,23 +25,21 @@ struct soc_info {
};
struct jz4740_pwm_chip {
- struct pwm_chip chip;
struct regmap *map;
struct clk *clk[];
};
static inline struct jz4740_pwm_chip *to_jz4740(struct pwm_chip *chip)
{
- return container_of(chip, struct jz4740_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
-static bool jz4740_pwm_can_use_chn(struct jz4740_pwm_chip *jz,
- unsigned int channel)
+static bool jz4740_pwm_can_use_chn(struct pwm_chip *chip, unsigned int channel)
{
/* Enable all TCU channels for PWM use by default except channels 0/1 */
- u32 pwm_channels_mask = GENMASK(jz->chip.npwm - 1, 2);
+ u32 pwm_channels_mask = GENMASK(chip->npwm - 1, 2);
- device_property_read_u32(jz->chip.dev->parent,
+ device_property_read_u32(pwmchip_parent(chip)->parent,
"ingenic,pwm-channels-mask",
&pwm_channels_mask);
@@ -55,14 +53,15 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
char name[16];
int err;
- if (!jz4740_pwm_can_use_chn(jz, pwm->hwpwm))
+ if (!jz4740_pwm_can_use_chn(chip, pwm->hwpwm))
return -EBUSY;
snprintf(name, sizeof(name), "timer%u", pwm->hwpwm);
- clk = clk_get(chip->dev, name);
+ clk = clk_get(pwmchip_parent(chip), name);
if (IS_ERR(clk)) {
- dev_err(chip->dev, "error %pe: Failed to get clock\n", clk);
+ dev_err(pwmchip_parent(chip),
+ "error %pe: Failed to get clock\n", clk);
return PTR_ERR(clk);
}
@@ -150,7 +149,7 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
*/
rate = clk_round_rate(clk, tmp);
if (rate < 0) {
- dev_err(chip->dev, "Unable to round rate: %ld\n", rate);
+ dev_err(pwmchip_parent(chip), "Unable to round rate: %ld\n", rate);
return rate;
}
@@ -171,7 +170,7 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
err = clk_set_rate(clk, rate);
if (err) {
- dev_err(chip->dev, "Unable to set rate: %d\n", err);
+ dev_err(pwmchip_parent(chip), "Unable to set rate: %d\n", err);
return err;
}
@@ -224,6 +223,7 @@ static const struct pwm_ops jz4740_pwm_ops = {
static int jz4740_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pwm_chip *chip;
struct jz4740_pwm_chip *jz;
const struct soc_info *info;
@@ -231,10 +231,10 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
if (!info)
return -EINVAL;
- jz = devm_kzalloc(dev, struct_size(jz, clk, info->num_pwms),
- GFP_KERNEL);
- if (!jz)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, info->num_pwms, struct_size(jz, clk, info->num_pwms));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ jz = to_jz4740(chip);
jz->map = device_node_to_regmap(dev->parent->of_node);
if (IS_ERR(jz->map)) {
@@ -242,11 +242,9 @@ static int jz4740_pwm_probe(struct platform_device *pdev)
return PTR_ERR(jz->map);
}
- jz->chip.dev = dev;
- jz->chip.ops = &jz4740_pwm_ops;
- jz->chip.npwm = info->num_pwms;
+ chip->ops = &jz4740_pwm_ops;
- return devm_pwmchip_add(dev, &jz->chip);
+ return devm_pwmchip_add(dev, chip);
}
static const struct soc_info jz4740_soc_info = {
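
jz4740 shows that private structs ending in a flexible array survive the conversion unchanged: devm_pwmchip_alloc() only wants a byte count, so struct_size() sizes the trailing clk[] per SoC. A sketch of that sizing (foo naming hypothetical):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/overflow.h>
#include <linux/pwm.h>
#include <linux/regmap.h>

struct foo_pwm {
	struct regmap *map;
	struct clk *clk[];	/* one clock per channel */
};

static int foo_pwm_probe(struct device *dev, unsigned int num_pwms)
{
	struct pwm_chip *chip;
	struct foo_pwm *foo;

	/* struct_size() accounts for the flexible clk[] tail */
	chip = devm_pwmchip_alloc(dev, num_pwms,
				  struct_size(foo, clk, num_pwms));
	if (IS_ERR(chip))
		return PTR_ERR(chip);
	foo = pwmchip_get_drvdata(chip);

	foo->map = dev_get_regmap(dev->parent, NULL);

	/* per-channel clock lookup, ops setup and registration elided */
	return 0;
}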
diff --git a/drivers/pwm/pwm-keembay.c b/drivers/pwm/pwm-keembay.c
index ac824ecc3f64..35b641f3f6ed 100644
--- a/drivers/pwm/pwm-keembay.c
+++ b/drivers/pwm/pwm-keembay.c
@@ -36,7 +36,6 @@
#define KMB_PWM_HIGHLOW_OFFSET(ch) (0x20 + 4 * (ch))
struct keembay_pwm {
- struct pwm_chip chip;
struct device *dev;
struct clk *clk;
void __iomem *base;
@@ -44,7 +43,7 @@ struct keembay_pwm {
static inline struct keembay_pwm *to_keembay_pwm_dev(struct pwm_chip *chip)
{
- return container_of(chip, struct keembay_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static void keembay_clk_unprepare(void *data)
@@ -185,12 +184,14 @@ static const struct pwm_ops keembay_pwm_ops = {
static int keembay_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pwm_chip *chip;
struct keembay_pwm *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, KMB_TOTAL_PWM_CHANNELS, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = to_keembay_pwm_dev(chip);
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk))
@@ -204,11 +205,9 @@ static int keembay_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->chip.dev = dev;
- priv->chip.ops = &keembay_pwm_ops;
- priv->chip.npwm = KMB_TOTAL_PWM_CHANNELS;
+ chip->ops = &keembay_pwm_ops;
- ret = devm_pwmchip_add(dev, &priv->chip);
+ ret = devm_pwmchip_add(dev, chip);
if (ret)
return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 32350a357278..61189cea1046 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -20,7 +20,6 @@
#define LP3943_MAX_PERIOD 1600000
struct lp3943_pwm {
- struct pwm_chip chip;
struct lp3943 *lp3943;
struct lp3943_platform_data *pdata;
struct lp3943_pwm_map pwm_map[LP3943_NUM_PWMS];
@@ -28,7 +27,7 @@ struct lp3943_pwm {
static inline struct lp3943_pwm *to_lp3943_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct lp3943_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static struct lp3943_pwm_map *
@@ -273,12 +272,14 @@ static int lp3943_pwm_parse_dt(struct device *dev,
static int lp3943_pwm_probe(struct platform_device *pdev)
{
struct lp3943 *lp3943 = dev_get_drvdata(pdev->dev.parent);
+ struct pwm_chip *chip;
struct lp3943_pwm *lp3943_pwm;
int ret;
- lp3943_pwm = devm_kzalloc(&pdev->dev, sizeof(*lp3943_pwm), GFP_KERNEL);
- if (!lp3943_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, LP3943_NUM_PWMS, sizeof(*lp3943_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ lp3943_pwm = to_lp3943_pwm(chip);
lp3943_pwm->pdata = lp3943->pdata;
if (!lp3943_pwm->pdata) {
@@ -292,11 +293,9 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
}
lp3943_pwm->lp3943 = lp3943;
- lp3943_pwm->chip.dev = &pdev->dev;
- lp3943_pwm->chip.ops = &lp3943_pwm_ops;
- lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
+ chip->ops = &lp3943_pwm_ops;
- return devm_pwmchip_add(&pdev->dev, &lp3943_pwm->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index fe891fa71a1d..04b76d257fd8 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -92,8 +92,6 @@ struct lpc18xx_pwm_data {
};
struct lpc18xx_pwm_chip {
- struct device *dev;
- struct pwm_chip chip;
void __iomem *base;
struct clk *pwm_clk;
unsigned long clk_rate;
@@ -110,7 +108,7 @@ struct lpc18xx_pwm_chip {
static inline struct lpc18xx_pwm_chip *
to_lpc18xx_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct lpc18xx_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline void lpc18xx_pwm_writel(struct lpc18xx_pwm_chip *lpc18xx_pwm,
@@ -198,7 +196,7 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (period_ns < lpc18xx_pwm->min_period_ns ||
period_ns > lpc18xx_pwm->max_period_ns) {
- dev_err(chip->dev, "period %d not in range\n", period_ns);
+ dev_err(pwmchip_parent(chip), "period %d not in range\n", period_ns);
return -ERANGE;
}
@@ -214,7 +212,7 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
*/
if (requested_events > 2 && lpc18xx_pwm->period_ns != period_ns &&
lpc18xx_pwm->period_ns) {
- dev_err(chip->dev, "conflicting period requested for PWM %u\n",
+ dev_err(pwmchip_parent(chip), "conflicting period requested for PWM %u\n",
pwm->hwpwm);
mutex_unlock(&lpc18xx_pwm->period_lock);
return -EBUSY;
@@ -289,7 +287,7 @@ static int lpc18xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
LPC18XX_PWM_EVENT_MAX);
if (event >= LPC18XX_PWM_EVENT_MAX) {
- dev_err(lpc18xx_pwm->dev,
+ dev_err(pwmchip_parent(chip),
"maximum number of simultaneous channels reached\n");
return -EBUSY;
}
@@ -349,16 +347,15 @@ MODULE_DEVICE_TABLE(of, lpc18xx_pwm_of_match);
static int lpc18xx_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct lpc18xx_pwm_chip *lpc18xx_pwm;
int ret;
u64 val;
- lpc18xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*lpc18xx_pwm),
- GFP_KERNEL);
- if (!lpc18xx_pwm)
- return -ENOMEM;
-
- lpc18xx_pwm->dev = &pdev->dev;
+ chip = devm_pwmchip_alloc(&pdev->dev, LPC18XX_NUM_PWMS, sizeof(*lpc18xx_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
lpc18xx_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc18xx_pwm->base))
@@ -389,9 +386,7 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
lpc18xx_pwm->min_period_ns = DIV_ROUND_UP(NSEC_PER_SEC,
lpc18xx_pwm->clk_rate);
- lpc18xx_pwm->chip.dev = &pdev->dev;
- lpc18xx_pwm->chip.ops = &lpc18xx_pwm_ops;
- lpc18xx_pwm->chip.npwm = LPC18XX_NUM_PWMS;
+ chip->ops = &lpc18xx_pwm_ops;
/* SCT counter must be in unify (32 bit) mode */
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CONFIG,
@@ -423,21 +418,22 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
val |= LPC18XX_PWM_PRE(0);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val);
- ret = pwmchip_add(&lpc18xx_pwm->chip);
+ ret = pwmchip_add(chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "pwmchip_add failed\n");
- platform_set_drvdata(pdev, lpc18xx_pwm);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static void lpc18xx_pwm_remove(struct platform_device *pdev)
{
- struct lpc18xx_pwm_chip *lpc18xx_pwm = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
u32 val;
- pwmchip_remove(&lpc18xx_pwm->chip);
+ pwmchip_remove(chip);
val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL,
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 1d9f3e7a2434..c748537e57d1 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
struct lpc32xx_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
};
@@ -23,8 +22,10 @@ struct lpc32xx_pwm_chip {
#define PWM_ENABLE BIT(31)
#define PWM_PIN_LEVEL BIT(30)
-#define to_lpc32xx_pwm_chip(_chip) \
- container_of(_chip, struct lpc32xx_pwm_chip, chip)
+static inline struct lpc32xx_pwm_chip *to_lpc32xx_pwm_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
@@ -119,13 +120,15 @@ static const struct pwm_ops lpc32xx_pwm_ops = {
static int lpc32xx_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct lpc32xx_pwm_chip *lpc32xx;
int ret;
u32 val;
- lpc32xx = devm_kzalloc(&pdev->dev, sizeof(*lpc32xx), GFP_KERNEL);
- if (!lpc32xx)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*lpc32xx));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ lpc32xx = to_lpc32xx_pwm_chip(chip);
lpc32xx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc32xx->base))
@@ -135,16 +138,14 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
if (IS_ERR(lpc32xx->clk))
return PTR_ERR(lpc32xx->clk);
- lpc32xx->chip.dev = &pdev->dev;
- lpc32xx->chip.ops = &lpc32xx_pwm_ops;
- lpc32xx->chip.npwm = 1;
+ chip->ops = &lpc32xx_pwm_ops;
/* If PWM is disabled, configure the output to the default value */
val = readl(lpc32xx->base);
val &= ~PWM_PIN_LEVEL;
writel(val, lpc32xx->base);
- ret = devm_pwmchip_add(&pdev->dev, &lpc32xx->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret);
return ret;
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
index b4134bee2863..25045c229520 100644
--- a/drivers/pwm/pwm-lpss-pci.c
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -18,7 +18,7 @@ static int pwm_lpss_probe_pci(struct pci_dev *pdev,
const struct pci_device_id *id)
{
const struct pwm_lpss_boardinfo *info;
- struct pwm_lpss_chip *lpwm;
+ struct pwm_chip *chip;
int err;
err = pcim_enable_device(pdev);
@@ -30,11 +30,9 @@ static int pwm_lpss_probe_pci(struct pci_dev *pdev,
return err;
info = (struct pwm_lpss_boardinfo *)id->driver_data;
- lpwm = devm_pwm_lpss_probe(&pdev->dev, pcim_iomap_table(pdev)[0], info);
- if (IS_ERR(lpwm))
- return PTR_ERR(lpwm);
-
- pci_set_drvdata(pdev, lpwm);
+ chip = devm_pwm_lpss_probe(&pdev->dev, pcim_iomap_table(pdev)[0], info);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
pm_runtime_put(&pdev->dev);
pm_runtime_allow(&pdev->dev);
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index 319809aac2c4..dbc9f5b17bdc 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -20,7 +20,7 @@
static int pwm_lpss_probe_platform(struct platform_device *pdev)
{
const struct pwm_lpss_boardinfo *info;
- struct pwm_lpss_chip *lpwm;
+ struct pwm_chip *chip;
void __iomem *base;
info = device_get_match_data(&pdev->dev);
@@ -31,11 +31,9 @@ static int pwm_lpss_probe_platform(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- lpwm = devm_pwm_lpss_probe(&pdev->dev, base, info);
- if (IS_ERR(lpwm))
- return PTR_ERR(lpwm);
-
- platform_set_drvdata(pdev, lpwm);
+ chip = devm_pwm_lpss_probe(&pdev->dev, base, info);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
/*
* On Cherry Trail devices the GFX0._PS0 AML checks if the controller
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index a6ea3ce7e019..867e2bc8c601 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(pwm_lpss_tng_info);
static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
{
- return container_of(chip, struct pwm_lpss_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline u32 pwm_lpss_read(const struct pwm_device *pwm)
@@ -106,7 +106,7 @@ static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
*/
err = readl_poll_timeout(addr, val, !(val & PWM_SW_UPDATE), 40, ms);
if (err)
- dev_err(pwm->chip->dev, "PWM_SW_UPDATE was not cleared\n");
+ dev_err(pwmchip_parent(pwm->chip), "PWM_SW_UPDATE was not cleared\n");
return err;
}
@@ -114,7 +114,7 @@ static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
static inline int pwm_lpss_is_updating(struct pwm_device *pwm)
{
if (pwm_lpss_read(pwm) & PWM_SW_UPDATE) {
- dev_err(pwm->chip->dev, "PWM_SW_UPDATE is still set, skipping update\n");
+ dev_err(pwmchip_parent(pwm->chip), "PWM_SW_UPDATE is still set, skipping update\n");
return -EBUSY;
}
@@ -190,16 +190,16 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->enabled) {
if (!pwm_is_enabled(pwm)) {
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
ret = pwm_lpss_prepare_enable(lpwm, pwm, state);
if (ret)
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
} else {
ret = pwm_lpss_prepare_enable(lpwm, pwm, state);
}
} else if (pwm_is_enabled(pwm)) {
pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
}
return ret;
@@ -213,7 +213,7 @@ static int pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long long base_unit, freq, on_time_div;
u32 ctrl;
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
base_unit_range = BIT(lpwm->info->base_unit_bits);
@@ -235,7 +235,7 @@ static int pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
state->polarity = PWM_POLARITY_NORMAL;
state->enabled = !!(ctrl & PWM_ENABLE);
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
return 0;
}
@@ -245,10 +245,11 @@ static const struct pwm_ops pwm_lpss_ops = {
.get_state = pwm_lpss_get_state,
};
-struct pwm_lpss_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base,
- const struct pwm_lpss_boardinfo *info)
+struct pwm_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base,
+ const struct pwm_lpss_boardinfo *info)
{
struct pwm_lpss_chip *lpwm;
+ struct pwm_chip *chip;
unsigned long c;
int i, ret;
u32 ctrl;
@@ -256,9 +257,10 @@ struct pwm_lpss_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base
if (WARN_ON(info->npwm > LPSS_MAX_PWMS))
return ERR_PTR(-ENODEV);
- lpwm = devm_kzalloc(dev, sizeof(*lpwm), GFP_KERNEL);
- if (!lpwm)
- return ERR_PTR(-ENOMEM);
+ chip = devm_pwmchip_alloc(dev, info->npwm, sizeof(*lpwm));
+ if (IS_ERR(chip))
+ return chip;
+ lpwm = to_lpwm(chip);
lpwm->regs = base;
lpwm->info = info;
@@ -267,23 +269,21 @@ struct pwm_lpss_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base
if (!c)
return ERR_PTR(-EINVAL);
- lpwm->chip.dev = dev;
- lpwm->chip.ops = &pwm_lpss_ops;
- lpwm->chip.npwm = info->npwm;
+ chip->ops = &pwm_lpss_ops;
- ret = devm_pwmchip_add(dev, &lpwm->chip);
+ ret = devm_pwmchip_add(dev, chip);
if (ret) {
dev_err(dev, "failed to add PWM chip: %d\n", ret);
return ERR_PTR(ret);
}
for (i = 0; i < lpwm->info->npwm; i++) {
- ctrl = pwm_lpss_read(&lpwm->chip.pwms[i]);
+ ctrl = pwm_lpss_read(&chip->pwms[i]);
if (ctrl & PWM_ENABLE)
pm_runtime_get(dev);
}
- return lpwm;
+ return chip;
}
EXPORT_SYMBOL_GPL(devm_pwm_lpss_probe);
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
index bf841250385f..b5267ab5193b 100644
--- a/drivers/pwm/pwm-lpss.h
+++ b/drivers/pwm/pwm-lpss.h
@@ -18,7 +18,6 @@
#define LPSS_MAX_PWMS 4
struct pwm_lpss_chip {
- struct pwm_chip chip;
void __iomem *regs;
const struct pwm_lpss_boardinfo *info;
};
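
The hunks above show the conversion pattern that repeats for every driver in this series: the private struct no longer embeds struct pwm_chip, the PWM core allocates chip and private data together, and the container_of() helper becomes a pwmchip_get_drvdata() wrapper. A minimal sketch of the resulting driver shape, using hypothetical foo_* names (illustrative only, not taken from the patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

struct foo_pwm {
	/* private data only -- no embedded struct pwm_chip anymore */
	void __iomem *base;
	struct clk *clk;
};

static inline struct foo_pwm *to_foo_pwm(struct pwm_chip *chip)
{
	/* replaces container_of(chip, struct foo_pwm, chip) */
	return pwmchip_get_drvdata(chip);
}

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	struct foo_pwm *foo = to_foo_pwm(chip);

	/* program the hardware according to *state here */
	if (!state->enabled)
		writel(0, foo->base);

	return 0;
}

static const struct pwm_ops foo_pwm_ops = {
	.apply = foo_pwm_apply,
};

static int foo_pwm_probe(struct platform_device *pdev)
{
	struct pwm_chip *chip;
	struct foo_pwm *foo;

	/* one allocation covers the chip and the private data */
	chip = devm_pwmchip_alloc(&pdev->dev, 4, sizeof(*foo));
	if (IS_ERR(chip))
		return PTR_ERR(chip);
	foo = to_foo_pwm(chip);

	foo->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(foo->base))
		return PTR_ERR(foo->base);

	/* .dev and .npwm are filled in by the core; only .ops remains */
	chip->ops = &foo_pwm_ops;

	return devm_pwmchip_add(&pdev->dev, chip);
}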
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index 17d290f847af..19a87873ad60 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -42,16 +42,13 @@ struct pwm_mediatek_of_data {
/**
* struct pwm_mediatek_chip - struct representing PWM chip
- * @chip: linux PWM chip representation
* @regs: base address of PWM chip
* @clk_top: the top clock generator
* @clk_main: the clock used by PWM core
* @clk_pwms: the clock used by each PWM channel
- * @clk_freq: the fix clock frequency of legacy MIPS SoC
* @soc: pointer to chip's platform data
*/
struct pwm_mediatek_chip {
- struct pwm_chip chip;
void __iomem *regs;
struct clk *clk_top;
struct clk *clk_main;
@@ -70,7 +67,7 @@ static const unsigned int mtk_pwm_reg_offset_v2[] = {
static inline struct pwm_mediatek_chip *
to_pwm_mediatek_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct pwm_mediatek_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int pwm_mediatek_clk_enable(struct pwm_chip *chip,
@@ -150,7 +147,7 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (clkdiv > PWM_CLK_DIV_MAX) {
pwm_mediatek_clk_disable(chip, pwm);
- dev_err(chip->dev, "period of %d ns not supported\n", period_ns);
+ dev_err(pwmchip_parent(chip), "period of %d ns not supported\n", period_ns);
return -EINVAL;
}
@@ -233,21 +230,26 @@ static const struct pwm_ops pwm_mediatek_ops = {
static int pwm_mediatek_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct pwm_mediatek_chip *pc;
+ const struct pwm_mediatek_of_data *soc;
unsigned int i;
int ret;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ soc = of_device_get_match_data(&pdev->dev);
+
+ chip = devm_pwmchip_alloc(&pdev->dev, soc->num_pwms, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_pwm_mediatek_chip(chip);
- pc->soc = of_device_get_match_data(&pdev->dev);
+ pc->soc = soc;
pc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
- pc->clk_pwms = devm_kmalloc_array(&pdev->dev, pc->soc->num_pwms,
+ pc->clk_pwms = devm_kmalloc_array(&pdev->dev, soc->num_pwms,
sizeof(*pc->clk_pwms), GFP_KERNEL);
if (!pc->clk_pwms)
return -ENOMEM;
@@ -262,7 +264,7 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk_main),
"Failed to get main clock\n");
- for (i = 0; i < pc->soc->num_pwms; i++) {
+ for (i = 0; i < soc->num_pwms; i++) {
char name[8];
snprintf(name, sizeof(name), "pwm%d", i + 1);
@@ -273,11 +275,9 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
"Failed to get %s clock\n", name);
}
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &pwm_mediatek_ops;
- pc->chip.npwm = pc->soc->num_pwms;
+ chip->ops = &pwm_mediatek_ops;
- ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
@@ -340,6 +340,13 @@ static const struct pwm_mediatek_of_data mt7986_pwm_data = {
.reg_offset = mtk_pwm_reg_offset_v1,
};
+static const struct pwm_mediatek_of_data mt7988_pwm_data = {
+ .num_pwms = 8,
+ .pwm45_fixup = false,
+ .has_ck_26m_sel = false,
+ .reg_offset = mtk_pwm_reg_offset_v2,
+};
+
static const struct pwm_mediatek_of_data mt8183_pwm_data = {
.num_pwms = 4,
.pwm45_fixup = false,
@@ -370,6 +377,7 @@ static const struct of_device_id pwm_mediatek_of_match[] = {
{ .compatible = "mediatek,mt7629-pwm", .data = &mt7629_pwm_data },
{ .compatible = "mediatek,mt7981-pwm", .data = &mt7981_pwm_data },
{ .compatible = "mediatek,mt7986-pwm", .data = &mt7986_pwm_data },
+ { .compatible = "mediatek,mt7988-pwm", .data = &mt7988_pwm_data },
{ .compatible = "mediatek,mt8183-pwm", .data = &mt8183_pwm_data },
{ .compatible = "mediatek,mt8365-pwm", .data = &mt8365_pwm_data },
{ .compatible = "mediatek,mt8516-pwm", .data = &mt8516_pwm_data },
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 2971bbf3b5e7..a02fdbc61256 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -60,7 +60,7 @@
#define MISC_A_EN BIT(0)
#define MESON_NUM_PWMS 2
-#define MESON_MAX_MUX_PARENTS 4
+#define MESON_NUM_MUX_PARENTS 4
static struct meson_pwm_channel_data {
u8 reg_offset;
@@ -97,12 +97,10 @@ struct meson_pwm_channel {
};
struct meson_pwm_data {
- const char * const *parent_names;
- unsigned int num_parents;
+ const char *const parent_names[MESON_NUM_MUX_PARENTS];
};
struct meson_pwm {
- struct pwm_chip chip;
const struct meson_pwm_data *data;
struct meson_pwm_channel channels[MESON_NUM_PWMS];
void __iomem *base;
@@ -115,14 +113,14 @@ struct meson_pwm {
static inline struct meson_pwm *to_meson_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct meson_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
- struct device *dev = chip->dev;
+ struct device *dev = pwmchip_parent(chip);
int err;
err = clk_prepare_enable(channel->clk);
@@ -143,9 +141,10 @@ static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
clk_disable_unprepare(channel->clk);
}
-static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
+static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
+ struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
unsigned int cnt, duty_cnt;
unsigned long fin_freq;
@@ -169,19 +168,19 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
fin_freq = clk_round_rate(channel->clk, freq);
if (fin_freq == 0) {
- dev_err(meson->chip.dev, "invalid source clock frequency\n");
+ dev_err(pwmchip_parent(chip), "invalid source clock frequency\n");
return -EINVAL;
}
- dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq);
+ dev_dbg(pwmchip_parent(chip), "fin_freq: %lu Hz\n", fin_freq);
cnt = div_u64(fin_freq * period, NSEC_PER_SEC);
if (cnt > 0xffff) {
- dev_err(meson->chip.dev, "unable to get period cnt\n");
+ dev_err(pwmchip_parent(chip), "unable to get period cnt\n");
return -EINVAL;
}
- dev_dbg(meson->chip.dev, "period=%llu cnt=%u\n", period, cnt);
+ dev_dbg(pwmchip_parent(chip), "period=%llu cnt=%u\n", period, cnt);
if (duty == period) {
channel->hi = cnt;
@@ -192,7 +191,7 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
} else {
duty_cnt = div_u64(fin_freq * duty, NSEC_PER_SEC);
- dev_dbg(meson->chip.dev, "duty=%llu duty_cnt=%u\n", duty, duty_cnt);
+ dev_dbg(pwmchip_parent(chip), "duty=%llu duty_cnt=%u\n", duty, duty_cnt);
channel->hi = duty_cnt;
channel->lo = cnt - duty_cnt;
@@ -203,8 +202,9 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
return 0;
}
-static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm)
+static void meson_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
struct meson_pwm_channel_data *channel_data;
unsigned long flags;
@@ -215,7 +215,7 @@ static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm)
err = clk_set_rate(channel->clk, channel->rate);
if (err)
- dev_err(meson->chip.dev, "setting clock rate failed\n");
+ dev_err(pwmchip_parent(chip), "setting clock rate failed\n");
spin_lock_irqsave(&meson->lock, flags);
@@ -230,8 +230,9 @@ static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm)
spin_unlock_irqrestore(&meson->lock, flags);
}
-static void meson_pwm_disable(struct meson_pwm *meson, struct pwm_device *pwm)
+static void meson_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct meson_pwm *meson = to_meson_pwm(chip);
unsigned long flags;
u32 value;
@@ -269,16 +270,16 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
channel->hi = ~0;
channel->lo = 0;
- meson_pwm_enable(meson, pwm);
+ meson_pwm_enable(chip, pwm);
} else {
- meson_pwm_disable(meson, pwm);
+ meson_pwm_disable(chip, pwm);
}
} else {
- err = meson_pwm_calc(meson, pwm, state);
+ err = meson_pwm_calc(chip, pwm, state);
if (err < 0)
return err;
- meson_pwm_enable(meson, pwm);
+ meson_pwm_enable(chip, pwm);
}
return 0;
@@ -337,62 +338,32 @@ static const struct pwm_ops meson_pwm_ops = {
.get_state = meson_pwm_get_state,
};
-static const char * const pwm_meson8b_parent_names[] = {
- "xtal", NULL, "fclk_div4", "fclk_div3"
-};
-
static const struct meson_pwm_data pwm_meson8b_data = {
- .parent_names = pwm_meson8b_parent_names,
- .num_parents = ARRAY_SIZE(pwm_meson8b_parent_names),
+ .parent_names = { "xtal", NULL, "fclk_div4", "fclk_div3" },
};
/*
 * Only the first 2 inputs of the GXBB AO PWMs are valid.
 * The last 2 are grounded.
*/
-static const char * const pwm_gxbb_ao_parent_names[] = {
- "xtal", "clk81"
-};
-
static const struct meson_pwm_data pwm_gxbb_ao_data = {
- .parent_names = pwm_gxbb_ao_parent_names,
- .num_parents = ARRAY_SIZE(pwm_gxbb_ao_parent_names),
-};
-
-static const char * const pwm_axg_ee_parent_names[] = {
- "xtal", "fclk_div5", "fclk_div4", "fclk_div3"
+ .parent_names = { "xtal", "clk81", NULL, NULL },
};
static const struct meson_pwm_data pwm_axg_ee_data = {
- .parent_names = pwm_axg_ee_parent_names,
- .num_parents = ARRAY_SIZE(pwm_axg_ee_parent_names),
-};
-
-static const char * const pwm_axg_ao_parent_names[] = {
- "xtal", "axg_ao_clk81", "fclk_div4", "fclk_div5"
+ .parent_names = { "xtal", "fclk_div5", "fclk_div4", "fclk_div3" },
};
static const struct meson_pwm_data pwm_axg_ao_data = {
- .parent_names = pwm_axg_ao_parent_names,
- .num_parents = ARRAY_SIZE(pwm_axg_ao_parent_names),
-};
-
-static const char * const pwm_g12a_ao_ab_parent_names[] = {
- "xtal", "g12a_ao_clk81", "fclk_div4", "fclk_div5"
+ .parent_names = { "xtal", "axg_ao_clk81", "fclk_div4", "fclk_div5" },
};
static const struct meson_pwm_data pwm_g12a_ao_ab_data = {
- .parent_names = pwm_g12a_ao_ab_parent_names,
- .num_parents = ARRAY_SIZE(pwm_g12a_ao_ab_parent_names),
-};
-
-static const char * const pwm_g12a_ao_cd_parent_names[] = {
- "xtal", "g12a_ao_clk81",
+ .parent_names = { "xtal", "g12a_ao_clk81", "fclk_div4", "fclk_div5" },
};
static const struct meson_pwm_data pwm_g12a_ao_cd_data = {
- .parent_names = pwm_g12a_ao_cd_parent_names,
- .num_parents = ARRAY_SIZE(pwm_g12a_ao_cd_parent_names),
+ .parent_names = { "xtal", "g12a_ao_clk81", NULL, NULL },
};
static const struct of_device_id meson_pwm_matches[] = {
@@ -432,20 +403,21 @@ static const struct of_device_id meson_pwm_matches[] = {
};
MODULE_DEVICE_TABLE(of, meson_pwm_matches);
-static int meson_pwm_init_channels(struct meson_pwm *meson)
+static int meson_pwm_init_channels(struct pwm_chip *chip)
{
- struct clk_parent_data mux_parent_data[MESON_MAX_MUX_PARENTS] = {};
- struct device *dev = meson->chip.dev;
+ struct meson_pwm *meson = to_meson_pwm(chip);
+ struct clk_parent_data mux_parent_data[MESON_NUM_MUX_PARENTS] = {};
+ struct device *dev = pwmchip_parent(chip);
unsigned int i;
char name[255];
int err;
- for (i = 0; i < meson->data->num_parents; i++) {
+ for (i = 0; i < MESON_NUM_MUX_PARENTS; i++) {
mux_parent_data[i].index = -1;
mux_parent_data[i].name = meson->data->parent_names[i];
}
- for (i = 0; i < meson->chip.npwm; i++) {
+ for (i = 0; i < chip->npwm; i++) {
struct meson_pwm_channel *channel = &meson->channels[i];
struct clk_parent_data div_parent = {}, gate_parent = {};
struct clk_init_data init = {};
@@ -456,7 +428,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
init.ops = &clk_mux_ops;
init.flags = 0;
init.parent_data = mux_parent_data;
- init.num_parents = meson->data->num_parents;
+ init.num_parents = MESON_NUM_MUX_PARENTS;
channel->mux.reg = meson->base + REG_MISC_AB;
channel->mux.shift =
@@ -525,29 +497,29 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
static int meson_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct meson_pwm *meson;
int err;
- meson = devm_kzalloc(&pdev->dev, sizeof(*meson), GFP_KERNEL);
- if (!meson)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, MESON_NUM_PWMS, sizeof(*meson));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ meson = to_meson_pwm(chip);
meson->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(meson->base))
return PTR_ERR(meson->base);
spin_lock_init(&meson->lock);
- meson->chip.dev = &pdev->dev;
- meson->chip.ops = &meson_pwm_ops;
- meson->chip.npwm = MESON_NUM_PWMS;
+ chip->ops = &meson_pwm_ops;
meson->data = of_device_get_match_data(&pdev->dev);
- err = meson_pwm_init_channels(meson);
+ err = meson_pwm_init_channels(chip);
if (err < 0)
return err;
- err = devm_pwmchip_add(&pdev->dev, &meson->chip);
+ err = devm_pwmchip_add(&pdev->dev, chip);
if (err < 0)
return dev_err_probe(&pdev->dev, err,
"failed to register PWM chip\n");
diff --git a/drivers/pwm/pwm-microchip-core.c b/drivers/pwm/pwm-microchip-core.c
index c0c53968f3e9..c1f2287b8e97 100644
--- a/drivers/pwm/pwm-microchip-core.c
+++ b/drivers/pwm/pwm-microchip-core.c
@@ -54,7 +54,6 @@
#define MCHPCOREPWM_TIMEOUT_MS 100u
struct mchp_core_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
struct mutex lock; /* protects the shared period */
@@ -65,7 +64,7 @@ struct mchp_core_pwm_chip {
static inline struct mchp_core_pwm_chip *to_mchp_core_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct mchp_core_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static void mchp_core_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -447,13 +446,15 @@ MODULE_DEVICE_TABLE(of, mchp_core_of_match);
static int mchp_core_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct mchp_core_pwm_chip *mchp_core_pwm;
struct resource *regs;
int ret;
- mchp_core_pwm = devm_kzalloc(&pdev->dev, sizeof(*mchp_core_pwm), GFP_KERNEL);
- if (!mchp_core_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 16, sizeof(*mchp_core_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ mchp_core_pwm = to_mchp_core_pwm(chip);
mchp_core_pwm->base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(mchp_core_pwm->base))
@@ -470,9 +471,7 @@ static int mchp_core_pwm_probe(struct platform_device *pdev)
mutex_init(&mchp_core_pwm->lock);
- mchp_core_pwm->chip.dev = &pdev->dev;
- mchp_core_pwm->chip.ops = &mchp_core_pwm_ops;
- mchp_core_pwm->chip.npwm = 16;
+ chip->ops = &mchp_core_pwm_ops;
mchp_core_pwm->channel_enabled = readb_relaxed(mchp_core_pwm->base + MCHPCOREPWM_EN(0));
mchp_core_pwm->channel_enabled |=
@@ -485,7 +484,7 @@ static int mchp_core_pwm_probe(struct platform_device *pdev)
writel_relaxed(1U, mchp_core_pwm->base + MCHPCOREPWM_SYNC_UPD);
mchp_core_pwm->update_timestamp = ktime_get();
- ret = devm_pwmchip_add(&pdev->dev, &mchp_core_pwm->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to add pwmchip\n");
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index a72f7be36996..bafd6b6195f6 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -42,7 +42,6 @@ struct mtk_pwm_data {
};
struct mtk_disp_pwm {
- struct pwm_chip chip;
const struct mtk_pwm_data *data;
struct clk *clk_main;
struct clk *clk_mm;
@@ -52,7 +51,7 @@ struct mtk_disp_pwm {
static inline struct mtk_disp_pwm *to_mtk_disp_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct mtk_disp_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static void mtk_disp_pwm_update_bits(struct mtk_disp_pwm *mdp, u32 offset,
@@ -91,14 +90,14 @@ static int mtk_disp_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!mdp->enabled) {
err = clk_prepare_enable(mdp->clk_main);
if (err < 0) {
- dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n",
+ dev_err(pwmchip_parent(chip), "Can't enable mdp->clk_main: %pe\n",
ERR_PTR(err));
return err;
}
err = clk_prepare_enable(mdp->clk_mm);
if (err < 0) {
- dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n",
+ dev_err(pwmchip_parent(chip), "Can't enable mdp->clk_mm: %pe\n",
ERR_PTR(err));
clk_disable_unprepare(mdp->clk_main);
return err;
@@ -181,13 +180,13 @@ static int mtk_disp_pwm_get_state(struct pwm_chip *chip,
err = clk_prepare_enable(mdp->clk_main);
if (err < 0) {
- dev_err(chip->dev, "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
+ dev_err(pwmchip_parent(chip), "Can't enable mdp->clk_main: %pe\n", ERR_PTR(err));
return err;
}
err = clk_prepare_enable(mdp->clk_mm);
if (err < 0) {
- dev_err(chip->dev, "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
+ dev_err(pwmchip_parent(chip), "Can't enable mdp->clk_mm: %pe\n", ERR_PTR(err));
clk_disable_unprepare(mdp->clk_main);
return err;
}
@@ -231,12 +230,14 @@ static const struct pwm_ops mtk_disp_pwm_ops = {
static int mtk_disp_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct mtk_disp_pwm *mdp;
int ret;
- mdp = devm_kzalloc(&pdev->dev, sizeof(*mdp), GFP_KERNEL);
- if (!mdp)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*mdp));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ mdp = to_mtk_disp_pwm(chip);
mdp->data = of_device_get_match_data(&pdev->dev);
@@ -254,11 +255,9 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(mdp->clk_mm),
"Failed to get mm clock\n");
- mdp->chip.dev = &pdev->dev;
- mdp->chip.ops = &mtk_disp_pwm_ops;
- mdp->chip.npwm = 1;
+ chip->ops = &mtk_disp_pwm_ops;
- ret = devm_pwmchip_add(&pdev->dev, &mdp->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index 1b5e787d78f1..8cad214b1c29 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -37,12 +37,14 @@ static const u8 cdiv_shift[PERIOD_CDIV_MAX] = {
};
struct mxs_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
void __iomem *base;
};
-#define to_mxs_pwm_chip(_chip) container_of(_chip, struct mxs_pwm_chip, chip)
+static inline struct mxs_pwm_chip *to_mxs_pwm_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static int mxs_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
@@ -120,12 +122,21 @@ static const struct pwm_ops mxs_pwm_ops = {
static int mxs_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct pwm_chip *chip;
struct mxs_pwm_chip *mxs;
+ u32 npwm;
int ret;
- mxs = devm_kzalloc(&pdev->dev, sizeof(*mxs), GFP_KERNEL);
- if (!mxs)
- return -ENOMEM;
+ ret = of_property_read_u32(np, "fsl,pwm-number", &npwm);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get pwm number: %d\n", ret);
+ return ret;
+ }
+
+ chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*mxs));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ mxs = to_mxs_pwm_chip(chip);
mxs->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxs->base))
@@ -135,21 +146,14 @@ static int mxs_pwm_probe(struct platform_device *pdev)
if (IS_ERR(mxs->clk))
return PTR_ERR(mxs->clk);
- mxs->chip.dev = &pdev->dev;
- mxs->chip.ops = &mxs_pwm_ops;
-
- ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get pwm number: %d\n", ret);
- return ret;
- }
+ chip->ops = &mxs_pwm_ops;
/* FIXME: Only do this if the PWM isn't already running */
ret = stmp_reset_block(mxs->base);
if (ret)
return dev_err_probe(&pdev->dev, ret, "failed to reset PWM\n");
- ret = devm_pwmchip_add(&pdev->dev, &mxs->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add pwm chip %d\n", ret);
return ret;
diff --git a/drivers/pwm/pwm-ntxec.c b/drivers/pwm/pwm-ntxec.c
index 78606039eda2..28d1c2e5a98f 100644
--- a/drivers/pwm/pwm-ntxec.c
+++ b/drivers/pwm/pwm-ntxec.c
@@ -25,12 +25,11 @@
struct ntxec_pwm {
struct ntxec *ec;
- struct pwm_chip chip;
};
static struct ntxec_pwm *ntxec_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct ntxec_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
#define NTXEC_REG_AUTO_OFF_HI 0xa1
@@ -141,16 +140,13 @@ static int ntxec_pwm_probe(struct platform_device *pdev)
device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = ntxec_pwm_from_chip(chip);
priv->ec = ec;
-
- chip = &priv->chip;
- chip->dev = &pdev->dev;
chip->ops = &ntxec_pwm_ops;
- chip->npwm = 1;
return devm_pwmchip_add(&pdev->dev, chip);
}
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 496bd73d29fe..cd51c4a938f5 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -53,13 +53,11 @@
/**
* struct pwm_omap_dmtimer_chip - Structure representing a pwm chip
* corresponding to omap dmtimer.
- * @chip: PWM chip structure representing PWM controller
* @dm_timer: Pointer to omap dm timer.
* @pdata: Pointer to omap dm timer ops.
* @dm_timer_pdev: Pointer to omap dm timer platform device
*/
struct pwm_omap_dmtimer_chip {
- struct pwm_chip chip;
/* Mutex to protect pwm apply state */
struct omap_dm_timer *dm_timer;
const struct omap_dm_timer_ops *pdata;
@@ -69,7 +67,7 @@ struct pwm_omap_dmtimer_chip {
static inline struct pwm_omap_dmtimer_chip *
to_pwm_omap_dmtimer_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct pwm_omap_dmtimer_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
/**
@@ -155,7 +153,7 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
unsigned long clk_rate;
struct clk *fclk;
- dev_dbg(chip->dev, "requested duty cycle: %d ns, period: %d ns\n",
+ dev_dbg(pwmchip_parent(chip), "requested duty cycle: %d ns, period: %d ns\n",
duty_ns, period_ns);
if (duty_ns == pwm_get_duty_cycle(pwm) &&
@@ -164,17 +162,17 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
fclk = omap->pdata->get_fclk(omap->dm_timer);
if (!fclk) {
- dev_err(chip->dev, "invalid pmtimer fclk\n");
+ dev_err(pwmchip_parent(chip), "invalid pmtimer fclk\n");
return -EINVAL;
}
clk_rate = clk_get_rate(fclk);
if (!clk_rate) {
- dev_err(chip->dev, "invalid pmtimer fclk rate\n");
+ dev_err(pwmchip_parent(chip), "invalid pmtimer fclk rate\n");
return -EINVAL;
}
- dev_dbg(chip->dev, "clk rate: %luHz\n", clk_rate);
+ dev_dbg(pwmchip_parent(chip), "clk rate: %luHz\n", clk_rate);
/*
* Calculate the appropriate load and match values based on the
@@ -196,27 +194,27 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
duty_cycles = pwm_omap_dmtimer_get_clock_cycles(clk_rate, duty_ns);
if (period_cycles < 2) {
- dev_info(chip->dev,
+ dev_info(pwmchip_parent(chip),
"period %d ns too short for clock rate %lu Hz\n",
period_ns, clk_rate);
return -EINVAL;
}
if (duty_cycles < 1) {
- dev_dbg(chip->dev,
+ dev_dbg(pwmchip_parent(chip),
"duty cycle %d ns is too short for clock rate %lu Hz\n",
duty_ns, clk_rate);
- dev_dbg(chip->dev, "using minimum of 1 clock cycle\n");
+ dev_dbg(pwmchip_parent(chip), "using minimum of 1 clock cycle\n");
duty_cycles = 1;
} else if (duty_cycles >= period_cycles) {
- dev_dbg(chip->dev,
+ dev_dbg(pwmchip_parent(chip),
"duty cycle %d ns is too long for period %d ns at clock rate %lu Hz\n",
duty_ns, period_ns, clk_rate);
- dev_dbg(chip->dev, "using maximum of 1 clock cycle less than period\n");
+ dev_dbg(pwmchip_parent(chip), "using maximum of 1 clock cycle less than period\n");
duty_cycles = period_cycles - 1;
}
- dev_dbg(chip->dev, "effective duty cycle: %lld ns, period: %lld ns\n",
+ dev_dbg(pwmchip_parent(chip), "effective duty cycle: %lld ns, period: %lld ns\n",
DIV_ROUND_CLOSEST_ULL((u64)NSEC_PER_SEC * duty_cycles,
clk_rate),
DIV_ROUND_CLOSEST_ULL((u64)NSEC_PER_SEC * period_cycles,
@@ -228,7 +226,7 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
omap->pdata->set_load(omap->dm_timer, load_value);
omap->pdata->set_match(omap->dm_timer, true, match_value);
- dev_dbg(chip->dev, "load value: %#08x (%d), match value: %#08x (%d)\n",
+ dev_dbg(pwmchip_parent(chip), "load value: %#08x (%d), match value: %#08x (%d)\n",
load_value, load_value, match_value, match_value);
return 0;
@@ -311,6 +309,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
struct dmtimer_platform_data *timer_pdata;
const struct omap_dm_timer_ops *pdata;
struct platform_device *timer_pdev;
+ struct pwm_chip *chip;
struct pwm_omap_dmtimer_chip *omap;
struct omap_dm_timer *dm_timer;
struct device_node *timer;
@@ -368,11 +367,12 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
goto err_request_timer;
}
- omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
- if (!omap) {
- ret = -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*omap));
+ if (IS_ERR(chip)) {
+ ret = PTR_ERR(chip);
goto err_alloc_omap;
}
+ omap = to_pwm_omap_dmtimer_chip(chip);
omap->pdata = pdata;
omap->dm_timer = dm_timer;
@@ -392,11 +392,9 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
if (!of_property_read_u32(pdev->dev.of_node, "ti,clock-source", &v))
omap->pdata->set_source(omap->dm_timer, v);
- omap->chip.dev = &pdev->dev;
- omap->chip.ops = &pwm_omap_dmtimer_ops;
- omap->chip.npwm = 1;
+ chip->ops = &pwm_omap_dmtimer_ops;
- ret = pwmchip_add(&omap->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM\n");
goto err_pwmchip_add;
@@ -404,7 +402,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
of_node_put(timer);
- platform_set_drvdata(pdev, omap);
+ platform_set_drvdata(pdev, chip);
return 0;
@@ -432,9 +430,10 @@ err_find_timer_pdev:
static void pwm_omap_dmtimer_remove(struct platform_device *pdev)
{
- struct pwm_omap_dmtimer_chip *omap = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
- pwmchip_remove(&omap->chip);
+ pwmchip_remove(chip);
if (pm_runtime_active(&omap->dm_timer_pdev->dev))
omap->pdata->stop(omap->dm_timer);
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index e79b1de8c4d8..c5da2a6ed846 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -76,7 +76,6 @@
#define REG_OFF_L(C) ((C) >= PCA9685_MAXCHAN ? PCA9685_ALL_LED_OFF_L : LED_N_OFF_L((C)))
struct pca9685 {
- struct pwm_chip chip;
struct regmap *regmap;
struct mutex lock;
DECLARE_BITMAP(pwms_enabled, PCA9685_MAXCHAN + 1);
@@ -88,7 +87,7 @@ struct pca9685 {
static inline struct pca9685 *to_pca(struct pwm_chip *chip)
{
- return container_of(chip, struct pca9685, chip);
+ return pwmchip_get_drvdata(chip);
}
/* This function is supposed to be called with the lock mutex held */
@@ -107,9 +106,10 @@ static bool pca9685_prescaler_can_change(struct pca9685 *pca, int channel)
return test_bit(channel, pca->pwms_enabled);
}
-static int pca9685_read_reg(struct pca9685 *pca, unsigned int reg, unsigned int *val)
+static int pca9685_read_reg(struct pwm_chip *chip, unsigned int reg, unsigned int *val)
{
- struct device *dev = pca->chip.dev;
+ struct pca9685 *pca = to_pca(chip);
+ struct device *dev = pwmchip_parent(chip);
int err;
err = regmap_read(pca->regmap, reg, val);
@@ -119,9 +119,10 @@ static int pca9685_read_reg(struct pca9685 *pca, unsigned int reg, unsigned int
return err;
}
-static int pca9685_write_reg(struct pca9685 *pca, unsigned int reg, unsigned int val)
+static int pca9685_write_reg(struct pwm_chip *chip, unsigned int reg, unsigned int val)
{
- struct device *dev = pca->chip.dev;
+ struct pca9685 *pca = to_pca(chip);
+ struct device *dev = pwmchip_parent(chip);
int err;
err = regmap_write(pca->regmap, reg, val);
@@ -132,19 +133,19 @@ static int pca9685_write_reg(struct pca9685 *pca, unsigned int reg, unsigned int
}
/* Helper function to set the duty cycle ratio to duty/4096 (e.g. duty=2048 -> 50%) */
-static void pca9685_pwm_set_duty(struct pca9685 *pca, int channel, unsigned int duty)
+static void pca9685_pwm_set_duty(struct pwm_chip *chip, int channel, unsigned int duty)
{
- struct pwm_device *pwm = &pca->chip.pwms[channel];
+ struct pwm_device *pwm = &chip->pwms[channel];
unsigned int on, off;
if (duty == 0) {
/* Set the full OFF bit, which has the highest precedence */
- pca9685_write_reg(pca, REG_OFF_H(channel), LED_FULL);
+ pca9685_write_reg(chip, REG_OFF_H(channel), LED_FULL);
return;
} else if (duty >= PCA9685_COUNTER_RANGE) {
/* Set the full ON bit and clear the full OFF bit */
- pca9685_write_reg(pca, REG_ON_H(channel), LED_FULL);
- pca9685_write_reg(pca, REG_OFF_H(channel), 0);
+ pca9685_write_reg(chip, REG_ON_H(channel), LED_FULL);
+ pca9685_write_reg(chip, REG_OFF_H(channel), 0);
return;
}
@@ -164,16 +165,16 @@ static void pca9685_pwm_set_duty(struct pca9685 *pca, int channel, unsigned int
off = (on + duty) % PCA9685_COUNTER_RANGE;
/* Set ON time (clears full ON bit) */
- pca9685_write_reg(pca, REG_ON_L(channel), on & 0xff);
- pca9685_write_reg(pca, REG_ON_H(channel), (on >> 8) & 0xf);
+ pca9685_write_reg(chip, REG_ON_L(channel), on & 0xff);
+ pca9685_write_reg(chip, REG_ON_H(channel), (on >> 8) & 0xf);
/* Set OFF time (clears full OFF bit) */
- pca9685_write_reg(pca, REG_OFF_L(channel), off & 0xff);
- pca9685_write_reg(pca, REG_OFF_H(channel), (off >> 8) & 0xf);
+ pca9685_write_reg(chip, REG_OFF_L(channel), off & 0xff);
+ pca9685_write_reg(chip, REG_OFF_H(channel), (off >> 8) & 0xf);
}
-static unsigned int pca9685_pwm_get_duty(struct pca9685 *pca, int channel)
+static unsigned int pca9685_pwm_get_duty(struct pwm_chip *chip, int channel)
{
- struct pwm_device *pwm = &pca->chip.pwms[channel];
+ struct pwm_device *pwm = &chip->pwms[channel];
unsigned int off = 0, on = 0, val = 0;
if (WARN_ON(channel >= PCA9685_MAXCHAN)) {
@@ -181,25 +182,25 @@ static unsigned int pca9685_pwm_get_duty(struct pca9685 *pca, int channel)
return 0;
}
- pca9685_read_reg(pca, LED_N_OFF_H(channel), &off);
+ pca9685_read_reg(chip, LED_N_OFF_H(channel), &off);
if (off & LED_FULL) {
/* Full OFF bit is set */
return 0;
}
- pca9685_read_reg(pca, LED_N_ON_H(channel), &on);
+ pca9685_read_reg(chip, LED_N_ON_H(channel), &on);
if (on & LED_FULL) {
/* Full ON bit is set */
return PCA9685_COUNTER_RANGE;
}
- pca9685_read_reg(pca, LED_N_OFF_L(channel), &val);
+ pca9685_read_reg(chip, LED_N_OFF_L(channel), &val);
off = ((off & 0xf) << 8) | (val & 0xff);
if (!pwm->state.usage_power)
return off;
/* Read ON register to calculate duty cycle of staggered output */
- if (pca9685_read_reg(pca, LED_N_ON_L(channel), &val)) {
+ if (pca9685_read_reg(chip, LED_N_ON_L(channel), &val)) {
/* Reset val to 0 in case reading LED_N_ON_L failed */
val = 0;
}
@@ -247,35 +248,37 @@ static void pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
{
- struct pca9685 *pca = gpiochip_get_data(gpio);
+ struct pwm_chip *chip = gpiochip_get_data(gpio);
+ struct pca9685 *pca = to_pca(chip);
if (pca9685_pwm_test_and_set_inuse(pca, offset))
return -EBUSY;
- pm_runtime_get_sync(pca->chip.dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
return 0;
}
static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset)
{
- struct pca9685 *pca = gpiochip_get_data(gpio);
+ struct pwm_chip *chip = gpiochip_get_data(gpio);
- return pca9685_pwm_get_duty(pca, offset) != 0;
+ return pca9685_pwm_get_duty(chip, offset) != 0;
}
static void pca9685_pwm_gpio_set(struct gpio_chip *gpio, unsigned int offset,
int value)
{
- struct pca9685 *pca = gpiochip_get_data(gpio);
+ struct pwm_chip *chip = gpiochip_get_data(gpio);
- pca9685_pwm_set_duty(pca, offset, value ? PCA9685_COUNTER_RANGE : 0);
+ pca9685_pwm_set_duty(chip, offset, value ? PCA9685_COUNTER_RANGE : 0);
}
static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
{
- struct pca9685 *pca = gpiochip_get_data(gpio);
+ struct pwm_chip *chip = gpiochip_get_data(gpio);
+ struct pca9685 *pca = to_pca(chip);
- pca9685_pwm_set_duty(pca, offset, 0);
- pm_runtime_put(pca->chip.dev);
+ pca9685_pwm_set_duty(chip, offset, 0);
+ pm_runtime_put(pwmchip_parent(chip));
pca9685_pwm_clear_inuse(pca, offset);
}
@@ -306,9 +309,10 @@ static int pca9685_pwm_gpio_direction_output(struct gpio_chip *gpio,
* expose a GPIO chip here which can exclusively take over the underlying
* PWM channel.
*/
-static int pca9685_pwm_gpio_probe(struct pca9685 *pca)
+static int pca9685_pwm_gpio_probe(struct pwm_chip *chip)
{
- struct device *dev = pca->chip.dev;
+ struct pca9685 *pca = to_pca(chip);
+ struct device *dev = pwmchip_parent(chip);
pca->gpio.label = dev_name(dev);
pca->gpio.parent = dev;
@@ -323,7 +327,7 @@ static int pca9685_pwm_gpio_probe(struct pca9685 *pca)
pca->gpio.ngpio = PCA9685_MAXCHAN;
pca->gpio.can_sleep = true;
- return devm_gpiochip_add_data(dev, &pca->gpio, pca);
+ return devm_gpiochip_add_data(dev, &pca->gpio, chip);
}
#else
static inline bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca,
@@ -337,15 +341,16 @@ pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
{
}
-static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca)
+static inline int pca9685_pwm_gpio_probe(struct pwm_chip *chip)
{
return 0;
}
#endif
-static void pca9685_set_sleep_mode(struct pca9685 *pca, bool enable)
+static void pca9685_set_sleep_mode(struct pwm_chip *chip, bool enable)
{
- struct device *dev = pca->chip.dev;
+ struct device *dev = pwmchip_parent(chip);
+ struct pca9685 *pca = to_pca(chip);
int err = regmap_update_bits(pca->regmap, PCA9685_MODE1,
MODE1_SLEEP, enable ? MODE1_SLEEP : 0);
if (err) {
@@ -373,19 +378,19 @@ static int __pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
prescale = DIV_ROUND_CLOSEST_ULL(PCA9685_OSC_CLOCK_MHZ * state->period,
PCA9685_COUNTER_RANGE * 1000) - 1;
if (prescale < PCA9685_PRESCALE_MIN || prescale > PCA9685_PRESCALE_MAX) {
- dev_err(chip->dev, "pwm not changed: period out of bounds!\n");
+ dev_err(pwmchip_parent(chip), "pwm not changed: period out of bounds!\n");
return -EINVAL;
}
if (!state->enabled) {
- pca9685_pwm_set_duty(pca, pwm->hwpwm, 0);
+ pca9685_pwm_set_duty(chip, pwm->hwpwm, 0);
return 0;
}
- pca9685_read_reg(pca, PCA9685_PRESCALE, &val);
+ pca9685_read_reg(chip, PCA9685_PRESCALE, &val);
if (prescale != val) {
if (!pca9685_prescaler_can_change(pca, pwm->hwpwm)) {
- dev_err(chip->dev,
+ dev_err(pwmchip_parent(chip),
"pwm not changed: periods of enabled pwms must match!\n");
return -EBUSY;
}
@@ -397,18 +402,18 @@ static int __pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
* state is guaranteed active here.
*/
/* Put chip into sleep mode */
- pca9685_set_sleep_mode(pca, true);
+ pca9685_set_sleep_mode(chip, true);
/* Change the chip-wide output frequency */
- pca9685_write_reg(pca, PCA9685_PRESCALE, prescale);
+ pca9685_write_reg(chip, PCA9685_PRESCALE, prescale);
/* Wake the chip up */
- pca9685_set_sleep_mode(pca, false);
+ pca9685_set_sleep_mode(chip, false);
}
duty = PCA9685_COUNTER_RANGE * state->duty_cycle;
duty = DIV_ROUND_UP_ULL(duty, state->period);
- pca9685_pwm_set_duty(pca, pwm->hwpwm, duty);
+ pca9685_pwm_set_duty(chip, pwm->hwpwm, duty);
return 0;
}
@@ -434,12 +439,11 @@ static int pca9685_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
static int pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_state *state)
{
- struct pca9685 *pca = to_pca(chip);
unsigned long long duty;
unsigned int val = 0;
/* Calculate (chip-wide) period from prescale value */
- pca9685_read_reg(pca, PCA9685_PRESCALE, &val);
+ pca9685_read_reg(chip, PCA9685_PRESCALE, &val);
/*
* PCA9685_OSC_CLOCK_MHZ is 25, i.e. an integer divider of 1000.
* The following calculation is therefore only a multiplication
@@ -462,7 +466,7 @@ static int pca9685_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
}
state->enabled = true;
- duty = pca9685_pwm_get_duty(pca, pwm->hwpwm);
+ duty = pca9685_pwm_get_duty(chip, pwm->hwpwm);
state->duty_cycle = DIV_ROUND_DOWN_ULL(duty * state->period, PCA9685_COUNTER_RANGE);
return 0;
@@ -482,7 +486,7 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_unlock(&pca->lock);
}
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
return 0;
}
@@ -492,11 +496,11 @@ static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
struct pca9685 *pca = to_pca(chip);
mutex_lock(&pca->lock);
- pca9685_pwm_set_duty(pca, pwm->hwpwm, 0);
+ pca9685_pwm_set_duty(chip, pwm->hwpwm, 0);
clear_bit(pwm->hwpwm, pca->pwms_enabled);
mutex_unlock(&pca->lock);
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
pca9685_pwm_clear_inuse(pca, pwm->hwpwm);
}
@@ -516,13 +520,16 @@ static const struct regmap_config pca9685_regmap_i2c_config = {
static int pca9685_pwm_probe(struct i2c_client *client)
{
+ struct pwm_chip *chip;
struct pca9685 *pca;
unsigned int reg;
int ret;
- pca = devm_kzalloc(&client->dev, sizeof(*pca), GFP_KERNEL);
- if (!pca)
- return -ENOMEM;
+ /* Add an extra channel for ALL_LED */
+ chip = devm_pwmchip_alloc(&client->dev, PCA9685_MAXCHAN + 1, sizeof(*pca));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pca = to_pca(chip);
pca->regmap = devm_regmap_init_i2c(client, &pca9685_regmap_i2c_config);
if (IS_ERR(pca->regmap)) {
@@ -532,11 +539,11 @@ static int pca9685_pwm_probe(struct i2c_client *client)
return ret;
}
- i2c_set_clientdata(client, pca);
+ i2c_set_clientdata(client, chip);
mutex_init(&pca->lock);
- ret = pca9685_read_reg(pca, PCA9685_MODE2, &reg);
+ ret = pca9685_read_reg(chip, PCA9685_MODE2, &reg);
if (ret)
return ret;
@@ -550,34 +557,30 @@ static int pca9685_pwm_probe(struct i2c_client *client)
else
reg |= MODE2_OUTDRV;
- ret = pca9685_write_reg(pca, PCA9685_MODE2, reg);
+ ret = pca9685_write_reg(chip, PCA9685_MODE2, reg);
if (ret)
return ret;
/* Disable all LED ALLCALL and SUBx addresses to avoid bus collisions */
- pca9685_read_reg(pca, PCA9685_MODE1, &reg);
+ pca9685_read_reg(chip, PCA9685_MODE1, &reg);
reg &= ~(MODE1_ALLCALL | MODE1_SUB1 | MODE1_SUB2 | MODE1_SUB3);
- pca9685_write_reg(pca, PCA9685_MODE1, reg);
+ pca9685_write_reg(chip, PCA9685_MODE1, reg);
/* Reset OFF/ON registers to POR default */
- pca9685_write_reg(pca, PCA9685_ALL_LED_OFF_L, 0);
- pca9685_write_reg(pca, PCA9685_ALL_LED_OFF_H, LED_FULL);
- pca9685_write_reg(pca, PCA9685_ALL_LED_ON_L, 0);
- pca9685_write_reg(pca, PCA9685_ALL_LED_ON_H, LED_FULL);
-
- pca->chip.ops = &pca9685_pwm_ops;
- /* Add an extra channel for ALL_LED */
- pca->chip.npwm = PCA9685_MAXCHAN + 1;
+ pca9685_write_reg(chip, PCA9685_ALL_LED_OFF_L, 0);
+ pca9685_write_reg(chip, PCA9685_ALL_LED_OFF_H, LED_FULL);
+ pca9685_write_reg(chip, PCA9685_ALL_LED_ON_L, 0);
+ pca9685_write_reg(chip, PCA9685_ALL_LED_ON_H, LED_FULL);
- pca->chip.dev = &client->dev;
+ chip->ops = &pca9685_pwm_ops;
- ret = pwmchip_add(&pca->chip);
+ ret = pwmchip_add(chip);
if (ret < 0)
return ret;
- ret = pca9685_pwm_gpio_probe(pca);
+ ret = pca9685_pwm_gpio_probe(chip);
if (ret < 0) {
- pwmchip_remove(&pca->chip);
+ pwmchip_remove(chip);
return ret;
}
@@ -588,11 +591,11 @@ static int pca9685_pwm_probe(struct i2c_client *client)
* Although the chip comes out of power-up in the sleep state,
* we force it to sleep in case it was woken up before
*/
- pca9685_set_sleep_mode(pca, true);
+ pca9685_set_sleep_mode(chip, true);
pm_runtime_set_suspended(&client->dev);
} else {
/* Wake the chip up if runtime PM is disabled */
- pca9685_set_sleep_mode(pca, false);
+ pca9685_set_sleep_mode(chip, false);
}
return 0;
@@ -600,13 +603,13 @@ static int pca9685_pwm_probe(struct i2c_client *client)
static void pca9685_pwm_remove(struct i2c_client *client)
{
- struct pca9685 *pca = i2c_get_clientdata(client);
+ struct pwm_chip *chip = i2c_get_clientdata(client);
- pwmchip_remove(&pca->chip);
+ pwmchip_remove(chip);
if (!pm_runtime_enabled(&client->dev)) {
/* Put chip in sleep state if runtime PM is disabled */
- pca9685_set_sleep_mode(pca, true);
+ pca9685_set_sleep_mode(chip, true);
}
pm_runtime_disable(&client->dev);
@@ -615,18 +618,18 @@ static void pca9685_pwm_remove(struct i2c_client *client)
static int __maybe_unused pca9685_pwm_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct pca9685 *pca = i2c_get_clientdata(client);
+ struct pwm_chip *chip = i2c_get_clientdata(client);
- pca9685_set_sleep_mode(pca, true);
+ pca9685_set_sleep_mode(chip, true);
return 0;
}
static int __maybe_unused pca9685_pwm_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct pca9685 *pca = i2c_get_clientdata(client);
+ struct pwm_chip *chip = i2c_get_clientdata(client);
- pca9685_set_sleep_mode(pca, false);
+ pca9685_set_sleep_mode(chip, false);
return 0;
}
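
With the chip no longer embedded in struct pca9685 there is no way back from the private data to the chip, so the internal helpers are re-plumbed to take the struct pwm_chip: the register accessors, the sleep-mode helper and the GPIO layer (which now registers the chip as gpiochip data) all derive both the private data and the parent device from it, and the dev_*/runtime-PM calls use pwmchip_parent() in place of the removed chip->dev member. Reduced sketch of such a helper (hypothetical foo_* naming, a regmap assumed in the private data):

static int foo_read_reg(struct pwm_chip *chip, unsigned int reg,
			unsigned int *val)
{
	struct foo_pwm *foo = pwmchip_get_drvdata(chip);
	int err;

	err = regmap_read(foo->regmap, reg, val);
	if (err)
		/* chip->dev is gone; ask the core for the parent device */
		dev_err(pwmchip_parent(chip),
			"regmap_read of register 0x%x failed: %pe\n",
			reg, ERR_PTR(err));

	return err;
}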
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index 76685f926c75..bb7bb48b2e6d 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -49,7 +49,6 @@ MODULE_DEVICE_TABLE(platform, pwm_id_table);
#define PWMDCR_FD (1 << 10)
struct pxa_pwm_chip {
- struct pwm_chip chip;
struct device *dev;
struct clk *clk;
@@ -58,7 +57,7 @@ struct pxa_pwm_chip {
static inline struct pxa_pwm_chip *to_pxa_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct pxa_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -159,6 +158,7 @@ MODULE_DEVICE_TABLE(of, pwm_of_match);
static int pwm_probe(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct pwm_chip *chip;
struct pxa_pwm_chip *pc;
int ret = 0;
@@ -168,28 +168,27 @@ static int pwm_probe(struct platform_device *pdev)
if (id == NULL)
return -EINVAL;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (pc == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev,
+ (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1,
+ sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_pxa_pwm_chip(chip);
pc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pc->clk))
return PTR_ERR(pc->clk);
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &pxa_pwm_ops;
- pc->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
+ chip->ops = &pxa_pwm_ops;
- if (IS_ENABLED(CONFIG_OF)) {
- pc->chip.of_xlate = of_pwm_single_xlate;
- pc->chip.of_pwm_n_cells = 1;
- }
+ if (IS_ENABLED(CONFIG_OF))
+ chip->of_xlate = of_pwm_single_xlate;
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
- ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
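
The pxa hunk above also drops the of_pwm_n_cells assignment, keeping only the of_xlate callback; presumably of_pwm_single_xlate() (or the PWM core) now validates the phandle cell count itself, so the per-driver bookkeeping is gone and the OF setup shrinks to:

	if (IS_ENABLED(CONFIG_OF))
		chip->of_xlate = of_pwm_single_xlate;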
diff --git a/drivers/pwm/pwm-raspberrypi-poe.c b/drivers/pwm/pwm-raspberrypi-poe.c
index 1ad814fdec6b..8921e7ea2cea 100644
--- a/drivers/pwm/pwm-raspberrypi-poe.c
+++ b/drivers/pwm/pwm-raspberrypi-poe.c
@@ -27,7 +27,6 @@
struct raspberrypi_pwm {
struct rpi_firmware *firmware;
- struct pwm_chip chip;
unsigned int duty_cycle;
};
@@ -40,7 +39,7 @@ struct raspberrypi_pwm_prop {
static inline
struct raspberrypi_pwm *raspberrypi_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct raspberrypi_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int raspberrypi_pwm_set_property(struct rpi_firmware *firmware,
@@ -122,7 +121,7 @@ static int raspberrypi_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = raspberrypi_pwm_set_property(rpipwm->firmware, RPI_PWM_CUR_DUTY_REG,
duty_cycle);
if (ret) {
- dev_err(chip->dev, "Failed to set duty cycle: %pe\n",
+ dev_err(pwmchip_parent(chip), "Failed to set duty cycle: %pe\n",
ERR_PTR(ret));
return ret;
}
@@ -142,6 +141,7 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
struct device_node *firmware_node;
struct device *dev = &pdev->dev;
struct rpi_firmware *firmware;
+ struct pwm_chip *chip;
struct raspberrypi_pwm *rpipwm;
int ret;
@@ -157,14 +157,14 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
return dev_err_probe(dev, -EPROBE_DEFER,
"Failed to get firmware handle\n");
- rpipwm = devm_kzalloc(&pdev->dev, sizeof(*rpipwm), GFP_KERNEL);
- if (!rpipwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, RASPBERRYPI_FIRMWARE_PWM_NUM,
+ sizeof(*rpipwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ rpipwm = raspberrypi_pwm_from_chip(chip);
rpipwm->firmware = firmware;
- rpipwm->chip.dev = dev;
- rpipwm->chip.ops = &raspberrypi_pwm_ops;
- rpipwm->chip.npwm = RASPBERRYPI_FIRMWARE_PWM_NUM;
+ chip->ops = &raspberrypi_pwm_ops;
ret = raspberrypi_pwm_get_property(rpipwm->firmware, RPI_PWM_CUR_DUTY_REG,
&rpipwm->duty_cycle);
@@ -173,7 +173,7 @@ static int raspberrypi_pwm_probe(struct platform_device *pdev)
return ret;
}
- return devm_pwmchip_add(dev, &rpipwm->chip);
+ return devm_pwmchip_add(dev, chip);
}
static const struct of_device_id raspberrypi_pwm_of_match[] = {
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 13269f55fccf..4cfecd88ede0 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -38,14 +38,13 @@
#define RCAR_PWMCNT_PH0_SHIFT 0
struct rcar_pwm_chip {
- struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
static inline struct rcar_pwm_chip *to_rcar_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct rcar_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static void rcar_pwm_write(struct rcar_pwm_chip *rp, u32 data,
@@ -132,12 +131,12 @@ static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, int duty_ns,
static int rcar_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- return pm_runtime_get_sync(chip->dev);
+ return pm_runtime_get_sync(pwmchip_parent(chip));
}
static void rcar_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
}
static int rcar_pwm_enable(struct rcar_pwm_chip *rp)
@@ -202,12 +201,14 @@ static const struct pwm_ops rcar_pwm_ops = {
static int rcar_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct rcar_pwm_chip *rcar_pwm;
int ret;
- rcar_pwm = devm_kzalloc(&pdev->dev, sizeof(*rcar_pwm), GFP_KERNEL);
- if (rcar_pwm == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*rcar_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ rcar_pwm = to_rcar_pwm_chip(chip);
rcar_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rcar_pwm->base))
@@ -219,15 +220,13 @@ static int rcar_pwm_probe(struct platform_device *pdev)
return PTR_ERR(rcar_pwm->clk);
}
- platform_set_drvdata(pdev, rcar_pwm);
+ chip->ops = &rcar_pwm_ops;
- rcar_pwm->chip.dev = &pdev->dev;
- rcar_pwm->chip.ops = &rcar_pwm_ops;
- rcar_pwm->chip.npwm = 1;
+ platform_set_drvdata(pdev, chip);
pm_runtime_enable(&pdev->dev);
- ret = pwmchip_add(&rcar_pwm->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM chip: %d\n", ret);
pm_runtime_disable(&pdev->dev);
@@ -239,9 +238,9 @@ static int rcar_pwm_probe(struct platform_device *pdev)
static void rcar_pwm_remove(struct platform_device *pdev)
{
- struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
- pwmchip_remove(&rcar_pwm->chip);
+ pwmchip_remove(chip);
pm_runtime_disable(&pdev->dev);
}
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index 28265fdfc92a..2196080b4177 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -79,7 +79,6 @@ struct tpu_pwm_device {
struct tpu_device {
struct platform_device *pdev;
- struct pwm_chip chip;
spinlock_t lock;
void __iomem *base;
@@ -87,7 +86,10 @@ struct tpu_device {
struct tpu_pwm_device tpd[TPU_CHANNEL_MAX];
};
-#define to_tpu_device(c) container_of(c, struct tpu_device, chip)
+static inline struct tpu_device *to_tpu_device(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
static void tpu_pwm_write(struct tpu_pwm_device *tpd, int reg_nr, u16 value)
{
@@ -438,12 +440,14 @@ static const struct pwm_ops tpu_pwm_ops = {
static int tpu_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct tpu_device *tpu;
int ret;
- tpu = devm_kzalloc(&pdev->dev, sizeof(*tpu), GFP_KERNEL);
- if (tpu == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, TPU_CHANNEL_MAX, sizeof(*tpu));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ tpu = to_tpu_device(chip);
spin_lock_init(&tpu->lock);
tpu->pdev = pdev;
@@ -460,15 +464,13 @@ static int tpu_probe(struct platform_device *pdev)
/* Initialize and register the device. */
platform_set_drvdata(pdev, tpu);
- tpu->chip.dev = &pdev->dev;
- tpu->chip.ops = &tpu_pwm_ops;
- tpu->chip.npwm = TPU_CHANNEL_MAX;
+ chip->ops = &tpu_pwm_ops;
ret = devm_pm_runtime_enable(&pdev->dev);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to enable runtime PM\n");
- ret = devm_pwmchip_add(&pdev->dev, &tpu->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to register PWM chip\n");
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index a7c647e37837..0fa7575dbb54 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -30,7 +30,6 @@
#define PWM_LP_DISABLE (0 << 8)
struct rockchip_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
struct clk *pclk;
const struct rockchip_pwm_data *data;
@@ -54,7 +53,7 @@ struct rockchip_pwm_data {
static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct rockchip_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int rockchip_pwm_get_state(struct pwm_chip *chip,
@@ -296,14 +295,16 @@ MODULE_DEVICE_TABLE(of, rockchip_pwm_dt_ids);
static int rockchip_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct rockchip_pwm_chip *pc;
u32 enable_conf, ctrl;
bool enabled;
int ret, count;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_rockchip_pwm_chip(chip);
pc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->base))
@@ -337,18 +338,16 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
goto err_clk;
}
- platform_set_drvdata(pdev, pc);
+ platform_set_drvdata(pdev, chip);
pc->data = device_get_match_data(&pdev->dev);
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &rockchip_pwm_ops;
- pc->chip.npwm = 1;
+ chip->ops = &rockchip_pwm_ops;
enable_conf = pc->data->enable_conf;
ctrl = readl_relaxed(pc->base + pc->data->regs.ctrl);
enabled = (ctrl & enable_conf) == enable_conf;
- ret = pwmchip_add(&pc->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
goto err_pclk;
@@ -372,9 +371,10 @@ err_clk:
static void rockchip_pwm_remove(struct platform_device *pdev)
{
- struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
- pwmchip_remove(&pc->chip);
+ pwmchip_remove(chip);
clk_unprepare(pc->pclk);
clk_unprepare(pc->clk);
diff --git a/drivers/pwm/pwm-rz-mtu3.c b/drivers/pwm/pwm-rz-mtu3.c
index bdda315b3bd3..ab39bd37edaf 100644
--- a/drivers/pwm/pwm-rz-mtu3.c
+++ b/drivers/pwm/pwm-rz-mtu3.c
@@ -61,7 +61,6 @@ struct rz_mtu3_pwm_channel {
/**
* struct rz_mtu3_pwm_chip - MTU3 pwm private data
*
- * @chip: MTU3 pwm chip data
* @clk: MTU3 module clock
* @lock: Lock to prevent concurrent access for usage count
* @rate: MTU3 clock rate
@@ -72,7 +71,6 @@ struct rz_mtu3_pwm_channel {
*/
struct rz_mtu3_pwm_chip {
- struct pwm_chip chip;
struct clk *clk;
struct mutex lock;
unsigned long rate;
@@ -92,7 +90,7 @@ static const struct rz_mtu3_channel_io_map channel_map[] = {
static inline struct rz_mtu3_pwm_chip *to_rz_mtu3_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct rz_mtu3_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static void rz_mtu3_pwm_read_tgr_registers(struct rz_mtu3_pwm_channel *priv,
@@ -211,15 +209,15 @@ static void rz_mtu3_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_unlock(&rz_mtu3_pwm->lock);
}
-static int rz_mtu3_pwm_enable(struct rz_mtu3_pwm_chip *rz_mtu3_pwm,
- struct pwm_device *pwm)
+static int rz_mtu3_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
struct rz_mtu3_pwm_channel *priv;
u32 ch;
u8 val;
int rc;
- rc = pm_runtime_resume_and_get(rz_mtu3_pwm->chip.dev);
+ rc = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (rc)
return rc;
@@ -243,9 +241,9 @@ static int rz_mtu3_pwm_enable(struct rz_mtu3_pwm_chip *rz_mtu3_pwm,
return 0;
}
-static void rz_mtu3_pwm_disable(struct rz_mtu3_pwm_chip *rz_mtu3_pwm,
- struct pwm_device *pwm)
+static void rz_mtu3_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
struct rz_mtu3_pwm_channel *priv;
u32 ch;
@@ -265,7 +263,7 @@ static void rz_mtu3_pwm_disable(struct rz_mtu3_pwm_chip *rz_mtu3_pwm,
mutex_unlock(&rz_mtu3_pwm->lock);
- pm_runtime_put_sync(rz_mtu3_pwm->chip.dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
static int rz_mtu3_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -274,7 +272,7 @@ static int rz_mtu3_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
int rc;
- rc = pm_runtime_resume_and_get(chip->dev);
+ rc = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (rc)
return rc;
@@ -307,7 +305,7 @@ static int rz_mtu3_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
}
state->polarity = PWM_POLARITY_NORMAL;
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
return 0;
}
@@ -362,7 +360,7 @@ static int rz_mtu3_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (!pwm->state.enabled) {
int rc;
- rc = pm_runtime_resume_and_get(chip->dev);
+ rc = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (rc)
return rc;
}
@@ -399,7 +397,7 @@ static int rz_mtu3_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
/* If the PWM is not enabled, turn the clock off again to save power. */
if (!pwm->state.enabled)
- pm_runtime_put(chip->dev);
+ pm_runtime_put(pwmchip_parent(chip));
return 0;
}
@@ -416,7 +414,7 @@ static int rz_mtu3_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!state->enabled) {
if (enabled)
- rz_mtu3_pwm_disable(rz_mtu3_pwm, pwm);
+ rz_mtu3_pwm_disable(chip, pwm);
return 0;
}
@@ -428,7 +426,7 @@ static int rz_mtu3_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
if (!enabled)
- ret = rz_mtu3_pwm_enable(rz_mtu3_pwm, pwm);
+ ret = rz_mtu3_pwm_enable(chip, pwm);
return ret;
}
@@ -442,7 +440,8 @@ static const struct pwm_ops rz_mtu3_pwm_ops = {
static int rz_mtu3_pwm_pm_runtime_suspend(struct device *dev)
{
- struct rz_mtu3_pwm_chip *rz_mtu3_pwm = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
clk_disable_unprepare(rz_mtu3_pwm->clk);
@@ -451,7 +450,8 @@ static int rz_mtu3_pwm_pm_runtime_suspend(struct device *dev)
static int rz_mtu3_pwm_pm_runtime_resume(struct device *dev)
{
- struct rz_mtu3_pwm_chip *rz_mtu3_pwm = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
return clk_prepare_enable(rz_mtu3_pwm->clk);
}
@@ -462,24 +462,28 @@ static DEFINE_RUNTIME_DEV_PM_OPS(rz_mtu3_pwm_pm_ops,
static void rz_mtu3_pwm_pm_disable(void *data)
{
- struct rz_mtu3_pwm_chip *rz_mtu3_pwm = data;
+ struct pwm_chip *chip = data;
+ struct rz_mtu3_pwm_chip *rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
clk_rate_exclusive_put(rz_mtu3_pwm->clk);
- pm_runtime_disable(rz_mtu3_pwm->chip.dev);
- pm_runtime_set_suspended(rz_mtu3_pwm->chip.dev);
+ pm_runtime_disable(pwmchip_parent(chip));
+ pm_runtime_set_suspended(pwmchip_parent(chip));
}
static int rz_mtu3_pwm_probe(struct platform_device *pdev)
{
struct rz_mtu3 *parent_ddata = dev_get_drvdata(pdev->dev.parent);
struct rz_mtu3_pwm_chip *rz_mtu3_pwm;
+ struct pwm_chip *chip;
struct device *dev = &pdev->dev;
unsigned int i, j = 0;
int ret;
- rz_mtu3_pwm = devm_kzalloc(&pdev->dev, sizeof(*rz_mtu3_pwm), GFP_KERNEL);
- if (!rz_mtu3_pwm)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, RZ_MTU3_MAX_PWM_CHANNELS,
+ sizeof(*rz_mtu3_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ rz_mtu3_pwm = to_rz_mtu3_pwm_chip(chip);
rz_mtu3_pwm->clk = parent_ddata->clk;
@@ -494,7 +498,7 @@ static int rz_mtu3_pwm_probe(struct platform_device *pdev)
}
mutex_init(&rz_mtu3_pwm->lock);
- platform_set_drvdata(pdev, rz_mtu3_pwm);
+ platform_set_drvdata(pdev, chip);
ret = clk_prepare_enable(rz_mtu3_pwm->clk);
if (ret)
return dev_err_probe(dev, ret, "Clock enable failed\n");
@@ -514,15 +518,13 @@ static int rz_mtu3_pwm_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- rz_mtu3_pwm->chip.dev = &pdev->dev;
ret = devm_add_action_or_reset(&pdev->dev, rz_mtu3_pwm_pm_disable,
- rz_mtu3_pwm);
+ chip);
if (ret < 0)
return ret;
- rz_mtu3_pwm->chip.ops = &rz_mtu3_pwm_ops;
- rz_mtu3_pwm->chip.npwm = RZ_MTU3_MAX_PWM_CHANNELS;
- ret = devm_pwmchip_add(&pdev->dev, &rz_mtu3_pwm->chip);
+ chip->ops = &rz_mtu3_pwm_ops;
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
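All of the conversions in this section share one shape: drop the embedded struct pwm_chip from the private data, allocate the chip with devm_pwmchip_alloc(), recover the private data via pwmchip_get_drvdata(), and reach the device through pwmchip_parent(). A minimal sketch of that shape, using hypothetical "foo" names (nothing below is from the patches themselves):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

struct foo_pwm {
	void __iomem *base;
};

static inline struct foo_pwm *to_foo_pwm(struct pwm_chip *chip)
{
	/* Private data lives in the allocation made by devm_pwmchip_alloc(). */
	return pwmchip_get_drvdata(chip);
}

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			 const struct pwm_state *state)
{
	/* Reach the parent device through the chip, not a cached pointer. */
	dev_dbg(pwmchip_parent(chip), "applying state to PWM %u\n", pwm->hwpwm);

	return 0;
}

static const struct pwm_ops foo_pwm_ops = {
	.apply = foo_pwm_apply,
};

static int foo_pwm_probe(struct platform_device *pdev)
{
	struct pwm_chip *chip;
	struct foo_pwm *priv;

	/* Allocates the chip plus sizeof(*priv) bytes of driver data. */
	chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*priv));
	if (IS_ERR(chip))
		return PTR_ERR(chip);
	priv = to_foo_pwm(chip);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	chip->ops = &foo_pwm_ops;

	return devm_pwmchip_add(&pdev->dev, chip);
}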
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 6e77302f7368..efb60c9f0cb3 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -69,7 +69,6 @@ struct samsung_pwm_channel {
/**
* struct samsung_pwm_chip - private data of PWM chip
- * @chip: generic PWM chip
* @variant: local copy of hardware variant data
* @inverter_mask: inverter status for all channels - one bit per channel
* @disabled_mask: disabled status for all channels - one bit per channel
@@ -80,7 +79,6 @@ struct samsung_pwm_channel {
* @channel: per channel driver data
*/
struct samsung_pwm_chip {
- struct pwm_chip chip;
struct samsung_pwm_variant variant;
u8 inverter_mask;
u8 disabled_mask;
@@ -110,7 +108,7 @@ static DEFINE_SPINLOCK(samsung_pwm_lock);
static inline
struct samsung_pwm_chip *to_samsung_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct samsung_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline unsigned int to_tcon_channel(unsigned int channel)
@@ -181,9 +179,10 @@ static unsigned long pwm_samsung_get_tin_rate(struct samsung_pwm_chip *our_chip,
return rate / (reg + 1);
}
-static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *our_chip,
+static unsigned long pwm_samsung_calc_tin(struct pwm_chip *chip,
unsigned int chan, unsigned long freq)
{
+ struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
struct samsung_pwm_variant *variant = &our_chip->variant;
unsigned long rate;
struct clk *clk;
@@ -197,12 +196,12 @@ static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *our_chip,
return rate;
}
- dev_warn(our_chip->chip.dev,
+ dev_warn(pwmchip_parent(chip),
"tclk of PWM %d is inoperational, using tdiv\n", chan);
}
rate = pwm_samsung_get_tin_rate(our_chip, chan);
- dev_dbg(our_chip->chip.dev, "tin parent at %lu\n", rate);
+ dev_dbg(pwmchip_parent(chip), "tin parent at %lu\n", rate);
/*
* Compare minimum PWM frequency that can be achieved with possible
@@ -232,7 +231,7 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
if (!(our_chip->variant.output_mask & BIT(pwm->hwpwm))) {
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
"tried to request PWM channel %d without output\n",
pwm->hwpwm);
return -EINVAL;
@@ -326,12 +325,12 @@ static int __pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
period = NSEC_PER_SEC / period_ns;
- dev_dbg(our_chip->chip.dev, "duty_ns=%d, period_ns=%d (%u)\n",
+ dev_dbg(pwmchip_parent(chip), "duty_ns=%d, period_ns=%d (%u)\n",
duty_ns, period_ns, period);
- tin_rate = pwm_samsung_calc_tin(our_chip, pwm->hwpwm, period);
+ tin_rate = pwm_samsung_calc_tin(chip, pwm->hwpwm, period);
- dev_dbg(our_chip->chip.dev, "tin_rate=%lu\n", tin_rate);
+ dev_dbg(pwmchip_parent(chip), "tin_rate=%lu\n", tin_rate);
tin_ns = NSEC_PER_SEC / tin_rate;
tcnt = period_ns / tin_ns;
@@ -355,8 +354,7 @@ static int __pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
/* -1UL will give 100% duty. */
--tcmp;
- dev_dbg(our_chip->chip.dev,
- "tin_ns=%u, tcmp=%u/%u\n", tin_ns, tcmp, tcnt);
+ dev_dbg(pwmchip_parent(chip), "tin_ns=%u, tcmp=%u/%u\n", tin_ns, tcmp, tcnt);
/* Update PWM registers. */
writel(tcnt, our_chip->base + REG_TCNTB(pwm->hwpwm));
@@ -368,7 +366,7 @@ static int __pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
* shortly after this update (before it autoreloaded the new values).
*/
if (oldtcmp == (u32) -1) {
- dev_dbg(our_chip->chip.dev, "Forcing manual update");
+ dev_dbg(pwmchip_parent(chip), "Forcing manual update");
pwm_samsung_manual_update(our_chip, pwm);
}
@@ -507,9 +505,10 @@ static const struct of_device_id samsung_pwm_matches[] = {
};
MODULE_DEVICE_TABLE(of, samsung_pwm_matches);
-static int pwm_samsung_parse_dt(struct samsung_pwm_chip *our_chip)
+static int pwm_samsung_parse_dt(struct pwm_chip *chip)
{
- struct device_node *np = our_chip->chip.dev->of_node;
+ struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
+ struct device_node *np = pwmchip_parent(chip)->of_node;
const struct of_device_id *match;
struct property *prop;
const __be32 *cur;
@@ -523,7 +522,7 @@ static int pwm_samsung_parse_dt(struct samsung_pwm_chip *our_chip)
of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) {
if (val >= SAMSUNG_PWM_NUM) {
- dev_err(our_chip->chip.dev,
+ dev_err(pwmchip_parent(chip),
"%s: invalid channel index in samsung,pwm-outputs property\n",
__func__);
continue;
@@ -534,7 +533,7 @@ static int pwm_samsung_parse_dt(struct samsung_pwm_chip *our_chip)
return 0;
}
#else
-static int pwm_samsung_parse_dt(struct samsung_pwm_chip *our_chip)
+static int pwm_samsung_parse_dt(struct pwm_chip *chip)
{
return -ENODEV;
}
@@ -544,27 +543,26 @@ static int pwm_samsung_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct samsung_pwm_chip *our_chip;
+ struct pwm_chip *chip;
unsigned int chan;
int ret;
- our_chip = devm_kzalloc(&pdev->dev, sizeof(*our_chip), GFP_KERNEL);
- if (our_chip == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, SAMSUNG_PWM_NUM, sizeof(*our_chip));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ our_chip = to_samsung_pwm_chip(chip);
- our_chip->chip.dev = &pdev->dev;
- our_chip->chip.ops = &pwm_samsung_ops;
- our_chip->chip.npwm = SAMSUNG_PWM_NUM;
+ chip->ops = &pwm_samsung_ops;
our_chip->inverter_mask = BIT(SAMSUNG_PWM_NUM) - 1;
if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
- ret = pwm_samsung_parse_dt(our_chip);
+ ret = pwm_samsung_parse_dt(chip);
if (ret)
return ret;
} else {
- if (!pdev->dev.platform_data) {
- dev_err(&pdev->dev, "no platform data specified\n");
- return -EINVAL;
- }
+ if (!pdev->dev.platform_data)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "no platform data specified\n");
memcpy(&our_chip->variant, pdev->dev.platform_data,
sizeof(our_chip->variant));
@@ -574,17 +572,10 @@ static int pwm_samsung_probe(struct platform_device *pdev)
if (IS_ERR(our_chip->base))
return PTR_ERR(our_chip->base);
- our_chip->base_clk = devm_clk_get(&pdev->dev, "timers");
- if (IS_ERR(our_chip->base_clk)) {
- dev_err(dev, "failed to get timer base clk\n");
- return PTR_ERR(our_chip->base_clk);
- }
-
- ret = clk_prepare_enable(our_chip->base_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable base clock\n");
- return ret;
- }
+ our_chip->base_clk = devm_clk_get_enabled(&pdev->dev, "timers");
+ if (IS_ERR(our_chip->base_clk))
+ return dev_err_probe(dev, PTR_ERR(our_chip->base_clk),
+ "failed to get timer base clk\n");
for (chan = 0; chan < SAMSUNG_PWM_NUM; ++chan)
if (our_chip->variant.output_mask & BIT(chan))
@@ -594,14 +585,11 @@ static int pwm_samsung_probe(struct platform_device *pdev)
our_chip->tclk0 = devm_clk_get(&pdev->dev, "pwm-tclk0");
our_chip->tclk1 = devm_clk_get(&pdev->dev, "pwm-tclk1");
- platform_set_drvdata(pdev, our_chip);
+ platform_set_drvdata(pdev, chip);
- ret = pwmchip_add(&our_chip->chip);
- if (ret < 0) {
- dev_err(dev, "failed to register PWM chip\n");
- clk_disable_unprepare(our_chip->base_clk);
- return ret;
- }
+ ret = devm_pwmchip_add(&pdev->dev, chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to register PWM chip\n");
dev_dbg(dev, "base_clk at %lu, tclk0 at %lu, tclk1 at %lu\n",
clk_get_rate(our_chip->base_clk),
@@ -611,19 +599,10 @@ static int pwm_samsung_probe(struct platform_device *pdev)
return 0;
}
-static void pwm_samsung_remove(struct platform_device *pdev)
-{
- struct samsung_pwm_chip *our_chip = platform_get_drvdata(pdev);
-
- pwmchip_remove(&our_chip->chip);
-
- clk_disable_unprepare(our_chip->base_clk);
-}
-
static int pwm_samsung_resume(struct device *dev)
{
- struct samsung_pwm_chip *our_chip = dev_get_drvdata(dev);
- struct pwm_chip *chip = &our_chip->chip;
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
unsigned int i;
for (i = 0; i < SAMSUNG_PWM_NUM; i++) {
@@ -662,7 +641,6 @@ static struct platform_driver pwm_samsung_driver = {
.of_match_table = of_match_ptr(samsung_pwm_matches),
},
.probe = pwm_samsung_probe,
- .remove_new = pwm_samsung_remove,
};
module_platform_driver(pwm_samsung_driver);
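Note how pwm-samsung also loses its remove callback: with the base clock taken via devm_clk_get_enabled() and the chip registered via devm_pwmchip_add(), devres handles all teardown in reverse order. A short sketch of the clock half, assuming the same "timers" clock name; the helper is hypothetical:

#include <linux/clk.h>
#include <linux/dev_printk.h>
#include <linux/err.h>

static struct clk *foo_get_base_clk(struct device *dev)
{
	struct clk *clk;

	/*
	 * devm_clk_get_enabled() combines devm_clk_get() with
	 * clk_prepare_enable(); devres disables, unprepares and puts the
	 * clock on detach, so neither an error path nor a remove callback
	 * has to undo this.
	 */
	clk = devm_clk_get_enabled(dev, "timers");
	if (IS_ERR(clk))
		dev_err_probe(dev, PTR_ERR(clk),
			      "failed to get timer base clk\n");

	/* May be an ERR_PTR; callers check with IS_ERR(). */
	return clk;
}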
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 089e50bdbbf0..ed7957cc51fd 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -41,7 +41,7 @@
#define PWM_SIFIVE_DEFAULT_PERIOD 10000000
struct pwm_sifive_ddata {
- struct pwm_chip chip;
+ struct device *parent;
struct mutex lock; /* lock to protect user_count and approx_period */
struct notifier_block notifier;
struct clk *clk;
@@ -54,7 +54,7 @@ struct pwm_sifive_ddata {
static inline
struct pwm_sifive_ddata *pwm_sifive_chip_to_ddata(struct pwm_chip *chip)
{
- return container_of(chip, struct pwm_sifive_ddata, chip);
+ return pwmchip_get_drvdata(chip);
}
static int pwm_sifive_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -102,7 +102,7 @@ static void pwm_sifive_update_clock(struct pwm_sifive_ddata *ddata,
/* As scale <= 15 the shift operation cannot overflow. */
num = (unsigned long long)NSEC_PER_SEC << (PWM_SIFIVE_CMPWIDTH + scale);
ddata->real_period = div64_ul(num, rate);
- dev_dbg(ddata->chip.dev,
+ dev_dbg(ddata->parent,
"New real_period = %u ns\n", ddata->real_period);
}
@@ -185,7 +185,7 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!enabled) {
ret = clk_enable(ddata->clk);
if (ret) {
- dev_err(ddata->chip.dev, "Enable clk failed\n");
+ dev_err(pwmchip_parent(chip), "Enable clk failed\n");
return ret;
}
}
@@ -230,15 +230,14 @@ static int pwm_sifive_probe(struct platform_device *pdev)
u32 val;
unsigned int enabled_pwms = 0, enabled_clks = 1;
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, 4, sizeof(*ddata));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ ddata = pwm_sifive_chip_to_ddata(chip);
+ ddata->parent = dev;
mutex_init(&ddata->lock);
- chip = &ddata->chip;
- chip->dev = dev;
chip->ops = &pwm_sifive_ops;
- chip->npwm = 4;
ddata->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddata->regs))
@@ -296,7 +295,7 @@ static int pwm_sifive_probe(struct platform_device *pdev)
goto unregister_clk;
}
- platform_set_drvdata(pdev, ddata);
+ platform_set_drvdata(pdev, chip);
dev_dbg(dev, "SiFive PWM chip registered %d PWMs\n", chip->npwm);
return 0;
@@ -314,15 +313,16 @@ disable_clk:
static void pwm_sifive_remove(struct platform_device *dev)
{
- struct pwm_sifive_ddata *ddata = platform_get_drvdata(dev);
+ struct pwm_chip *chip = platform_get_drvdata(dev);
+ struct pwm_sifive_ddata *ddata = pwm_sifive_chip_to_ddata(chip);
struct pwm_device *pwm;
int ch;
- pwmchip_remove(&ddata->chip);
+ pwmchip_remove(chip);
clk_notifier_unregister(ddata->clk, &ddata->notifier);
- for (ch = 0; ch < ddata->chip.npwm; ch++) {
- pwm = &ddata->chip.pwms[ch];
+ for (ch = 0; ch < chip->npwm; ch++) {
+ pwm = &chip->pwms[ch];
if (pwm->state.enabled)
clk_disable(ddata->clk);
}
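pwm-sifive is the one driver in this section that still caches a struct device pointer: its clock rate notifier only receives the driver data, with no pwm_chip in scope, so pwmchip_parent() is not available there. A sketch of that trade-off, with hypothetical names:

#include <linux/device.h>
#include <linux/notifier.h>

struct foo_ddata {
	/*
	 * Cached parent device for callbacks (here a clk notifier) that
	 * receive only the driver data, not the pwm_chip.
	 */
	struct device *parent;
	struct notifier_block notifier;
};

static int foo_clk_notifier_cb(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct foo_ddata *ddata = container_of(nb, struct foo_ddata, notifier);

	dev_dbg(ddata->parent, "clock event %lu\n", event);

	return NOTIFY_OK;
}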
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
index 88b01ff9e460..934378d6a002 100644
--- a/drivers/pwm/pwm-sl28cpld.c
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -81,14 +81,13 @@
regmap_write((priv)->regmap, (priv)->offset + (reg), (val))
struct sl28cpld_pwm {
- struct pwm_chip chip;
struct regmap *regmap;
u32 offset;
};
static inline struct sl28cpld_pwm *sl28cpld_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct sl28cpld_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int sl28cpld_pwm_get_state(struct pwm_chip *chip,
@@ -213,9 +212,10 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
return -ENODEV;
}
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = sl28cpld_pwm_from_chip(chip);
priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!priv->regmap) {
@@ -231,10 +231,7 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
}
/* Initialize the pwm_chip structure */
- chip = &priv->chip;
- chip->dev = &pdev->dev;
chip->ops = &sl28cpld_pwm_ops;
- chip->npwm = 1;
ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret) {
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index ff991319feef..6c6f3b38c835 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -48,17 +48,15 @@
*
* @mmio_base: base address of pwm chip
* @clk: pointer to clk structure of pwm chip
- * @chip: linux pwm chip representation
*/
struct spear_pwm_chip {
void __iomem *mmio_base;
struct clk *clk;
- struct pwm_chip chip;
};
static inline struct spear_pwm_chip *to_spear_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct spear_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline u32 spear_pwm_readl(struct spear_pwm_chip *chip, unsigned int num,
@@ -194,13 +192,15 @@ static const struct pwm_ops spear_pwm_ops = {
static int spear_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct pwm_chip *chip;
struct spear_pwm_chip *pc;
int ret;
u32 val;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, NUM_PWM, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_spear_pwm_chip(chip);
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
@@ -211,9 +211,7 @@ static int spear_pwm_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk),
"Failed to get clock\n");
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &spear_pwm_ops;
- pc->chip.npwm = NUM_PWM;
+ chip->ops = &spear_pwm_ops;
if (of_device_is_compatible(np, "st,spear1340-pwm")) {
ret = clk_enable(pc->clk);
@@ -232,7 +230,7 @@ static int spear_pwm_probe(struct platform_device *pdev)
clk_disable(pc->clk);
}
- ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
index 77939e161006..4c76ca5e4cdd 100644
--- a/drivers/pwm/pwm-sprd.c
+++ b/drivers/pwm/pwm-sprd.c
@@ -34,15 +34,12 @@ struct sprd_pwm_chn {
struct sprd_pwm_chip {
void __iomem *base;
- struct device *dev;
- struct pwm_chip chip;
- int num_pwms;
struct sprd_pwm_chn chn[SPRD_PWM_CHN_NUM];
};
static inline struct sprd_pwm_chip *sprd_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct sprd_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -86,7 +83,7 @@ static int sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
*/
ret = clk_bulk_prepare_enable(SPRD_PWM_CHN_CLKS_NUM, chn->clks);
if (ret) {
- dev_err(spc->dev, "failed to enable pwm%u clocks\n",
+ dev_err(pwmchip_parent(chip), "failed to enable pwm%u clocks\n",
pwm->hwpwm);
return ret;
}
@@ -183,7 +180,7 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ret = clk_bulk_prepare_enable(SPRD_PWM_CHN_CLKS_NUM,
chn->clks);
if (ret) {
- dev_err(spc->dev,
+ dev_err(pwmchip_parent(chip),
"failed to enable pwm%u clocks\n",
pwm->hwpwm);
return ret;
@@ -215,65 +212,64 @@ static const struct pwm_ops sprd_pwm_ops = {
.get_state = sprd_pwm_get_state,
};
-static int sprd_pwm_clk_init(struct sprd_pwm_chip *spc)
+static int sprd_pwm_clk_init(struct device *dev,
+ struct sprd_pwm_chn chn[SPRD_PWM_CHN_NUM])
{
struct clk *clk_pwm;
int ret, i;
for (i = 0; i < SPRD_PWM_CHN_NUM; i++) {
- struct sprd_pwm_chn *chn = &spc->chn[i];
int j;
for (j = 0; j < SPRD_PWM_CHN_CLKS_NUM; ++j)
- chn->clks[j].id =
+ chn[i].clks[j].id =
sprd_pwm_clks[i * SPRD_PWM_CHN_CLKS_NUM + j];
- ret = devm_clk_bulk_get(spc->dev, SPRD_PWM_CHN_CLKS_NUM,
- chn->clks);
+ ret = devm_clk_bulk_get(dev, SPRD_PWM_CHN_CLKS_NUM,
+ chn[i].clks);
if (ret) {
if (ret == -ENOENT)
break;
- return dev_err_probe(spc->dev, ret,
+ return dev_err_probe(dev, ret,
"failed to get channel clocks\n");
}
- clk_pwm = chn->clks[SPRD_PWM_CHN_OUTPUT_CLK].clk;
- chn->clk_rate = clk_get_rate(clk_pwm);
+ clk_pwm = chn[i].clks[SPRD_PWM_CHN_OUTPUT_CLK].clk;
+ chn[i].clk_rate = clk_get_rate(clk_pwm);
}
if (!i)
- return dev_err_probe(spc->dev, -ENODEV, "no available PWM channels\n");
+ return dev_err_probe(dev, -ENODEV, "no available PWM channels\n");
- spc->num_pwms = i;
-
- return 0;
+ return i;
}
static int sprd_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct sprd_pwm_chip *spc;
- int ret;
+ struct sprd_pwm_chn chn[SPRD_PWM_CHN_NUM];
+ int ret, npwm;
- spc = devm_kzalloc(&pdev->dev, sizeof(*spc), GFP_KERNEL);
- if (!spc)
- return -ENOMEM;
+ npwm = sprd_pwm_clk_init(&pdev->dev, chn);
+ if (npwm < 0)
+ return npwm;
+
+ chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*spc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ spc = sprd_pwm_from_chip(chip);
spc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spc->base))
return PTR_ERR(spc->base);
- spc->dev = &pdev->dev;
-
- ret = sprd_pwm_clk_init(spc);
- if (ret)
- return ret;
+ memcpy(spc->chn, chn, sizeof(chn));
- spc->chip.dev = &pdev->dev;
- spc->chip.ops = &sprd_pwm_ops;
- spc->chip.npwm = spc->num_pwms;
+ chip->ops = &sprd_pwm_ops;
- ret = devm_pwmchip_add(&pdev->dev, &spc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret)
dev_err(&pdev->dev, "failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index 6cf55cf34d39..39d80da0e14a 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -94,7 +94,6 @@ struct sti_pwm_chip {
struct regmap_field *pwm_cpt_en;
struct regmap_field *pwm_cpt_int_en;
struct regmap_field *pwm_cpt_int_stat;
- struct pwm_chip chip;
struct pwm_device *cur;
unsigned long configured;
unsigned int en_count;
@@ -114,7 +113,7 @@ static const struct reg_field sti_pwm_regfields[MAX_REGFIELDS] = {
static inline struct sti_pwm_chip *to_sti_pwmchip(struct pwm_chip *chip)
{
- return container_of(chip, struct sti_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -395,8 +394,17 @@ out:
static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+ struct sti_pwm_compat_data *cdata = pc->cdata;
+ struct device *dev = pc->dev;
int err;
+ if (pwm->hwpwm >= cdata->pwm_num_devs) {
+ dev_err(dev, "device %u is not valid for pwm mode\n",
+ pwm->hwpwm);
+ return -EINVAL;
+ }
+
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
@@ -498,23 +506,7 @@ static int sti_pwm_probe_dt(struct sti_pwm_chip *pc)
{
struct device *dev = pc->dev;
const struct reg_field *reg_fields;
- struct device_node *np = dev->of_node;
struct sti_pwm_compat_data *cdata = pc->cdata;
- u32 num_devs;
- int ret;
-
- ret = of_property_read_u32(np, "st,pwm-num-chan", &num_devs);
- if (!ret)
- cdata->pwm_num_devs = num_devs;
-
- ret = of_property_read_u32(np, "st,capture-num-chan", &num_devs);
- if (!ret)
- cdata->cpt_num_devs = num_devs;
-
- if (!cdata->pwm_num_devs && !cdata->cpt_num_devs) {
- dev_err(dev, "No channels configured\n");
- return -EINVAL;
- }
reg_fields = cdata->reg_fields;
@@ -560,14 +552,33 @@ static const struct regmap_config sti_pwm_regmap_config = {
static int sti_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ u32 num_devs;
+ unsigned int pwm_num_devs = 0;
+ unsigned int cpt_num_devs = 0;
struct sti_pwm_compat_data *cdata;
+ struct pwm_chip *chip;
struct sti_pwm_chip *pc;
unsigned int i;
int irq, ret;
- pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ ret = of_property_read_u32(np, "st,pwm-num-chan", &num_devs);
+ if (!ret)
+ pwm_num_devs = num_devs;
+
+ ret = of_property_read_u32(np, "st,capture-num-chan", &num_devs);
+ if (!ret)
+ cpt_num_devs = num_devs;
+
+ if (!pwm_num_devs && !cpt_num_devs) {
+ dev_err(dev, "No channels configured\n");
+ return -EINVAL;
+ }
+
+ chip = devm_pwmchip_alloc(dev, max(pwm_num_devs, cpt_num_devs), sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_sti_pwmchip(chip);
cdata = devm_kzalloc(dev, sizeof(*cdata), GFP_KERNEL);
if (!cdata)
@@ -600,8 +611,8 @@ static int sti_pwm_probe(struct platform_device *pdev)
cdata->reg_fields = sti_pwm_regfields;
cdata->max_prescale = 0xff;
cdata->max_pwm_cnt = 255;
- cdata->pwm_num_devs = 0;
- cdata->cpt_num_devs = 0;
+ cdata->pwm_num_devs = pwm_num_devs;
+ cdata->cpt_num_devs = cpt_num_devs;
pc->cdata = cdata;
pc->dev = dev;
@@ -644,9 +655,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
return -ENOMEM;
}
- pc->chip.dev = dev;
- pc->chip.ops = &sti_pwm_ops;
- pc->chip.npwm = pc->cdata->pwm_num_devs;
+ chip->ops = &sti_pwm_ops;
for (i = 0; i < cdata->cpt_num_devs; i++) {
struct sti_cpt_ddata *ddata = &cdata->ddata[i];
@@ -655,23 +664,24 @@ static int sti_pwm_probe(struct platform_device *pdev)
mutex_init(&ddata->lock);
}
- ret = pwmchip_add(&pc->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
clk_unprepare(pc->pwm_clk);
clk_unprepare(pc->cpt_clk);
return ret;
}
- platform_set_drvdata(pdev, pc);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static void sti_pwm_remove(struct platform_device *pdev)
{
- struct sti_pwm_chip *pc = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
- pwmchip_remove(&pc->chip);
+ pwmchip_remove(chip);
clk_unprepare(pc->pwm_clk);
clk_unprepare(pc->cpt_clk);
diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c
index 439068f3eca1..989731256f50 100644
--- a/drivers/pwm/pwm-stm32-lp.c
+++ b/drivers/pwm/pwm-stm32-lp.c
@@ -18,14 +18,13 @@
#include <linux/pwm.h>
struct stm32_pwm_lp {
- struct pwm_chip chip;
struct clk *clk;
struct regmap *regmap;
};
static inline struct stm32_pwm_lp *to_stm32_pwm_lp(struct pwm_chip *chip)
{
- return container_of(chip, struct stm32_pwm_lp, chip);
+ return pwmchip_get_drvdata(chip);
}
/* STM32 Low-Power Timer is preceded by a configurable power-of-2 prescaler */
@@ -61,7 +60,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
do_div(div, NSEC_PER_SEC);
if (!div) {
/* Clock is too slow to achieve requested period. */
- dev_dbg(priv->chip.dev, "Can't reach %llu ns\n", state->period);
+ dev_dbg(pwmchip_parent(chip), "Can't reach %llu ns\n", state->period);
return -EINVAL;
}
@@ -69,7 +68,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
while (div > STM32_LPTIM_MAX_ARR) {
presc++;
if ((1 << presc) > STM32_LPTIM_MAX_PRESCALER) {
- dev_err(priv->chip.dev, "max prescaler exceeded\n");
+ dev_err(pwmchip_parent(chip), "max prescaler exceeded\n");
return -EINVAL;
}
div = prd >> presc;
@@ -130,7 +129,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm,
(val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK,
100, 1000);
if (ret) {
- dev_err(priv->chip.dev, "ARR/CMP registers write issue\n");
+ dev_err(pwmchip_parent(chip), "ARR/CMP registers write issue\n");
goto err;
}
ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
@@ -197,36 +196,36 @@ static int stm32_pwm_lp_probe(struct platform_device *pdev)
{
struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
struct stm32_pwm_lp *priv;
+ struct pwm_chip *chip;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = to_stm32_pwm_lp(chip);
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
- priv->chip.dev = &pdev->dev;
- priv->chip.ops = &stm32_pwm_lp_ops;
- priv->chip.npwm = 1;
+ chip->ops = &stm32_pwm_lp_ops;
- ret = devm_pwmchip_add(&pdev->dev, &priv->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static int stm32_pwm_lp_suspend(struct device *dev)
{
- struct stm32_pwm_lp *priv = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
struct pwm_state state;
- pwm_get_state(&priv->chip.pwms[0], &state);
+ pwm_get_state(&chip->pwms[0], &state);
if (state.enabled) {
dev_err(dev, "The consumer didn't stop us (%s)\n",
- priv->chip.pwms[0].label);
+ chip->pwms[0].label);
return -EBUSY;
}
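Probe functions throughout this series now store the pwm_chip in drvdata, so PM callbacks start from dev_get_drvdata() and convert back to the private struct only when they need it. A sketch of the suspend-side consumer check, mirroring the structure above rather than any exact driver:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pwm.h>

static int foo_pwm_suspend(struct device *dev)
{
	/* probe did platform_set_drvdata(pdev, chip) */
	struct pwm_chip *chip = dev_get_drvdata(dev);
	struct pwm_state state;

	pwm_get_state(&chip->pwms[0], &state);
	if (state.enabled) {
		/* a consumer should have disabled the PWM before suspend */
		dev_err(dev, "PWM still enabled by %s\n", chip->pwms[0].label);
		return -EBUSY;
	}

	return 0;
}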
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 5f10cba492ec..0c028d17c075 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -27,7 +27,6 @@ struct stm32_breakinput {
};
struct stm32_pwm {
- struct pwm_chip chip;
struct mutex lock; /* protect pwm config/enable */
struct clk *clk;
struct regmap *regmap;
@@ -40,7 +39,7 @@ struct stm32_pwm {
static inline struct stm32_pwm *to_stm32_pwm_dev(struct pwm_chip *chip)
{
- return container_of(chip, struct stm32_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static u32 active_channels(struct stm32_pwm *dev)
@@ -90,11 +89,12 @@ static u32 active_channels(struct stm32_pwm *dev)
* - Period = t2 - t0
* - Duty cycle = t1 - t0
*/
-static int stm32_pwm_raw_capture(struct stm32_pwm *priv, struct pwm_device *pwm,
+static int stm32_pwm_raw_capture(struct pwm_chip *chip, struct pwm_device *pwm,
unsigned long tmo_ms, u32 *raw_prd,
u32 *raw_dty)
{
- struct device *parent = priv->chip.dev->parent;
+ struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
+ struct device *parent = pwmchip_parent(chip)->parent;
enum stm32_timers_dmas dma_id;
u32 ccen, ccr;
int ret;
@@ -170,7 +170,7 @@ static int stm32_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
ret = clk_enable(priv->clk);
if (ret) {
- dev_err(priv->chip.dev, "failed to enable counter clock\n");
+ dev_err(pwmchip_parent(chip), "failed to enable counter clock\n");
goto unlock;
}
@@ -208,7 +208,7 @@ static int stm32_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
TIM_CCER_CC12P : TIM_CCER_CC34P, pwm->hwpwm < 2 ?
TIM_CCER_CC2P : TIM_CCER_CC4P);
- ret = stm32_pwm_raw_capture(priv, pwm, tmo_ms, &raw_prd, &raw_dty);
+ ret = stm32_pwm_raw_capture(chip, pwm, tmo_ms, &raw_prd, &raw_dty);
if (ret)
goto stop;
@@ -229,7 +229,7 @@ static int stm32_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
/* 2nd measure with new scale */
psc /= scale;
regmap_write(priv->regmap, TIM_PSC, psc);
- ret = stm32_pwm_raw_capture(priv, pwm, tmo_ms, &raw_prd,
+ ret = stm32_pwm_raw_capture(chip, pwm, tmo_ms, &raw_prd,
&raw_dty);
if (ret)
goto stop;
@@ -257,7 +257,7 @@ static int stm32_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
FIELD_PREP(TIM_CCMR_IC1PSC, icpsc) |
FIELD_PREP(TIM_CCMR_IC2PSC, icpsc));
- ret = stm32_pwm_raw_capture(priv, pwm, tmo_ms, &raw_prd, &raw_dty);
+ ret = stm32_pwm_raw_capture(chip, pwm, tmo_ms, &raw_prd, &raw_dty);
if (ret)
goto stop;
@@ -605,7 +605,7 @@ static void stm32_pwm_detect_complementary(struct stm32_pwm *priv)
priv->have_complementary_output = (ccer != 0);
}
-static unsigned int stm32_pwm_detect_channels(struct stm32_pwm *priv,
+static unsigned int stm32_pwm_detect_channels(struct regmap *regmap,
unsigned int *num_enabled)
{
u32 ccer, ccer_backup;
@@ -614,10 +614,10 @@ static unsigned int stm32_pwm_detect_channels(struct stm32_pwm *priv,
* If channels enable bits don't exist writing 1 will have no
* effect so we can detect and count them.
*/
- regmap_read(priv->regmap, TIM_CCER, &ccer_backup);
- regmap_set_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE);
- regmap_read(priv->regmap, TIM_CCER, &ccer);
- regmap_write(priv->regmap, TIM_CCER, ccer_backup);
+ regmap_read(regmap, TIM_CCER, &ccer_backup);
+ regmap_set_bits(regmap, TIM_CCER, TIM_CCER_CCXE);
+ regmap_read(regmap, TIM_CCER, &ccer);
+ regmap_write(regmap, TIM_CCER, ccer_backup);
*num_enabled = hweight32(ccer_backup & TIM_CCER_CCXE);
@@ -629,14 +629,18 @@ static int stm32_pwm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct pwm_chip *chip;
struct stm32_pwm *priv;
- unsigned int num_enabled;
+ unsigned int npwm, num_enabled;
unsigned int i;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ npwm = stm32_pwm_detect_channels(ddata->regmap, &num_enabled);
+
+ chip = devm_pwmchip_alloc(dev, npwm, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = to_stm32_pwm_dev(chip);
mutex_init(&priv->lock);
priv->regmap = ddata->regmap;
@@ -652,37 +656,36 @@ static int stm32_pwm_probe(struct platform_device *pdev)
stm32_pwm_detect_complementary(priv);
- priv->chip.dev = dev;
- priv->chip.ops = &stm32pwm_ops;
- priv->chip.npwm = stm32_pwm_detect_channels(priv, &num_enabled);
+ chip->ops = &stm32pwm_ops;
/* Initialize clock refcount to number of enabled PWM channels. */
for (i = 0; i < num_enabled; i++)
clk_enable(priv->clk);
- ret = devm_pwmchip_add(dev, &priv->chip);
+ ret = devm_pwmchip_add(dev, chip);
if (ret < 0)
return ret;
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, chip);
return 0;
}
static int stm32_pwm_suspend(struct device *dev)
{
- struct stm32_pwm *priv = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
unsigned int i;
u32 ccer, mask;
/* Look for active channels */
ccer = active_channels(priv);
- for (i = 0; i < priv->chip.npwm; i++) {
+ for (i = 0; i < chip->npwm; i++) {
mask = TIM_CCER_CC1E << (i * 4);
if (ccer & mask) {
dev_err(dev, "PWM %u still in use by consumer %s\n",
- i, priv->chip.pwms[i].label);
+ i, chip->pwms[i].label);
return -EBUSY;
}
}
@@ -692,7 +695,8 @@ static int stm32_pwm_suspend(struct device *dev)
static int stm32_pwm_resume(struct device *dev)
{
- struct stm32_pwm *priv = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct stm32_pwm *priv = to_stm32_pwm_dev(chip);
int ret;
ret = pinctrl_pm_select_default_state(dev);
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
index 19c0c0f39675..bb91062d5f1d 100644
--- a/drivers/pwm/pwm-stmpe.c
+++ b/drivers/pwm/pwm-stmpe.c
@@ -27,13 +27,12 @@
struct stmpe_pwm {
struct stmpe *stmpe;
- struct pwm_chip chip;
u8 last_duty;
};
static inline struct stmpe_pwm *to_stmpe_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct stmpe_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -44,7 +43,7 @@ static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = stmpe_reg_read(stmpe_pwm->stmpe, STMPE24XX_PWMCS);
if (ret < 0) {
- dev_dbg(chip->dev, "error reading PWM#%u control\n",
+ dev_dbg(pwmchip_parent(chip), "error reading PWM#%u control\n",
pwm->hwpwm);
return ret;
}
@@ -53,7 +52,7 @@ static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
if (ret) {
- dev_dbg(chip->dev, "error writing PWM#%u control\n",
+ dev_dbg(pwmchip_parent(chip), "error writing PWM#%u control\n",
pwm->hwpwm);
return ret;
}
@@ -70,7 +69,7 @@ static int stmpe_24xx_pwm_disable(struct pwm_chip *chip,
ret = stmpe_reg_read(stmpe_pwm->stmpe, STMPE24XX_PWMCS);
if (ret < 0) {
- dev_dbg(chip->dev, "error reading PWM#%u control\n",
+ dev_dbg(pwmchip_parent(chip), "error reading PWM#%u control\n",
pwm->hwpwm);
return ret;
}
@@ -79,7 +78,7 @@ static int stmpe_24xx_pwm_disable(struct pwm_chip *chip,
ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
if (ret)
- dev_dbg(chip->dev, "error writing PWM#%u control\n",
+ dev_dbg(pwmchip_parent(chip), "error writing PWM#%u control\n",
pwm->hwpwm);
return ret;
}
@@ -125,7 +124,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = stmpe_set_altfunc(stmpe_pwm->stmpe, BIT(pin),
STMPE_BLOCK_PWM);
if (ret) {
- dev_err(chip->dev, "unable to connect PWM#%u to pin\n",
+ dev_err(pwmchip_parent(chip), "unable to connect PWM#%u to pin\n",
pwm->hwpwm);
return ret;
}
@@ -150,7 +149,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return -ENODEV;
}
- dev_dbg(chip->dev, "PWM#%u: config duty %d ns, period %d ns\n",
+ dev_dbg(pwmchip_parent(chip), "PWM#%u: config duty %d ns, period %d ns\n",
pwm->hwpwm, duty_ns, period_ns);
if (duty_ns == 0) {
@@ -216,7 +215,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
program[1] = BRANCH;
}
- dev_dbg(chip->dev,
+ dev_dbg(pwmchip_parent(chip),
"PWM#%u: value = %02x, last_duty = %02x, program=%04x,%04x,%04x\n",
pwm->hwpwm, value, last, program[0], program[1],
program[2]);
@@ -233,7 +232,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = stmpe_reg_write(stmpe_pwm->stmpe, offset, value);
if (ret) {
- dev_dbg(chip->dev, "error writing register %02x: %d\n",
+ dev_dbg(pwmchip_parent(chip), "error writing register %02x: %d\n",
offset, ret);
return ret;
}
@@ -242,7 +241,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = stmpe_reg_write(stmpe_pwm->stmpe, offset, value);
if (ret) {
- dev_dbg(chip->dev, "error writing register %02x: %d\n",
+ dev_dbg(pwmchip_parent(chip), "error writing register %02x: %d\n",
offset, ret);
return ret;
}
@@ -255,7 +254,7 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
/* Sleep for 200ms so we're sure it will take effect */
msleep(200);
- dev_dbg(chip->dev, "programmed PWM#%u, %u bytes\n", pwm->hwpwm, i);
+ dev_dbg(pwmchip_parent(chip), "programmed PWM#%u, %u bytes\n", pwm->hwpwm, i);
return 0;
}
@@ -292,33 +291,36 @@ static const struct pwm_ops stmpe_24xx_pwm_ops = {
static int __init stmpe_pwm_probe(struct platform_device *pdev)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
+ struct pwm_chip *chip;
struct stmpe_pwm *stmpe_pwm;
int ret;
- stmpe_pwm = devm_kzalloc(&pdev->dev, sizeof(*stmpe_pwm), GFP_KERNEL);
- if (!stmpe_pwm)
- return -ENOMEM;
+ switch (stmpe->partnum) {
+ case STMPE2401:
+ case STMPE2403:
+ break;
+ case STMPE1601:
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "STMPE1601 not yet supported\n");
+ default:
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "Unknown STMPE PWM\n");
+ }
- stmpe_pwm->stmpe = stmpe;
- stmpe_pwm->chip.dev = &pdev->dev;
+ chip = devm_pwmchip_alloc(&pdev->dev, 3, sizeof(*stmpe_pwm));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ stmpe_pwm = to_stmpe_pwm(chip);
- if (stmpe->partnum == STMPE2401 || stmpe->partnum == STMPE2403) {
- stmpe_pwm->chip.ops = &stmpe_24xx_pwm_ops;
- stmpe_pwm->chip.npwm = 3;
- } else {
- if (stmpe->partnum == STMPE1601)
- dev_err(&pdev->dev, "STMPE1601 not yet supported\n");
- else
- dev_err(&pdev->dev, "Unknown STMPE PWM\n");
+ stmpe_pwm->stmpe = stmpe;
- return -ENODEV;
- }
+ chip->ops = &stmpe_24xx_pwm_ops;
ret = stmpe_enable(stmpe, STMPE_BLOCK_PWM);
if (ret)
return ret;
- ret = pwmchip_add(&stmpe_pwm->chip);
+ ret = pwmchip_add(chip);
if (ret) {
stmpe_disable(stmpe, STMPE_BLOCK_PWM);
return ret;
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 1a439025540d..5c29590d1821 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -81,7 +81,6 @@ struct sun4i_pwm_data {
};
struct sun4i_pwm_chip {
- struct pwm_chip chip;
struct clk *bus_clk;
struct clk *clk;
struct reset_control *rst;
@@ -92,35 +91,35 @@ struct sun4i_pwm_chip {
static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct sun4i_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
-static inline u32 sun4i_pwm_readl(struct sun4i_pwm_chip *chip,
+static inline u32 sun4i_pwm_readl(struct sun4i_pwm_chip *sun4ichip,
unsigned long offset)
{
- return readl(chip->base + offset);
+ return readl(sun4ichip->base + offset);
}
-static inline void sun4i_pwm_writel(struct sun4i_pwm_chip *chip,
+static inline void sun4i_pwm_writel(struct sun4i_pwm_chip *sun4ichip,
u32 val, unsigned long offset)
{
- writel(val, chip->base + offset);
+ writel(val, sun4ichip->base + offset);
}
static int sun4i_pwm_get_state(struct pwm_chip *chip,
struct pwm_device *pwm,
struct pwm_state *state)
{
- struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+ struct sun4i_pwm_chip *sun4ichip = to_sun4i_pwm_chip(chip);
u64 clk_rate, tmp;
u32 val;
unsigned int prescaler;
- clk_rate = clk_get_rate(sun4i_pwm->clk);
+ clk_rate = clk_get_rate(sun4ichip->clk);
if (!clk_rate)
return -EINVAL;
- val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+ val = sun4i_pwm_readl(sun4ichip, PWM_CTRL_REG);
/*
* PWM chapter in H6 manual has a diagram which explains that if bypass
@@ -128,7 +127,7 @@ static int sun4i_pwm_get_state(struct pwm_chip *chip,
* proved that also enable bit is ignored in this case.
*/
if ((val & BIT_CH(PWM_BYPASS, pwm->hwpwm)) &&
- sun4i_pwm->data->has_direct_mod_clk_output) {
+ sun4ichip->data->has_direct_mod_clk_output) {
state->period = DIV_ROUND_UP_ULL(NSEC_PER_SEC, clk_rate);
state->duty_cycle = DIV_ROUND_UP_ULL(state->period, 2);
state->polarity = PWM_POLARITY_NORMAL;
@@ -137,7 +136,7 @@ static int sun4i_pwm_get_state(struct pwm_chip *chip,
}
if ((PWM_REG_PRESCAL(val, pwm->hwpwm) == PWM_PRESCAL_MASK) &&
- sun4i_pwm->data->has_prescaler_bypass)
+ sun4ichip->data->has_prescaler_bypass)
prescaler = 1;
else
prescaler = prescaler_table[PWM_REG_PRESCAL(val, pwm->hwpwm)];
@@ -156,7 +155,7 @@ static int sun4i_pwm_get_state(struct pwm_chip *chip,
else
state->enabled = false;
- val = sun4i_pwm_readl(sun4i_pwm, PWM_CH_PRD(pwm->hwpwm));
+ val = sun4i_pwm_readl(sun4ichip, PWM_CH_PRD(pwm->hwpwm));
tmp = (u64)prescaler * NSEC_PER_SEC * PWM_REG_DTY(val);
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate);
@@ -167,7 +166,7 @@ static int sun4i_pwm_get_state(struct pwm_chip *chip,
return 0;
}
-static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
+static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4ichip,
const struct pwm_state *state,
u32 *dty, u32 *prd, unsigned int *prsclr,
bool *bypass)
@@ -175,9 +174,9 @@ static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
u64 clk_rate, div = 0;
unsigned int prescaler = 0;
- clk_rate = clk_get_rate(sun4i_pwm->clk);
+ clk_rate = clk_get_rate(sun4ichip->clk);
- *bypass = sun4i_pwm->data->has_direct_mod_clk_output &&
+ *bypass = sun4ichip->data->has_direct_mod_clk_output &&
state->enabled &&
(state->period * clk_rate >= NSEC_PER_SEC) &&
(state->period * clk_rate < 2 * NSEC_PER_SEC) &&
@@ -187,7 +186,7 @@ static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
if (*bypass)
return 0;
- if (sun4i_pwm->data->has_prescaler_bypass) {
+ if (sun4ichip->data->has_prescaler_bypass) {
/* First, test without any prescaler when available */
prescaler = PWM_PRESCAL_MASK;
/*
@@ -233,7 +232,7 @@ static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm,
static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
- struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip);
+ struct sun4i_pwm_chip *sun4ichip = to_sun4i_pwm_chip(chip);
struct pwm_state cstate;
u32 ctrl, duty = 0, period = 0, val;
int ret;
@@ -243,31 +242,31 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
pwm_get_state(pwm, &cstate);
if (!cstate.enabled) {
- ret = clk_prepare_enable(sun4i_pwm->clk);
+ ret = clk_prepare_enable(sun4ichip->clk);
if (ret) {
- dev_err(chip->dev, "failed to enable PWM clock\n");
+ dev_err(pwmchip_parent(chip), "failed to enable PWM clock\n");
return ret;
}
}
- ret = sun4i_pwm_calculate(sun4i_pwm, state, &duty, &period, &prescaler,
+ ret = sun4i_pwm_calculate(sun4ichip, state, &duty, &period, &prescaler,
&bypass);
if (ret) {
- dev_err(chip->dev, "period exceeds the maximum value\n");
+ dev_err(pwmchip_parent(chip), "period exceeds the maximum value\n");
if (!cstate.enabled)
- clk_disable_unprepare(sun4i_pwm->clk);
+ clk_disable_unprepare(sun4ichip->clk);
return ret;
}
- spin_lock(&sun4i_pwm->ctrl_lock);
- ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+ spin_lock(&sun4ichip->ctrl_lock);
+ ctrl = sun4i_pwm_readl(sun4ichip, PWM_CTRL_REG);
- if (sun4i_pwm->data->has_direct_mod_clk_output) {
+ if (sun4ichip->data->has_direct_mod_clk_output) {
if (bypass) {
ctrl |= BIT_CH(PWM_BYPASS, pwm->hwpwm);
/* We can skip the other parameters */
- sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
- spin_unlock(&sun4i_pwm->ctrl_lock);
+ sun4i_pwm_writel(sun4ichip, ctrl, PWM_CTRL_REG);
+ spin_unlock(&sun4ichip->ctrl_lock);
return 0;
}
@@ -277,14 +276,14 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (PWM_REG_PRESCAL(ctrl, pwm->hwpwm) != prescaler) {
/* Prescaler changed, the clock has to be gated */
ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
- sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
+ sun4i_pwm_writel(sun4ichip, ctrl, PWM_CTRL_REG);
ctrl &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm);
ctrl |= BIT_CH(prescaler, pwm->hwpwm);
}
val = (duty & PWM_DTY_MASK) | PWM_PRD(period);
- sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
+ sun4i_pwm_writel(sun4ichip, val, PWM_CH_PRD(pwm->hwpwm));
if (state->polarity != PWM_POLARITY_NORMAL)
ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
@@ -296,9 +295,9 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->enabled)
ctrl |= BIT_CH(PWM_EN, pwm->hwpwm);
- sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
+ sun4i_pwm_writel(sun4ichip, ctrl, PWM_CTRL_REG);
- spin_unlock(&sun4i_pwm->ctrl_lock);
+ spin_unlock(&sun4ichip->ctrl_lock);
if (state->enabled)
return 0;
@@ -310,14 +309,14 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
else
usleep_range(delay_us, delay_us * 2);
- spin_lock(&sun4i_pwm->ctrl_lock);
- ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
+ spin_lock(&sun4ichip->ctrl_lock);
+ ctrl = sun4i_pwm_readl(sun4ichip, PWM_CTRL_REG);
ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm);
- sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
- spin_unlock(&sun4i_pwm->ctrl_lock);
+ sun4i_pwm_writel(sun4ichip, ctrl, PWM_CTRL_REG);
+ spin_unlock(&sun4ichip->ctrl_lock);
- clk_disable_unprepare(sun4i_pwm->clk);
+ clk_disable_unprepare(sun4ichip->clk);
return 0;
}
@@ -384,17 +383,21 @@ MODULE_DEVICE_TABLE(of, sun4i_pwm_dt_ids);
static int sun4i_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
+ const struct sun4i_pwm_data *data;
struct sun4i_pwm_chip *sun4ichip;
int ret;
- sun4ichip = devm_kzalloc(&pdev->dev, sizeof(*sun4ichip), GFP_KERNEL);
- if (!sun4ichip)
- return -ENOMEM;
-
- sun4ichip->data = of_device_get_match_data(&pdev->dev);
- if (!sun4ichip->data)
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
return -ENODEV;
+ chip = devm_pwmchip_alloc(&pdev->dev, data->npwm, sizeof(*sun4ichip));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ sun4ichip = to_sun4i_pwm_chip(chip);
+
+ sun4ichip->data = data;
sun4ichip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sun4ichip->base))
return PTR_ERR(sun4ichip->base);
@@ -451,19 +454,17 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
goto err_bus;
}
- sun4ichip->chip.dev = &pdev->dev;
- sun4ichip->chip.ops = &sun4i_pwm_ops;
- sun4ichip->chip.npwm = sun4ichip->data->npwm;
+ chip->ops = &sun4i_pwm_ops;
spin_lock_init(&sun4ichip->ctrl_lock);
- ret = pwmchip_add(&sun4ichip->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
goto err_pwm_add;
}
- platform_set_drvdata(pdev, sun4ichip);
+ platform_set_drvdata(pdev, chip);
return 0;
@@ -477,9 +478,10 @@ err_bus:
static void sun4i_pwm_remove(struct platform_device *pdev)
{
- struct sun4i_pwm_chip *sun4ichip = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct sun4i_pwm_chip *sun4ichip = to_sun4i_pwm_chip(chip);
- pwmchip_remove(&sun4ichip->chip);
+ pwmchip_remove(chip);
clk_disable_unprepare(sun4ichip->bus_clk);
reset_control_assert(sun4ichip->rst);
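Drivers that keep a non-devm pwmchip_add() (sti and sun4i above, tegra below) need the matching remove order: unregister the chip first, then tear down clocks and resets through the private data. A sketch with hypothetical names:

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

struct foo_pwm {
	struct clk *clk;
};

static inline struct foo_pwm *to_foo_pwm(struct pwm_chip *chip)
{
	return pwmchip_get_drvdata(chip);
}

static void foo_pwm_remove(struct platform_device *pdev)
{
	struct pwm_chip *chip = platform_get_drvdata(pdev);
	struct foo_pwm *priv = to_foo_pwm(chip);

	/* Unregister before touching resources the callbacks might use. */
	pwmchip_remove(chip);

	clk_disable_unprepare(priv->clk);
}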
diff --git a/drivers/pwm/pwm-sunplus.c b/drivers/pwm/pwm-sunplus.c
index 773e2f80526e..b342b843247b 100644
--- a/drivers/pwm/pwm-sunplus.c
+++ b/drivers/pwm/pwm-sunplus.c
@@ -43,14 +43,13 @@
#define SP7021_PWM_NUM 4
struct sunplus_pwm {
- struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
static inline struct sunplus_pwm *to_sunplus_pwm(struct pwm_chip *chip)
{
- return container_of(chip, struct sunplus_pwm, chip);
+ return pwmchip_get_drvdata(chip);
}
static int sunplus_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -175,12 +174,14 @@ static void sunplus_pwm_clk_release(void *data)
static int sunplus_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pwm_chip *chip;
struct sunplus_pwm *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, SP7021_PWM_NUM, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = to_sunplus_pwm(chip);
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
@@ -203,11 +204,9 @@ static int sunplus_pwm_probe(struct platform_device *pdev)
return ret;
}
- priv->chip.dev = dev;
- priv->chip.ops = &sunplus_pwm_ops;
- priv->chip.npwm = SP7021_PWM_NUM;
+ chip->ops = &sunplus_pwm_ops;
- ret = devm_pwmchip_add(dev, &priv->chip);
+ ret = devm_pwmchip_add(dev, chip);
if (ret < 0)
return dev_err_probe(dev, ret, "Cannot register sunplus PWM\n");
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 82ee2f0754f9..a3d69976148f 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -65,9 +65,6 @@ struct tegra_pwm_soc {
};
struct tegra_pwm_chip {
- struct pwm_chip chip;
- struct device *dev;
-
struct clk *clk;
struct reset_control *rst;
@@ -81,7 +78,7 @@ struct tegra_pwm_chip {
static inline struct tegra_pwm_chip *to_tegra_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct tegra_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline u32 pwm_readl(struct tegra_pwm_chip *pc, unsigned int offset)
@@ -158,7 +155,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
*/
required_clk_rate *= 2;
- err = dev_pm_opp_set_rate(pc->dev, required_clk_rate);
+ err = dev_pm_opp_set_rate(pwmchip_parent(chip), required_clk_rate);
if (err < 0)
return -EINVAL;
@@ -194,7 +191,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* before writing the register. Otherwise, keep it enabled.
*/
if (!pwm_is_enabled(pwm)) {
- err = pm_runtime_resume_and_get(pc->dev);
+ err = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (err)
return err;
} else
@@ -206,7 +203,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* If the PWM is not enabled, turn the clock off again to save power.
*/
if (!pwm_is_enabled(pwm))
- pm_runtime_put(pc->dev);
+ pm_runtime_put(pwmchip_parent(chip));
return 0;
}
@@ -217,7 +214,7 @@ static int tegra_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
int rc = 0;
u32 val;
- rc = pm_runtime_resume_and_get(pc->dev);
+ rc = pm_runtime_resume_and_get(pwmchip_parent(chip));
if (rc)
return rc;
@@ -237,7 +234,7 @@ static void tegra_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val &= ~PWM_ENABLE;
pwm_writel(pc, pwm->hwpwm, val);
- pm_runtime_put_sync(pc->dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
static int tegra_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -272,21 +269,25 @@ static const struct pwm_ops tegra_pwm_ops = {
static int tegra_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct tegra_pwm_chip *pc;
+ const struct tegra_pwm_soc *soc;
int ret;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ soc = of_device_get_match_data(&pdev->dev);
+
+ chip = devm_pwmchip_alloc(&pdev->dev, soc->num_channels, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_tegra_pwm_chip(chip);
- pc->soc = of_device_get_match_data(&pdev->dev);
- pc->dev = &pdev->dev;
+ pc->soc = soc;
pc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
- platform_set_drvdata(pdev, pc);
+ platform_set_drvdata(pdev, chip);
pc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pc->clk))
@@ -302,7 +303,7 @@ static int tegra_pwm_probe(struct platform_device *pdev)
return ret;
/* Set maximum frequency of the IP */
- ret = dev_pm_opp_set_rate(pc->dev, pc->soc->max_frequency);
+ ret = dev_pm_opp_set_rate(&pdev->dev, pc->soc->max_frequency);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to set max frequency: %d\n", ret);
goto put_pm;
@@ -328,11 +329,9 @@ static int tegra_pwm_probe(struct platform_device *pdev)
reset_control_deassert(pc->rst);
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &tegra_pwm_ops;
- pc->chip.npwm = pc->soc->num_channels;
+ chip->ops = &tegra_pwm_ops;
- ret = pwmchip_add(&pc->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
reset_control_assert(pc->rst);
@@ -350,9 +349,10 @@ put_pm:
static void tegra_pwm_remove(struct platform_device *pdev)
{
- struct tegra_pwm_chip *pc = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
- pwmchip_remove(&pc->chip);
+ pwmchip_remove(chip);
reset_control_assert(pc->rst);
@@ -361,7 +361,8 @@ static void tegra_pwm_remove(struct platform_device *pdev)
static int __maybe_unused tegra_pwm_runtime_suspend(struct device *dev)
{
- struct tegra_pwm_chip *pc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
int err;
clk_disable_unprepare(pc->clk);
@@ -377,7 +378,8 @@ static int __maybe_unused tegra_pwm_runtime_suspend(struct device *dev)
static int __maybe_unused tegra_pwm_runtime_resume(struct device *dev)
{
- struct tegra_pwm_chip *pc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
int err;
err = pinctrl_pm_select_default_state(dev);
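Where the channel count comes from OF match data (sun4i above, tegra here), the lookup moves ahead of the allocation for the same reason as in pwm-sprd. A sketch with a hypothetical SoC descriptor:

#include <linux/err.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

struct foo_soc {
	unsigned int num_channels;
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_soc *soc;
	struct pwm_chip *chip;

	/* Must be known before the chip is sized from it. */
	soc = of_device_get_match_data(&pdev->dev);
	if (!soc)
		return -ENODEV;

	chip = devm_pwmchip_alloc(&pdev->dev, soc->num_channels, 0);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	/* chip->ops assignment and registration follow as usual. */
	return 0;
}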
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index d974f4414ac9..d6c2b1b1387e 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -32,7 +32,6 @@ struct ecap_context {
};
struct ecap_pwm_chip {
- struct pwm_chip chip;
unsigned int clk_rate;
void __iomem *mmio_base;
struct ecap_context ctx;
@@ -40,7 +39,7 @@ struct ecap_pwm_chip {
static inline struct ecap_pwm_chip *to_ecap_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct ecap_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
/*
@@ -70,7 +69,7 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
duty_cycles = (u32)c;
}
- pm_runtime_get_sync(pc->chip.dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
value = readw(pc->mmio_base + ECCTL2);
@@ -100,7 +99,7 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
writew(value, pc->mmio_base + ECCTL2);
}
- pm_runtime_put_sync(pc->chip.dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
return 0;
}
@@ -111,7 +110,7 @@ static int ecap_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip);
u16 value;
- pm_runtime_get_sync(pc->chip.dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
value = readw(pc->mmio_base + ECCTL2);
@@ -124,7 +123,7 @@ static int ecap_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
writew(value, pc->mmio_base + ECCTL2);
- pm_runtime_put_sync(pc->chip.dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
return 0;
}
@@ -135,7 +134,7 @@ static int ecap_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
u16 value;
/* Leave clock enabled on enabling PWM */
- pm_runtime_get_sync(pc->chip.dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
/*
* Enable 'Free run Time stamp counter mode' to start counter
@@ -162,7 +161,7 @@ static void ecap_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
writew(value, pc->mmio_base + ECCTL2);
/* Disable clock on PWM disable */
- pm_runtime_put_sync(pc->chip.dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
static int ecap_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -218,12 +217,14 @@ static int ecap_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ecap_pwm_chip *pc;
+ struct pwm_chip *chip;
struct clk *clk;
int ret;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_ecap_pwm_chip(chip);
clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(clk)) {
@@ -244,21 +245,19 @@ static int ecap_pwm_probe(struct platform_device *pdev)
return -EINVAL;
}
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &ecap_pwm_ops;
- pc->chip.npwm = 1;
+ chip->ops = &ecap_pwm_ops;
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
- ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
return ret;
}
- platform_set_drvdata(pdev, pc);
+ platform_set_drvdata(pdev, chip);
pm_runtime_enable(&pdev->dev);
return 0;
@@ -269,17 +268,21 @@ static void ecap_pwm_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static void ecap_pwm_save_context(struct ecap_pwm_chip *pc)
+static void ecap_pwm_save_context(struct pwm_chip *chip)
{
- pm_runtime_get_sync(pc->chip.dev);
+ struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip);
+
+ pm_runtime_get_sync(pwmchip_parent(chip));
pc->ctx.ecctl2 = readw(pc->mmio_base + ECCTL2);
pc->ctx.cap4 = readl(pc->mmio_base + CAP4);
pc->ctx.cap3 = readl(pc->mmio_base + CAP3);
- pm_runtime_put_sync(pc->chip.dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
-static void ecap_pwm_restore_context(struct ecap_pwm_chip *pc)
+static void ecap_pwm_restore_context(struct pwm_chip *chip)
{
+ struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip);
+
writel(pc->ctx.cap3, pc->mmio_base + CAP3);
writel(pc->ctx.cap4, pc->mmio_base + CAP4);
writew(pc->ctx.ecctl2, pc->mmio_base + ECCTL2);
@@ -287,10 +290,10 @@ static void ecap_pwm_restore_context(struct ecap_pwm_chip *pc)
static int ecap_pwm_suspend(struct device *dev)
{
- struct ecap_pwm_chip *pc = dev_get_drvdata(dev);
- struct pwm_device *pwm = pc->chip.pwms;
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct pwm_device *pwm = chip->pwms;
- ecap_pwm_save_context(pc);
+ ecap_pwm_save_context(chip);
/* Disable explicitly if PWM is running */
if (pwm_is_enabled(pwm))
@@ -301,14 +304,14 @@ static int ecap_pwm_suspend(struct device *dev)
static int ecap_pwm_resume(struct device *dev)
{
- struct ecap_pwm_chip *pc = dev_get_drvdata(dev);
- struct pwm_device *pwm = pc->chip.pwms;
+ struct pwm_chip *chip = dev_get_drvdata(dev);
+ struct pwm_device *pwm = chip->pwms;
/* Enable explicitly if PWM was running */
if (pwm_is_enabled(pwm))
pm_runtime_get_sync(dev);
- ecap_pwm_restore_context(pc);
+ ecap_pwm_restore_context(chip);
return 0;
}
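All of the driver conversions in this patch follow the same shape: the private struct loses its embedded struct pwm_chip, the chip is allocated by the PWM core with the private data as a tail allocation, and the parent device is reached through pwmchip_parent(). A minimal sketch of the pattern for a hypothetical driver (the foo_* names and ops are illustrative, not part of this patch):

#include <linux/platform_device.h>
#include <linux/pwm.h>

struct foo_pwm_chip {
	void __iomem *mmio_base;
};

static inline struct foo_pwm_chip *to_foo_pwm_chip(struct pwm_chip *chip)
{
	/* Driver data is the tail allocation made by devm_pwmchip_alloc(). */
	return pwmchip_get_drvdata(chip);
}

static int foo_pwm_probe(struct platform_device *pdev)
{
	struct pwm_chip *chip;
	struct foo_pwm_chip *pc;

	/* Allocates the pwm_chip plus sizeof(*pc) bytes of driver data. */
	chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*pc));
	if (IS_ERR(chip))
		return PTR_ERR(chip);
	pc = to_foo_pwm_chip(chip);

	pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pc->mmio_base))
		return PTR_ERR(pc->mmio_base);

	chip->ops = &foo_pwm_ops;	/* assumed defined elsewhere */

	/* Callbacks now use pwmchip_parent(chip) instead of a stored dev. */
	return devm_pwmchip_add(&pdev->dev, chip);
}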
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index af231fa74fa9..e5104725d9b7 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -105,7 +105,6 @@ struct ehrpwm_context {
};
struct ehrpwm_pwm_chip {
- struct pwm_chip chip;
unsigned long clk_rate;
void __iomem *mmio_base;
unsigned long period_cycles[NUM_PWM_CHANNEL];
@@ -116,7 +115,7 @@ struct ehrpwm_pwm_chip {
static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct ehrpwm_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static inline u16 ehrpwm_read(void __iomem *base, unsigned int offset)
@@ -256,7 +255,7 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (i == pwm->hwpwm)
continue;
- dev_err(chip->dev,
+ dev_err(pwmchip_parent(chip),
"period value conflicts with channel %u\n",
i);
return -EINVAL;
@@ -268,11 +267,11 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
/* Configure clock prescaler to support Low frequency PWM wave */
if (set_prescale_div(period_cycles/PERIOD_MAX, &ps_divval,
&tb_divval)) {
- dev_err(chip->dev, "Unsupported values\n");
+ dev_err(pwmchip_parent(chip), "Unsupported values\n");
return -EINVAL;
}
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
/* Update clock prescaler values */
ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval);
@@ -299,7 +298,7 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles);
- pm_runtime_put_sync(chip->dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
return 0;
}
@@ -323,7 +322,7 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
int ret;
/* Leave clock enabled on enabling PWM */
- pm_runtime_get_sync(chip->dev);
+ pm_runtime_get_sync(pwmchip_parent(chip));
/* Disabling Action Qualifier on PWM output */
if (pwm->hwpwm) {
@@ -346,8 +345,8 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Enable TBCLK */
ret = clk_enable(pc->tbclk);
if (ret) {
- dev_err(chip->dev, "Failed to enable TBCLK for %s: %d\n",
- dev_name(pc->chip.dev), ret);
+ dev_err(pwmchip_parent(chip), "Failed to enable TBCLK for %s: %d\n",
+ dev_name(pwmchip_parent(chip)), ret);
return ret;
}
@@ -385,7 +384,7 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
clk_disable(pc->tbclk);
/* Disable clock on PWM disable */
- pm_runtime_put_sync(chip->dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -393,8 +392,8 @@ static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
if (pwm_is_enabled(pwm)) {
- dev_warn(chip->dev, "Removing PWM device without disabling\n");
- pm_runtime_put_sync(chip->dev);
+ dev_warn(pwmchip_parent(chip), "Removing PWM device without disabling\n");
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
/* set period value to zero on free */
@@ -450,12 +449,14 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ehrpwm_pwm_chip *pc;
+ struct pwm_chip *chip;
struct clk *clk;
int ret;
- pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
- if (!pc)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, NUM_PWM_CHANNEL, sizeof(*pc));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ pc = to_ehrpwm_pwm_chip(chip);
clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(clk)) {
@@ -474,9 +475,7 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
return -EINVAL;
}
- pc->chip.dev = &pdev->dev;
- pc->chip.ops = &ehrpwm_pwm_ops;
- pc->chip.npwm = NUM_PWM_CHANNEL;
+ chip->ops = &ehrpwm_pwm_ops;
pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
@@ -493,13 +492,13 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
return ret;
}
- ret = pwmchip_add(&pc->chip);
+ ret = pwmchip_add(chip);
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
goto err_clk_unprepare;
}
- platform_set_drvdata(pdev, pc);
+ platform_set_drvdata(pdev, chip);
pm_runtime_enable(&pdev->dev);
return 0;
@@ -512,18 +511,21 @@ err_clk_unprepare:
static void ehrpwm_pwm_remove(struct platform_device *pdev)
{
- struct ehrpwm_pwm_chip *pc = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
- pwmchip_remove(&pc->chip);
+ pwmchip_remove(chip);
clk_unprepare(pc->tbclk);
pm_runtime_disable(&pdev->dev);
}
-static void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc)
+static void ehrpwm_pwm_save_context(struct pwm_chip *chip)
{
- pm_runtime_get_sync(pc->chip.dev);
+ struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
+
+ pm_runtime_get_sync(pwmchip_parent(chip));
pc->ctx.tbctl = ehrpwm_read(pc->mmio_base, TBCTL);
pc->ctx.tbprd = ehrpwm_read(pc->mmio_base, TBPRD);
@@ -534,11 +536,13 @@ static void ehrpwm_pwm_save_context(struct ehrpwm_pwm_chip *pc)
pc->ctx.aqsfrc = ehrpwm_read(pc->mmio_base, AQSFRC);
pc->ctx.aqcsfrc = ehrpwm_read(pc->mmio_base, AQCSFRC);
- pm_runtime_put_sync(pc->chip.dev);
+ pm_runtime_put_sync(pwmchip_parent(chip));
}
-static void ehrpwm_pwm_restore_context(struct ehrpwm_pwm_chip *pc)
+static void ehrpwm_pwm_restore_context(struct pwm_chip *chip)
{
+ struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
+
ehrpwm_write(pc->mmio_base, TBPRD, pc->ctx.tbprd);
ehrpwm_write(pc->mmio_base, CMPA, pc->ctx.cmpa);
ehrpwm_write(pc->mmio_base, CMPB, pc->ctx.cmpb);
@@ -551,13 +555,13 @@ static void ehrpwm_pwm_restore_context(struct ehrpwm_pwm_chip *pc)
static int ehrpwm_pwm_suspend(struct device *dev)
{
- struct ehrpwm_pwm_chip *pc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
unsigned int i;
- ehrpwm_pwm_save_context(pc);
+ ehrpwm_pwm_save_context(chip);
- for (i = 0; i < pc->chip.npwm; i++) {
- struct pwm_device *pwm = &pc->chip.pwms[i];
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
if (!pwm_is_enabled(pwm))
continue;
@@ -571,11 +575,11 @@ static int ehrpwm_pwm_suspend(struct device *dev)
static int ehrpwm_pwm_resume(struct device *dev)
{
- struct ehrpwm_pwm_chip *pc = dev_get_drvdata(dev);
+ struct pwm_chip *chip = dev_get_drvdata(dev);
unsigned int i;
- for (i = 0; i < pc->chip.npwm; i++) {
- struct pwm_device *pwm = &pc->chip.pwms[i];
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
if (!pwm_is_enabled(pwm))
continue;
@@ -584,7 +588,7 @@ static int ehrpwm_pwm_resume(struct device *dev)
pm_runtime_get_sync(dev);
}
- ehrpwm_pwm_restore_context(pc);
+ ehrpwm_pwm_restore_context(chip);
return 0;
}
diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c
index c670ccb81653..4b10a8dab312 100644
--- a/drivers/pwm/pwm-twl-led.c
+++ b/drivers/pwm/pwm-twl-led.c
@@ -62,13 +62,12 @@
#define TWL6040_LED_MODE_MASK 0x03
struct twl_pwmled_chip {
- struct pwm_chip chip;
struct mutex mutex;
};
static inline struct twl_pwmled_chip *to_twl(struct pwm_chip *chip)
{
- return container_of(chip, struct twl_pwmled_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int twl4030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -100,7 +99,7 @@ static int twl4030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = twl_i2c_write(TWL4030_MODULE_LED, pwm_config, base, 2);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to configure PWM\n", pwm->label);
return ret;
}
@@ -114,7 +113,7 @@ static int twl4030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read LEDEN\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to read LEDEN\n", pwm->label);
goto out;
}
@@ -122,7 +121,7 @@ static int twl4030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -139,7 +138,7 @@ static void twl4030_pwmled_disable(struct pwm_chip *chip,
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_LED, &val, TWL4030_LEDEN_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read LEDEN\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to read LEDEN\n", pwm->label);
goto out;
}
@@ -147,7 +146,7 @@ static void twl4030_pwmled_disable(struct pwm_chip *chip,
ret = twl_i2c_write_u8(TWL4030_MODULE_LED, val, TWL4030_LEDEN_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -203,7 +202,7 @@ static int twl6030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, on_time,
TWL6030_LED_PWM_CTRL1);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to configure PWM\n", pwm->label);
return ret;
}
@@ -217,7 +216,7 @@ static int twl6030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ dev_err(pwmchip_parent(chip), "%s: Failed to read PWM_CTRL2\n",
pwm->label);
goto out;
}
@@ -227,7 +226,7 @@ static int twl6030_pwmled_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -244,7 +243,7 @@ static void twl6030_pwmled_disable(struct pwm_chip *chip,
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ dev_err(pwmchip_parent(chip), "%s: Failed to read PWM_CTRL2\n",
pwm->label);
goto out;
}
@@ -254,7 +253,7 @@ static void twl6030_pwmled_disable(struct pwm_chip *chip,
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -295,7 +294,7 @@ static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ dev_err(pwmchip_parent(chip), "%s: Failed to read PWM_CTRL2\n",
pwm->label);
goto out;
}
@@ -305,7 +304,7 @@ static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to request PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to request PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -321,7 +320,7 @@ static void twl6030_pwmled_free(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL6030_MODULE_ID1, &val, TWL6030_LED_PWM_CTRL2);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read PWM_CTRL2\n",
+ dev_err(pwmchip_parent(chip), "%s: Failed to read PWM_CTRL2\n",
pwm->label);
goto out;
}
@@ -331,7 +330,7 @@ static void twl6030_pwmled_free(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_LED_PWM_CTRL2);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to free PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to free PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -345,25 +344,29 @@ static const struct pwm_ops twl6030_pwmled_ops = {
static int twl_pwmled_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct twl_pwmled_chip *twl;
-
- twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
- if (!twl)
- return -ENOMEM;
+ unsigned int npwm;
+ const struct pwm_ops *ops;
if (twl_class_is_4030()) {
- twl->chip.ops = &twl4030_pwmled_ops;
- twl->chip.npwm = 2;
+ ops = &twl4030_pwmled_ops;
+ npwm = 2;
} else {
- twl->chip.ops = &twl6030_pwmled_ops;
- twl->chip.npwm = 1;
+ ops = &twl6030_pwmled_ops;
+ npwm = 1;
}
- twl->chip.dev = &pdev->dev;
+ chip = devm_pwmchip_alloc(&pdev->dev, npwm, sizeof(*twl));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ twl = to_twl(chip);
+
+ chip->ops = ops;
mutex_init(&twl->mutex);
- return devm_pwmchip_add(&pdev->dev, &twl->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index 68e02c9a6bf9..8f981ffff4b4 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -46,7 +46,6 @@
#define TWL6030_PWM_TOGGLE(pwm, x) ((x) << (pwm * 3))
struct twl_pwm_chip {
- struct pwm_chip chip;
struct mutex mutex;
u8 twl6030_toggle3;
u8 twl4030_pwm_mux;
@@ -54,7 +53,7 @@ struct twl_pwm_chip {
static inline struct twl_pwm_chip *to_twl(struct pwm_chip *chip)
{
- return container_of(chip, struct twl_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int twl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -86,7 +85,7 @@ static int twl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
ret = twl_i2c_write(TWL_MODULE_PWM, pwm_config, base, 2);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to configure PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to configure PWM\n", pwm->label);
return ret;
}
@@ -100,7 +99,7 @@ static int twl4030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_GPBR1_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read GPBR1\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to read GPBR1\n", pwm->label);
goto out;
}
@@ -108,13 +107,13 @@ static int twl4030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM\n", pwm->label);
val |= TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMX_ENABLE);
ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -130,7 +129,7 @@ static void twl4030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_GPBR1_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read GPBR1\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to read GPBR1\n", pwm->label);
goto out;
}
@@ -138,13 +137,13 @@ static void twl4030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
val &= ~TWL4030_PWM_TOGGLE(pwm->hwpwm, TWL4030_PWMXCLK_ENABLE);
ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_GPBR1_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -167,7 +166,7 @@ static int twl4030_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_PMBR1_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read PMBR1\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to read PMBR1\n", pwm->label);
goto out;
}
@@ -181,7 +180,7 @@ static int twl4030_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_PMBR1_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to request PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to request PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -202,7 +201,7 @@ static void twl4030_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
mutex_lock(&twl->mutex);
ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, &val, TWL4030_PMBR1_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to read PMBR1\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to read PMBR1\n", pwm->label);
goto out;
}
@@ -212,7 +211,7 @@ static void twl4030_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, val, TWL4030_PMBR1_REG);
if (ret < 0)
- dev_err(chip->dev, "%s: Failed to free PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to free PWM\n", pwm->label);
out:
mutex_unlock(&twl->mutex);
@@ -231,7 +230,7 @@ static int twl6030_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to enable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM\n", pwm->label);
goto out;
}
@@ -254,7 +253,7 @@ static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
goto out;
}
@@ -262,7 +261,7 @@ static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
goto out;
}
@@ -270,7 +269,7 @@ static void twl6030_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, val, TWL6030_TOGGLE3_REG);
if (ret < 0) {
- dev_err(chip->dev, "%s: Failed to disable PWM\n", pwm->label);
+ dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM\n", pwm->label);
goto out;
}
@@ -341,23 +340,22 @@ static const struct pwm_ops twl6030_pwm_ops = {
static int twl_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct twl_pwm_chip *twl;
- twl = devm_kzalloc(&pdev->dev, sizeof(*twl), GFP_KERNEL);
- if (!twl)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, 2, sizeof(*twl));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ twl = to_twl(chip);
if (twl_class_is_4030())
- twl->chip.ops = &twl4030_pwm_ops;
+ chip->ops = &twl4030_pwm_ops;
else
- twl->chip.ops = &twl6030_pwm_ops;
-
- twl->chip.dev = &pdev->dev;
- twl->chip.npwm = 2;
+ chip->ops = &twl6030_pwm_ops;
mutex_init(&twl->mutex);
- return devm_pwmchip_add(&pdev->dev, &twl->chip);
+ return devm_pwmchip_add(&pdev->dev, chip);
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c
index 8d736d558122..9e55380957be 100644
--- a/drivers/pwm/pwm-visconti.c
+++ b/drivers/pwm/pwm-visconti.c
@@ -34,13 +34,12 @@
#define PIPGM_PWMC_POLARITY_MASK GENMASK(5, 5)
struct visconti_pwm_chip {
- struct pwm_chip chip;
void __iomem *base;
};
static inline struct visconti_pwm_chip *visconti_pwm_from_chip(struct pwm_chip *chip)
{
- return container_of(chip, struct visconti_pwm_chip, chip);
+ return pwmchip_get_drvdata(chip);
}
static int visconti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -134,22 +133,22 @@ static const struct pwm_ops visconti_pwm_ops = {
static int visconti_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pwm_chip *chip;
struct visconti_pwm_chip *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(dev, 4, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = visconti_pwm_from_chip(chip);
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->chip.dev = dev;
- priv->chip.ops = &visconti_pwm_ops;
- priv->chip.npwm = 4;
+ chip->ops = &visconti_pwm_ops;
- ret = devm_pwmchip_add(&pdev->dev, &priv->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Cannot register visconti PWM\n");
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 7bfeacee05d0..016c82d65527 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -45,16 +45,19 @@
#define STATUS_ALL_UPDATE 0x0F
struct vt8500_chip {
- struct pwm_chip chip;
void __iomem *base;
struct clk *clk;
};
-#define to_vt8500_chip(chip) container_of(chip, struct vt8500_chip, chip)
+static inline struct vt8500_chip *to_vt8500_chip(struct pwm_chip *chip)
+{
+ return pwmchip_get_drvdata(chip);
+}
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
-static inline void vt8500_pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 bitmask)
+static inline void vt8500_pwm_busy_wait(struct pwm_chip *chip, int nr, u8 bitmask)
{
+ struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
int loops = msecs_to_loops(10);
u32 mask = bitmask << (nr << 8);
@@ -62,7 +65,7 @@ static inline void vt8500_pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 bitmask)
cpu_relax();
if (unlikely(!loops))
- dev_warn(vt8500->chip.dev, "Waiting for status bits 0x%x to clear timed out\n",
+ dev_warn(pwmchip_parent(chip), "Waiting for status bits 0x%x to clear timed out\n",
mask);
}
@@ -77,7 +80,7 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
err = clk_enable(vt8500->clk);
if (err < 0) {
- dev_err(chip->dev, "failed to enable clock\n");
+ dev_err(pwmchip_parent(chip), "failed to enable clock\n");
return err;
}
@@ -103,18 +106,18 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
dc = div64_u64(c, period_ns);
writel(prescale, vt8500->base + REG_SCALAR(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_SCALAR_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_SCALAR_UPDATE);
writel(pv, vt8500->base + REG_PERIOD(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_PERIOD_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_PERIOD_UPDATE);
writel(dc, vt8500->base + REG_DUTY(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_DUTY_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_DUTY_UPDATE);
val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
val |= CTRL_AUTOLOAD;
writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_CTRL_UPDATE);
clk_disable(vt8500->clk);
return 0;
@@ -128,14 +131,14 @@ static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
err = clk_enable(vt8500->clk);
if (err < 0) {
- dev_err(chip->dev, "failed to enable clock\n");
+ dev_err(pwmchip_parent(chip), "failed to enable clock\n");
return err;
}
val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
val |= CTRL_ENABLE;
writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_CTRL_UPDATE);
return 0;
}
@@ -148,7 +151,7 @@ static void vt8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val = readl(vt8500->base + REG_CTRL(pwm->hwpwm));
val &= ~CTRL_ENABLE;
writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_CTRL_UPDATE);
clk_disable(vt8500->clk);
}
@@ -168,7 +171,7 @@ static int vt8500_pwm_set_polarity(struct pwm_chip *chip,
val &= ~CTRL_INVERT;
writel(val, vt8500->base + REG_CTRL(pwm->hwpwm));
- vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_CTRL_UPDATE);
+ vt8500_pwm_busy_wait(chip, pwm->hwpwm, STATUS_CTRL_UPDATE);
return 0;
}
@@ -231,6 +234,7 @@ MODULE_DEVICE_TABLE(of, vt8500_pwm_dt_ids);
static int vt8500_pwm_probe(struct platform_device *pdev)
{
+ struct pwm_chip *chip;
struct vt8500_chip *vt8500;
struct device_node *np = pdev->dev.of_node;
int ret;
@@ -238,13 +242,12 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
if (!np)
return dev_err_probe(&pdev->dev, -EINVAL, "invalid devicetree node\n");
- vt8500 = devm_kzalloc(&pdev->dev, sizeof(*vt8500), GFP_KERNEL);
- if (vt8500 == NULL)
- return -ENOMEM;
+ chip = devm_pwmchip_alloc(&pdev->dev, VT8500_NR_PWMS, sizeof(*vt8500));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ vt8500 = to_vt8500_chip(chip);
- vt8500->chip.dev = &pdev->dev;
- vt8500->chip.ops = &vt8500_pwm_ops;
- vt8500->chip.npwm = VT8500_NR_PWMS;
+ chip->ops = &vt8500_pwm_ops;
vt8500->clk = devm_clk_get_prepared(&pdev->dev, NULL);
if (IS_ERR(vt8500->clk))
@@ -254,7 +257,7 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
if (IS_ERR(vt8500->base))
return PTR_ERR(vt8500->base);
- ret = devm_pwmchip_add(&pdev->dev, &vt8500->chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
diff --git a/drivers/pwm/pwm-xilinx.c b/drivers/pwm/pwm-xilinx.c
index 5f3c2a6fed11..3a7deebb0d0c 100644
--- a/drivers/pwm/pwm-xilinx.c
+++ b/drivers/pwm/pwm-xilinx.c
@@ -80,15 +80,10 @@ unsigned int xilinx_timer_get_period(struct xilinx_timer_priv *priv,
#define TCSR_PWM_CLEAR (TCSR_MDT | TCSR_LOAD)
#define TCSR_PWM_MASK (TCSR_PWM_SET | TCSR_PWM_CLEAR)
-struct xilinx_pwm_device {
- struct pwm_chip chip;
- struct xilinx_timer_priv priv;
-};
-
static inline struct xilinx_timer_priv
*xilinx_pwm_chip_to_priv(struct pwm_chip *chip)
{
- return &container_of(chip, struct xilinx_pwm_device, chip)->priv;
+ return pwmchip_get_drvdata(chip);
}
static bool xilinx_timer_pwm_enabled(u32 tcsr0, u32 tcsr1)
@@ -214,7 +209,7 @@ static int xilinx_pwm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct xilinx_timer_priv *priv;
- struct xilinx_pwm_device *xilinx_pwm;
+ struct pwm_chip *chip;
u32 pwm_cells, one_timer, width;
void __iomem *regs;
@@ -225,11 +220,11 @@ static int xilinx_pwm_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "could not read #pwm-cells\n");
- xilinx_pwm = devm_kzalloc(dev, sizeof(*xilinx_pwm), GFP_KERNEL);
- if (!xilinx_pwm)
- return -ENOMEM;
- platform_set_drvdata(pdev, xilinx_pwm);
- priv = &xilinx_pwm->priv;
+ chip = devm_pwmchip_alloc(dev, 1, sizeof(*priv));
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ priv = xilinx_pwm_chip_to_priv(chip);
+ platform_set_drvdata(pdev, chip);
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
@@ -278,10 +273,8 @@ static int xilinx_pwm_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret, "Clock enable failed\n");
clk_rate_exclusive_get(priv->clk);
- xilinx_pwm->chip.dev = dev;
- xilinx_pwm->chip.ops = &xilinx_pwm_ops;
- xilinx_pwm->chip.npwm = 1;
- ret = pwmchip_add(&xilinx_pwm->chip);
+ chip->ops = &xilinx_pwm_ops;
+ ret = pwmchip_add(chip);
if (ret) {
clk_rate_exclusive_put(priv->clk);
clk_disable_unprepare(priv->clk);
@@ -293,11 +286,12 @@ static int xilinx_pwm_probe(struct platform_device *pdev)
static void xilinx_pwm_remove(struct platform_device *pdev)
{
- struct xilinx_pwm_device *xilinx_pwm = platform_get_drvdata(pdev);
+ struct pwm_chip *chip = platform_get_drvdata(pdev);
+ struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip);
- pwmchip_remove(&xilinx_pwm->chip);
- clk_rate_exclusive_put(xilinx_pwm->priv.clk);
- clk_disable_unprepare(xilinx_pwm->priv.clk);
+ pwmchip_remove(chip);
+ clk_rate_exclusive_put(priv->clk);
+ clk_disable_unprepare(priv->clk);
}
static const struct of_device_id xilinx_pwm_of_match[] = {
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 1698609d91c8..3f434a771fb5 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -509,10 +509,10 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
* If device_create() fails, the pwm_chip is still usable by
* the kernel; it's just not exported.
*/
- parent = device_create(&pwm_class, chip->dev, MKDEV(0, 0), chip,
+ parent = device_create(&pwm_class, pwmchip_parent(chip), MKDEV(0, 0), chip,
"pwmchip%d", chip->id);
if (IS_ERR(parent)) {
- dev_warn(chip->dev,
+ dev_warn(pwmchip_parent(chip),
"device_create failed for pwm_chip sysfs export\n");
}
}
diff --git a/drivers/ras/Kconfig b/drivers/ras/Kconfig
index c2a236f2e846..fc4f4bb94a4c 100644
--- a/drivers/ras/Kconfig
+++ b/drivers/ras/Kconfig
@@ -32,5 +32,18 @@ menuconfig RAS
if RAS
source "arch/x86/ras/Kconfig"
+source "drivers/ras/amd/atl/Kconfig"
+
+config RAS_FMPM
+ tristate "FRU Memory Poison Manager"
+ default m
+ depends on AMD_ATL && ACPI_APEI
+ help
+ Support saving and restoring memory error information across reboot
+ using ACPI ERST as persistent storage. Error information is saved with
+ the UEFI CPER "FRU Memory Poison" section format.
+
+ Memory will be retired during boot time and run time depending on
+ platform-specific policies.
endif
diff --git a/drivers/ras/Makefile b/drivers/ras/Makefile
index 6f0404f50107..11f95d59d397 100644
--- a/drivers/ras/Makefile
+++ b/drivers/ras/Makefile
@@ -2,3 +2,6 @@
obj-$(CONFIG_RAS) += ras.o
obj-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_RAS_CEC) += cec.o
+
+obj-$(CONFIG_RAS_FMPM) += amd/fmpm.o
+obj-y += amd/atl/
diff --git a/drivers/ras/amd/atl/Kconfig b/drivers/ras/amd/atl/Kconfig
new file mode 100644
index 000000000000..df49c23e7f62
--- /dev/null
+++ b/drivers/ras/amd/atl/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# AMD Address Translation Library Kconfig
+#
+# Copyright (c) 2023, Advanced Micro Devices, Inc.
+# All Rights Reserved.
+#
+# Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+
+config AMD_ATL
+ tristate "AMD Address Translation Library"
+ depends on AMD_NB && X86_64 && RAS
+ depends on MEMORY_FAILURE
+ default n
+ help
+ This library includes support for implementation-specific
+ address translation procedures needed for various error
+ handling cases.
+
+ Enable this option if using DRAM ECC on Zen-based systems
+ and OS-based error handling.
diff --git a/drivers/ras/amd/atl/Makefile b/drivers/ras/amd/atl/Makefile
new file mode 100644
index 000000000000..4acd5f05bd9c
--- /dev/null
+++ b/drivers/ras/amd/atl/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# AMD Address Translation Library Makefile
+#
+# Copyright (c) 2023, Advanced Micro Devices, Inc.
+# All Rights Reserved.
+#
+# Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+
+amd_atl-y := access.o
+amd_atl-y += core.o
+amd_atl-y += dehash.o
+amd_atl-y += denormalize.o
+amd_atl-y += map.o
+amd_atl-y += system.o
+amd_atl-y += umc.o
+
+obj-$(CONFIG_AMD_ATL) += amd_atl.o
diff --git a/drivers/ras/amd/atl/access.c b/drivers/ras/amd/atl/access.c
new file mode 100644
index 000000000000..ee4661ed28ba
--- /dev/null
+++ b/drivers/ras/amd/atl/access.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * access.c : DF Indirect Access functions
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include "internal.h"
+
+/* Protect the PCI config register pairs used for DF indirect access. */
+static DEFINE_MUTEX(df_indirect_mutex);
+
+/*
+ * Data Fabric Indirect Access uses FICAA/FICAD.
+ *
+ * Fabric Indirect Configuration Access Address (FICAA): constructed based
+ * on the device's Instance Id and the PCI function and register offset of
+ * the desired register.
+ *
+ * Fabric Indirect Configuration Access Data (FICAD): there are FICAD
+ * low and high registers but so far only the low register is needed.
+ *
+ * Use Instance Id 0xFF to indicate a broadcast read.
+ */
+#define DF_BROADCAST 0xFF
+
+#define DF_FICAA_INST_EN BIT(0)
+#define DF_FICAA_REG_NUM GENMASK(10, 1)
+#define DF_FICAA_FUNC_NUM GENMASK(13, 11)
+#define DF_FICAA_INST_ID GENMASK(23, 16)
+
+#define DF_FICAA_REG_NUM_LEGACY GENMASK(10, 2)
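As a worked example of the encoding above (the register and instance values are purely illustrative): an instance read of function 0, register 0x104 from instance ID 0x20 on a non-legacy system composes the following FICAA value:

/*
 *   reg   = 0x104 >> 2 = 0x41                   (drop the two masked LSBs)
 *
 *   ficaa = FIELD_PREP(DF_FICAA_INST_EN, 1)     ->      0x1
 *         | FIELD_PREP(DF_FICAA_REG_NUM, 0x41)  ->     0x82
 *         | FIELD_PREP(DF_FICAA_FUNC_NUM, 0)    ->      0x0
 *         | FIELD_PREP(DF_FICAA_INST_ID, 0x20)  -> 0x200000
 *         = 0x200083
 */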
+
+static u16 get_accessible_node(u16 node)
+{
+ /*
+ * On heterogeneous systems, not all AMD Nodes are accessible
+ * through software-visible registers. The Node ID needs to be
+ * adjusted for register accesses. But its value should not be
+ * changed for the translation methods.
+ */
+ if (df_cfg.flags.heterogeneous) {
+ /* Only Node 0 is accessible on DF3.5 systems. */
+ if (df_cfg.rev == DF3p5)
+ node = 0;
+
+ /*
+ * Only the first Node in each Socket is accessible on
+ * DF4.5 systems, and this is visible to software as one
+ * Fabric per Socket. The Socket ID can be derived from
+ * the Node ID and global shift values.
+ */
+ if (df_cfg.rev == DF4p5)
+ node >>= df_cfg.socket_id_shift - df_cfg.node_id_shift;
+ }
+
+ return node;
+}
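To make the DF4.5 adjustment concrete (the shift values here are hypothetical, not read from hardware):

/*
 * Hypothetical DF4.5 example:
 *   socket_id_shift - node_id_shift == 2  ->  4 Nodes per Socket
 *   get_accessible_node(5) == 5 >> 2 == 1 ->  registers of the second
 *                                             Socket's visible Fabric
 */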
+
+static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
+{
+ u32 ficaa_addr = 0x8C, ficad_addr = 0xB8;
+ struct pci_dev *F4;
+ int err = -ENODEV;
+ u32 ficaa = 0;
+
+ node = get_accessible_node(node);
+ if (node >= amd_nb_num())
+ goto out;
+
+ F4 = node_to_amd_nb(node)->link;
+ if (!F4)
+ goto out;
+
+ /* Enable instance-specific access. */
+ if (instance_id != DF_BROADCAST) {
+ ficaa |= FIELD_PREP(DF_FICAA_INST_EN, 1);
+ ficaa |= FIELD_PREP(DF_FICAA_INST_ID, instance_id);
+ }
+
+ /*
+ * The two least-significant bits are masked when inputting the
+ * register offset to FICAA.
+ */
+ reg >>= 2;
+
+ if (df_cfg.flags.legacy_ficaa) {
+ ficaa_addr = 0x5C;
+ ficad_addr = 0x98;
+
+ ficaa |= FIELD_PREP(DF_FICAA_REG_NUM_LEGACY, reg);
+ } else {
+ ficaa |= FIELD_PREP(DF_FICAA_REG_NUM, reg);
+ }
+
+ ficaa |= FIELD_PREP(DF_FICAA_FUNC_NUM, func);
+
+ mutex_lock(&df_indirect_mutex);
+
+ err = pci_write_config_dword(F4, ficaa_addr, ficaa);
+ if (err) {
+ pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
+ goto out_unlock;
+ }
+
+ err = pci_read_config_dword(F4, ficad_addr, lo);
+ if (err)
+ pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
+
+ pr_debug("node=%u inst=0x%x func=0x%x reg=0x%x val=0x%x",
+ node, instance_id, func, reg << 2, *lo);
+
+out_unlock:
+ mutex_unlock(&df_indirect_mutex);
+
+out:
+ return err;
+}
+
+int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
+{
+ return __df_indirect_read(node, func, reg, instance_id, lo);
+}
+
+int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
+{
+ return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
+}
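Callers are expected to use the two wrappers rather than __df_indirect_read() directly. A hedged usage sketch, reusing the function/register pair that add_legacy_hole() in core.c reads (error handling shortened):

	u32 val;

	/* Broadcast read: DF function 0, register 0x104, all instances. */
	if (df_indirect_read_broadcast(0, 0, 0x104, &val))
		return -EINVAL;

	/* Same register, but only from the instance with ID 0x10. */
	if (df_indirect_read_instance(0, 0, 0x104, 0x10, &val))
		return -EINVAL;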
diff --git a/drivers/ras/amd/atl/core.c b/drivers/ras/amd/atl/core.c
new file mode 100644
index 000000000000..6dc4e06305f7
--- /dev/null
+++ b/drivers/ras/amd/atl/core.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * core.c : Module init and base translation functions
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include <linux/module.h>
+#include <asm/cpu_device_id.h>
+
+#include "internal.h"
+
+struct df_config df_cfg __read_mostly;
+
+static int addr_over_limit(struct addr_ctx *ctx)
+{
+ u64 dram_limit_addr;
+
+ if (df_cfg.rev >= DF4)
+ dram_limit_addr = FIELD_GET(DF4_DRAM_LIMIT_ADDR, ctx->map.limit);
+ else
+ dram_limit_addr = FIELD_GET(DF2_DRAM_LIMIT_ADDR, ctx->map.limit);
+
+ dram_limit_addr <<= DF_DRAM_BASE_LIMIT_LSB;
+ dram_limit_addr |= GENMASK(DF_DRAM_BASE_LIMIT_LSB - 1, 0);
+
+ /* Is calculated system address above DRAM limit address? */
+ if (ctx->ret_addr > dram_limit_addr) {
+ atl_debug(ctx, "Calculated address (0x%016llx) > DRAM limit (0x%016llx)",
+ ctx->ret_addr, dram_limit_addr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool legacy_hole_en(struct addr_ctx *ctx)
+{
+ u32 reg = ctx->map.base;
+
+ if (df_cfg.rev >= DF4)
+ reg = ctx->map.ctl;
+
+ return FIELD_GET(DF_LEGACY_MMIO_HOLE_EN, reg);
+}
+
+static int add_legacy_hole(struct addr_ctx *ctx)
+{
+ u32 dram_hole_base;
+ u8 func = 0;
+
+ if (!legacy_hole_en(ctx))
+ return 0;
+
+ if (df_cfg.rev >= DF4)
+ func = 7;
+
+ if (df_indirect_read_broadcast(ctx->node_id, func, 0x104, &dram_hole_base))
+ return -EINVAL;
+
+ dram_hole_base &= DF_DRAM_HOLE_BASE_MASK;
+
+ if (ctx->ret_addr >= dram_hole_base)
+ ctx->ret_addr += (BIT_ULL(32) - dram_hole_base);
+
+ return 0;
+}
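A quick arithmetic check of the hole handling (the base value is illustrative): with dram_hole_base == 0xC0000000, any denormalized address at or above 3 GB is bumped up by BIT_ULL(32) - 0xC0000000 == 0x40000000, i.e. by the 1 GB of address space the legacy MMIO hole occupies below 4 GB, so the result lands back in the DRAM range above the hole.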
+
+static u64 get_base_addr(struct addr_ctx *ctx)
+{
+ u64 base_addr;
+
+ if (df_cfg.rev >= DF4)
+ base_addr = FIELD_GET(DF4_BASE_ADDR, ctx->map.base);
+ else
+ base_addr = FIELD_GET(DF2_BASE_ADDR, ctx->map.base);
+
+ return base_addr << DF_DRAM_BASE_LIMIT_LSB;
+}
+
+static int add_base_and_hole(struct addr_ctx *ctx)
+{
+ ctx->ret_addr += get_base_addr(ctx);
+
+ if (add_legacy_hole(ctx))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool late_hole_remove(struct addr_ctx *ctx)
+{
+ if (df_cfg.rev == DF3p5)
+ return true;
+
+ if (df_cfg.rev == DF4)
+ return true;
+
+ if (ctx->map.intlv_mode == DF3_6CHAN)
+ return true;
+
+ return false;
+}
+
+unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsigned long addr)
+{
+ struct addr_ctx ctx;
+
+ if (df_cfg.rev == UNKNOWN)
+ return -EINVAL;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ /* Start from the normalized address */
+ ctx.ret_addr = addr;
+ ctx.inst_id = coh_st_inst_id;
+
+ ctx.inputs.norm_addr = addr;
+ ctx.inputs.socket_id = socket_id;
+ ctx.inputs.die_id = die_id;
+ ctx.inputs.coh_st_inst_id = coh_st_inst_id;
+
+ if (determine_node_id(&ctx, socket_id, die_id))
+ return -EINVAL;
+
+ if (get_address_map(&ctx))
+ return -EINVAL;
+
+ if (denormalize_address(&ctx))
+ return -EINVAL;
+
+ if (!late_hole_remove(&ctx) && add_base_and_hole(&ctx))
+ return -EINVAL;
+
+ if (dehash_address(&ctx))
+ return -EINVAL;
+
+ if (late_hole_remove(&ctx) && add_base_and_hole(&ctx))
+ return -EINVAL;
+
+ if (addr_over_limit(&ctx))
+ return -EINVAL;
+
+ return ctx.ret_addr;
+}
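A hedged usage sketch for norm_to_sys_addr() (variable values are illustrative): errors come back as negative values cast to unsigned long, so a caller might test with IS_ERR_VALUE():

	unsigned long sys_addr;

	sys_addr = norm_to_sys_addr(socket_id, die_id, coh_st_inst_id, norm_addr);
	if (IS_ERR_VALUE(sys_addr))
		return -EINVAL;

	/* sys_addr is now a system physical address, e.g. for memory_failure(). */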
+
+static void check_for_legacy_df_access(void)
+{
+ /*
+ * All Zen-based systems before Family 19h use the legacy
+ * DF Indirect Access (FICAA/FICAD) offsets.
+ */
+ if (boot_cpu_data.x86 < 0x19) {
+ df_cfg.flags.legacy_ficaa = true;
+ return;
+ }
+
+ /* All systems after Family 19h use the current offsets. */
+ if (boot_cpu_data.x86 > 0x19)
+ return;
+
+ /* Some Family 19h systems use the legacy offsets. */
+ switch (boot_cpu_data.x86_model) {
+ case 0x00 ... 0x0f:
+ case 0x20 ... 0x5f:
+ df_cfg.flags.legacy_ficaa = true;
+ }
+}
+
+/*
+ * This library provides functionality for AMD-based systems with a Data Fabric.
+ * The set of systems with a Data Fabric is equivalent to the set of Zen-based systems
+ * and the set of systems with the Scalable MCA feature at this time. However, these
+ * are technically independent things.
+ *
+ * It's possible to match on the PCI IDs of the Data Fabric devices, but this will be
+ * an ever-expanding list. Instead, match on the SMCA and Zen features to cover all
+ * relevant systems.
+ */
+static const struct x86_cpu_id amd_atl_cpuids[] = {
+ X86_MATCH_FEATURE(X86_FEATURE_SMCA, NULL),
+ X86_MATCH_FEATURE(X86_FEATURE_ZEN, NULL),
+ { }
+};
+MODULE_DEVICE_TABLE(x86cpu, amd_atl_cpuids);
+
+static int __init amd_atl_init(void)
+{
+ if (!x86_match_cpu(amd_atl_cpuids))
+ return -ENODEV;
+
+ if (!amd_nb_num())
+ return -ENODEV;
+
+ check_for_legacy_df_access();
+
+ if (get_df_system_info())
+ return -ENODEV;
+
+ /* Increment this module's refcount so that it can't be easily unloaded. */
+ __module_get(THIS_MODULE);
+ amd_atl_register_decoder(convert_umc_mca_addr_to_sys_addr);
+
+ pr_info("AMD Address Translation Library initialized");
+ return 0;
+}
+
+/*
+ * Exit function is only needed for testing and debug. Module unload must be
+ * forced to override refcount check.
+ */
+static void __exit amd_atl_exit(void)
+{
+ amd_atl_unregister_decoder();
+}
+
+module_init(amd_atl_init);
+module_exit(amd_atl_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/ras/amd/atl/dehash.c b/drivers/ras/amd/atl/dehash.c
new file mode 100644
index 000000000000..4ea46262c4f5
--- /dev/null
+++ b/drivers/ras/amd/atl/dehash.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * dehash.c : Functions to account for hashing bits
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include "internal.h"
+
+/*
+ * Verify the interleave bits are correct in the different interleaving
+ * settings.
+ *
+ * If @num_intlv_dies and/or @num_intlv_sockets are 1, it means the
+ * respective interleaving is disabled.
+ */
+static inline bool map_bits_valid(struct addr_ctx *ctx, u8 bit1, u8 bit2,
+ u8 num_intlv_dies, u8 num_intlv_sockets)
+{
+ if (!(ctx->map.intlv_bit_pos == bit1 || ctx->map.intlv_bit_pos == bit2)) {
+ pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos);
+ return false;
+ }
+
+ if (ctx->map.num_intlv_dies > num_intlv_dies) {
+ pr_debug("Invalid number of interleave dies: %u", ctx->map.num_intlv_dies);
+ return false;
+ }
+
+ if (ctx->map.num_intlv_sockets > num_intlv_sockets) {
+ pr_debug("Invalid number of interleave sockets: %u", ctx->map.num_intlv_sockets);
+ return false;
+ }
+
+ return true;
+}
+
+static int df2_dehash_addr(struct addr_ctx *ctx)
+{
+ u8 hashed_bit, intlv_bit, intlv_bit_pos;
+
+ if (!map_bits_valid(ctx, 8, 9, 1, 1))
+ return -EINVAL;
+
+ intlv_bit_pos = ctx->map.intlv_bit_pos;
+ intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(12), ctx->ret_addr);
+ hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr);
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr);
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr);
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(intlv_bit_pos);
+
+ return 0;
+}
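The decode step works because the hardware hash is a pure XOR parity, which is its own inverse. A worked relation for the DF2 case above, with p being the interleave bit position:

/*
 * Encode (hardware): addr[p] = intlv ^ addr[12] ^ addr[18] ^ addr[21] ^ addr[30]
 * Decode (above):    intlv   = addr[p] ^ addr[12] ^ addr[18] ^ addr[21] ^ addr[30]
 *
 * Recomputing the parity and flipping bit p whenever it disagrees with
 * the stored value therefore restores the pre-hash interleave bit.
 */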
+
+static int df3_dehash_addr(struct addr_ctx *ctx)
+{
+ bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G;
+ u8 hashed_bit, intlv_bit, intlv_bit_pos;
+
+ if (!map_bits_valid(ctx, 8, 9, 1, 1))
+ return -EINVAL;
+
+ hash_ctl_64k = FIELD_GET(DF3_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF3_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF3_HASH_CTL_1G, ctx->map.ctl);
+
+ intlv_bit_pos = ctx->map.intlv_bit_pos;
+ intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(14), ctx->ret_addr);
+ hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(intlv_bit_pos);
+
+ /* Calculation complete for 2 channels. Continue for 4 and 8 channels. */
+ if (ctx->map.intlv_mode == DF3_COD4_2CHAN_HASH)
+ return 0;
+
+ intlv_bit = FIELD_GET(BIT_ULL(12), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(16), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(12);
+
+ /* Calculation complete for 4 channels. Continue for 8 channels. */
+ if (ctx->map.intlv_mode == DF3_COD2_4CHAN_HASH)
+ return 0;
+
+ intlv_bit = FIELD_GET(BIT_ULL(13), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(17), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(13);
+
+ return 0;
+}
+
+static int df3_6chan_dehash_addr(struct addr_ctx *ctx)
+{
+ u8 intlv_bit_pos = ctx->map.intlv_bit_pos;
+ u8 hashed_bit, intlv_bit, num_intlv_bits;
+ bool hash_ctl_2M, hash_ctl_1G;
+
+ if (ctx->map.intlv_mode != DF3_6CHAN) {
+ atl_debug_on_bad_intlv_mode(ctx);
+ return -EINVAL;
+ }
+
+ num_intlv_bits = ilog2(ctx->map.num_intlv_chan) + 1;
+
+ hash_ctl_2M = FIELD_GET(DF3_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF3_HASH_CTL_1G, ctx->map.ctl);
+
+ intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= !!(BIT_ULL(intlv_bit_pos + num_intlv_bits) & ctx->ret_addr);
+ hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(intlv_bit_pos);
+
+ intlv_bit_pos++;
+ intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(intlv_bit_pos);
+
+ intlv_bit_pos++;
+ intlv_bit = !!(BIT_ULL(intlv_bit_pos) & ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(intlv_bit_pos);
+
+ return 0;
+}
+
+static int df4_dehash_addr(struct addr_ctx *ctx)
+{
+ bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G;
+ u8 hashed_bit, intlv_bit;
+
+ if (!map_bits_valid(ctx, 8, 8, 1, 2))
+ return -EINVAL;
+
+ hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
+
+ intlv_bit = FIELD_GET(BIT_ULL(8), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(16), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G;
+
+ if (ctx->map.num_intlv_sockets == 1)
+ hashed_bit ^= FIELD_GET(BIT_ULL(14), ctx->ret_addr);
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(8);
+
+ /*
+ * Hashing is possible with socket interleaving, so check the total number
+ * of channels in the system rather than DRAM map interleaving mode.
+ *
+ * Calculation complete for 2 channels. Continue for 4, 8, and 16 channels.
+ */
+ if (ctx->map.total_intlv_chan <= 2)
+ return 0;
+
+ intlv_bit = FIELD_GET(BIT_ULL(12), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(17), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(12);
+
+ /* Calculation complete for 4 channels. Continue for 8 and 16 channels. */
+ if (ctx->map.total_intlv_chan <= 4)
+ return 0;
+
+ intlv_bit = FIELD_GET(BIT_ULL(13), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(13);
+
+ /* Calculation complete for 8 channels. Continue for 16 channels. */
+ if (ctx->map.total_intlv_chan <= 8)
+ return 0;
+
+ intlv_bit = FIELD_GET(BIT_ULL(14), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(19), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(24), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(33), ctx->ret_addr) & hash_ctl_1G;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(14);
+
+ return 0;
+}
+
+static int df4p5_dehash_addr(struct addr_ctx *ctx)
+{
+ bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G, hash_ctl_1T;
+ u8 hashed_bit, intlv_bit;
+ u64 rehash_vector;
+
+ if (!map_bits_valid(ctx, 8, 8, 1, 2))
+ return -EINVAL;
+
+ hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
+ hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
+
+ /*
+ * Generate a unique address to determine which bits
+ * need to be dehashed.
+ *
+ * Start with a contiguous bitmask for the total
+ * number of channels starting at bit 8.
+ *
+ * Then make a gap in the proper place based on
+ * interleave mode.
+ */
+ rehash_vector = ctx->map.total_intlv_chan - 1;
+ rehash_vector <<= 8;
+
+ if (ctx->map.intlv_mode == DF4p5_NPS2_4CHAN_1K_HASH ||
+ ctx->map.intlv_mode == DF4p5_NPS1_8CHAN_1K_HASH ||
+ ctx->map.intlv_mode == DF4p5_NPS1_16CHAN_1K_HASH)
+ rehash_vector = expand_bits(10, 2, rehash_vector);
+ else
+ rehash_vector = expand_bits(9, 3, rehash_vector);
+
+ if (rehash_vector & BIT_ULL(8)) {
+ intlv_bit = FIELD_GET(BIT_ULL(8), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(16), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(40), ctx->ret_addr) & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(8);
+ }
+
+ if (rehash_vector & BIT_ULL(9)) {
+ intlv_bit = FIELD_GET(BIT_ULL(9), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(17), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(41), ctx->ret_addr) & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(9);
+ }
+
+ if (rehash_vector & BIT_ULL(12)) {
+ intlv_bit = FIELD_GET(BIT_ULL(12), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(42), ctx->ret_addr) & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(12);
+ }
+
+ if (rehash_vector & BIT_ULL(13)) {
+ intlv_bit = FIELD_GET(BIT_ULL(13), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(19), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(24), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(33), ctx->ret_addr) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(43), ctx->ret_addr) & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(13);
+ }
+
+ if (rehash_vector & BIT_ULL(14)) {
+ intlv_bit = FIELD_GET(BIT_ULL(14), ctx->ret_addr);
+
+ hashed_bit = intlv_bit;
+ hashed_bit ^= FIELD_GET(BIT_ULL(20), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(25), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(34), ctx->ret_addr) & hash_ctl_1G;
+ hashed_bit ^= FIELD_GET(BIT_ULL(44), ctx->ret_addr) & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(14);
+ }
+
+ return 0;
+}
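A worked example of the rehash_vector construction (following the expand_bits() semantics used throughout this file): DF4p5_NPS1_8CHAN_1K_HASH has total_intlv_chan == 8, so the vector starts as 0x7 << 8 == 0x700 (bits [10:8]). expand_bits(10, 2, 0x700) leaves bits 8 and 9 in place and moves bit 10 up to bit 12, giving 0x1300, so bits 8, 9 and 12 are dehashed above. The 2K variant instead uses expand_bits(9, 3, 0x700), which keeps bit 8 and moves bits 9 and 10 up to 12 and 13, giving 0x3100.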
+
+/*
+ * MI300 hash bits
+ * 4K 64K 2M 1G 1T 1T
+ * COH_ST_Select[0] = XOR of addr{8, 12, 15, 22, 29, 36, 43}
+ * COH_ST_Select[1] = XOR of addr{9, 13, 16, 23, 30, 37, 44}
+ * COH_ST_Select[2] = XOR of addr{10, 14, 17, 24, 31, 38, 45}
+ * COH_ST_Select[3] = XOR of addr{11, 18, 25, 32, 39, 46}
+ * COH_ST_Select[4] = XOR of addr{14, 19, 26, 33, 40, 47} aka Stack
+ * DieID[0] = XOR of addr{12, 20, 27, 34, 41 }
+ * DieID[1] = XOR of addr{13, 21, 28, 35, 42 }
+ */
+static int mi300_dehash_addr(struct addr_ctx *ctx)
+{
+ bool hash_ctl_4k, hash_ctl_64k, hash_ctl_2M, hash_ctl_1G, hash_ctl_1T;
+ bool hashed_bit, intlv_bit, test_bit;
+ u8 num_intlv_bits, base_bit, i;
+
+ if (!map_bits_valid(ctx, 8, 8, 4, 1))
+ return -EINVAL;
+
+ hash_ctl_4k = FIELD_GET(DF4p5_HASH_CTL_4K, ctx->map.ctl);
+ hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
+ hash_ctl_1T = FIELD_GET(DF4p5_HASH_CTL_1T, ctx->map.ctl);
+
+ /* Channel bits */
+ num_intlv_bits = ilog2(ctx->map.num_intlv_chan);
+
+ for (i = 0; i < num_intlv_bits; i++) {
+ base_bit = 8 + i;
+
+ /* COH_ST_Select[4] jumps to a base bit of 14. */
+ if (i == 4)
+ base_bit = 14;
+
+ intlv_bit = BIT_ULL(base_bit) & ctx->ret_addr;
+
+ hashed_bit = intlv_bit;
+
+ /* 4k hash bit only applies to the first 3 bits. */
+ if (i <= 2) {
+ test_bit = BIT_ULL(12 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_4k;
+ }
+
+ /* Use temporary 'test_bit' value to avoid Sparse warnings. */
+ test_bit = BIT_ULL(15 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_64k;
+ test_bit = BIT_ULL(22 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_2M;
+ test_bit = BIT_ULL(29 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1G;
+ test_bit = BIT_ULL(36 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1T;
+ test_bit = BIT_ULL(43 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(base_bit);
+ }
+
+ /* Die bits */
+ num_intlv_bits = ilog2(ctx->map.num_intlv_dies);
+
+ for (i = 0; i < num_intlv_bits; i++) {
+ base_bit = 12 + i;
+
+ intlv_bit = BIT_ULL(base_bit) & ctx->ret_addr;
+
+ hashed_bit = intlv_bit;
+
+ test_bit = BIT_ULL(20 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_64k;
+ test_bit = BIT_ULL(27 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_2M;
+ test_bit = BIT_ULL(34 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1G;
+ test_bit = BIT_ULL(41 + i) & ctx->ret_addr;
+ hashed_bit ^= test_bit & hash_ctl_1T;
+
+ if (hashed_bit != intlv_bit)
+ ctx->ret_addr ^= BIT_ULL(base_bit);
+ }
+
+ return 0;
+}
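As a concrete instance of the loops above (interleave widths from the table at the top of the function): MI3_HASH_16CHAN has num_intlv_chan == 16, so ilog2() yields four channel bits and the first loop walks COH_ST_Select[0..3] at base bits 8-11; the base-14 Stack bit (i == 4) only comes into play for MI3_HASH_32CHAN. With four interleaved dies, the second loop then covers DieID[0..1] at bits 12-13.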
+
+int dehash_address(struct addr_ctx *ctx)
+{
+ switch (ctx->map.intlv_mode) {
+ /* No hashing cases. */
+ case NONE:
+ case NOHASH_2CHAN:
+ case NOHASH_4CHAN:
+ case NOHASH_8CHAN:
+ case NOHASH_16CHAN:
+ case NOHASH_32CHAN:
+ /* Hashing bits handled earlier during CS ID calculation. */
+ case DF4_NPS4_3CHAN_HASH:
+ case DF4_NPS2_5CHAN_HASH:
+ case DF4_NPS2_6CHAN_HASH:
+ case DF4_NPS1_10CHAN_HASH:
+ case DF4_NPS1_12CHAN_HASH:
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ /* No hash physical address bits, so nothing to do. */
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ return 0;
+
+ case DF2_2CHAN_HASH:
+ return df2_dehash_addr(ctx);
+
+ case DF3_COD4_2CHAN_HASH:
+ case DF3_COD2_4CHAN_HASH:
+ case DF3_COD1_8CHAN_HASH:
+ return df3_dehash_addr(ctx);
+
+ case DF3_6CHAN:
+ return df3_6chan_dehash_addr(ctx);
+
+ case DF4_NPS4_2CHAN_HASH:
+ case DF4_NPS2_4CHAN_HASH:
+ case DF4_NPS1_8CHAN_HASH:
+ return df4_dehash_addr(ctx);
+
+ case DF4p5_NPS4_2CHAN_1K_HASH:
+ case DF4p5_NPS4_2CHAN_2K_HASH:
+ case DF4p5_NPS2_4CHAN_2K_HASH:
+ case DF4p5_NPS2_4CHAN_1K_HASH:
+ case DF4p5_NPS1_8CHAN_1K_HASH:
+ case DF4p5_NPS1_8CHAN_2K_HASH:
+ case DF4p5_NPS1_16CHAN_1K_HASH:
+ case DF4p5_NPS1_16CHAN_2K_HASH:
+ return df4p5_dehash_addr(ctx);
+
+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ return mi300_dehash_addr(ctx);
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return -EINVAL;
+ }
+}
diff --git a/drivers/ras/amd/atl/denormalize.c b/drivers/ras/amd/atl/denormalize.c
new file mode 100644
index 000000000000..e279224288d6
--- /dev/null
+++ b/drivers/ras/amd/atl/denormalize.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * denormalize.c : Functions to account for interleaving bits
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include "internal.h"
+
+/*
+ * Returns the Destination Fabric ID. This is the first (lowest)
+ * COH_ST Fabric ID used within a DRAM Address map.
+ */
+static u16 get_dst_fabric_id(struct addr_ctx *ctx)
+{
+ switch (df_cfg.rev) {
+ case DF2: return FIELD_GET(DF2_DST_FABRIC_ID, ctx->map.limit);
+ case DF3: return FIELD_GET(DF3_DST_FABRIC_ID, ctx->map.limit);
+ case DF3p5: return FIELD_GET(DF3p5_DST_FABRIC_ID, ctx->map.limit);
+ case DF4: return FIELD_GET(DF4_DST_FABRIC_ID, ctx->map.ctl);
+ case DF4p5: return FIELD_GET(DF4p5_DST_FABRIC_ID, ctx->map.ctl);
+ default:
+ atl_debug_on_bad_df_rev();
+ return 0;
+ }
+}
+
+/*
+ * Make a contiguous gap in address for N bits starting at bit P.
+ *
+ * Example:
+ * address bits: [20:0]
+ * # of interleave bits (n): 3
+ * starting interleave bit (p): 8
+ *
+ * expanded address bits: [20+n : n+p][n+p-1 : p][p-1 : 0]
+ * [23 : 11][10 : 8][7 : 0]
+ */
+static u64 make_space_for_coh_st_id_at_intlv_bit(struct addr_ctx *ctx)
+{
+ return expand_bits(ctx->map.intlv_bit_pos,
+ ctx->map.total_intlv_bits,
+ ctx->ret_addr);
+}
+
+/*
+ * Make two gaps in address for N bits.
+ * First gap is a single bit at bit P.
+ * Second gap is the remaining N-1 bits at bit 12.
+ *
+ * Example:
+ * address bits: [20:0]
+ * # of interleave bits (n): 3
+ * starting interleave bit (p): 8
+ *
+ * First gap
+ * expanded address bits: [20+1 : p+1][p][p-1 : 0]
+ * [21 : 9][8][7 : 0]
+ *
+ * Second gap uses result from first.
+ * r = n - 1; remaining interleave bits
+ * expanded address bits: [21+r : 12+r][12+r-1: 12][11 : 0]
+ * [23 : 14][13 : 12][11 : 0]
+ */
+static u64 make_space_for_coh_st_id_split_2_1(struct addr_ctx *ctx)
+{
+ /* Make a single space at the interleave bit. */
+ u64 denorm_addr = expand_bits(ctx->map.intlv_bit_pos, 1, ctx->ret_addr);
+
+ /* Done if there's only a single interleave bit. */
+ if (ctx->map.total_intlv_bits <= 1)
+ return denorm_addr;
+
+ /* Make spaces for the remaining interleave bits starting at bit 12. */
+ return expand_bits(12, ctx->map.total_intlv_bits - 1, denorm_addr);
+}
+
+/*
+ * Make space for the COH_ST ID at bits [14:8] as follows:
+ *
+ * 8 channels -> bits [10:8]
+ * 16 channels -> bits [11:8]
+ * 32 channels -> bits [14,11:8]
+ *
+ * 1 die -> N/A
+ * 2 dies -> bit [12]
+ * 4 dies -> bits [13:12]
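+ *
+ * Illustrative example (values assumed): a 32-channel, 2-die map gets
+ * channel[3:0] at bits [11:8], the die bit at [12], and channel[4] at
+ * [14]; bit [13] keeps an ordinary address bit.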
+ */
+static u64 make_space_for_coh_st_id_mi300(struct addr_ctx *ctx)
+{
+ u8 num_intlv_bits = ilog2(ctx->map.num_intlv_chan);
+ u64 denorm_addr;
+
+ if (ctx->map.intlv_bit_pos != 8) {
+ pr_debug("Invalid interleave bit: %u", ctx->map.intlv_bit_pos);
+ return ~0ULL;
+ }
+
+ /* Channel bits. Covers up to 4 bits at [11:8]. */
+ denorm_addr = expand_bits(8, min(num_intlv_bits, 4), ctx->ret_addr);
+
+ /* Die bits. Always starts at [12]. */
+ denorm_addr = expand_bits(12, ilog2(ctx->map.num_intlv_dies), denorm_addr);
+
+ /* Additional channel bit at [14]. */
+ if (num_intlv_bits > 4)
+ denorm_addr = expand_bits(14, 1, denorm_addr);
+
+ return denorm_addr;
+}
+
+/*
+ * Take the current calculated address and shift enough bits in the middle
+ * to make a gap where the interleave bits will be inserted.
+ */
+static u64 make_space_for_coh_st_id(struct addr_ctx *ctx)
+{
+ switch (ctx->map.intlv_mode) {
+ case NOHASH_2CHAN:
+ case NOHASH_4CHAN:
+ case NOHASH_8CHAN:
+ case NOHASH_16CHAN:
+ case NOHASH_32CHAN:
+ case DF2_2CHAN_HASH:
+ return make_space_for_coh_st_id_at_intlv_bit(ctx);
+
+ case DF3_COD4_2CHAN_HASH:
+ case DF3_COD2_4CHAN_HASH:
+ case DF3_COD1_8CHAN_HASH:
+ case DF4_NPS4_2CHAN_HASH:
+ case DF4_NPS2_4CHAN_HASH:
+ case DF4_NPS1_8CHAN_HASH:
+ case DF4p5_NPS4_2CHAN_1K_HASH:
+ case DF4p5_NPS4_2CHAN_2K_HASH:
+ case DF4p5_NPS2_4CHAN_2K_HASH:
+ case DF4p5_NPS1_8CHAN_2K_HASH:
+ case DF4p5_NPS1_16CHAN_2K_HASH:
+ return make_space_for_coh_st_id_split_2_1(ctx);
+
+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ return make_space_for_coh_st_id_mi300(ctx);
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return ~0ULL;
+ }
+}
+
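+/*
+ * Assemble the COH_ST ID from channel, die, and socket interleave bits,
+ * in that order from least to most significant.
+ *
+ * Illustrative example (values assumed): with 2-channel, 2-die, 2-socket
+ * interleaving, coh_st_id = [socket bit][die bit][channel bit], so a
+ * fabric ID selecting socket 1, die 0, channel 1 yields coh_st_id = 101b.
+ */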
+static u16 get_coh_st_id_df2(struct addr_ctx *ctx)
+{
+ u8 num_socket_intlv_bits = ilog2(ctx->map.num_intlv_sockets);
+ u8 num_die_intlv_bits = ilog2(ctx->map.num_intlv_dies);
+ u8 num_intlv_bits;
+ u16 coh_st_id, mask;
+
+ coh_st_id = ctx->coh_st_fabric_id - get_dst_fabric_id(ctx);
+
+ /* Channel interleave bits */
+ num_intlv_bits = order_base_2(ctx->map.num_intlv_chan);
+ mask = GENMASK(num_intlv_bits - 1, 0);
+ coh_st_id &= mask;
+
+ /* Die interleave bits */
+ if (num_die_intlv_bits) {
+ u16 die_bits;
+
+ mask = GENMASK(num_die_intlv_bits - 1, 0);
+ die_bits = ctx->coh_st_fabric_id & df_cfg.die_id_mask;
+ die_bits >>= df_cfg.die_id_shift;
+
+ coh_st_id |= (die_bits & mask) << num_intlv_bits;
+ num_intlv_bits += num_die_intlv_bits;
+ }
+
+ /* Socket interleave bits */
+ if (num_socket_intlv_bits) {
+ u16 socket_bits;
+
+ mask = GENMASK(num_socket_intlv_bits - 1, 0);
+ socket_bits = ctx->coh_st_fabric_id & df_cfg.socket_id_mask;
+ socket_bits >>= df_cfg.socket_id_shift;
+
+ coh_st_id |= (socket_bits & mask) << num_intlv_bits;
+ }
+
+ return coh_st_id;
+}
+
+static u16 get_coh_st_id_df4(struct addr_ctx *ctx)
+{
+ /*
+ * Start with the original component mask and the number of interleave
+ * bits for the channels in this map.
+ */
+ u8 num_intlv_bits = ilog2(ctx->map.num_intlv_chan);
+ u16 mask = df_cfg.component_id_mask;
+
+ u16 socket_bits;
+
+ /* Set the derived Coherent Station ID to the input Coherent Station Fabric ID. */
+ u16 coh_st_id = ctx->coh_st_fabric_id & mask;
+
+ /*
+ * Subtract the "base" Destination Fabric ID.
+ * This accounts for systems with disabled Coherent Stations.
+ */
+ coh_st_id -= get_dst_fabric_id(ctx) & mask;
+
+ /*
+ * Generate and use a new mask based on the number of bits
+ * needed for channel interleaving in this map.
+ */
+ mask = GENMASK(num_intlv_bits - 1, 0);
+ coh_st_id &= mask;
+
+ /* Done if socket interleaving is not enabled. */
+ if (ctx->map.num_intlv_sockets <= 1)
+ return coh_st_id;
+
+ /*
+ * Figure out how many bits are needed for the number of
+ * interleaved sockets. And shift the derived Coherent Station ID to account
+ * for these.
+ */
+ num_intlv_bits = ilog2(ctx->map.num_intlv_sockets);
+ coh_st_id <<= num_intlv_bits;
+
+ /* Generate a new mask for the socket interleaving bits. */
+ mask = GENMASK(num_intlv_bits - 1, 0);
+
+ /* Get the socket interleave bits from the original Coherent Station Fabric ID. */
+ socket_bits = (ctx->coh_st_fabric_id & df_cfg.socket_id_mask) >> df_cfg.socket_id_shift;
+
+ /* Apply the appropriate socket bits to the derived Coherent Station ID. */
+ coh_st_id |= socket_bits & mask;
+
+ return coh_st_id;
+}
+
+/*
+ * MI300 hash has:
+ * (C)hannel[3:0] = coh_st_id[3:0]
+ * (S)tack[0] = coh_st_id[4]
+ * (D)ie[1:0] = coh_st_id[6:5]
+ *
+ * Hashed coh_st_id is swizzled so that Stack bit is at the end.
+ * coh_st_id = SDDCCCC
+ */
+static u16 get_coh_st_id_mi300(struct addr_ctx *ctx)
+{
+ u8 channel_bits, die_bits, stack_bit;
+ u16 die_id;
+
+ /* Subtract the "base" Destination Fabric ID. */
+ ctx->coh_st_fabric_id -= get_dst_fabric_id(ctx);
+
+ die_id = (ctx->coh_st_fabric_id & df_cfg.die_id_mask) >> df_cfg.die_id_shift;
+
+ channel_bits = FIELD_GET(GENMASK(3, 0), ctx->coh_st_fabric_id);
+ stack_bit = FIELD_GET(BIT(4), ctx->coh_st_fabric_id) << 6;
+ die_bits = die_id << 4;
+
+ return stack_bit | die_bits | channel_bits;
+}
+
+/*
+ * Derive the correct Coherent Station ID that represents the interleave bits
+ * used within the system physical address. This accounts for the
+ * interleave mode, number of interleaved channels/dies/sockets, and
+ * other system/mode-specific bit swizzling.
+ *
+ * Returns: Coherent Station ID on success.
+ * All bits set on error.
+ */
+static u16 calculate_coh_st_id(struct addr_ctx *ctx)
+{
+ switch (ctx->map.intlv_mode) {
+ case NOHASH_2CHAN:
+ case NOHASH_4CHAN:
+ case NOHASH_8CHAN:
+ case NOHASH_16CHAN:
+ case NOHASH_32CHAN:
+ case DF3_COD4_2CHAN_HASH:
+ case DF3_COD2_4CHAN_HASH:
+ case DF3_COD1_8CHAN_HASH:
+ case DF2_2CHAN_HASH:
+ return get_coh_st_id_df2(ctx);
+
+ case DF4_NPS4_2CHAN_HASH:
+ case DF4_NPS2_4CHAN_HASH:
+ case DF4_NPS1_8CHAN_HASH:
+ case DF4p5_NPS4_2CHAN_1K_HASH:
+ case DF4p5_NPS4_2CHAN_2K_HASH:
+ case DF4p5_NPS2_4CHAN_2K_HASH:
+ case DF4p5_NPS1_8CHAN_2K_HASH:
+ case DF4p5_NPS1_16CHAN_2K_HASH:
+ return get_coh_st_id_df4(ctx);
+
+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ return get_coh_st_id_mi300(ctx);
+
+ /* COH_ST ID is simply the COH_ST Fabric ID adjusted by the Destination Fabric ID. */
+ case DF4p5_NPS2_4CHAN_1K_HASH:
+ case DF4p5_NPS1_8CHAN_1K_HASH:
+ case DF4p5_NPS1_16CHAN_1K_HASH:
+ return ctx->coh_st_fabric_id - get_dst_fabric_id(ctx);
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return ~0;
+ }
+}
+
+static u64 insert_coh_st_id_at_intlv_bit(struct addr_ctx *ctx, u64 denorm_addr, u16 coh_st_id)
+{
+ return denorm_addr | (coh_st_id << ctx->map.intlv_bit_pos);
+}
+
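+/*
+ * Illustrative example (values assumed): with intlv_bit_pos = 8 and
+ * coh_st_id = 101b, bit [0] of the ID lands at address bit [8] and
+ * bits [2:1] land at address bits [13:12].
+ */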
+static u64 insert_coh_st_id_split_2_1(struct addr_ctx *ctx, u64 denorm_addr, u16 coh_st_id)
+{
+ /* Insert coh_st_id[0] at the interleave bit. */
+ denorm_addr |= (coh_st_id & BIT(0)) << ctx->map.intlv_bit_pos;
+
+ /* Insert coh_st_id[2:1] at bit 12. */
+ denorm_addr |= (coh_st_id & GENMASK(2, 1)) << 11;
+
+ return denorm_addr;
+}
+
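+/*
+ * Illustrative example (values assumed): coh_st_id = 1011b places
+ * bits [1:0] (11b) at address bits [9:8] and bits [3:2] (10b) at
+ * address bits [13:12].
+ */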
+static u64 insert_coh_st_id_split_2_2(struct addr_ctx *ctx, u64 denorm_addr, u16 coh_st_id)
+{
+ /* Insert coh_st_id[1:0] at bit 8. */
+ denorm_addr |= (coh_st_id & GENMASK(1, 0)) << 8;
+
+ /*
+ * Insert coh_st_id[n:2] at bit 12. 'n' could be 2 or 3.
+ * Grab both because bit 3 will be clear if unused.
+ */
+ denorm_addr |= (coh_st_id & GENMASK(3, 2)) << 10;
+
+ return denorm_addr;
+}
+
+static u64 insert_coh_st_id(struct addr_ctx *ctx, u64 denorm_addr, u16 coh_st_id)
+{
+ switch (ctx->map.intlv_mode) {
+ case NOHASH_2CHAN:
+ case NOHASH_4CHAN:
+ case NOHASH_8CHAN:
+ case NOHASH_16CHAN:
+ case NOHASH_32CHAN:
+ case MI3_HASH_8CHAN:
+ case MI3_HASH_16CHAN:
+ case MI3_HASH_32CHAN:
+ case DF2_2CHAN_HASH:
+ return insert_coh_st_id_at_intlv_bit(ctx, denorm_addr, coh_st_id);
+
+ case DF3_COD4_2CHAN_HASH:
+ case DF3_COD2_4CHAN_HASH:
+ case DF3_COD1_8CHAN_HASH:
+ case DF4_NPS4_2CHAN_HASH:
+ case DF4_NPS2_4CHAN_HASH:
+ case DF4_NPS1_8CHAN_HASH:
+ case DF4p5_NPS4_2CHAN_1K_HASH:
+ case DF4p5_NPS4_2CHAN_2K_HASH:
+ case DF4p5_NPS2_4CHAN_2K_HASH:
+ case DF4p5_NPS1_8CHAN_2K_HASH:
+ case DF4p5_NPS1_16CHAN_2K_HASH:
+ return insert_coh_st_id_split_2_1(ctx, denorm_addr, coh_st_id);
+
+ case DF4p5_NPS2_4CHAN_1K_HASH:
+ case DF4p5_NPS1_8CHAN_1K_HASH:
+ case DF4p5_NPS1_16CHAN_1K_HASH:
+ return insert_coh_st_id_split_2_2(ctx, denorm_addr, coh_st_id);
+
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return ~0ULL;
+ }
+}
+
+/*
+ * MI300 systems have a fixed, hardware-defined physical-to-logical
+ * Coherent Station mapping. The Remap registers are not used.
+ */
+static const u16 phy_to_log_coh_st_map_mi300[] = {
+ 12, 13, 14, 15,
+ 8, 9, 10, 11,
+ 4, 5, 6, 7,
+ 0, 1, 2, 3,
+ 28, 29, 30, 31,
+ 24, 25, 26, 27,
+ 20, 21, 22, 23,
+ 16, 17, 18, 19,
+};
+
+static u16 get_logical_coh_st_fabric_id_mi300(struct addr_ctx *ctx)
+{
+ if (ctx->inst_id >= ARRAY_SIZE(phy_to_log_coh_st_map_mi300)) {
+ atl_debug(ctx, "Instance ID out of range");
+ return ~0;
+ }
+
+ return phy_to_log_coh_st_map_mi300[ctx->inst_id] | (ctx->node_id << df_cfg.node_id_shift);
+}
+
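+/*
+ * Illustrative example (values assumed): if remapping is enabled and
+ * remap_array[] = {2, 0, 3, 1, ...}, then a physical Component ID of 3
+ * is found at index 2, so the logical Fabric ID uses Component ID 2
+ * with the original Node ID bits reapplied.
+ */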
+static u16 get_logical_coh_st_fabric_id(struct addr_ctx *ctx)
+{
+ u16 component_id, log_fabric_id;
+
+ /* Start with the physical COH_ST Fabric ID. */
+ u16 phys_fabric_id = ctx->coh_st_fabric_id;
+
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ return get_logical_coh_st_fabric_id_mi300(ctx);
+
+ /* Skip logical ID lookup if remapping is disabled. */
+ if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl) &&
+ ctx->map.intlv_mode != DF3_6CHAN)
+ return phys_fabric_id;
+
+ /* Mask off the Node ID bits to get the "local" Component ID. */
+ component_id = phys_fabric_id & df_cfg.component_id_mask;
+
+ /*
+ * Search the list of logical Component IDs for the one that
+ * matches this physical Component ID.
+ */
+ for (log_fabric_id = 0; log_fabric_id < MAX_COH_ST_CHANNELS; log_fabric_id++) {
+ if (ctx->map.remap_array[log_fabric_id] == component_id)
+ break;
+ }
+
+ if (log_fabric_id == MAX_COH_ST_CHANNELS)
+ atl_debug(ctx, "COH_ST remap entry not found for 0x%x",
+ component_id);
+
+ /* Get the Node ID bits from the physical and apply to the logical. */
+ return (phys_fabric_id & df_cfg.node_id_mask) | log_fabric_id;
+}
+
+static int denorm_addr_common(struct addr_ctx *ctx)
+{
+ u64 denorm_addr;
+ u16 coh_st_id;
+
+ /*
+ * Convert the original physical COH_ST Fabric ID to a logical value.
+ * This is required for non-power-of-two and other interleaving modes.
+ */
+ ctx->coh_st_fabric_id = get_logical_coh_st_fabric_id(ctx);
+
+ denorm_addr = make_space_for_coh_st_id(ctx);
+ coh_st_id = calculate_coh_st_id(ctx);
+ ctx->ret_addr = insert_coh_st_id(ctx, denorm_addr, coh_st_id);
+ return 0;
+}
+
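+/*
+ * DF3 6-channel interleaving is non-power-of-2, so the two
+ * most-significant interleave bits need special handling: the 11b
+ * encoding is reserved for COH_ST IDs 6 and 7 and must be resolved
+ * with a mod3 calculation on the remaining address bits.
+ */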
+static int denorm_addr_df3_6chan(struct addr_ctx *ctx)
+{
+ u16 coh_st_id = ctx->coh_st_fabric_id & df_cfg.component_id_mask;
+ u8 total_intlv_bits = ctx->map.total_intlv_bits;
+ u8 low_bit, intlv_bit = ctx->map.intlv_bit_pos;
+ u64 msb_intlv_bits, temp_addr_a, temp_addr_b;
+ u8 np2_bits = ctx->map.np2_bits;
+
+ if (ctx->map.intlv_mode != DF3_6CHAN)
+ return -EINVAL;
+
+ /*
+ * 'np2_bits' holds the number of bits needed to cover the
+ * amount of memory (rounded up) in this map using 64K chunks.
+ *
+ * Example:
+ * Total memory in map: 6GB
+ * Rounded up to next power-of-2: 8GB
+ * Number of 64K chunks: 0x20000
+ * np2_bits = log2(# of chunks): 17
+ *
+ * Get the two most-significant interleave bits from the
+ * input address based on the following:
+ *
+ * [15 + np2_bits - total_intlv_bits : 14 + np2_bits - total_intlv_bits]
+ */
+ low_bit = 14 + np2_bits - total_intlv_bits;
+ msb_intlv_bits = ctx->ret_addr >> low_bit;
+ msb_intlv_bits &= 0x3;
+
+ /*
+ * If the MSBs are 11b, then the logical COH_ST ID is 6 or 7.
+ * Adjust based on the mod3 result.
+ */
+ if (msb_intlv_bits == 3) {
+ u8 addr_mod, phys_addr_msb, msb_coh_st_id;
+
+ /* Get the remaining interleave bits from the input address. */
+ temp_addr_b = GENMASK_ULL(low_bit - 1, intlv_bit) & ctx->ret_addr;
+ temp_addr_b >>= intlv_bit;
+
+ /* Calculate the logical COH_ST offset based on mod3. */
+ addr_mod = temp_addr_b % 3;
+
+ /* Get COH_ST ID bits [2:1]. */
+ msb_coh_st_id = (coh_st_id >> 1) & 0x3;
+
+ /* Get the bit that starts the physical address bits. */
+ phys_addr_msb = (intlv_bit + np2_bits + 1);
+ phys_addr_msb &= BIT(0);
+ phys_addr_msb++;
+ phys_addr_msb *= 3 - addr_mod + msb_coh_st_id;
+ phys_addr_msb %= 3;
+
+ /* Move the physical address MSB to the correct place. */
+ temp_addr_b |= phys_addr_msb << (low_bit - total_intlv_bits - intlv_bit);
+
+ /* Generate a new COH_ST ID as follows: coh_st_id = [1, 1, coh_st_id[0]] */
+ coh_st_id &= BIT(0);
+ coh_st_id |= GENMASK(2, 1);
+ } else {
+ temp_addr_b = GENMASK_ULL(63, intlv_bit) & ctx->ret_addr;
+ temp_addr_b >>= intlv_bit;
+ }
+
+ temp_addr_a = GENMASK_ULL(intlv_bit - 1, 0) & ctx->ret_addr;
+ temp_addr_b <<= intlv_bit + total_intlv_bits;
+
+ ctx->ret_addr = temp_addr_a | temp_addr_b;
+ ctx->ret_addr |= coh_st_id << intlv_bit;
+ return 0;
+}
+
+static int denorm_addr_df4_np2(struct addr_ctx *ctx)
+{
+ bool hash_ctl_64k, hash_ctl_2M, hash_ctl_1G;
+ u16 group, group_offset, log_coh_st_offset;
+ unsigned int mod_value, shift_value;
+ u16 mask = df_cfg.component_id_mask;
+ u64 temp_addr_a, temp_addr_b;
+ bool hash_pa8, hashed_bit;
+
+ switch (ctx->map.intlv_mode) {
+ case DF4_NPS4_3CHAN_HASH:
+ mod_value = 3;
+ shift_value = 13;
+ break;
+ case DF4_NPS2_6CHAN_HASH:
+ mod_value = 3;
+ shift_value = 12;
+ break;
+ case DF4_NPS1_12CHAN_HASH:
+ mod_value = 3;
+ shift_value = 11;
+ break;
+ case DF4_NPS2_5CHAN_HASH:
+ mod_value = 5;
+ shift_value = 13;
+ break;
+ case DF4_NPS1_10CHAN_HASH:
+ mod_value = 5;
+ shift_value = 12;
+ break;
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return -EINVAL;
+ }
+
+ if (ctx->map.num_intlv_sockets == 1) {
+ hash_pa8 = BIT_ULL(shift_value) & ctx->ret_addr;
+ temp_addr_a = remove_bits(shift_value, shift_value, ctx->ret_addr);
+ } else {
+ hash_pa8 = ctx->coh_st_fabric_id & df_cfg.socket_id_mask;
+ temp_addr_a = ctx->ret_addr;
+ }
+
+ /* Make a gap for the real bit [8]. */
+ temp_addr_a = expand_bits(8, 1, temp_addr_a);
+
+ /* Make an additional gap for bits [13:12], as appropriate. */
+ if (ctx->map.intlv_mode == DF4_NPS2_6CHAN_HASH ||
+ ctx->map.intlv_mode == DF4_NPS1_10CHAN_HASH) {
+ temp_addr_a = expand_bits(13, 1, temp_addr_a);
+ } else if (ctx->map.intlv_mode == DF4_NPS1_12CHAN_HASH) {
+ temp_addr_a = expand_bits(12, 2, temp_addr_a);
+ }
+
+ /* Keep bits [13:0]. */
+ temp_addr_a &= GENMASK_ULL(13, 0);
+
+ /* Get the appropriate high bits. */
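+ /*
+ * Normalization effectively divided these bits by the channel count
+ * within a group (mod_value), so multiply them back here; the
+ * within-group offset is added below.
+ */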
+ shift_value += 1 - ilog2(ctx->map.num_intlv_sockets);
+ temp_addr_b = GENMASK_ULL(63, shift_value) & ctx->ret_addr;
+ temp_addr_b >>= shift_value;
+ temp_addr_b *= mod_value;
+
+ /*
+ * Coherent Stations are divided into groups.
+ *
+ * Multiples of 3 (mod3) are divided into quadrants.
+ * e.g. NPS4_3CHAN -> [0, 1, 2] [6, 7, 8]
+ * [3, 4, 5] [9, 10, 11]
+ *
+ * Multiples of 5 (mod5) are divided into sides.
+ * e.g. NPS2_5CHAN -> [0, 1, 2, 3, 4] [5, 6, 7, 8, 9]
+ */
+
+ /*
+ * Calculate the logical offset for the COH_ST within its DRAM Address map.
+ * e.g. if map includes [5, 6, 7, 8, 9] and target instance is '8', then
+ * log_coh_st_offset = 8 - 5 = 3
+ */
+ log_coh_st_offset = (ctx->coh_st_fabric_id & mask) - (get_dst_fabric_id(ctx) & mask);
+
+ /*
+ * Figure out the group number.
+ *
+ * Following above example,
+ * log_coh_st_offset = 3
+ * mod_value = 5
+ * group = 3 / 5 = 0
+ */
+ group = log_coh_st_offset / mod_value;
+
+ /*
+ * Figure out the offset within the group.
+ *
+ * Following above example,
+ * log_coh_st_offset = 3
+ * mod_value = 5
+ * group_offset = 3 % 5 = 3
+ */
+ group_offset = log_coh_st_offset % mod_value;
+
+ /* Adjust group_offset if the hashed bit [8] is set. */
+ if (hash_pa8) {
+ if (!group_offset)
+ group_offset = mod_value - 1;
+ else
+ group_offset--;
+ }
+
+ /* Add in the group offset to the high bits. */
+ temp_addr_b += group_offset;
+
+ /* Shift the high bits to the proper starting position. */
+ temp_addr_b <<= 14;
+
+ /* Combine the high and low bits together. */
+ ctx->ret_addr = temp_addr_a | temp_addr_b;
+
+ /* Account for hashing here instead of in dehash_address(). */
+ hash_ctl_64k = FIELD_GET(DF4_HASH_CTL_64K, ctx->map.ctl);
+ hash_ctl_2M = FIELD_GET(DF4_HASH_CTL_2M, ctx->map.ctl);
+ hash_ctl_1G = FIELD_GET(DF4_HASH_CTL_1G, ctx->map.ctl);
+
+ hashed_bit = !!hash_pa8;
+ hashed_bit ^= FIELD_GET(BIT_ULL(14), ctx->ret_addr);
+ hashed_bit ^= FIELD_GET(BIT_ULL(16), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(21), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(30), ctx->ret_addr) & hash_ctl_1G;
+
+ ctx->ret_addr |= hashed_bit << 8;
+
+ /* Done for 3 and 5 channel. */
+ if (ctx->map.intlv_mode == DF4_NPS4_3CHAN_HASH ||
+ ctx->map.intlv_mode == DF4_NPS2_5CHAN_HASH)
+ return 0;
+
+ /* Select the proper 'group' bit to use for Bit 13. */
+ if (ctx->map.intlv_mode == DF4_NPS1_12CHAN_HASH)
+ hashed_bit = !!(group & BIT(1));
+ else
+ hashed_bit = group & BIT(0);
+
+ hashed_bit ^= FIELD_GET(BIT_ULL(18), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(23), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(32), ctx->ret_addr) & hash_ctl_1G;
+
+ ctx->ret_addr |= hashed_bit << 13;
+
+ /* Done for 6 and 10 channel. */
+ if (ctx->map.intlv_mode != DF4_NPS1_12CHAN_HASH)
+ return 0;
+
+ hashed_bit = group & BIT(0);
+ hashed_bit ^= FIELD_GET(BIT_ULL(17), ctx->ret_addr) & hash_ctl_64k;
+ hashed_bit ^= FIELD_GET(BIT_ULL(22), ctx->ret_addr) & hash_ctl_2M;
+ hashed_bit ^= FIELD_GET(BIT_ULL(31), ctx->ret_addr) & hash_ctl_1G;
+
+ ctx->ret_addr |= hashed_bit << 12;
+ return 0;
+}
+
+int denormalize_address(struct addr_ctx *ctx)
+{
+ switch (ctx->map.intlv_mode) {
+ case NONE:
+ return 0;
+ case DF4_NPS4_3CHAN_HASH:
+ case DF4_NPS2_6CHAN_HASH:
+ case DF4_NPS1_12CHAN_HASH:
+ case DF4_NPS2_5CHAN_HASH:
+ case DF4_NPS1_10CHAN_HASH:
+ return denorm_addr_df4_np2(ctx);
+ case DF3_6CHAN:
+ return denorm_addr_df3_6chan(ctx);
+ default:
+ return denorm_addr_common(ctx);
+ }
+}
diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
new file mode 100644
index 000000000000..5de69e0bb0f9
--- /dev/null
+++ b/drivers/ras/amd/atl/internal.h
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Address Translation Library
+ *
+ * internal.h : Helper functions and common defines
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#ifndef __AMD_ATL_INTERNAL_H__
+#define __AMD_ATL_INTERNAL_H__
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/ras.h>
+
+#include <asm/amd_nb.h>
+
+#include "reg_fields.h"
+
+/* Maximum possible number of Coherent Stations within a single Data Fabric. */
+#define MAX_COH_ST_CHANNELS 32
+
+/* PCI ID for Zen4 Server DF Function 0. */
+#define DF_FUNC0_ID_ZEN4_SERVER 0x14AD1022
+
+/* PCI IDs for MI300 DF Function 0. */
+#define DF_FUNC0_ID_MI300 0x15281022
+
+/* Shift needed for adjusting register values to true values. */
+#define DF_DRAM_BASE_LIMIT_LSB 28
+#define MI300_DRAM_LIMIT_LSB 20
+
+enum df_revisions {
+ UNKNOWN,
+ DF2,
+ DF3,
+ DF3p5,
+ DF4,
+ DF4p5,
+};
+
+/* These are mapped 1:1 to the hardware values. Special cases are set at > 0x20. */
+enum intlv_modes {
+ NONE = 0x00,
+ NOHASH_2CHAN = 0x01,
+ NOHASH_4CHAN = 0x03,
+ NOHASH_8CHAN = 0x05,
+ DF3_6CHAN = 0x06,
+ NOHASH_16CHAN = 0x07,
+ NOHASH_32CHAN = 0x08,
+ DF3_COD4_2CHAN_HASH = 0x0C,
+ DF3_COD2_4CHAN_HASH = 0x0D,
+ DF3_COD1_8CHAN_HASH = 0x0E,
+ DF4_NPS4_2CHAN_HASH = 0x10,
+ DF4_NPS2_4CHAN_HASH = 0x11,
+ DF4_NPS1_8CHAN_HASH = 0x12,
+ DF4_NPS4_3CHAN_HASH = 0x13,
+ DF4_NPS2_6CHAN_HASH = 0x14,
+ DF4_NPS1_12CHAN_HASH = 0x15,
+ DF4_NPS2_5CHAN_HASH = 0x16,
+ DF4_NPS1_10CHAN_HASH = 0x17,
+ MI3_HASH_8CHAN = 0x18,
+ MI3_HASH_16CHAN = 0x19,
+ MI3_HASH_32CHAN = 0x1A,
+ DF2_2CHAN_HASH = 0x21,
+ /* DF4.5 modes are all IntLvNumChan + 0x20 */
+ DF4p5_NPS1_16CHAN_1K_HASH = 0x2C,
+ DF4p5_NPS0_24CHAN_1K_HASH = 0x2E,
+ DF4p5_NPS4_2CHAN_1K_HASH = 0x30,
+ DF4p5_NPS2_4CHAN_1K_HASH = 0x31,
+ DF4p5_NPS1_8CHAN_1K_HASH = 0x32,
+ DF4p5_NPS4_3CHAN_1K_HASH = 0x33,
+ DF4p5_NPS2_6CHAN_1K_HASH = 0x34,
+ DF4p5_NPS1_12CHAN_1K_HASH = 0x35,
+ DF4p5_NPS2_5CHAN_1K_HASH = 0x36,
+ DF4p5_NPS1_10CHAN_1K_HASH = 0x37,
+ DF4p5_NPS4_2CHAN_2K_HASH = 0x40,
+ DF4p5_NPS2_4CHAN_2K_HASH = 0x41,
+ DF4p5_NPS1_8CHAN_2K_HASH = 0x42,
+ DF4p5_NPS1_16CHAN_2K_HASH = 0x43,
+ DF4p5_NPS4_3CHAN_2K_HASH = 0x44,
+ DF4p5_NPS2_6CHAN_2K_HASH = 0x45,
+ DF4p5_NPS1_12CHAN_2K_HASH = 0x46,
+ DF4p5_NPS0_24CHAN_2K_HASH = 0x47,
+ DF4p5_NPS2_5CHAN_2K_HASH = 0x48,
+ DF4p5_NPS1_10CHAN_2K_HASH = 0x49,
+};
+
+struct df_flags {
+ __u8 legacy_ficaa : 1,
+ socket_id_shift_quirk : 1,
+ heterogeneous : 1,
+ __reserved_0 : 5;
+};
+
+struct df_config {
+ enum df_revisions rev;
+
+ /*
+ * These masks operate on the 16-bit Coherent Station IDs,
+ * e.g. Instance, Fabric, Destination, etc.
+ */
+ u16 component_id_mask;
+ u16 die_id_mask;
+ u16 node_id_mask;
+ u16 socket_id_mask;
+
+ /*
+ * Least-significant bit of Node ID portion of the
+ * system-wide Coherent Station Fabric ID.
+ */
+ u8 node_id_shift;
+
+ /*
+ * Least-significant bit of Die portion of the Node ID.
+ * Adjusted to include the Node ID shift in order to apply
+ * to the Coherent Station Fabric ID.
+ */
+ u8 die_id_shift;
+
+ /*
+ * Least-significant bit of Socket portion of the Node ID.
+ * Adjusted to include the Node ID shift in order to apply
+ * to the Coherent Station Fabric ID.
+ */
+ u8 socket_id_shift;
+
+ /* Number of DRAM Address maps visible in a Coherent Station. */
+ u8 num_coh_st_maps;
+
+ /* Global flags to handle special cases. */
+ struct df_flags flags;
+};
+
+extern struct df_config df_cfg;
+
+struct dram_addr_map {
+ /*
+ * Each DRAM Address Map can operate independently
+ * in different interleaving modes.
+ */
+ enum intlv_modes intlv_mode;
+
+ /* System-wide number for this address map. */
+ u8 num;
+
+ /* Raw register values */
+ u32 base;
+ u32 limit;
+ u32 ctl;
+ u32 intlv;
+
+ /*
+ * Logical to Physical Coherent Station Remapping array
+ *
+ * Index: Logical Coherent Station Instance ID
+ * Value: Physical Coherent Station Instance ID
+ *
+ * phys_coh_st_inst_id = remap_array[log_coh_st_inst_id]
+ */
+ u8 remap_array[MAX_COH_ST_CHANNELS];
+
+ /*
+ * Number of bits covering DRAM Address map 0
+ * when interleaving is non-power-of-2.
+ *
+ * Used only for DF3_6CHAN.
+ */
+ u8 np2_bits;
+
+ /* Position of the 'interleave bit'. */
+ u8 intlv_bit_pos;
+ /* Number of channels interleaved in this map. */
+ u8 num_intlv_chan;
+ /* Number of dies interleaved in this map. */
+ u8 num_intlv_dies;
+ /* Number of sockets interleaved in this map. */
+ u8 num_intlv_sockets;
+ /*
+ * Total number of channels interleaved accounting
+ * for die and socket interleaving.
+ */
+ u8 total_intlv_chan;
+ /* Total bits needed to cover 'total_intlv_chan'. */
+ u8 total_intlv_bits;
+};
+
+/* Original input values cached for debug printing. */
+struct addr_ctx_inputs {
+ u64 norm_addr;
+ u8 socket_id;
+ u8 die_id;
+ u8 coh_st_inst_id;
+};
+
+struct addr_ctx {
+ u64 ret_addr;
+
+ struct addr_ctx_inputs inputs;
+ struct dram_addr_map map;
+
+ /* AMD Node ID calculated from Socket and Die IDs. */
+ u8 node_id;
+
+ /*
+ * Coherent Station Instance ID
+ * Local ID used within a 'node'.
+ */
+ u16 inst_id;
+
+ /*
+ * Coherent Station Fabric ID
+ * System-wide ID that includes 'node' bits.
+ */
+ u16 coh_st_fabric_id;
+};
+
+int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo);
+int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo);
+
+int get_df_system_info(void);
+int determine_node_id(struct addr_ctx *ctx, u8 socket_num, u8 die_num);
+int get_addr_hash_mi300(void);
+
+int get_address_map(struct addr_ctx *ctx);
+
+int denormalize_address(struct addr_ctx *ctx);
+int dehash_address(struct addr_ctx *ctx);
+
+unsigned long norm_to_sys_addr(u8 socket_id, u8 die_id, u8 coh_st_inst_id, unsigned long addr);
+unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err);
+
+/*
+ * Make a gap in @data that is @num_bits long starting at @bit_num.
+ * e.g. data = 11111111'b
+ * bit_num = 3
+ * num_bits = 2
+ * result = 1111100111'b
+ */
+static inline u64 expand_bits(u8 bit_num, u8 num_bits, u64 data)
+{
+ u64 temp1, temp2;
+
+ if (!num_bits)
+ return data;
+
+ if (!bit_num) {
+ WARN_ON_ONCE(num_bits >= BITS_PER_LONG);
+ return data << num_bits;
+ }
+
+ WARN_ON_ONCE(bit_num >= BITS_PER_LONG);
+
+ temp1 = data & GENMASK_ULL(bit_num - 1, 0);
+
+ temp2 = data & GENMASK_ULL(63, bit_num);
+ temp2 <<= num_bits;
+
+ return temp1 | temp2;
+}
+
+/*
+ * Remove bits in @data between @low_bit and @high_bit inclusive.
+ * e.g. data = XXXYYZZZ'b
+ * low_bit = 3
+ * high_bit = 4
+ * result = XXXZZZ'b
+ */
+static inline u64 remove_bits(u8 low_bit, u8 high_bit, u64 data)
+{
+ u64 temp1, temp2;
+
+ WARN_ON_ONCE(high_bit >= BITS_PER_LONG);
+ WARN_ON_ONCE(low_bit >= BITS_PER_LONG);
+ WARN_ON_ONCE(low_bit > high_bit);
+
+ if (!low_bit)
+ return data >> (high_bit + 1);
+
+ temp1 = GENMASK_ULL(low_bit - 1, 0) & data;
+ temp2 = GENMASK_ULL(63, high_bit + 1) & data;
+ temp2 >>= high_bit - low_bit + 1;
+
+ return temp1 | temp2;
+}
+
+#define atl_debug(ctx, fmt, arg...) \
+ pr_debug("socket_id=%u die_id=%u coh_st_inst_id=%u norm_addr=0x%016llx: " fmt,\
+ (ctx)->inputs.socket_id, (ctx)->inputs.die_id,\
+ (ctx)->inputs.coh_st_inst_id, (ctx)->inputs.norm_addr, ##arg)
+
+static inline void atl_debug_on_bad_df_rev(void)
+{
+ pr_debug("Unrecognized DF rev: %u", df_cfg.rev);
+}
+
+static inline void atl_debug_on_bad_intlv_mode(struct addr_ctx *ctx)
+{
+ atl_debug(ctx, "Unrecognized interleave mode: %u", ctx->map.intlv_mode);
+}
+
+#endif /* __AMD_ATL_INTERNAL_H__ */
diff --git a/drivers/ras/amd/atl/map.c b/drivers/ras/amd/atl/map.c
new file mode 100644
index 000000000000..8b908e8d7495
--- /dev/null
+++ b/drivers/ras/amd/atl/map.c
@@ -0,0 +1,682 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * map.c : Functions to read and decode DRAM address maps
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include "internal.h"
+
+static int df2_get_intlv_mode(struct addr_ctx *ctx)
+{
+ ctx->map.intlv_mode = FIELD_GET(DF2_INTLV_NUM_CHAN, ctx->map.base);
+
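+ /* DF2 uses raw register value 8 for its single 2-channel hash mode. */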
+ if (ctx->map.intlv_mode == 8)
+ ctx->map.intlv_mode = DF2_2CHAN_HASH;
+
+ if (ctx->map.intlv_mode != NONE &&
+ ctx->map.intlv_mode != NOHASH_2CHAN &&
+ ctx->map.intlv_mode != DF2_2CHAN_HASH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int df3_get_intlv_mode(struct addr_ctx *ctx)
+{
+ ctx->map.intlv_mode = FIELD_GET(DF3_INTLV_NUM_CHAN, ctx->map.base);
+ return 0;
+}
+
+static int df3p5_get_intlv_mode(struct addr_ctx *ctx)
+{
+ ctx->map.intlv_mode = FIELD_GET(DF3p5_INTLV_NUM_CHAN, ctx->map.base);
+
+ if (ctx->map.intlv_mode == DF3_6CHAN)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int df4_get_intlv_mode(struct addr_ctx *ctx)
+{
+ ctx->map.intlv_mode = FIELD_GET(DF4_INTLV_NUM_CHAN, ctx->map.intlv);
+
+ if (ctx->map.intlv_mode == DF3_COD4_2CHAN_HASH ||
+ ctx->map.intlv_mode == DF3_COD2_4CHAN_HASH ||
+ ctx->map.intlv_mode == DF3_COD1_8CHAN_HASH ||
+ ctx->map.intlv_mode == DF3_6CHAN)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int df4p5_get_intlv_mode(struct addr_ctx *ctx)
+{
+ ctx->map.intlv_mode = FIELD_GET(DF4p5_INTLV_NUM_CHAN, ctx->map.intlv);
+
+ if (ctx->map.intlv_mode <= NOHASH_32CHAN)
+ return 0;
+
+ if (ctx->map.intlv_mode >= MI3_HASH_8CHAN &&
+ ctx->map.intlv_mode <= MI3_HASH_32CHAN)
+ return 0;
+
+ /*
+ * Modes matching the ranges above are returned as-is.
+ *
+ * All other modes are "fixed up" by adding 20h to make a unique value.
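+ * e.g. a raw value of 0x10 becomes DF4p5_NPS4_2CHAN_1K_HASH (0x30).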
+ */
+ ctx->map.intlv_mode += 0x20;
+
+ return 0;
+}
+
+static int get_intlv_mode(struct addr_ctx *ctx)
+{
+ int ret;
+
+ switch (df_cfg.rev) {
+ case DF2:
+ ret = df2_get_intlv_mode(ctx);
+ break;
+ case DF3:
+ ret = df3_get_intlv_mode(ctx);
+ break;
+ case DF3p5:
+ ret = df3p5_get_intlv_mode(ctx);
+ break;
+ case DF4:
+ ret = df4_get_intlv_mode(ctx);
+ break;
+ case DF4p5:
+ ret = df4p5_get_intlv_mode(ctx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ atl_debug_on_bad_df_rev();
+
+ return ret;
+}
+
+static u64 get_hi_addr_offset(u32 reg_dram_offset)
+{
+ u8 shift = DF_DRAM_BASE_LIMIT_LSB;
+ u64 hi_addr_offset;
+
+ switch (df_cfg.rev) {
+ case DF2:
+ hi_addr_offset = FIELD_GET(DF2_HI_ADDR_OFFSET, reg_dram_offset);
+ break;
+ case DF3:
+ case DF3p5:
+ hi_addr_offset = FIELD_GET(DF3_HI_ADDR_OFFSET, reg_dram_offset);
+ break;
+ case DF4:
+ case DF4p5:
+ hi_addr_offset = FIELD_GET(DF4_HI_ADDR_OFFSET, reg_dram_offset);
+ break;
+ default:
+ hi_addr_offset = 0;
+ atl_debug_on_bad_df_rev();
+ }
+
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ shift = MI300_DRAM_LIMIT_LSB;
+
+ return hi_addr_offset << shift;
+}
+
+/*
+ * Returns: 0 if offset is disabled.
+ * 1 if offset is enabled.
+ * -EINVAL on error.
+ */
+static int get_dram_offset(struct addr_ctx *ctx, u64 *norm_offset)
+{
+ u32 reg_dram_offset;
+ u8 map_num;
+
+ /* Should not be called for map 0. */
+ if (!ctx->map.num) {
+ atl_debug(ctx, "Trying to find DRAM offset for map 0");
+ return -EINVAL;
+ }
+
+ /*
+ * DramOffset registers don't exist for map 0, so the base register
+ * actually refers to map 1.
+ * Adjust the map_num for the register offsets.
+ */
+ map_num = ctx->map.num - 1;
+
+ if (df_cfg.rev >= DF4) {
+ /* Read D18F7x140 (DramOffset) */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x140 + (4 * map_num),
+ ctx->inst_id, &reg_dram_offset))
+ return -EINVAL;
+
+ } else {
+ /* Read D18F0x1B4 (DramOffset) */
+ if (df_indirect_read_instance(ctx->node_id, 0, 0x1B4 + (4 * map_num),
+ ctx->inst_id, &reg_dram_offset))
+ return -EINVAL;
+ }
+
+ if (!FIELD_GET(DF_HI_ADDR_OFFSET_EN, reg_dram_offset))
+ return 0;
+
+ *norm_offset = get_hi_addr_offset(reg_dram_offset);
+
+ return 1;
+}
+
+static int df3_6ch_get_dram_addr_map(struct addr_ctx *ctx)
+{
+ u16 dst_fabric_id = FIELD_GET(DF3_DST_FABRIC_ID, ctx->map.limit);
+ u8 i, j, shift = 4, mask = 0xF;
+ u32 reg, offset = 0x60;
+ u16 dst_node_id;
+
+ /* Get Socket 1 register. */
+ if (dst_fabric_id & df_cfg.socket_id_mask)
+ offset = 0x68;
+
+ /* Read D18F0x06{0,8} (DF::Skt0CsTargetRemap0)/(DF::Skt0CsTargetRemap1) */
+ if (df_indirect_read_broadcast(ctx->node_id, 0, offset, &reg))
+ return -EINVAL;
+
+ /* Save 8 remap entries. */
+ for (i = 0, j = 0; i < 8; i++, j++)
+ ctx->map.remap_array[i] = (reg >> (j * shift)) & mask;
+
+ dst_node_id = dst_fabric_id & df_cfg.node_id_mask;
+ dst_node_id >>= df_cfg.node_id_shift;
+
+ /* Read D18F2x090 (DF::Np2ChannelConfig) */
+ if (df_indirect_read_broadcast(dst_node_id, 2, 0x90, &reg))
+ return -EINVAL;
+
+ ctx->map.np2_bits = FIELD_GET(DF_LOG2_ADDR_64K_SPACE0, reg);
+ return 0;
+}
+
+static int df2_get_dram_addr_map(struct addr_ctx *ctx)
+{
+ /* Read D18F0x110 (DramBaseAddress). */
+ if (df_indirect_read_instance(ctx->node_id, 0, 0x110 + (8 * ctx->map.num),
+ ctx->inst_id, &ctx->map.base))
+ return -EINVAL;
+
+ /* Read D18F0x114 (DramLimitAddress). */
+ if (df_indirect_read_instance(ctx->node_id, 0, 0x114 + (8 * ctx->map.num),
+ ctx->inst_id, &ctx->map.limit))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int df3_get_dram_addr_map(struct addr_ctx *ctx)
+{
+ if (df2_get_dram_addr_map(ctx))
+ return -EINVAL;
+
+ /* Read D18F0x3F8 (DfGlobalCtl). */
+ if (df_indirect_read_instance(ctx->node_id, 0, 0x3F8,
+ ctx->inst_id, &ctx->map.ctl))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int df4_get_dram_addr_map(struct addr_ctx *ctx)
+{
+ u8 remap_sel, i, j, shift = 4, mask = 0xF;
+ u32 remap_reg;
+
+ /* Read D18F7xE00 (DramBaseAddress). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0xE00 + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.base))
+ return -EINVAL;
+
+ /* Read D18F7xE04 (DramLimitAddress). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0xE04 + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.limit))
+ return -EINVAL;
+
+ /* Read D18F7xE08 (DramAddressCtl). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0xE08 + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.ctl))
+ return -EINVAL;
+
+ /* Read D18F7xE0C (DramAddressIntlv). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0xE0C + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.intlv))
+ return -EINVAL;
+
+ /* Check if Remap Enable bit is valid. */
+ if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl))
+ return 0;
+
+ /* Fill with bogus values, because '0' is a valid value. */
+ memset(&ctx->map.remap_array, 0xFF, sizeof(ctx->map.remap_array));
+
+ /* Get Remap registers. */
+ remap_sel = FIELD_GET(DF4_REMAP_SEL, ctx->map.ctl);
+
+ /* Read D18F7x180 (CsTargetRemap0A). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x180 + (8 * remap_sel),
+ ctx->inst_id, &remap_reg))
+ return -EINVAL;
+
+ /* Save first 8 remap entries. */
+ for (i = 0, j = 0; i < 8; i++, j++)
+ ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
+
+ /* Read D18F7x184 (CsTargetRemap0B). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x184 + (8 * remap_sel),
+ ctx->inst_id, &remap_reg))
+ return -EINVAL;
+
+ /* Save next 8 remap entries. */
+ for (i = 8, j = 0; i < 16; i++, j++)
+ ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
+
+ return 0;
+}
+
+static int df4p5_get_dram_addr_map(struct addr_ctx *ctx)
+{
+ u8 remap_sel, i, j, shift = 5, mask = 0x1F;
+ u32 remap_reg;
+
+ /* Read D18F7x200 (DramBaseAddress). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x200 + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.base))
+ return -EINVAL;
+
+ /* Read D18F7x204 (DramLimitAddress). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x204 + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.limit))
+ return -EINVAL;
+
+ /* Read D18F7x208 (DramAddressCtl). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x208 + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.ctl))
+ return -EINVAL;
+
+ /* Read D18F7x20C (DramAddressIntlv). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x20C + (16 * ctx->map.num),
+ ctx->inst_id, &ctx->map.intlv))
+ return -EINVAL;
+
+ /* Check if Remap Enable bit is valid. */
+ if (!FIELD_GET(DF4_REMAP_EN, ctx->map.ctl))
+ return 0;
+
+ /* Fill with bogus values, because '0' is a valid value. */
+ memset(&ctx->map.remap_array, 0xFF, sizeof(ctx->map.remap_array));
+
+ /* Get Remap registers. */
+ remap_sel = FIELD_GET(DF4p5_REMAP_SEL, ctx->map.ctl);
+
+ /* Read D18F7x180 (CsTargetRemap0A). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x180 + (24 * remap_sel),
+ ctx->inst_id, &remap_reg))
+ return -EINVAL;
+
+ /* Save first 6 remap entries. */
+ for (i = 0, j = 0; i < 6; i++, j++)
+ ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
+
+ /* Read D18F7x184 (CsTargetRemap0B). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x184 + (24 * remap_sel),
+ ctx->inst_id, &remap_reg))
+ return -EINVAL;
+
+ /* Save next 6 remap entries. */
+ for (i = 6, j = 0; i < 12; i++, j++)
+ ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
+
+ /* Read D18F7x188 (CsTargetRemap0C). */
+ if (df_indirect_read_instance(ctx->node_id, 7, 0x188 + (24 * remap_sel),
+ ctx->inst_id, &remap_reg))
+ return -EINVAL;
+
+ /* Save next 6 remap entries. */
+ for (i = 12, j = 0; i < 18; i++, j++)
+ ctx->map.remap_array[i] = (remap_reg >> (j * shift)) & mask;
+
+ return 0;
+}
+
+static int get_dram_addr_map(struct addr_ctx *ctx)
+{
+ switch (df_cfg.rev) {
+ case DF2: return df2_get_dram_addr_map(ctx);
+ case DF3:
+ case DF3p5: return df3_get_dram_addr_map(ctx);
+ case DF4: return df4_get_dram_addr_map(ctx);
+ case DF4p5: return df4p5_get_dram_addr_map(ctx);
+ default:
+ atl_debug_on_bad_df_rev();
+ return -EINVAL;
+ }
+}
+
+static int get_coh_st_fabric_id(struct addr_ctx *ctx)
+{
+ u32 reg;
+
+ /*
+ * On MI300 systems, the Coherent Station Fabric ID is derived
+ * later. And it does not depend on the register value.
+ */
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ return 0;
+
+ /* Read D18F0x50 (FabricBlockInstanceInformation3). */
+ if (df_indirect_read_instance(ctx->node_id, 0, 0x50, ctx->inst_id, &reg))
+ return -EINVAL;
+
+ if (df_cfg.rev < DF4p5)
+ ctx->coh_st_fabric_id = FIELD_GET(DF2_COH_ST_FABRIC_ID, reg);
+ else
+ ctx->coh_st_fabric_id = FIELD_GET(DF4p5_COH_ST_FABRIC_ID, reg);
+
+ return 0;
+}
+
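+/*
+ * Illustrative example (values assumed): if map 1 has an enabled offset
+ * of 0x8000000 and the current address is 0x9000000, map 1 is selected
+ * and the offset is later subtracted in get_address_map_common().
+ */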
+static int find_normalized_offset(struct addr_ctx *ctx, u64 *norm_offset)
+{
+ u64 last_offset = 0;
+ int ret;
+
+ for (ctx->map.num = 1; ctx->map.num < df_cfg.num_coh_st_maps; ctx->map.num++) {
+ ret = get_dram_offset(ctx, norm_offset);
+ if (ret < 0)
+ return ret;
+
+ /* Continue search if this map's offset is not enabled. */
+ if (!ret)
+ continue;
+
+ /* Enabled offsets should never be 0. */
+ if (*norm_offset == 0) {
+ atl_debug(ctx, "Enabled map %u offset is 0", ctx->map.num);
+ return -EINVAL;
+ }
+
+ /* Offsets should always increase from one map to the next. */
+ if (*norm_offset <= last_offset) {
+ atl_debug(ctx, "Map %u offset (0x%016llx) <= previous (0x%016llx)",
+ ctx->map.num, *norm_offset, last_offset);
+ return -EINVAL;
+ }
+
+ /* Match if this map's offset is less than or equal to the current calculated address. */
+ if (ctx->ret_addr >= *norm_offset)
+ break;
+
+ last_offset = *norm_offset;
+ }
+
+ /*
+ * Finished search without finding a match.
+ * Reset to map 0 and no offset.
+ */
+ if (ctx->map.num >= df_cfg.num_coh_st_maps) {
+ ctx->map.num = 0;
+ *norm_offset = 0;
+ }
+
+ return 0;
+}
+
+static bool valid_map(struct addr_ctx *ctx)
+{
+ if (df_cfg.rev >= DF4)
+ return FIELD_GET(DF_ADDR_RANGE_VAL, ctx->map.ctl);
+ else
+ return FIELD_GET(DF_ADDR_RANGE_VAL, ctx->map.base);
+}
+
+static int get_address_map_common(struct addr_ctx *ctx)
+{
+ u64 norm_offset = 0;
+
+ if (get_coh_st_fabric_id(ctx))
+ return -EINVAL;
+
+ if (find_normalized_offset(ctx, &norm_offset))
+ return -EINVAL;
+
+ if (get_dram_addr_map(ctx))
+ return -EINVAL;
+
+ if (!valid_map(ctx))
+ return -EINVAL;
+
+ ctx->ret_addr -= norm_offset;
+
+ return 0;
+}
+
+static u8 get_num_intlv_chan(struct addr_ctx *ctx)
+{
+ switch (ctx->map.intlv_mode) {
+ case NONE:
+ return 1;
+ case NOHASH_2CHAN:
+ case DF2_2CHAN_HASH:
+ case DF3_COD4_2CHAN_HASH:
+ case DF4_NPS4_2CHAN_HASH:
+ case DF4p5_NPS4_2CHAN_1K_HASH:
+ case DF4p5_NPS4_2CHAN_2K_HASH:
+ return 2;
+ case DF4_NPS4_3CHAN_HASH:
+ case DF4p5_NPS4_3CHAN_1K_HASH:
+ case DF4p5_NPS4_3CHAN_2K_HASH:
+ return 3;
+ case NOHASH_4CHAN:
+ case DF3_COD2_4CHAN_HASH:
+ case DF4_NPS2_4CHAN_HASH:
+ case DF4p5_NPS2_4CHAN_1K_HASH:
+ case DF4p5_NPS2_4CHAN_2K_HASH:
+ return 4;
+ case DF4_NPS2_5CHAN_HASH:
+ case DF4p5_NPS2_5CHAN_1K_HASH:
+ case DF4p5_NPS2_5CHAN_2K_HASH:
+ return 5;
+ case DF3_6CHAN:
+ case DF4_NPS2_6CHAN_HASH:
+ case DF4p5_NPS2_6CHAN_1K_HASH:
+ case DF4p5_NPS2_6CHAN_2K_HASH:
+ return 6;
+ case NOHASH_8CHAN:
+ case DF3_COD1_8CHAN_HASH:
+ case DF4_NPS1_8CHAN_HASH:
+ case MI3_HASH_8CHAN:
+ case DF4p5_NPS1_8CHAN_1K_HASH:
+ case DF4p5_NPS1_8CHAN_2K_HASH:
+ return 8;
+ case DF4_NPS1_10CHAN_HASH:
+ case DF4p5_NPS1_10CHAN_1K_HASH:
+ case DF4p5_NPS1_10CHAN_2K_HASH:
+ return 10;
+ case DF4_NPS1_12CHAN_HASH:
+ case DF4p5_NPS1_12CHAN_1K_HASH:
+ case DF4p5_NPS1_12CHAN_2K_HASH:
+ return 12;
+ case NOHASH_16CHAN:
+ case MI3_HASH_16CHAN:
+ case DF4p5_NPS1_16CHAN_1K_HASH:
+ case DF4p5_NPS1_16CHAN_2K_HASH:
+ return 16;
+ case DF4p5_NPS0_24CHAN_1K_HASH:
+ case DF4p5_NPS0_24CHAN_2K_HASH:
+ return 24;
+ case NOHASH_32CHAN:
+ case MI3_HASH_32CHAN:
+ return 32;
+ default:
+ atl_debug_on_bad_intlv_mode(ctx);
+ return 0;
+ }
+}
+
+static void calculate_intlv_bits(struct addr_ctx *ctx)
+{
+ ctx->map.num_intlv_chan = get_num_intlv_chan(ctx);
+
+ ctx->map.total_intlv_chan = ctx->map.num_intlv_chan;
+ ctx->map.total_intlv_chan *= ctx->map.num_intlv_dies;
+ ctx->map.total_intlv_chan *= ctx->map.num_intlv_sockets;
+
+ /*
+ * Get the number of bits needed to cover this many channels.
+ * order_base_2() rounds up automatically.
+ */
+ ctx->map.total_intlv_bits = order_base_2(ctx->map.total_intlv_chan);
+}
+
+static u8 get_intlv_bit_pos(struct addr_ctx *ctx)
+{
+ u8 addr_sel = 0;
+
+ switch (df_cfg.rev) {
+ case DF2:
+ addr_sel = FIELD_GET(DF2_INTLV_ADDR_SEL, ctx->map.base);
+ break;
+ case DF3:
+ case DF3p5:
+ addr_sel = FIELD_GET(DF3_INTLV_ADDR_SEL, ctx->map.base);
+ break;
+ case DF4:
+ case DF4p5:
+ addr_sel = FIELD_GET(DF4_INTLV_ADDR_SEL, ctx->map.intlv);
+ break;
+ default:
+ atl_debug_on_bad_df_rev();
+ break;
+ }
+
+ /* Add '8' to get the 'interleave bit position'. */
+ return addr_sel + 8;
+}
+
+static u8 get_num_intlv_dies(struct addr_ctx *ctx)
+{
+ u8 dies = 0;
+
+ switch (df_cfg.rev) {
+ case DF2:
+ dies = FIELD_GET(DF2_INTLV_NUM_DIES, ctx->map.limit);
+ break;
+ case DF3:
+ dies = FIELD_GET(DF3_INTLV_NUM_DIES, ctx->map.base);
+ break;
+ case DF3p5:
+ dies = FIELD_GET(DF3p5_INTLV_NUM_DIES, ctx->map.base);
+ break;
+ case DF4:
+ case DF4p5:
+ dies = FIELD_GET(DF4_INTLV_NUM_DIES, ctx->map.intlv);
+ break;
+ default:
+ atl_debug_on_bad_df_rev();
+ break;
+ }
+
+ /* Register value is log2, e.g. 0 -> 1 die, 1 -> 2 dies, etc. */
+ return 1 << dies;
+}
+
+static u8 get_num_intlv_sockets(struct addr_ctx *ctx)
+{
+ u8 sockets = 0;
+
+ switch (df_cfg.rev) {
+ case DF2:
+ sockets = FIELD_GET(DF2_INTLV_NUM_SOCKETS, ctx->map.limit);
+ break;
+ case DF3:
+ case DF3p5:
+ sockets = FIELD_GET(DF2_INTLV_NUM_SOCKETS, ctx->map.base);
+ break;
+ case DF4:
+ case DF4p5:
+ sockets = FIELD_GET(DF4_INTLV_NUM_SOCKETS, ctx->map.intlv);
+ break;
+ default:
+ atl_debug_on_bad_df_rev();
+ break;
+ }
+
+ /* Register value is log2, e.g. 0 -> 1 socket, 1 -> 2 sockets, etc. */
+ return 1 << sockets;
+}
+
+static int get_global_map_data(struct addr_ctx *ctx)
+{
+ if (get_intlv_mode(ctx))
+ return -EINVAL;
+
+ if (ctx->map.intlv_mode == DF3_6CHAN &&
+ df3_6ch_get_dram_addr_map(ctx))
+ return -EINVAL;
+
+ ctx->map.intlv_bit_pos = get_intlv_bit_pos(ctx);
+ ctx->map.num_intlv_dies = get_num_intlv_dies(ctx);
+ ctx->map.num_intlv_sockets = get_num_intlv_sockets(ctx);
+ calculate_intlv_bits(ctx);
+
+ return 0;
+}
+
+static void dump_address_map(struct dram_addr_map *map)
+{
+ u8 i;
+
+ pr_debug("intlv_mode=0x%x", map->intlv_mode);
+ pr_debug("num=0x%x", map->num);
+ pr_debug("base=0x%x", map->base);
+ pr_debug("limit=0x%x", map->limit);
+ pr_debug("ctl=0x%x", map->ctl);
+ pr_debug("intlv=0x%x", map->intlv);
+
+ for (i = 0; i < MAX_COH_ST_CHANNELS; i++)
+ pr_debug("remap_array[%u]=0x%x", i, map->remap_array[i]);
+
+ pr_debug("intlv_bit_pos=%u", map->intlv_bit_pos);
+ pr_debug("num_intlv_chan=%u", map->num_intlv_chan);
+ pr_debug("num_intlv_dies=%u", map->num_intlv_dies);
+ pr_debug("num_intlv_sockets=%u", map->num_intlv_sockets);
+ pr_debug("total_intlv_chan=%u", map->total_intlv_chan);
+ pr_debug("total_intlv_bits=%u", map->total_intlv_bits);
+}
+
+int get_address_map(struct addr_ctx *ctx)
+{
+ int ret;
+
+ ret = get_address_map_common(ctx);
+ if (ret)
+ return ret;
+
+ ret = get_global_map_data(ctx);
+ if (ret)
+ return ret;
+
+ dump_address_map(&ctx->map);
+
+ return ret;
+}
diff --git a/drivers/ras/amd/atl/reg_fields.h b/drivers/ras/amd/atl/reg_fields.h
new file mode 100644
index 000000000000..9dcdf6e4a856
--- /dev/null
+++ b/drivers/ras/amd/atl/reg_fields.h
@@ -0,0 +1,606 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Address Translation Library
+ *
+ * reg_fields.h : Register field definitions
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+/*
+ * Notes on naming:
+ * 1) Use "DF_" prefix for fields that are the same for all revisions.
+ * 2) Use "DFx_" prefix for fields that differ between revisions.
+ * a) "x" is the first major revision where the new field appears.
+ * b) E.g., if DF2 and DF3 have the same field, then call it DF2.
+ * c) E.g., if DF3p5 and DF4 have the same field, then call it DF4.
+ */
+
+/*
+ * Coherent Station Fabric ID
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x50 [Fabric Block Instance Information 3]
+ * DF2 BlockFabricId [19:8]
+ * DF3 BlockFabricId [19:8]
+ * DF3p5 BlockFabricId [19:8]
+ * DF4 BlockFabricId [19:8]
+ * DF4p5 BlockFabricId [15:8]
+ */
+#define DF2_COH_ST_FABRIC_ID GENMASK(19, 8)
+#define DF4p5_COH_ST_FABRIC_ID GENMASK(15, 8)
+
+/*
+ * Component ID Mask
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ *
+ * D18F1x208 [System Fabric ID Mask 0]
+ * DF3 ComponentIdMask [9:0]
+ *
+ * D18F1x150 [System Fabric ID Mask 0]
+ * DF3p5 ComponentIdMask [15:0]
+ *
+ * D18F4x1B0 [System Fabric ID Mask 0]
+ * DF4 ComponentIdMask [15:0]
+ * DF4p5 ComponentIdMask [15:0]
+ */
+#define DF3_COMPONENT_ID_MASK GENMASK(9, 0)
+#define DF4_COMPONENT_ID_MASK GENMASK(15, 0)
+
+/*
+ * Destination Fabric ID
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x114 [DRAM Limit Address]
+ * DF2 DstFabricID [7:0]
+ * DF3 DstFabricID [9:0]
+ * DF3p5 DstFabricID [11:0]
+ *
+ * D18F7xE08 [DRAM Address Control]
+ * DF4 DstFabricID [27:16]
+ *
+ * D18F7x208 [DRAM Address Control]
+ * DF4p5 DstFabricID [23:16]
+ */
+#define DF2_DST_FABRIC_ID GENMASK(7, 0)
+#define DF3_DST_FABRIC_ID GENMASK(9, 0)
+#define DF3p5_DST_FABRIC_ID GENMASK(11, 0)
+#define DF4_DST_FABRIC_ID GENMASK(27, 16)
+#define DF4p5_DST_FABRIC_ID GENMASK(23, 16)
+
+/*
+ * Die ID Mask
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F1x208 [System Fabric ID Mask]
+ * DF2 DieIdMask [15:8]
+ *
+ * D18F1x20C [System Fabric ID Mask 1]
+ * DF3 DieIdMask [18:16]
+ *
+ * D18F1x158 [System Fabric ID Mask 2]
+ * DF3p5 DieIdMask [15:0]
+ *
+ * D18F4x1B8 [System Fabric ID Mask 2]
+ * DF4 DieIdMask [15:0]
+ * DF4p5 DieIdMask [15:0]
+ */
+#define DF2_DIE_ID_MASK GENMASK(15, 8)
+#define DF3_DIE_ID_MASK GENMASK(18, 16)
+#define DF4_DIE_ID_MASK GENMASK(15, 0)
+
+/*
+ * Die ID Shift
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F1x208 [System Fabric ID Mask]
+ * DF2 DieIdShift [27:24]
+ *
+ * DF3 N/A
+ * DF3p5 N/A
+ * DF4 N/A
+ * DF4p5 N/A
+ */
+#define DF2_DIE_ID_SHIFT GENMASK(27, 24)
+
+/*
+ * DRAM Address Range Valid
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF2 AddrRngVal [0]
+ * DF3 AddrRngVal [0]
+ * DF3p5 AddrRngVal [0]
+ *
+ * D18F7xE08 [DRAM Address Control]
+ * DF4 AddrRngVal [0]
+ *
+ * D18F7x208 [DRAM Address Control]
+ * DF4p5 AddrRngVal [0]
+ */
+#define DF_ADDR_RANGE_VAL BIT(0)
+
+/*
+ * DRAM Base Address
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF2 DramBaseAddr [31:12]
+ * DF3 DramBaseAddr [31:12]
+ * DF3p5 DramBaseAddr [31:12]
+ *
+ * D18F7xE00 [DRAM Base Address]
+ * DF4 DramBaseAddr [27:0]
+ *
+ * D18F7x200 [DRAM Base Address]
+ * DF4p5 DramBaseAddr [27:0]
+ */
+#define DF2_BASE_ADDR GENMASK(31, 12)
+#define DF4_BASE_ADDR GENMASK(27, 0)
+
+/*
+ * DRAM Hole Base
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x104 [DRAM Hole Control]
+ * DF2 DramHoleBase [31:24]
+ * DF3 DramHoleBase [31:24]
+ * DF3p5 DramHoleBase [31:24]
+ *
+ * D18F7x104 [DRAM Hole Control]
+ * DF4 DramHoleBase [31:24]
+ * DF4p5 DramHoleBase [31:24]
+ */
+#define DF_DRAM_HOLE_BASE_MASK GENMASK(31, 24)
+
+/*
+ * DRAM Limit Address
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x114 [DRAM Limit Address]
+ * DF2 DramLimitAddr [31:12]
+ * DF3 DramLimitAddr [31:12]
+ * DF3p5 DramLimitAddr [31:12]
+ *
+ * D18F7xE04 [DRAM Limit Address]
+ * DF4 DramLimitAddr [27:0]
+ *
+ * D18F7x204 [DRAM Limit Address]
+ * DF4p5 DramLimitAddr [27:0]
+ */
+#define DF2_DRAM_LIMIT_ADDR GENMASK(31, 12)
+#define DF4_DRAM_LIMIT_ADDR GENMASK(27, 0)
+
+/*
+ * Hash Interleave Controls
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ *
+ * D18F0x3F8 [DF Global Control]
+ * DF3 GlbHashIntlvCtl64K [20]
+ * GlbHashIntlvCtl2M [21]
+ * GlbHashIntlvCtl1G [22]
+ *
+ * DF3p5 GlbHashIntlvCtl64K [20]
+ * GlbHashIntlvCtl2M [21]
+ * GlbHashIntlvCtl1G [22]
+ *
+ * D18F7xE08 [DRAM Address Control]
+ * DF4 HashIntlvCtl64K [8]
+ * HashIntlvCtl2M [9]
+ * HashIntlvCtl1G [10]
+ *
+ * D18F7x208 [DRAM Address Control]
+ * DF4p5 HashIntlvCtl4K [7]
+ * HashIntlvCtl64K [8]
+ * HashIntlvCtl2M [9]
+ * HashIntlvCtl1G [10]
+ * HashIntlvCtl1T [15]
+ */
+#define DF3_HASH_CTL_64K BIT(20)
+#define DF3_HASH_CTL_2M BIT(21)
+#define DF3_HASH_CTL_1G BIT(22)
+#define DF4_HASH_CTL_64K BIT(8)
+#define DF4_HASH_CTL_2M BIT(9)
+#define DF4_HASH_CTL_1G BIT(10)
+#define DF4p5_HASH_CTL_4K BIT(7)
+#define DF4p5_HASH_CTL_1T BIT(15)
+
+/*
+ * High Address Offset
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x1B4 [DRAM Offset]
+ * DF2 HiAddrOffset [31:20]
+ * DF3 HiAddrOffset [31:12]
+ * DF3p5 HiAddrOffset [31:12]
+ *
+ * D18F7x140 [DRAM Offset]
+ * DF4 HiAddrOffset [24:1]
+ * DF4p5 HiAddrOffset [24:1]
+ * MI300 HiAddrOffset [31:1]
+ */
+#define DF2_HI_ADDR_OFFSET GENMASK(31, 20)
+#define DF3_HI_ADDR_OFFSET GENMASK(31, 12)
+
+/* Follow reference code by including reserved bits for simplicity. */
+#define DF4_HI_ADDR_OFFSET GENMASK(31, 1)
+
+/*
+ * High Address Offset Enable
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x1B4 [DRAM Offset]
+ * DF2 HiAddrOffsetEn [0]
+ * DF3 HiAddrOffsetEn [0]
+ * DF3p5 HiAddrOffsetEn [0]
+ *
+ * D18F7x140 [DRAM Offset]
+ * DF4 HiAddrOffsetEn [0]
+ * DF4p5 HiAddrOffsetEn [0]
+ */
+#define DF_HI_ADDR_OFFSET_EN BIT(0)
+
+/*
+ * Interleave Address Select
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF2 IntLvAddrSel [10:8]
+ * DF3 IntLvAddrSel [11:9]
+ * DF3p5 IntLvAddrSel [11:9]
+ *
+ * D18F7xE0C [DRAM Address Interleave]
+ * DF4 IntLvAddrSel [2:0]
+ *
+ * D18F7x20C [DRAM Address Interleave]
+ * DF4p5 IntLvAddrSel [2:0]
+ */
+#define DF2_INTLV_ADDR_SEL GENMASK(10, 8)
+#define DF3_INTLV_ADDR_SEL GENMASK(11, 9)
+#define DF4_INTLV_ADDR_SEL GENMASK(2, 0)
+
+/*
+ * Interleave Number of Channels
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF2 IntLvNumChan [7:4]
+ * DF3 IntLvNumChan [5:2]
+ * DF3p5 IntLvNumChan [6:2]
+ *
+ * D18F7xE0C [DRAM Address Interleave]
+ * DF4 IntLvNumChan [8:4]
+ *
+ * D18F7x20C [DRAM Address Interleave]
+ * DF4p5 IntLvNumChan [9:4]
+ */
+#define DF2_INTLV_NUM_CHAN GENMASK(7, 4)
+#define DF3_INTLV_NUM_CHAN GENMASK(5, 2)
+#define DF3p5_INTLV_NUM_CHAN GENMASK(6, 2)
+#define DF4_INTLV_NUM_CHAN GENMASK(8, 4)
+#define DF4p5_INTLV_NUM_CHAN GENMASK(9, 4)
+
+/*
+ * Interleave Number of Dies
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x114 [DRAM Limit Address]
+ * DF2 IntLvNumDies [11:10]
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF3 IntLvNumDies [7:6]
+ * DF3p5 IntLvNumDies [7]
+ *
+ * D18F7xE0C [DRAM Address Interleave]
+ * DF4 IntLvNumDies [13:12]
+ *
+ * D18F7x20C [DRAM Address Interleave]
+ * DF4p5 IntLvNumDies [13:12]
+ */
+#define DF2_INTLV_NUM_DIES GENMASK(11, 10)
+#define DF3_INTLV_NUM_DIES GENMASK(7, 6)
+#define DF3p5_INTLV_NUM_DIES BIT(7)
+#define DF4_INTLV_NUM_DIES GENMASK(13, 12)
+
+/*
+ * Interleave Number of Sockets
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x114 [DRAM Limit Address]
+ * DF2 IntLvNumSockets [8]
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF3 IntLvNumSockets [8]
+ * DF3p5 IntLvNumSockets [8]
+ *
+ * D18F7xE0C [DRAM Address Interleave]
+ * DF4 IntLvNumSockets [18]
+ *
+ * D18F7x20C [DRAM Address Interleave]
+ * DF4p5 IntLvNumSockets [18]
+ */
+#define DF2_INTLV_NUM_SOCKETS BIT(8)
+#define DF4_INTLV_NUM_SOCKETS BIT(18)
+
+/*
+ * Legacy MMIO Hole Enable
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F0x110 [DRAM Base Address]
+ * DF2 LgcyMmioHoleEn [1]
+ * DF3 LgcyMmioHoleEn [1]
+ * DF3p5 LgcyMmioHoleEn [1]
+ *
+ * D18F7xE08 [DRAM Address Control]
+ * DF4 LgcyMmioHoleEn [1]
+ *
+ * D18F7x208 [DRAM Address Control]
+ * DF4p5 LgcyMmioHoleEn [1]
+ */
+#define DF_LEGACY_MMIO_HOLE_EN BIT(1)
+
+/*
+ * Log2 Address 64K Space 0
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ *
+ * D18F2x90 [Non-power-of-2 channel Configuration Register for COH_ST DRAM Address Maps]
+ * DF3 Log2Addr64KSpace0 [5:0]
+ *
+ * DF3p5 N/A
+ * DF4 N/A
+ * DF4p5 N/A
+ */
+#define DF_LOG2_ADDR_64K_SPACE0 GENMASK(5, 0)
+
+/*
+ * Major Revision
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ * DF3 N/A
+ * DF3p5 N/A
+ *
+ * D18F0x040 [Fabric Block Instance Count]
+ * DF4 MajorRevision [27:24]
+ * DF4p5 MajorRevision [27:24]
+ */
+#define DF_MAJOR_REVISION GENMASK(27, 24)
+
+/*
+ * Minor Revision
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ * DF3 N/A
+ * DF3p5 N/A
+ *
+ * D18F0x040 [Fabric Block Instance Count]
+ * DF4 MinorRevision [23:16]
+ * DF4p5 MinorRevision [23:16]
+ */
+#define DF_MINOR_REVISION GENMASK(23, 16)
+
+/*
+ * Node ID Mask
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ *
+ * D18F1x208 [System Fabric ID Mask 0]
+ * DF3 NodeIdMask [25:16]
+ *
+ * D18F1x150 [System Fabric ID Mask 0]
+ * DF3p5 NodeIdMask [31:16]
+ *
+ * D18F4x1B0 [System Fabric ID Mask 0]
+ * DF4 NodeIdMask [31:16]
+ * DF4p5 NodeIdMask [31:16]
+ */
+#define DF3_NODE_ID_MASK GENMASK(25, 16)
+#define DF4_NODE_ID_MASK GENMASK(31, 16)
+
+/*
+ * Node ID Shift
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ *
+ * D18F1x20C [System Fabric ID Mask 1]
+ * DF3 NodeIdShift [3:0]
+ *
+ * D18F1x154 [System Fabric ID Mask 1]
+ * DF3p5 NodeIdShift [3:0]
+ *
+ * D18F4x1B4 [System Fabric ID Mask 1]
+ * DF4 NodeIdShift [3:0]
+ * DF4p5 NodeIdShift [3:0]
+ */
+#define DF3_NODE_ID_SHIFT GENMASK(3, 0)
+
+/*
+ * Remap Enable
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ * DF3 N/A
+ * DF3p5 N/A
+ *
+ * D18F7xE08 [DRAM Address Control]
+ * DF4 RemapEn [4]
+ *
+ * D18F7x208 [DRAM Address Control]
+ * DF4p5 RemapEn [4]
+ */
+#define DF4_REMAP_EN BIT(4)
+
+/*
+ * Remap Select
+ *
+ * Access type: Instance
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * DF2 N/A
+ * DF3 N/A
+ * DF3p5 N/A
+ *
+ * D18F7xE08 [DRAM Address Control]
+ * DF4 RemapSel [7:5]
+ *
+ * D18F7x208 [DRAM Address Control]
+ * DF4p5 RemapSel [6:5]
+ */
+#define DF4_REMAP_SEL GENMASK(7, 5)
+#define DF4p5_REMAP_SEL GENMASK(6, 5)
+
+/*
+ * Socket ID Mask
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F1x208 [System Fabric ID Mask]
+ * DF2 SocketIdMask [23:16]
+ *
+ * D18F1x20C [System Fabric ID Mask 1]
+ * DF3 SocketIdMask [26:24]
+ *
+ * D18F1x158 [System Fabric ID Mask 2]
+ * DF3p5 SocketIdMask [31:16]
+ *
+ * D18F4x1B8 [System Fabric ID Mask 2]
+ * DF4 SocketIdMask [31:16]
+ * DF4p5 SocketIdMask [31:16]
+ */
+#define DF2_SOCKET_ID_MASK GENMASK(23, 16)
+#define DF3_SOCKET_ID_MASK GENMASK(26, 24)
+#define DF4_SOCKET_ID_MASK GENMASK(31, 16)
+
+/*
+ * Socket ID Shift
+ *
+ * Access type: Broadcast
+ *
+ * Register
+ * Rev Fieldname Bits
+ *
+ * D18F1x208 [System Fabric ID Mask]
+ * DF2 SocketIdShift [31:28]
+ *
+ * D18F1x20C [System Fabric ID Mask 1]
+ * DF3 SocketIdShift [9:8]
+ *
+ * D18F1x158 [System Fabric ID Mask 2]
+ * DF3p5 SocketIdShift [11:8]
+ *
+ * D18F4x1B4 [System Fabric ID Mask 1]
+ * DF4 SocketIdShift [11:8]
+ * DF4p5 SocketIdShift [11:8]
+ */
+#define DF2_SOCKET_ID_SHIFT GENMASK(31, 28)
+#define DF3_SOCKET_ID_SHIFT GENMASK(9, 8)
+#define DF4_SOCKET_ID_SHIFT GENMASK(11, 8)
diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c
new file mode 100644
index 000000000000..701349e84942
--- /dev/null
+++ b/drivers/ras/amd/atl/system.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * system.c : Functions to read and save system-wide data
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include "internal.h"
+
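+/*
+ * Worked example with hypothetical config values (a sketch, not real
+ * hardware settings): assume, after apply_node_id_shift(),
+ *
+ *	node_id_shift   = 8
+ *	die_id_shift    = 8,  die_id_mask    = 0x300
+ *	socket_id_shift = 10, socket_id_mask = 0xC00
+ *
+ * Then for socket_id=2, die_id=1:
+ *
+ *	socket_id_bits = (2 << 10) & 0xC00 = 0x800
+ *	die_id_bits    = (1 << 8)  & 0x300 = 0x100
+ *	node_id        = (0x800 | 0x100) >> 8 = 9
+ */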
+int determine_node_id(struct addr_ctx *ctx, u8 socket_id, u8 die_id)
+{
+ u16 socket_id_bits, die_id_bits;
+
+ if (socket_id > 0 && df_cfg.socket_id_mask == 0) {
+ atl_debug(ctx, "Invalid socket inputs: socket_id=%u socket_id_mask=0x%x",
+ socket_id, df_cfg.socket_id_mask);
+ return -EINVAL;
+ }
+
+ /* Do each step independently to avoid shift out-of-bounds issues. */
+ socket_id_bits = socket_id;
+ socket_id_bits <<= df_cfg.socket_id_shift;
+ socket_id_bits &= df_cfg.socket_id_mask;
+
+ if (die_id > 0 && df_cfg.die_id_mask == 0) {
+ atl_debug(ctx, "Invalid die inputs: die_id=%u die_id_mask=0x%x",
+ die_id, df_cfg.die_id_mask);
+ return -EINVAL;
+ }
+
+ /* Do each step independently to avoid shift out-of-bounds issues. */
+ die_id_bits = die_id;
+ die_id_bits <<= df_cfg.die_id_shift;
+ die_id_bits &= df_cfg.die_id_mask;
+
+ ctx->node_id = (socket_id_bits | die_id_bits) >> df_cfg.node_id_shift;
+ return 0;
+}
+
+static void df2_get_masks_shifts(u32 mask0)
+{
+ df_cfg.socket_id_shift = FIELD_GET(DF2_SOCKET_ID_SHIFT, mask0);
+ df_cfg.socket_id_mask = FIELD_GET(DF2_SOCKET_ID_MASK, mask0);
+ df_cfg.die_id_shift = FIELD_GET(DF2_DIE_ID_SHIFT, mask0);
+ df_cfg.die_id_mask = FIELD_GET(DF2_DIE_ID_MASK, mask0);
+ df_cfg.node_id_shift = df_cfg.die_id_shift;
+ df_cfg.node_id_mask = df_cfg.socket_id_mask | df_cfg.die_id_mask;
+ df_cfg.component_id_mask = ~df_cfg.node_id_mask;
+}
+
+static void df3_get_masks_shifts(u32 mask0, u32 mask1)
+{
+ df_cfg.component_id_mask = FIELD_GET(DF3_COMPONENT_ID_MASK, mask0);
+ df_cfg.node_id_mask = FIELD_GET(DF3_NODE_ID_MASK, mask0);
+
+ df_cfg.node_id_shift = FIELD_GET(DF3_NODE_ID_SHIFT, mask1);
+ df_cfg.socket_id_shift = FIELD_GET(DF3_SOCKET_ID_SHIFT, mask1);
+ df_cfg.socket_id_mask = FIELD_GET(DF3_SOCKET_ID_MASK, mask1);
+ df_cfg.die_id_mask = FIELD_GET(DF3_DIE_ID_MASK, mask1);
+}
+
+static void df3p5_get_masks_shifts(u32 mask0, u32 mask1, u32 mask2)
+{
+ df_cfg.component_id_mask = FIELD_GET(DF4_COMPONENT_ID_MASK, mask0);
+ df_cfg.node_id_mask = FIELD_GET(DF4_NODE_ID_MASK, mask0);
+
+ df_cfg.node_id_shift = FIELD_GET(DF3_NODE_ID_SHIFT, mask1);
+ df_cfg.socket_id_shift = FIELD_GET(DF4_SOCKET_ID_SHIFT, mask1);
+
+ df_cfg.socket_id_mask = FIELD_GET(DF4_SOCKET_ID_MASK, mask2);
+ df_cfg.die_id_mask = FIELD_GET(DF4_DIE_ID_MASK, mask2);
+}
+
+static void df4_get_masks_shifts(u32 mask0, u32 mask1, u32 mask2)
+{
+ df3p5_get_masks_shifts(mask0, mask1, mask2);
+
+ if (!(df_cfg.flags.socket_id_shift_quirk && df_cfg.socket_id_shift == 1))
+ return;
+
+ df_cfg.socket_id_shift = 0;
+ df_cfg.socket_id_mask = 1;
+ df_cfg.die_id_shift = 0;
+ df_cfg.die_id_mask = 0;
+ df_cfg.node_id_shift = 8;
+ df_cfg.node_id_mask = 0x100;
+}
+
+static int df4_get_fabric_id_mask_registers(void)
+{
+ u32 mask0, mask1, mask2;
+
+ /* Read D18F4x1B0 (SystemFabricIdMask0) */
+ if (df_indirect_read_broadcast(0, 4, 0x1B0, &mask0))
+ return -EINVAL;
+
+ /* Read D18F4x1B4 (SystemFabricIdMask1) */
+ if (df_indirect_read_broadcast(0, 4, 0x1B4, &mask1))
+ return -EINVAL;
+
+ /* Read D18F4x1B8 (SystemFabricIdMask2) */
+ if (df_indirect_read_broadcast(0, 4, 0x1B8, &mask2))
+ return -EINVAL;
+
+ df4_get_masks_shifts(mask0, mask1, mask2);
+ return 0;
+}
+
+static int df4_determine_df_rev(u32 reg)
+{
+ df_cfg.rev = FIELD_GET(DF_MINOR_REVISION, reg) < 5 ? DF4 : DF4p5;
+
+	/* Check for special cases or quirks based on Device/Vendor IDs. */
+
+ /* Read D18F0x000 (DeviceVendorId0) */
+ if (df_indirect_read_broadcast(0, 0, 0, &reg))
+ return -EINVAL;
+
+ if (reg == DF_FUNC0_ID_ZEN4_SERVER)
+ df_cfg.flags.socket_id_shift_quirk = 1;
+
+ if (reg == DF_FUNC0_ID_MI300) {
+ df_cfg.flags.heterogeneous = 1;
+
+ if (get_addr_hash_mi300())
+ return -EINVAL;
+ }
+
+ return df4_get_fabric_id_mask_registers();
+}
+
+static int determine_df_rev_legacy(void)
+{
+ u32 fabric_id_mask0, fabric_id_mask1, fabric_id_mask2;
+
+ /*
+ * Check for DF3.5.
+ *
+ * Component ID Mask must be non-zero. Register D18F1x150 is
+ * reserved pre-DF3.5, so value will be Read-as-Zero.
+ */
+
+ /* Read D18F1x150 (SystemFabricIdMask0). */
+ if (df_indirect_read_broadcast(0, 1, 0x150, &fabric_id_mask0))
+ return -EINVAL;
+
+ if (FIELD_GET(DF4_COMPONENT_ID_MASK, fabric_id_mask0)) {
+ df_cfg.rev = DF3p5;
+
+ /* Read D18F1x154 (SystemFabricIdMask1) */
+ if (df_indirect_read_broadcast(0, 1, 0x154, &fabric_id_mask1))
+ return -EINVAL;
+
+ /* Read D18F1x158 (SystemFabricIdMask2) */
+ if (df_indirect_read_broadcast(0, 1, 0x158, &fabric_id_mask2))
+ return -EINVAL;
+
+ df3p5_get_masks_shifts(fabric_id_mask0, fabric_id_mask1, fabric_id_mask2);
+ return 0;
+ }
+
+ /*
+ * Check for DF3.
+ *
+ * Component ID Mask must be non-zero. Field is Read-as-Zero on DF2.
+ */
+
+ /* Read D18F1x208 (SystemFabricIdMask). */
+ if (df_indirect_read_broadcast(0, 1, 0x208, &fabric_id_mask0))
+ return -EINVAL;
+
+ if (FIELD_GET(DF3_COMPONENT_ID_MASK, fabric_id_mask0)) {
+ df_cfg.rev = DF3;
+
+ /* Read D18F1x20C (SystemFabricIdMask1) */
+ if (df_indirect_read_broadcast(0, 1, 0x20C, &fabric_id_mask1))
+ return -EINVAL;
+
+ df3_get_masks_shifts(fabric_id_mask0, fabric_id_mask1);
+ return 0;
+ }
+
+ /* Default to DF2. */
+ df_cfg.rev = DF2;
+ df2_get_masks_shifts(fabric_id_mask0);
+ return 0;
+}
+
+static int determine_df_rev(void)
+{
+ u32 reg;
+ u8 rev;
+
+ if (df_cfg.rev != UNKNOWN)
+ return 0;
+
+ /* Read D18F0x40 (FabricBlockInstanceCount). */
+ if (df_indirect_read_broadcast(0, 0, 0x40, &reg))
+ return -EINVAL;
+
+ /*
+ * Revision fields added for DF4 and later.
+ *
+ * Major revision of '0' is found pre-DF4. Field is Read-as-Zero.
+ */
+ rev = FIELD_GET(DF_MAJOR_REVISION, reg);
+ if (!rev)
+ return determine_df_rev_legacy();
+
+ /*
+ * Fail out for major revisions other than '4'.
+ *
+ * Explicit support should be added for newer systems to avoid issues.
+ */
+ if (rev == 4)
+ return df4_determine_df_rev(reg);
+
+ return -EINVAL;
+}
+
+static void get_num_maps(void)
+{
+ switch (df_cfg.rev) {
+ case DF2:
+ case DF3:
+ case DF3p5:
+ df_cfg.num_coh_st_maps = 2;
+ break;
+ case DF4:
+ case DF4p5:
+ df_cfg.num_coh_st_maps = 4;
+ break;
+ default:
+ atl_debug_on_bad_df_rev();
+ }
+}
+
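+/*
+ * The masks/shifts read from the registers are relative to the node ID
+ * field. Shift them to their absolute positions within the fabric ID,
+ * e.g. (hypothetical values) with node_id_shift=8, a register-reported
+ * die_id_mask=0x3 and socket_id_shift=2 become die_id_mask=0x300 and
+ * socket_id_shift=10.
+ */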
+static void apply_node_id_shift(void)
+{
+ if (df_cfg.rev == DF2)
+ return;
+
+ df_cfg.die_id_shift = df_cfg.node_id_shift;
+ df_cfg.die_id_mask <<= df_cfg.node_id_shift;
+ df_cfg.socket_id_mask <<= df_cfg.node_id_shift;
+ df_cfg.socket_id_shift += df_cfg.node_id_shift;
+}
+
+static void dump_df_cfg(void)
+{
+ pr_debug("rev=0x%x", df_cfg.rev);
+
+ pr_debug("component_id_mask=0x%x", df_cfg.component_id_mask);
+ pr_debug("die_id_mask=0x%x", df_cfg.die_id_mask);
+ pr_debug("node_id_mask=0x%x", df_cfg.node_id_mask);
+ pr_debug("socket_id_mask=0x%x", df_cfg.socket_id_mask);
+
+ pr_debug("die_id_shift=0x%x", df_cfg.die_id_shift);
+ pr_debug("node_id_shift=0x%x", df_cfg.node_id_shift);
+ pr_debug("socket_id_shift=0x%x", df_cfg.socket_id_shift);
+
+ pr_debug("num_coh_st_maps=%u", df_cfg.num_coh_st_maps);
+
+ pr_debug("flags.legacy_ficaa=%u", df_cfg.flags.legacy_ficaa);
+ pr_debug("flags.socket_id_shift_quirk=%u", df_cfg.flags.socket_id_shift_quirk);
+}
+
+int get_df_system_info(void)
+{
+ if (determine_df_rev()) {
+		pr_warn("amd_atl: Failed to determine DF Revision\n");
+ df_cfg.rev = UNKNOWN;
+ return -EINVAL;
+ }
+
+ apply_node_id_shift();
+
+ get_num_maps();
+
+ dump_df_cfg();
+
+ return 0;
+}
diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
new file mode 100644
index 000000000000..59b6169093f7
--- /dev/null
+++ b/drivers/ras/amd/atl/umc.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Address Translation Library
+ *
+ * umc.c : Unified Memory Controller (UMC) topology helpers
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Yazen Ghannam <Yazen.Ghannam@amd.com>
+ */
+
+#include "internal.h"
+
+/*
+ * MI300 has a fixed, model-specific mapping between a UMC instance and
+ * its related Data Fabric Coherent Station instance.
+ *
+ * The MCA_IPID_UMC[InstanceId] field holds a unique identifier for the
+ * UMC instance within a Node. Use this to find the appropriate Coherent
+ * Station ID.
+ *
+ * Redundant bits were removed from the map below.
+ */
+static const u16 umc_coh_st_map[32] = {
+ 0x393, 0x293, 0x193, 0x093,
+ 0x392, 0x292, 0x192, 0x092,
+ 0x391, 0x291, 0x191, 0x091,
+ 0x390, 0x290, 0x190, 0x090,
+ 0x793, 0x693, 0x593, 0x493,
+ 0x792, 0x692, 0x592, 0x492,
+ 0x791, 0x691, 0x591, 0x491,
+ 0x790, 0x690, 0x590, 0x490,
+};
+
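+/*
+ * E.g. an error reported with UMC instance ID 0x293 maps to Coherent
+ * Station 1, since umc_coh_st_map[1] == 0x293.
+ */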
+#define UMC_ID_MI300 GENMASK(23, 12)
+static u8 get_coh_st_inst_id_mi300(struct atl_err *err)
+{
+ u16 umc_id = FIELD_GET(UMC_ID_MI300, err->ipid);
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(umc_coh_st_map); i++) {
+ if (umc_id == umc_coh_st_map[i])
+ break;
+ }
+
+ WARN_ON_ONCE(i >= ARRAY_SIZE(umc_coh_st_map));
+
+ return i;
+}
+
+/* XOR the bits in @val. */
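+/*
+ * I.e. compute the parity of @val; e.g. bitwise_xor_bits(0x6) == 0 and
+ * bitwise_xor_bits(0x7) == 1.
+ */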
+static u16 bitwise_xor_bits(u16 val)
+{
+ u16 tmp = 0;
+ u8 i;
+
+ for (i = 0; i < 16; i++)
+ tmp ^= (val >> i) & 0x1;
+
+ return tmp;
+}
+
+struct xor_bits {
+ bool xor_enable;
+ u16 col_xor;
+ u32 row_xor;
+};
+
+#define NUM_BANK_BITS 4
+
+static struct {
+ /* UMC::CH::AddrHashBank */
+ struct xor_bits bank[NUM_BANK_BITS];
+
+ /* UMC::CH::AddrHashPC */
+ struct xor_bits pc;
+
+ /* UMC::CH::AddrHashPC2 */
+ u8 bank_xor;
+} addr_hash;
+
+#define MI300_UMC_CH_BASE 0x90000
+#define MI300_ADDR_HASH_BANK0 (MI300_UMC_CH_BASE + 0xC8)
+#define MI300_ADDR_HASH_PC (MI300_UMC_CH_BASE + 0xE0)
+#define MI300_ADDR_HASH_PC2 (MI300_UMC_CH_BASE + 0xE4)
+
+#define ADDR_HASH_XOR_EN BIT(0)
+#define ADDR_HASH_COL_XOR GENMASK(13, 1)
+#define ADDR_HASH_ROW_XOR GENMASK(31, 14)
+#define ADDR_HASH_BANK_XOR GENMASK(5, 0)
+
+/*
+ * Read UMC::CH::AddrHash{Bank,PC,PC2} registers to get XOR bits used
+ * for hashing. Do this during module init, since the values will not
+ * change during run time.
+ *
+ * These registers are instantiated for each UMC across each AMD Node.
+ * However, they should be identically programmed due to the fixed hardware
+ * design of MI300 systems. So read the values from Node 0 UMC 0 and keep a
+ * single global structure for simplicity.
+ */
+int get_addr_hash_mi300(void)
+{
+ u32 temp;
+ int ret;
+ u8 i;
+
+ for (i = 0; i < NUM_BANK_BITS; i++) {
+ ret = amd_smn_read(0, MI300_ADDR_HASH_BANK0 + (i * 4), &temp);
+ if (ret)
+ return ret;
+
+ addr_hash.bank[i].xor_enable = FIELD_GET(ADDR_HASH_XOR_EN, temp);
+ addr_hash.bank[i].col_xor = FIELD_GET(ADDR_HASH_COL_XOR, temp);
+ addr_hash.bank[i].row_xor = FIELD_GET(ADDR_HASH_ROW_XOR, temp);
+ }
+
+ ret = amd_smn_read(0, MI300_ADDR_HASH_PC, &temp);
+ if (ret)
+ return ret;
+
+ addr_hash.pc.xor_enable = FIELD_GET(ADDR_HASH_XOR_EN, temp);
+ addr_hash.pc.col_xor = FIELD_GET(ADDR_HASH_COL_XOR, temp);
+ addr_hash.pc.row_xor = FIELD_GET(ADDR_HASH_ROW_XOR, temp);
+
+ ret = amd_smn_read(0, MI300_ADDR_HASH_PC2, &temp);
+ if (ret)
+ return ret;
+
+ addr_hash.bank_xor = FIELD_GET(ADDR_HASH_BANK_XOR, temp);
+
+ return 0;
+}
+
+/*
+ * MI300 systems report a DRAM address in MCA_ADDR for DRAM ECC errors. This must
+ * be converted to the intermediate normalized address (NA) before translating to a
+ * system physical address.
+ *
+ * The DRAM address includes bank, row, and column. Also included are bits for
+ * pseudochannel (PC) and stack ID (SID).
+ *
+ * Abbreviations: (S)tack ID, (P)seudochannel, (R)ow, (B)ank, (C)olumn, (Z)ero
+ *
+ * The MCA address format is as follows:
+ * MCA_ADDR[27:0] = {S[1:0], P[0], R[14:0], B[3:0], C[4:0], Z[0]}
+ *
+ * The normalized address format is fixed in hardware and is as follows:
+ * NA[30:0] = {S[1:0], R[13:0], C4, B[1:0], B[3:2], C[3:2], P, C[1:0], Z[4:0]}
+ *
+ * Additionally, the PC and Bank bits may be hashed. This must be accounted for before
+ * reconstructing the normalized address.
+ */
+#define MI300_UMC_MCA_COL GENMASK(5, 1)
+#define MI300_UMC_MCA_BANK GENMASK(9, 6)
+#define MI300_UMC_MCA_ROW GENMASK(24, 10)
+#define MI300_UMC_MCA_PC BIT(25)
+#define MI300_UMC_MCA_SID GENMASK(27, 26)
+
+#define MI300_NA_COL_1_0 GENMASK(6, 5)
+#define MI300_NA_PC BIT(7)
+#define MI300_NA_COL_3_2 GENMASK(9, 8)
+#define MI300_NA_BANK_3_2 GENMASK(11, 10)
+#define MI300_NA_BANK_1_0 GENMASK(13, 12)
+#define MI300_NA_COL_4 BIT(14)
+#define MI300_NA_ROW GENMASK(28, 15)
+#define MI300_NA_SID GENMASK(30, 29)
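+
+/*
+ * Worked example with all hashing disabled (illustrative values):
+ *
+ *	MCA_ADDR = 0x0a0004ca
+ *	  -> Z=0, C[4:0]=0x05, B[3:0]=0x3, R[14:0]=0x0001, P=1, S[1:0]=0x2
+ *
+ * Repacked into the normalized address format above:
+ *
+ *	NA = {S=0x2, R=0x0001, C4=0, B[1:0]=0x3, B[3:2]=0x0, C[3:2]=0x1,
+ *	      P=1, C[1:0]=0x1, Z=0} = 0x4000b1a0
+ */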
+
+static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
+{
+ u16 i, col, row, bank, pc, sid, temp;
+
+ col = FIELD_GET(MI300_UMC_MCA_COL, addr);
+ bank = FIELD_GET(MI300_UMC_MCA_BANK, addr);
+ row = FIELD_GET(MI300_UMC_MCA_ROW, addr);
+ pc = FIELD_GET(MI300_UMC_MCA_PC, addr);
+ sid = FIELD_GET(MI300_UMC_MCA_SID, addr);
+
+ /* Calculate hash for each Bank bit. */
+ for (i = 0; i < NUM_BANK_BITS; i++) {
+ if (!addr_hash.bank[i].xor_enable)
+ continue;
+
+ temp = bitwise_xor_bits(col & addr_hash.bank[i].col_xor);
+ temp ^= bitwise_xor_bits(row & addr_hash.bank[i].row_xor);
+ bank ^= temp << i;
+ }
+
+ /* Calculate hash for PC bit. */
+ if (addr_hash.pc.xor_enable) {
+ /* Bits SID[1:0] act as Bank[6:5] for PC hash, so apply them here. */
+ bank |= sid << 5;
+
+ temp = bitwise_xor_bits(col & addr_hash.pc.col_xor);
+ temp ^= bitwise_xor_bits(row & addr_hash.pc.row_xor);
+ temp ^= bitwise_xor_bits(bank & addr_hash.bank_xor);
+ pc ^= temp;
+
+ /* Drop SID bits for the sake of debug printing later. */
+ bank &= 0x1F;
+ }
+
+ /* Reconstruct the normalized address starting with NA[4:0] = 0 */
+ addr = 0;
+
+ /* NA[6:5] = Column[1:0] */
+ temp = col & 0x3;
+ addr |= FIELD_PREP(MI300_NA_COL_1_0, temp);
+
+ /* NA[7] = PC */
+ addr |= FIELD_PREP(MI300_NA_PC, pc);
+
+ /* NA[9:8] = Column[3:2] */
+ temp = (col >> 2) & 0x3;
+ addr |= FIELD_PREP(MI300_NA_COL_3_2, temp);
+
+ /* NA[11:10] = Bank[3:2] */
+ temp = (bank >> 2) & 0x3;
+ addr |= FIELD_PREP(MI300_NA_BANK_3_2, temp);
+
+ /* NA[13:12] = Bank[1:0] */
+ temp = bank & 0x3;
+ addr |= FIELD_PREP(MI300_NA_BANK_1_0, temp);
+
+ /* NA[14] = Column[4] */
+ temp = (col >> 4) & 0x1;
+ addr |= FIELD_PREP(MI300_NA_COL_4, temp);
+
+ /* NA[28:15] = Row[13:0] */
+ addr |= FIELD_PREP(MI300_NA_ROW, row);
+
+ /* NA[30:29] = SID[1:0] */
+ addr |= FIELD_PREP(MI300_NA_SID, sid);
+
+ pr_debug("Addr=0x%016lx", addr);
+ pr_debug("Bank=%u Row=%u Column=%u PC=%u SID=%u", bank, row, col, pc, sid);
+
+ return addr;
+}
+
+/*
+ * When a DRAM ECC error occurs on MI300 systems, it is recommended to retire
+ * all memory within that DRAM row. This applies to the memory within a DRAM
+ * bank.
+ *
+ * To find the memory addresses, loop through permutations of the DRAM column
+ * bits and find the System Physical address of each. The column bits are used
+ * to calculate the intermediate Normalized address, so all permutations should
+ * be checked.
+ *
+ * See amd_atl::convert_dram_to_norm_addr_mi300() for MI300 address formats.
+ */
+#define MI300_NUM_COL BIT(HWEIGHT(MI300_UMC_MCA_COL))
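+/* MI300_UMC_MCA_COL spans 5 bits, so 2^5 = 32 column permutations. */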
+static void retire_row_mi300(struct atl_err *a_err)
+{
+ unsigned long addr;
+ struct page *p;
+ u8 col;
+
+ for (col = 0; col < MI300_NUM_COL; col++) {
+ a_err->addr &= ~MI300_UMC_MCA_COL;
+ a_err->addr |= FIELD_PREP(MI300_UMC_MCA_COL, col);
+
+ addr = amd_convert_umc_mca_addr_to_sys_addr(a_err);
+ if (IS_ERR_VALUE(addr))
+ continue;
+
+ addr = PHYS_PFN(addr);
+
+ /*
+ * Skip invalid or already poisoned pages to avoid unnecessary
+ * error messages from memory_failure().
+ */
+ p = pfn_to_online_page(addr);
+ if (!p)
+ continue;
+
+ if (PageHWPoison(p))
+ continue;
+
+ memory_failure(addr, 0);
+ }
+}
+
+void amd_retire_dram_row(struct atl_err *a_err)
+{
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ return retire_row_mi300(a_err);
+}
+EXPORT_SYMBOL_GPL(amd_retire_dram_row);
+
+static unsigned long get_addr(unsigned long addr)
+{
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ return convert_dram_to_norm_addr_mi300(addr);
+
+ return addr;
+}
+
+#define MCA_IPID_INST_ID_HI GENMASK_ULL(47, 44)
+static u8 get_die_id(struct atl_err *err)
+{
+ /*
+ * AMD Node ID is provided in MCA_IPID[InstanceIdHi], and this
+ * needs to be divided by 4 to get the internal Die ID.
+ */
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous) {
+ u8 node_id = FIELD_GET(MCA_IPID_INST_ID_HI, err->ipid);
+
+ return node_id >> 2;
+ }
+
+ /*
+ * For CPUs, this is the AMD Node ID modulo the number
+ * of AMD Nodes per socket.
+ */
+ return topology_amd_node_id(err->cpu) % topology_amd_nodes_per_pkg();
+}
+
+#define UMC_CHANNEL_NUM GENMASK(31, 20)
+static u8 get_coh_st_inst_id(struct atl_err *err)
+{
+ if (df_cfg.rev == DF4p5 && df_cfg.flags.heterogeneous)
+ return get_coh_st_inst_id_mi300(err);
+
+ return FIELD_GET(UMC_CHANNEL_NUM, err->ipid);
+}
+
+unsigned long convert_umc_mca_addr_to_sys_addr(struct atl_err *err)
+{
+ u8 socket_id = topology_physical_package_id(err->cpu);
+ u8 coh_st_inst_id = get_coh_st_inst_id(err);
+ unsigned long addr = get_addr(err->addr);
+ u8 die_id = get_die_id(err);
+
+ pr_debug("socket_id=0x%x die_id=0x%x coh_st_inst_id=0x%x addr=0x%016lx",
+ socket_id, die_id, coh_st_inst_id, addr);
+
+ return norm_to_sys_addr(socket_id, die_id, coh_st_inst_id, addr);
+}
diff --git a/drivers/ras/amd/fmpm.c b/drivers/ras/amd/fmpm.c
new file mode 100644
index 000000000000..2f4ac9591c8f
--- /dev/null
+++ b/drivers/ras/amd/fmpm.c
@@ -0,0 +1,1013 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * FRU (Field-Replaceable Unit) Memory Poison Manager
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors:
+ * Naveen Krishna Chatradhi <naveenkrishna.chatradhi@amd.com>
+ * Muralidhara M K <muralidhara.mk@amd.com>
+ * Yazen Ghannam <Yazen.Ghannam@amd.com>
+ *
+ * Implementation notes, assumptions, and limitations:
+ *
+ * - FRU memory poison section and memory poison descriptor definitions are not yet
+ * included in the UEFI specification. So they are defined here. Afterwards, they
+ * may be moved to linux/cper.h, if appropriate.
+ *
+ * - Platforms based on AMD MI300 systems will be the first to use these structures.
+ * There are a number of assumptions made here that will need to be generalized
+ * to support other platforms.
+ *
+ * AMD MI300-based platform(s) assumptions:
+ * - Memory errors are reported through x86 MCA.
+ * - The entire DRAM row containing a memory error should be retired.
+ * - There will be (1) FRU memory poison section per CPER.
+ * - The FRU will be the CPU package (processor socket).
+ * - The default number of memory poison descriptor entries should be (8).
+ * - The platform will use ACPI ERST for persistent storage.
+ * - All FRU records should be saved to persistent storage. Module init will
+ * fail if any FRU record is not successfully written.
+ *
+ * - Boot time memory retirement may occur later than ideal due to dependencies
+ * on other libraries and drivers. This leaves a gap where bad memory may be
+ * accessed during early boot stages.
+ *
+ * - Enough memory should be pre-allocated for each FRU record to be able to hold
+ * the expected number of descriptor entries. This, mostly empty, record is
+ * written to storage during init time. Subsequent writes to the same record
+ * should allow the Platform to update the stored record in-place. Otherwise,
+ * if the record is extended, then the Platform may need to perform costly memory
+ * management operations on the storage. For example, the Platform may spend time
+ * in Firmware copying and invalidating memory on a relatively slow SPI ROM.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cper.h>
+#include <linux/ras.h>
+#include <linux/cpu.h>
+
+#include <acpi/apei.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/mce.h>
+
+#include "../debugfs.h"
+
+#define INVALID_CPU UINT_MAX
+
+/* Validation Bits */
+#define FMP_VALID_ARCH_TYPE BIT_ULL(0)
+#define FMP_VALID_ARCH BIT_ULL(1)
+#define FMP_VALID_ID_TYPE BIT_ULL(2)
+#define FMP_VALID_ID BIT_ULL(3)
+#define FMP_VALID_LIST_ENTRIES BIT_ULL(4)
+#define FMP_VALID_LIST BIT_ULL(5)
+
+/* FRU Architecture Types */
+#define FMP_ARCH_TYPE_X86_CPUID_1_EAX 0
+
+/* FRU ID Types */
+#define FMP_ID_TYPE_X86_PPIN 0
+
+/* FRU Memory Poison Section */
+struct cper_sec_fru_mem_poison {
+ u32 checksum;
+ u64 validation_bits;
+ u32 fru_arch_type;
+ u64 fru_arch;
+ u32 fru_id_type;
+ u64 fru_id;
+ u32 nr_entries;
+} __packed;
+
+/* FRU Descriptor ID Types */
+#define FPD_HW_ID_TYPE_MCA_IPID 0
+
+/* FRU Descriptor Address Types */
+#define FPD_ADDR_TYPE_MCA_ADDR 0
+
+/* Memory Poison Descriptor */
+struct cper_fru_poison_desc {
+ u64 timestamp;
+ u32 hw_id_type;
+ u64 hw_id;
+ u32 addr_type;
+ u64 addr;
+} __packed;
+
+/* Collection of headers and sections for easy pointer use. */
+struct fru_rec {
+ struct cper_record_header hdr;
+ struct cper_section_descriptor sec_desc;
+ struct cper_sec_fru_mem_poison fmp;
+ struct cper_fru_poison_desc entries[];
+} __packed;
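+
+/*
+ * The flexible entries[] array is sized at allocation time; see
+ * get_system_info():
+ *
+ *	max_rec_len = sizeof(struct fru_rec) +
+ *		      max_nr_entries * sizeof(struct cper_fru_poison_desc);
+ */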
+
+/*
+ * Pointers to the complete CPER record of each FRU.
+ *
+ * Memory allocation will include padded space for descriptor entries.
+ */
+static struct fru_rec **fru_records;
+
+/* Array of system physical addresses, one slot per FRU descriptor entry. */
+static u64 *spa_entries;
+
+#define INVALID_SPA ~0ULL
+
+static struct dentry *fmpm_dfs_dir;
+static struct dentry *fmpm_dfs_entries;
+
+#define CPER_CREATOR_FMP \
+ GUID_INIT(0xcd5c2993, 0xf4b2, 0x41b2, 0xb5, 0xd4, 0xf9, 0xc3, \
+ 0xa0, 0x33, 0x08, 0x75)
+
+#define CPER_SECTION_TYPE_FMP \
+ GUID_INIT(0x5e4706c1, 0x5356, 0x48c6, 0x93, 0x0b, 0x52, 0xf2, \
+ 0x12, 0x0a, 0x44, 0x58)
+
+/**
+ * DOC: max_nr_entries (byte)
+ * Maximum number of descriptor entries possible for each FRU.
+ *
+ * Values between '1' and '255' are valid.
+ * No input or '0' will default to FMPM_DEFAULT_MAX_NR_ENTRIES.
+ */
+static u8 max_nr_entries;
+module_param(max_nr_entries, byte, 0644);
+MODULE_PARM_DESC(max_nr_entries,
+ "Maximum number of memory poison descriptor entries per FRU");
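+
+/*
+ * Example (assuming the module is built as fmpm.ko):
+ *
+ *	modprobe fmpm max_nr_entries=16
+ */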
+
+#define FMPM_DEFAULT_MAX_NR_ENTRIES 8
+
+/* Maximum number of FRUs in the system. */
+#define FMPM_MAX_NR_FRU 256
+static unsigned int max_nr_fru;
+
+/* Total length of record including headers and list of descriptor entries. */
+static size_t max_rec_len;
+
+/* Total number of SPA entries across all FRUs. */
+static unsigned int spa_nr_entries;
+
+/*
+ * Protect the local records cache in fru_records and prevent concurrent
+ * writes to storage. This is only needed after init, once notifier block
+ * registration is done.
+ *
+ * The majority of a record is fixed at module init and will not change
+ * during run time. The entries within a record will be updated as new
+ * errors are reported. The mutex should be held whenever the entries are
+ * accessed during run time.
+ */
+static DEFINE_MUTEX(fmpm_update_mutex);
+
+#define for_each_fru(i, rec) \
+	for (i = 0; i < max_nr_fru && (rec = fru_records[i], true); i++)
+
+static inline u32 get_fmp_len(struct fru_rec *rec)
+{
+ return rec->sec_desc.section_length - sizeof(struct cper_section_descriptor);
+}
+
+static struct fru_rec *get_fru_record(u64 fru_id)
+{
+ struct fru_rec *rec;
+ unsigned int i;
+
+ for_each_fru(i, rec) {
+ if (rec->fmp.fru_id == fru_id)
+ return rec;
+ }
+
+ pr_debug("Record not found for FRU 0x%016llx\n", fru_id);
+
+ return NULL;
+}
+
+/*
+ * Sum up all bytes within the FRU Memory Poison Section including the Memory
+ * Poison Descriptor entries.
+ *
+ * Don't include the old checksum here. It's a u32 value, so summing each of its
+ * bytes will give the wrong total.
+ */
+static u32 do_fmp_checksum(struct cper_sec_fru_mem_poison *fmp, u32 len)
+{
+ u32 checksum = 0;
+ u8 *buf, *end;
+
+ /* Skip old checksum. */
+ buf = (u8 *)fmp + sizeof(u32);
+ end = buf + len;
+
+ while (buf < end)
+ checksum += (u8)(*(buf++));
+
+ return checksum;
+}
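+
+/*
+ * The stored checksum is the two's complement of this sum (see
+ * update_record_on_storage()), so a valid section satisfies:
+ *
+ *	do_fmp_checksum(fmp, len) + fmp->checksum == 0
+ *
+ * E.g. a byte total of 0x1234 is stored as -0x1234 (0xffffedcc).
+ */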
+
+static int update_record_on_storage(struct fru_rec *rec)
+{
+ u32 len, checksum;
+ int ret;
+
+ /* Calculate a new checksum. */
+ len = get_fmp_len(rec);
+
+ /* Get the current total. */
+ checksum = do_fmp_checksum(&rec->fmp, len);
+
+ /* Use the complement value. */
+ rec->fmp.checksum = -checksum;
+
+ pr_debug("Writing to storage\n");
+
+ ret = erst_write(&rec->hdr);
+ if (ret) {
+ pr_warn("Storage update failed for FRU 0x%016llx\n", rec->fmp.fru_id);
+
+ if (ret == -ENOSPC)
+ pr_warn("Not enough space on storage\n");
+ }
+
+ return ret;
+}
+
+static bool rec_has_valid_entries(struct fru_rec *rec)
+{
+ if (!(rec->fmp.validation_bits & FMP_VALID_LIST_ENTRIES))
+ return false;
+
+ if (!(rec->fmp.validation_bits & FMP_VALID_LIST))
+ return false;
+
+ return true;
+}
+
+static bool fpds_equal(struct cper_fru_poison_desc *old, struct cper_fru_poison_desc *new)
+{
+ /*
+ * Ignore timestamp field.
+ * The same physical error may be reported multiple times due to stuck bits, etc.
+ *
+ * Also, order the checks from most->least likely to fail to shortcut the code.
+ */
+ if (old->addr != new->addr)
+ return false;
+
+ if (old->hw_id != new->hw_id)
+ return false;
+
+ if (old->addr_type != new->addr_type)
+ return false;
+
+ if (old->hw_id_type != new->hw_id_type)
+ return false;
+
+ return true;
+}
+
+static bool rec_has_fpd(struct fru_rec *rec, struct cper_fru_poison_desc *fpd)
+{
+ unsigned int i;
+
+ for (i = 0; i < rec->fmp.nr_entries; i++) {
+ struct cper_fru_poison_desc *fpd_i = &rec->entries[i];
+
+ if (fpds_equal(fpd_i, fpd)) {
+ pr_debug("Found duplicate record\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
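+/*
+ * spa_entries[] is a flattened [max_nr_fru][max_nr_entries] array. For
+ * example, with max_nr_entries=8, fru_idx=2 and entry=3 land in
+ * spa_entry = 2 * 8 + 3 = 19.
+ */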
+static void save_spa(struct fru_rec *rec, unsigned int entry,
+ u64 addr, u64 id, unsigned int cpu)
+{
+ unsigned int i, fru_idx, spa_entry;
+ struct atl_err a_err;
+ unsigned long spa;
+
+ if (entry >= max_nr_entries) {
+ pr_warn_once("FRU descriptor entry %d out-of-bounds (max: %d)\n",
+ entry, max_nr_entries);
+ return;
+ }
+
+	/* spa_nr_entries is always a multiple of max_nr_entries */
+ for (i = 0; i < spa_nr_entries; i += max_nr_entries) {
+ fru_idx = i / max_nr_entries;
+ if (fru_records[fru_idx] == rec)
+ break;
+ }
+
+ if (i >= spa_nr_entries) {
+ pr_warn_once("FRU record %d not found\n", i);
+ return;
+ }
+
+ spa_entry = i + entry;
+ if (spa_entry >= spa_nr_entries) {
+ pr_warn_once("spa_entries[] index out-of-bounds\n");
+ return;
+ }
+
+ memset(&a_err, 0, sizeof(struct atl_err));
+
+ a_err.addr = addr;
+ a_err.ipid = id;
+ a_err.cpu = cpu;
+
+ spa = amd_convert_umc_mca_addr_to_sys_addr(&a_err);
+ if (IS_ERR_VALUE(spa)) {
+ pr_debug("Failed to get system address\n");
+ return;
+ }
+
+ spa_entries[spa_entry] = spa;
+ pr_debug("fru_idx: %u, entry: %u, spa_entry: %u, spa: 0x%016llx\n",
+ fru_idx, entry, spa_entry, spa_entries[spa_entry]);
+}
+
+static void update_fru_record(struct fru_rec *rec, struct mce *m)
+{
+ struct cper_sec_fru_mem_poison *fmp = &rec->fmp;
+ struct cper_fru_poison_desc fpd, *fpd_dest;
+ u32 entry = 0;
+
+ mutex_lock(&fmpm_update_mutex);
+
+ memset(&fpd, 0, sizeof(struct cper_fru_poison_desc));
+
+ fpd.timestamp = m->time;
+ fpd.hw_id_type = FPD_HW_ID_TYPE_MCA_IPID;
+ fpd.hw_id = m->ipid;
+ fpd.addr_type = FPD_ADDR_TYPE_MCA_ADDR;
+ fpd.addr = m->addr;
+
+ /* This is the first entry, so just save it. */
+ if (!rec_has_valid_entries(rec))
+ goto save_fpd;
+
+ /* Ignore already recorded errors. */
+ if (rec_has_fpd(rec, &fpd))
+ goto out_unlock;
+
+ if (rec->fmp.nr_entries >= max_nr_entries) {
+ pr_warn("Exceeded number of entries for FRU 0x%016llx\n", rec->fmp.fru_id);
+ goto out_unlock;
+ }
+
+ entry = fmp->nr_entries;
+
+save_fpd:
+ save_spa(rec, entry, m->addr, m->ipid, m->extcpu);
+ fpd_dest = &rec->entries[entry];
+ memcpy(fpd_dest, &fpd, sizeof(struct cper_fru_poison_desc));
+
+ fmp->nr_entries = entry + 1;
+ fmp->validation_bits |= FMP_VALID_LIST_ENTRIES;
+ fmp->validation_bits |= FMP_VALID_LIST;
+
+ pr_debug("Updated FRU 0x%016llx entry #%u\n", fmp->fru_id, entry);
+
+ update_record_on_storage(rec);
+
+out_unlock:
+ mutex_unlock(&fmpm_update_mutex);
+}
+
+static void retire_dram_row(u64 addr, u64 id, u32 cpu)
+{
+ struct atl_err a_err;
+
+ memset(&a_err, 0, sizeof(struct atl_err));
+
+ a_err.addr = addr;
+ a_err.ipid = id;
+ a_err.cpu = cpu;
+
+ amd_retire_dram_row(&a_err);
+}
+
+static int fru_handle_mem_poison(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct mce *m = (struct mce *)data;
+ struct fru_rec *rec;
+
+ if (!mce_is_memory_error(m))
+ return NOTIFY_DONE;
+
+ retire_dram_row(m->addr, m->ipid, m->extcpu);
+
+ /*
+ * An invalid FRU ID should not happen on real errors. But it
+ * could happen from software error injection, etc.
+ */
+ rec = get_fru_record(m->ppin);
+ if (!rec)
+ return NOTIFY_DONE;
+
+ update_fru_record(rec, m);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fru_mem_poison_nb = {
+ .notifier_call = fru_handle_mem_poison,
+ .priority = MCE_PRIO_LOWEST,
+};
+
+static void retire_mem_fmp(struct fru_rec *rec)
+{
+ struct cper_sec_fru_mem_poison *fmp = &rec->fmp;
+ unsigned int i, cpu;
+
+ for (i = 0; i < fmp->nr_entries; i++) {
+ struct cper_fru_poison_desc *fpd = &rec->entries[i];
+ unsigned int err_cpu = INVALID_CPU;
+
+ if (fpd->hw_id_type != FPD_HW_ID_TYPE_MCA_IPID)
+ continue;
+
+ if (fpd->addr_type != FPD_ADDR_TYPE_MCA_ADDR)
+ continue;
+
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ if (topology_ppin(cpu) == fmp->fru_id) {
+ err_cpu = cpu;
+ break;
+ }
+ }
+ cpus_read_unlock();
+
+ if (err_cpu == INVALID_CPU)
+ continue;
+
+ retire_dram_row(fpd->addr, fpd->hw_id, err_cpu);
+ save_spa(rec, i, fpd->addr, fpd->hw_id, err_cpu);
+ }
+}
+
+static void retire_mem_records(void)
+{
+ struct fru_rec *rec;
+ unsigned int i;
+
+ for_each_fru(i, rec) {
+ if (!rec_has_valid_entries(rec))
+ continue;
+
+ retire_mem_fmp(rec);
+ }
+}
+
+/* Set the CPER Record Header and CPER Section Descriptor fields. */
+static void set_rec_fields(struct fru_rec *rec)
+{
+ struct cper_section_descriptor *sec_desc = &rec->sec_desc;
+ struct cper_record_header *hdr = &rec->hdr;
+
+ memcpy(hdr->signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
+ hdr->revision = CPER_RECORD_REV;
+ hdr->signature_end = CPER_SIG_END;
+
+ /*
+ * Currently, it is assumed that there is one FRU Memory Poison
+ * section per CPER. But this may change for other implementations.
+ */
+ hdr->section_count = 1;
+
+ /* The logged errors are recoverable. Otherwise, they'd never make it here. */
+ hdr->error_severity = CPER_SEV_RECOVERABLE;
+
+ hdr->validation_bits = 0;
+ hdr->record_length = max_rec_len;
+ hdr->creator_id = CPER_CREATOR_FMP;
+ hdr->notification_type = CPER_NOTIFY_MCE;
+ hdr->record_id = cper_next_record_id();
+ hdr->flags = CPER_HW_ERROR_FLAGS_PREVERR;
+
+ sec_desc->section_offset = sizeof(struct cper_record_header);
+ sec_desc->section_length = max_rec_len - sizeof(struct cper_record_header);
+ sec_desc->revision = CPER_SEC_REV;
+ sec_desc->validation_bits = 0;
+ sec_desc->flags = CPER_SEC_PRIMARY;
+ sec_desc->section_type = CPER_SECTION_TYPE_FMP;
+ sec_desc->section_severity = CPER_SEV_RECOVERABLE;
+}
+
+static int save_new_records(void)
+{
+	/* Must start zeroed: out_clear below only handles bits set here. */
+	DECLARE_BITMAP(new_records, FMPM_MAX_NR_FRU) = { };
+ struct fru_rec *rec;
+ unsigned int i;
+ int ret = 0;
+
+ for_each_fru(i, rec) {
+ if (rec->hdr.record_length)
+ continue;
+
+ set_rec_fields(rec);
+
+ ret = update_record_on_storage(rec);
+ if (ret)
+ goto out_clear;
+
+ set_bit(i, new_records);
+ }
+
+ return ret;
+
+out_clear:
+ for_each_fru(i, rec) {
+ if (!test_bit(i, new_records))
+ continue;
+
+ erst_clear(rec->hdr.record_id);
+ }
+
+ return ret;
+}
+
+/* Check that the record matches expected types for the current system. */
+static bool fmp_is_usable(struct fru_rec *rec)
+{
+ struct cper_sec_fru_mem_poison *fmp = &rec->fmp;
+ u64 cpuid;
+
+ pr_debug("Validation bits: 0x%016llx\n", fmp->validation_bits);
+
+ if (!(fmp->validation_bits & FMP_VALID_ARCH_TYPE)) {
+ pr_debug("Arch type unknown\n");
+ return false;
+ }
+
+ if (fmp->fru_arch_type != FMP_ARCH_TYPE_X86_CPUID_1_EAX) {
+ pr_debug("Arch type not 'x86 Family/Model/Stepping'\n");
+ return false;
+ }
+
+ if (!(fmp->validation_bits & FMP_VALID_ARCH)) {
+ pr_debug("Arch value unknown\n");
+ return false;
+ }
+
+ cpuid = cpuid_eax(1);
+ if (fmp->fru_arch != cpuid) {
+ pr_debug("Arch value mismatch: record = 0x%016llx, system = 0x%016llx\n",
+ fmp->fru_arch, cpuid);
+ return false;
+ }
+
+ if (!(fmp->validation_bits & FMP_VALID_ID_TYPE)) {
+ pr_debug("FRU ID type unknown\n");
+ return false;
+ }
+
+ if (fmp->fru_id_type != FMP_ID_TYPE_X86_PPIN) {
+ pr_debug("FRU ID type is not 'x86 PPIN'\n");
+ return false;
+ }
+
+ if (!(fmp->validation_bits & FMP_VALID_ID)) {
+ pr_debug("FRU ID value unknown\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool fmp_is_valid(struct fru_rec *rec)
+{
+ struct cper_sec_fru_mem_poison *fmp = &rec->fmp;
+ u32 checksum, len;
+
+ len = get_fmp_len(rec);
+ if (len < sizeof(struct cper_sec_fru_mem_poison)) {
+ pr_debug("fmp length is too small\n");
+ return false;
+ }
+
+ /* Checksum must sum to zero for the entire section. */
+ checksum = do_fmp_checksum(fmp, len) + fmp->checksum;
+ if (checksum) {
+ pr_debug("fmp checksum failed: sum = 0x%x\n", checksum);
+ print_hex_dump_debug("fmp record: ", DUMP_PREFIX_NONE, 16, 1, fmp, len, false);
+ return false;
+ }
+
+ if (!fmp_is_usable(rec))
+ return false;
+
+ return true;
+}
+
+static struct fru_rec *get_valid_record(struct fru_rec *old)
+{
+ struct fru_rec *new;
+
+ if (!fmp_is_valid(old)) {
+ pr_debug("Ignoring invalid record\n");
+ return NULL;
+ }
+
+ new = get_fru_record(old->fmp.fru_id);
+ if (!new)
+ pr_debug("Ignoring record for absent FRU\n");
+
+ return new;
+}
+
+/*
+ * Fetch saved records from persistent storage.
+ *
+ * For each found record:
+ * - If it was not created by this module, then ignore it.
+ * - If it is valid, then copy its data to the local cache.
+ * - If it is not valid, then erase it.
+ */
+static int get_saved_records(void)
+{
+ struct fru_rec *old, *new;
+ u64 record_id;
+ int ret, pos;
+ ssize_t len;
+
+ /*
+ * Assume saved records match current max size.
+ *
+ * However, this may not be true depending on module parameters.
+ */
+ old = kmalloc(max_rec_len, GFP_KERNEL);
+ if (!old) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = erst_get_record_id_begin(&pos);
+ if (ret < 0)
+ goto out_end;
+
+ while (!erst_get_record_id_next(&pos, &record_id)) {
+ if (record_id == APEI_ERST_INVALID_RECORD_ID)
+ goto out_end;
+ /*
+		 * Make sure to clear the temporary buffer between reads to avoid
+ * leftover data from records of various sizes.
+ */
+ memset(old, 0, max_rec_len);
+
+ len = erst_read_record(record_id, &old->hdr, max_rec_len,
+ sizeof(struct fru_rec), &CPER_CREATOR_FMP);
+ if (len < 0)
+ continue;
+
+ if (len > max_rec_len) {
+ pr_debug("Found record larger than max_rec_len\n");
+ continue;
+ }
+
+		new = get_valid_record(old);
+		if (!new) {
+			erst_clear(record_id);
+			continue;
+		}
+
+		/* Restore the record */
+		memcpy(new, old, len);
+ }
+
+out_end:
+ erst_get_record_id_end();
+ kfree(old);
+out:
+ return ret;
+}
+
+static void set_fmp_fields(struct fru_rec *rec, unsigned int cpu)
+{
+ struct cper_sec_fru_mem_poison *fmp = &rec->fmp;
+
+ fmp->fru_arch_type = FMP_ARCH_TYPE_X86_CPUID_1_EAX;
+ fmp->validation_bits |= FMP_VALID_ARCH_TYPE;
+
+ /* Assume all CPUs in the system have the same value for now. */
+ fmp->fru_arch = cpuid_eax(1);
+ fmp->validation_bits |= FMP_VALID_ARCH;
+
+ fmp->fru_id_type = FMP_ID_TYPE_X86_PPIN;
+ fmp->validation_bits |= FMP_VALID_ID_TYPE;
+
+ fmp->fru_id = topology_ppin(cpu);
+ fmp->validation_bits |= FMP_VALID_ID;
+}
+
+static int init_fmps(void)
+{
+ struct fru_rec *rec;
+ unsigned int i, cpu;
+ int ret = 0;
+
+ for_each_fru(i, rec) {
+ unsigned int fru_cpu = INVALID_CPU;
+
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ if (topology_physical_package_id(cpu) == i) {
+ fru_cpu = cpu;
+ break;
+ }
+ }
+ cpus_read_unlock();
+
+ if (fru_cpu == INVALID_CPU) {
+ pr_debug("Failed to find matching CPU for FRU #%u\n", i);
+ ret = -ENODEV;
+ break;
+ }
+
+ set_fmp_fields(rec, fru_cpu);
+ }
+
+ return ret;
+}
+
+static int get_system_info(void)
+{
+ /* Only load on MI300A systems for now. */
+ if (!(boot_cpu_data.x86_model >= 0x90 &&
+ boot_cpu_data.x86_model <= 0x9f))
+ return -ENODEV;
+
+ if (!cpu_feature_enabled(X86_FEATURE_AMD_PPIN)) {
+ pr_debug("PPIN feature not available\n");
+ return -ENODEV;
+ }
+
+ /* Use CPU socket as FRU for MI300 systems. */
+ max_nr_fru = topology_max_packages();
+ if (!max_nr_fru)
+ return -ENODEV;
+
+ if (max_nr_fru > FMPM_MAX_NR_FRU) {
+ pr_warn("Too many FRUs to manage: found: %u, max: %u\n",
+ max_nr_fru, FMPM_MAX_NR_FRU);
+ return -ENODEV;
+ }
+
+ if (!max_nr_entries)
+ max_nr_entries = FMPM_DEFAULT_MAX_NR_ENTRIES;
+
+ spa_nr_entries = max_nr_fru * max_nr_entries;
+
+ max_rec_len = sizeof(struct fru_rec);
+ max_rec_len += sizeof(struct cper_fru_poison_desc) * max_nr_entries;
+
+ pr_info("max FRUs: %u, max entries: %u, max record length: %lu\n",
+ max_nr_fru, max_nr_entries, max_rec_len);
+
+ return 0;
+}
+
+static void free_records(void)
+{
+ struct fru_rec *rec;
+ int i;
+
+ for_each_fru(i, rec)
+ kfree(rec);
+
+ kfree(fru_records);
+ kfree(spa_entries);
+}
+
+static int allocate_records(void)
+{
+ int i, ret = 0;
+
+ fru_records = kcalloc(max_nr_fru, sizeof(struct fru_rec *), GFP_KERNEL);
+ if (!fru_records) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < max_nr_fru; i++) {
+ fru_records[i] = kzalloc(max_rec_len, GFP_KERNEL);
+ if (!fru_records[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ }
+
+ spa_entries = kcalloc(spa_nr_entries, sizeof(u64), GFP_KERNEL);
+ if (!spa_entries) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ for (i = 0; i < spa_nr_entries; i++)
+ spa_entries[i] = INVALID_SPA;
+
+ return ret;
+
+out_free:
+ while (--i >= 0)
+ kfree(fru_records[i]);
+
+ kfree(fru_records);
+out:
+ return ret;
+}
+
+static void *fmpm_start(struct seq_file *f, loff_t *pos)
+{
+ if (*pos >= (spa_nr_entries + 1))
+ return NULL;
+ return pos;
+}
+
+static void *fmpm_next(struct seq_file *f, void *data, loff_t *pos)
+{
+ if (++(*pos) >= (spa_nr_entries + 1))
+ return NULL;
+ return pos;
+}
+
+static void fmpm_stop(struct seq_file *f, void *data)
+{
+}
+
+#define SHORT_WIDTH 8
+#define U64_WIDTH 18
+#define TIMESTAMP_WIDTH 19
+#define LONG_WIDTH 24
+#define U64_PAD (LONG_WIDTH - U64_WIDTH)
+#define TS_PAD (LONG_WIDTH - TIMESTAMP_WIDTH)
+static int fmpm_show(struct seq_file *f, void *data)
+{
+ unsigned int fru_idx, entry, spa_entry, line;
+ struct cper_fru_poison_desc *fpd;
+ struct fru_rec *rec;
+
+ line = *(loff_t *)data;
+ if (line == 0) {
+ seq_printf(f, "%-*s", SHORT_WIDTH, "fru_idx");
+ seq_printf(f, "%-*s", LONG_WIDTH, "fru_id");
+ seq_printf(f, "%-*s", SHORT_WIDTH, "entry");
+ seq_printf(f, "%-*s", LONG_WIDTH, "timestamp");
+ seq_printf(f, "%-*s", LONG_WIDTH, "hw_id");
+ seq_printf(f, "%-*s", LONG_WIDTH, "addr");
+ seq_printf(f, "%-*s", LONG_WIDTH, "spa");
+ goto out_newline;
+ }
+
+ spa_entry = line - 1;
+ fru_idx = spa_entry / max_nr_entries;
+ entry = spa_entry % max_nr_entries;
+
+ rec = fru_records[fru_idx];
+ if (!rec)
+ goto out;
+
+ seq_printf(f, "%-*u", SHORT_WIDTH, fru_idx);
+ seq_printf(f, "0x%016llx%-*s", rec->fmp.fru_id, U64_PAD, "");
+ seq_printf(f, "%-*u", SHORT_WIDTH, entry);
+
+ mutex_lock(&fmpm_update_mutex);
+
+ if (entry >= rec->fmp.nr_entries) {
+ seq_printf(f, "%-*s", LONG_WIDTH, "*");
+ seq_printf(f, "%-*s", LONG_WIDTH, "*");
+ seq_printf(f, "%-*s", LONG_WIDTH, "*");
+ seq_printf(f, "%-*s", LONG_WIDTH, "*");
+ goto out_unlock;
+ }
+
+ fpd = &rec->entries[entry];
+
+ seq_printf(f, "%ptT%-*s", &fpd->timestamp, TS_PAD, "");
+ seq_printf(f, "0x%016llx%-*s", fpd->hw_id, U64_PAD, "");
+ seq_printf(f, "0x%016llx%-*s", fpd->addr, U64_PAD, "");
+
+ if (spa_entries[spa_entry] == INVALID_SPA)
+ seq_printf(f, "%-*s", LONG_WIDTH, "*");
+ else
+ seq_printf(f, "0x%016llx%-*s", spa_entries[spa_entry], U64_PAD, "");
+
+out_unlock:
+ mutex_unlock(&fmpm_update_mutex);
+out_newline:
+ seq_putc(f, '\n');
+out:
+ return 0;
+}
+
+static const struct seq_operations fmpm_seq_ops = {
+ .start = fmpm_start,
+ .next = fmpm_next,
+ .stop = fmpm_stop,
+ .show = fmpm_show,
+};
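+
+/*
+ * The "entries" file prints a header line followed by one line per
+ * spa_entries[] slot, e.g. (illustrative):
+ *
+ *	fru_idx fru_id                  entry   timestamp ...
+ *	0       0x0123456789abcdef      0       2024-01-01T00:00:00 ...
+ *
+ * Fields with no data yet are printed as '*'.
+ */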
+
+static int fmpm_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &fmpm_seq_ops);
+}
+
+static const struct file_operations fmpm_fops = {
+ .open = fmpm_open,
+ .release = seq_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void setup_debugfs(void)
+{
+ struct dentry *dfs = ras_get_debugfs_root();
+
+ if (!dfs)
+ return;
+
+ fmpm_dfs_dir = debugfs_create_dir("fmpm", dfs);
+ if (!fmpm_dfs_dir)
+ return;
+
+ fmpm_dfs_entries = debugfs_create_file("entries", 0400, fmpm_dfs_dir, NULL, &fmpm_fops);
+ if (!fmpm_dfs_entries)
+ debugfs_remove(fmpm_dfs_dir);
+}
+
+static const struct x86_cpu_id fmpm_cpuids[] = {
+ X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL),
+ { }
+};
+MODULE_DEVICE_TABLE(x86cpu, fmpm_cpuids);
+
+static int __init fru_mem_poison_init(void)
+{
+ int ret;
+
+ if (!x86_match_cpu(fmpm_cpuids)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (erst_disable) {
+ pr_debug("ERST not available\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = get_system_info();
+ if (ret)
+ goto out;
+
+ ret = allocate_records();
+ if (ret)
+ goto out;
+
+ ret = init_fmps();
+ if (ret)
+ goto out_free;
+
+ ret = get_saved_records();
+ if (ret)
+ goto out_free;
+
+ ret = save_new_records();
+ if (ret)
+ goto out_free;
+
+ setup_debugfs();
+
+ retire_mem_records();
+
+ mce_register_decode_chain(&fru_mem_poison_nb);
+
+ pr_info("FRU Memory Poison Manager initialized\n");
+ return 0;
+
+out_free:
+ free_records();
+out:
+ return ret;
+}
+
+static void __exit fru_mem_poison_exit(void)
+{
+ mce_unregister_decode_chain(&fru_mem_poison_nb);
+ debugfs_remove(fmpm_dfs_dir);
+ free_records();
+}
+
+module_init(fru_mem_poison_init);
+module_exit(fru_mem_poison_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FRU Memory Poison Manager");
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index 321af498ee11..e440b15fbabc 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -480,9 +480,15 @@ DEFINE_SHOW_ATTRIBUTE(array);
static int __init create_debugfs_nodes(void)
{
- struct dentry *d, *pfn, *decay, *count, *array;
+ struct dentry *d, *pfn, *decay, *count, *array, *dfs;
- d = debugfs_create_dir("cec", ras_debugfs_dir);
+ dfs = ras_get_debugfs_root();
+ if (!dfs) {
+ pr_warn("Error getting RAS debugfs root!\n");
+ return -1;
+ }
+
+ d = debugfs_create_dir("cec", dfs);
if (!d) {
pr_warn("Error creating cec debugfs node!\n");
return -1;
diff --git a/drivers/ras/debugfs.c b/drivers/ras/debugfs.c
index ffb973c328e3..42afd3de68b2 100644
--- a/drivers/ras/debugfs.c
+++ b/drivers/ras/debugfs.c
@@ -3,10 +3,16 @@
#include <linux/ras.h>
#include "debugfs.h"
-struct dentry *ras_debugfs_dir;
+static struct dentry *ras_debugfs_dir;
static atomic_t trace_count = ATOMIC_INIT(0);
+struct dentry *ras_get_debugfs_root(void)
+{
+ return ras_debugfs_dir;
+}
+EXPORT_SYMBOL_GPL(ras_get_debugfs_root);
+
int ras_userspace_consumers(void)
{
return atomic_read(&trace_count);
diff --git a/drivers/ras/debugfs.h b/drivers/ras/debugfs.h
index c07443b462ad..4749ccdeeba1 100644
--- a/drivers/ras/debugfs.h
+++ b/drivers/ras/debugfs.h
@@ -4,6 +4,6 @@
#include <linux/debugfs.h>
-extern struct dentry *ras_debugfs_dir;
+struct dentry *ras_get_debugfs_root(void);
#endif /* __RAS_DEBUGFS_H__ */
diff --git a/drivers/ras/ras.c b/drivers/ras/ras.c
index 95540ea8dd9d..a6e4792a1b2e 100644
--- a/drivers/ras/ras.c
+++ b/drivers/ras/ras.c
@@ -10,6 +10,37 @@
#include <linux/ras.h>
#include <linux/uuid.h>
+#if IS_ENABLED(CONFIG_AMD_ATL)
+/*
+ * Once set, this function pointer should never be unset.
+ *
+ * The library module will set this pointer if it successfully loads. The module
+ * should not be unloaded except for testing and debug purposes.
+ */
+static unsigned long (*amd_atl_umc_na_to_spa)(struct atl_err *err);
+
+void amd_atl_register_decoder(unsigned long (*f)(struct atl_err *))
+{
+ amd_atl_umc_na_to_spa = f;
+}
+EXPORT_SYMBOL_GPL(amd_atl_register_decoder);
+
+void amd_atl_unregister_decoder(void)
+{
+ amd_atl_umc_na_to_spa = NULL;
+}
+EXPORT_SYMBOL_GPL(amd_atl_unregister_decoder);
+
+unsigned long amd_convert_umc_mca_addr_to_sys_addr(struct atl_err *err)
+{
+ if (!amd_atl_umc_na_to_spa)
+ return -EINVAL;
+
+ return amd_atl_umc_na_to_spa(err);
+}
+EXPORT_SYMBOL_GPL(amd_convert_umc_mca_addr_to_sys_addr);
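+
+/*
+ * Expected usage from the ATL module (a sketch; the actual call sites
+ * live under drivers/ras/amd/atl/):
+ *
+ *	amd_atl_register_decoder(convert_umc_mca_addr_to_sys_addr);
+ *	...
+ *	amd_atl_unregister_decoder();
+ */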
+#endif /* CONFIG_AMD_ATL */
+
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../../include/ras
#include <ras/ras_event.h>
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index a968dabb48f5..d019ca6dee9b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3932,7 +3932,6 @@ static int regulator_get_optimal_voltage(struct regulator_dev *rdev,
if (ret < 0)
return ret;
- possible_uV = desired_min_uV;
done = true;
goto finish;
@@ -5891,7 +5890,7 @@ static const struct dev_pm_ops __maybe_unused regulator_pm_ops = {
};
#endif
-struct class regulator_class = {
+const struct class regulator_class = {
.name = "regulator",
.dev_release = regulator_dev_release,
.dev_groups = regulator_dev_groups,
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 8fd9ac787588..352547c375bd 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
@@ -413,31 +412,35 @@ static struct da9055_regulator_info da9055_regulator_info[] = {
* GPIO can control regulator state and/or select the regulator register
* set A/B for voltage ramping.
*/
-static int da9055_gpio_init(struct da9055_regulator *regulator,
+static int da9055_gpio_init(struct device *dev,
+ struct da9055_regulator *regulator,
struct regulator_config *config,
struct da9055_pdata *pdata, int id)
{
struct da9055_regulator_info *info = regulator->info;
+ struct gpio_desc *ren;
+ struct gpio_desc *ena;
+ struct gpio_desc *rsel;
int ret = 0;
- if (!pdata)
- return 0;
+ /* Look for "regulator-enable-gpios" GPIOs in the regulator node */
+ ren = devm_gpiod_get_optional(dev, "regulator-enable", GPIOD_IN);
+ if (IS_ERR(ren))
+ return PTR_ERR(ren);
- if (pdata->gpio_ren && pdata->gpio_ren[id]) {
- char name[18];
- int gpio_mux = pdata->gpio_ren[id];
+ if (ren) {
+ /* This GPIO is not optional at this point */
+ ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(ena))
+ return PTR_ERR(ena);
- config->ena_gpiod = pdata->ena_gpiods[id];
+ config->ena_gpiod = ena;
/*
* GPI pin is muxed with regulator to control the
* regulator state.
*/
- sprintf(name, "DA9055 GPI %d", gpio_mux);
- ret = devm_gpio_request_one(config->dev, gpio_mux, GPIOF_DIR_IN,
- name);
- if (ret < 0)
- goto err;
+ gpiod_set_consumer_name(ren, "DA9055 ren GPI");
/*
* Let the regulator know that its state is controlled
@@ -448,24 +451,22 @@ static int da9055_gpio_init(struct da9055_regulator *regulator,
pdata->reg_ren[id]
<< DA9055_E_GPI_SHIFT);
if (ret < 0)
- goto err;
+ return ret;
}
- if (pdata->gpio_rsel && pdata->gpio_rsel[id]) {
- char name[18];
- int gpio_mux = pdata->gpio_rsel[id];
+ /* Look for "regulator-select-gpios" GPIOs in the regulator node */
+ rsel = devm_gpiod_get_optional(dev, "regulator-select", GPIOD_IN);
+ if (IS_ERR(rsel))
+ return PTR_ERR(rsel);
+ if (rsel) {
regulator->reg_rselect = pdata->reg_rsel[id];
/*
* GPI pin is muxed with regulator to select the
* regulator register set A/B for voltage ramping.
*/
- sprintf(name, "DA9055 GPI %d", gpio_mux);
- ret = devm_gpio_request_one(config->dev, gpio_mux, GPIOF_DIR_IN,
- name);
- if (ret < 0)
- goto err;
+ gpiod_set_consumer_name(rsel, "DA9055 rsel GPI");
/*
* Let the regulator know that its register set A/B
@@ -477,7 +478,6 @@ static int da9055_gpio_init(struct da9055_regulator *regulator,
<< DA9055_V_GPI_SHIFT);
}
-err:
return ret;
}
@@ -532,7 +532,7 @@ static int da9055_regulator_probe(struct platform_device *pdev)
if (pdata)
config.init_data = pdata->regulators[pdev->id];
- ret = da9055_gpio_init(regulator, &config, pdata, pdev->id);
+ ret = da9055_gpio_init(&pdev->dev, regulator, &config, pdata, pdev->id);
if (ret < 0)
return ret;
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index b551a400bdd1..5ee76b533576 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -14,7 +14,6 @@
// Copyright (C) 2020 Dialog Semiconductor
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
diff --git a/drivers/regulator/fixed-helper.c b/drivers/regulator/fixed-helper.c
index 0eb2442456f0..2d5a42b2b3d8 100644
--- a/drivers/regulator/fixed-helper.c
+++ b/drivers/regulator/fixed-helper.c
@@ -15,7 +15,7 @@ static void regulator_fixed_release(struct device *dev)
{
struct fixed_regulator_data *data = container_of(dev,
struct fixed_regulator_data, pdev.dev);
- kfree(data->cfg.supply_name);
+ kfree_const(data->cfg.supply_name);
kfree(data);
}
@@ -36,7 +36,7 @@ struct platform_device *regulator_register_always_on(int id, const char *name,
if (!data)
return NULL;
- data->cfg.supply_name = kstrdup(name, GFP_KERNEL);
+ data->cfg.supply_name = kstrdup_const(name, GFP_KERNEL);
if (!data->cfg.supply_name) {
kfree(data);
return NULL;
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index fb4433068d29..77a502141089 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -58,7 +58,7 @@ struct regulator {
struct dentry *debugfs;
};
-extern struct class regulator_class;
+extern const struct class regulator_class;
static inline struct regulator_dev *dev_to_rdev(struct device *dev)
{
diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c
index 8dfdd1db2070..84a134cfcd9c 100644
--- a/drivers/regulator/lp873x-regulator.c
+++ b/drivers/regulator/lp873x-regulator.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -93,7 +94,7 @@ static int lp873x_buck_set_ramp_delay(struct regulator_dev *rdev,
ret = regmap_update_bits(lp873->regmap, regulators[id].ctrl2_reg,
LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE,
- reg << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE));
+ FIELD_PREP(LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE, reg));
if (ret) {
dev_err(lp873->dev, "SLEW RATE write failed: %d\n", ret);
return ret;
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index 61ee5cf3f241..1259b5d20153 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -5,6 +5,7 @@
* Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -99,7 +100,7 @@ static int lp87565_buck_set_ramp_delay(struct regulator_dev *rdev,
ret = regmap_update_bits(rdev->regmap, regulators[id].ctrl2_reg,
LP87565_BUCK_CTRL_2_SLEW_RATE,
- reg << __ffs(LP87565_BUCK_CTRL_2_SLEW_RATE));
+ FIELD_PREP(LP87565_BUCK_CTRL_2_SLEW_RATE, reg));
if (ret) {
dev_err(&rdev->dev, "SLEW RATE write failed: %d\n", ret);
return ret;
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
index e97ade09dede..2ade249ab6df 100644
--- a/drivers/regulator/lp8788-buck.c
+++ b/drivers/regulator/lp8788-buck.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/mfd/lp8788.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
/* register address */
#define LP8788_EN_BUCK 0x0C
@@ -69,8 +69,8 @@
#define BUCK_FPWM_SHIFT(x) (x)
enum lp8788_dvs_state {
- DVS_LOW = GPIOF_OUT_INIT_LOW,
- DVS_HIGH = GPIOF_OUT_INIT_HIGH,
+ DVS_LOW = 0,
+ DVS_HIGH = 1,
};
enum lp8788_dvs_mode {
@@ -89,6 +89,8 @@ struct lp8788_buck {
struct lp8788 *lp;
struct regulator_dev *regulator;
void *dvs;
+ struct gpio_desc *gpio1;
+ struct gpio_desc *gpio2; /* Only used on BUCK2 */
};
/* BUCK 1 ~ 4 voltage ranges */
@@ -106,8 +108,7 @@ static void lp8788_buck1_set_dvs(struct lp8788_buck *buck)
return;
pinstate = dvs->vsel == DVS_SEL_V0 ? DVS_LOW : DVS_HIGH;
- if (gpio_is_valid(dvs->gpio))
- gpio_set_value(dvs->gpio, pinstate);
+ gpiod_set_value(buck->gpio1, pinstate);
}
static void lp8788_buck2_set_dvs(struct lp8788_buck *buck)
@@ -139,11 +140,8 @@ static void lp8788_buck2_set_dvs(struct lp8788_buck *buck)
return;
}
- if (gpio_is_valid(dvs->gpio[0]))
- gpio_set_value(dvs->gpio[0], pin1);
-
- if (gpio_is_valid(dvs->gpio[1]))
- gpio_set_value(dvs->gpio[1], pin2);
+ gpiod_set_value(buck->gpio1, pin1);
+ gpiod_set_value(buck->gpio2, pin2);
}
static void lp8788_set_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
@@ -202,19 +200,13 @@ static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck,
enum lp8788_buck_id id)
{
enum lp8788_dvs_mode mode = lp8788_get_buck_dvs_ctrl_mode(buck, id);
- struct lp8788_buck1_dvs *b1_dvs;
- struct lp8788_buck2_dvs *b2_dvs;
u8 val, idx, addr;
int pin1, pin2;
switch (id) {
case BUCK1:
if (mode == EXTPIN) {
- b1_dvs = (struct lp8788_buck1_dvs *)buck->dvs;
- if (!b1_dvs)
- goto err;
-
- idx = gpio_get_value(b1_dvs->gpio) ? 1 : 0;
+ idx = gpiod_get_value(buck->gpio1);
} else {
lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
idx = (val & LP8788_BUCK1_DVS_M) >> LP8788_BUCK1_DVS_S;
@@ -223,12 +215,8 @@ static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck,
break;
case BUCK2:
if (mode == EXTPIN) {
- b2_dvs = (struct lp8788_buck2_dvs *)buck->dvs;
- if (!b2_dvs)
- goto err;
-
- pin1 = gpio_get_value(b2_dvs->gpio[0]);
- pin2 = gpio_get_value(b2_dvs->gpio[1]);
+ pin1 = gpiod_get_value(buck->gpio1);
+ pin2 = gpiod_get_value(buck->gpio2);
if (pin1 == PIN_LOW && pin2 == PIN_LOW)
idx = 0;
@@ -424,28 +412,28 @@ static int lp8788_dvs_gpio_request(struct platform_device *pdev,
enum lp8788_buck_id id)
{
struct lp8788_platform_data *pdata = buck->lp->pdata;
- char *b1_name = "LP8788_B1_DVS";
- char *b2_name[] = { "LP8788_B2_DVS1", "LP8788_B2_DVS2" };
- int i, gpio, ret;
+ struct device *dev = &pdev->dev;
switch (id) {
case BUCK1:
- gpio = pdata->buck1_dvs->gpio;
- ret = devm_gpio_request_one(&pdev->dev, gpio, DVS_LOW,
- b1_name);
- if (ret)
- return ret;
+ buck->gpio1 = devm_gpiod_get(dev, "dvs", GPIOD_OUT_LOW);
+ if (IS_ERR(buck->gpio1))
+ return PTR_ERR(buck->gpio1);
+ gpiod_set_consumer_name(buck->gpio1, "LP8788_B1_DVS");
buck->dvs = pdata->buck1_dvs;
break;
case BUCK2:
- for (i = 0; i < LP8788_NUM_BUCK2_DVS; i++) {
- gpio = pdata->buck2_dvs->gpio[i];
- ret = devm_gpio_request_one(&pdev->dev, gpio,
- DVS_LOW, b2_name[i]);
- if (ret)
- return ret;
- }
+ buck->gpio1 = devm_gpiod_get_index(dev, "dvs", 0, GPIOD_OUT_LOW);
+ if (IS_ERR(buck->gpio1))
+ return PTR_ERR(buck->gpio1);
+ gpiod_set_consumer_name(buck->gpio1, "LP8788_B2_DVS1");
+
+ buck->gpio2 = devm_gpiod_get_index(dev, "dvs", 1, GPIOD_OUT_LOW);
+ if (IS_ERR(buck->gpio2))
+ return PTR_ERR(buck->gpio2);
+ gpiod_set_consumer_name(buck->gpio2, "LP8788_B2_DVS2");
+
buck->dvs = pdata->buck2_dvs;
break;
default:
diff --git a/drivers/regulator/max5970-regulator.c b/drivers/regulator/max5970-regulator.c
index 830a1c4cd705..8bbcd983a74a 100644
--- a/drivers/regulator/max5970-regulator.c
+++ b/drivers/regulator/max5970-regulator.c
@@ -29,8 +29,8 @@ struct max5970_regulator {
};
enum max597x_regulator_id {
- MAX597X_SW0,
- MAX597X_SW1,
+ MAX597X_sw0,
+ MAX597X_sw1,
};
static int max5970_read_adc(struct regmap *regmap, int reg, long *val)
@@ -378,8 +378,8 @@ static int max597x_dt_parse(struct device_node *np,
}
static const struct regulator_desc regulators[] = {
- MAX597X_SWITCH(SW0, MAX5970_REG_CHXEN, 0, "vss1"),
- MAX597X_SWITCH(SW1, MAX5970_REG_CHXEN, 1, "vss2"),
+ MAX597X_SWITCH(sw0, MAX5970_REG_CHXEN, 0, "vss1"),
+ MAX597X_SWITCH(sw1, MAX5970_REG_CHXEN, 1, "vss2"),
};
static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 8d5193207552..f8bb6828feef 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -20,9 +20,7 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/max8973-regulator.h>
#include <linux/regulator/of_regulator.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -102,7 +100,7 @@ struct max8973_chip {
struct regulator_desc desc;
struct regmap *regmap;
bool enable_external_control;
- int dvs_gpio;
+ struct gpio_desc *dvs_gpiod;
int lru_index[MAX8973_MAX_VOUT_REG];
int curr_vout_val[MAX8973_MAX_VOUT_REG];
int curr_vout_reg;
@@ -184,7 +182,7 @@ static int max8973_dcdc_set_voltage_sel(struct regulator_dev *rdev,
* If GPIOs are available to select the VOUT register then use the
* least recently used register for the new configuration.
*/
- if (gpio_is_valid(max->dvs_gpio))
+ if (max->dvs_gpiod)
found = find_voltage_set_register(max, vsel,
&vout_reg, &gpio_val);
@@ -201,8 +199,8 @@ static int max8973_dcdc_set_voltage_sel(struct regulator_dev *rdev,
}
/* Select proper VOUT register via gpios */
- if (gpio_is_valid(max->dvs_gpio)) {
- gpio_set_value_cansleep(max->dvs_gpio, gpio_val & 0x1);
+ if (max->dvs_gpiod) {
+ gpiod_set_value_cansleep(max->dvs_gpiod, gpio_val & 0x1);
max->curr_gpio_val = gpio_val;
}
return 0;
@@ -531,7 +529,6 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
pdata->enable_ext_control = of_property_read_bool(np,
"maxim,externally-enable");
- pdata->dvs_gpio = of_get_named_gpio(np, "maxim,dvs-gpio", 0);
ret = of_property_read_u32(np, "maxim,dvs-default-state", &pval);
if (!ret)
@@ -612,13 +609,17 @@ static int max8973_probe(struct i2c_client *client)
return -EIO;
}
- if (pdata->dvs_gpio == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL);
if (!max)
return -ENOMEM;
+ max->dvs_gpiod = devm_gpiod_get_optional(&client->dev, "maxim,dvs",
+ (pdata->dvs_def_state) ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
+ if (IS_ERR(max->dvs_gpiod))
+ return dev_err_probe(&client->dev, PTR_ERR(max->dvs_gpiod),
+ "failed to obtain dvs gpio\n");
+ gpiod_set_consumer_name(max->dvs_gpiod, "max8973-dvs");
+
max->regmap = devm_regmap_init_i2c(client, &max8973_regmap_config);
if (IS_ERR(max->regmap)) {
ret = PTR_ERR(max->regmap);
@@ -663,7 +664,6 @@ static int max8973_probe(struct i2c_client *client)
max->desc.ramp_delay_table = max8973_buck_ramp_table;
max->desc.n_ramp_values = ARRAY_SIZE(max8973_buck_ramp_table);
- max->dvs_gpio = (pdata->dvs_gpio) ? pdata->dvs_gpio : -EINVAL;
max->enable_external_control = pdata->enable_ext_control;
max->curr_gpio_val = pdata->dvs_def_state;
max->curr_vout_reg = MAX8973_VOUT + pdata->dvs_def_state;
@@ -671,21 +671,9 @@ static int max8973_probe(struct i2c_client *client)
max->lru_index[0] = max->curr_vout_reg;
- if (gpio_is_valid(max->dvs_gpio)) {
- int gpio_flags;
+ if (max->dvs_gpiod) {
int i;
- gpio_flags = (pdata->dvs_def_state) ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
- ret = devm_gpio_request_one(&client->dev, max->dvs_gpio,
- gpio_flags, "max8973-dvs");
- if (ret) {
- dev_err(&client->dev,
- "gpio_request for gpio %d failed, err = %d\n",
- max->dvs_gpio, ret);
- return ret;
- }
-
/*
* Initialize the lru index with vout_reg id
* The index 0 will be most recently used and
diff --git a/drivers/regulator/max8997-regulator.c b/drivers/regulator/max8997-regulator.c
index 0b38eaa73597..5f201ee9a5b8 100644
--- a/drivers/regulator/max8997-regulator.c
+++ b/drivers/regulator/max8997-regulator.c
@@ -9,8 +9,7 @@
#include <linux/bug.h>
#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -32,7 +31,7 @@ struct max8997_data {
u8 buck1_vol[8];
u8 buck2_vol[8];
u8 buck5_vol[8];
- int buck125_gpios[3];
+ struct gpio_desc *buck125_gpiods[3];
int buck125_gpioindex;
bool ignore_gpiodvs_side_effect;
@@ -52,9 +51,9 @@ static inline void max8997_set_gpio(struct max8997_data *max8997)
int set2 = ((max8997->buck125_gpioindex) >> 1) & 0x1;
int set1 = ((max8997->buck125_gpioindex) >> 2) & 0x1;
- gpio_set_value(max8997->buck125_gpios[0], set1);
- gpio_set_value(max8997->buck125_gpios[1], set2);
- gpio_set_value(max8997->buck125_gpios[2], set3);
+ gpiod_set_value(max8997->buck125_gpiods[0], set1);
+ gpiod_set_value(max8997->buck125_gpiods[1], set2);
+ gpiod_set_value(max8997->buck125_gpiods[2], set3);
}
struct voltage_map_desc {
@@ -873,31 +872,13 @@ static struct regulator_desc regulators[] = {
};
#ifdef CONFIG_OF
-static int max8997_pmic_dt_parse_dvs_gpio(struct platform_device *pdev,
- struct max8997_platform_data *pdata,
- struct device_node *pmic_np)
-{
- int i, gpio;
-
- for (i = 0; i < 3; i++) {
- gpio = of_get_named_gpio(pmic_np,
- "max8997,pmic-buck125-dvs-gpios", i);
- if (!gpio_is_valid(gpio)) {
- dev_err(&pdev->dev, "invalid gpio[%d]: %d\n", i, gpio);
- return -EINVAL;
- }
- pdata->buck125_gpios[i] = gpio;
- }
- return 0;
-}
-
static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
struct max8997_platform_data *pdata)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct device_node *pmic_np, *regulators_np, *reg_np;
struct max8997_regulator_data *rdata;
- unsigned int i, dvs_voltage_nr = 1, ret;
+ unsigned int i, dvs_voltage_nr = 1;
pmic_np = iodev->dev->of_node;
if (!pmic_np) {
@@ -949,10 +930,6 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
pdata->buck5_gpiodvs) {
- ret = max8997_pmic_dt_parse_dvs_gpio(pdev, pdata, pmic_np);
- if (ret)
- return -EINVAL;
-
if (of_property_read_u32(pmic_np,
"max8997,pmic-buck125-default-dvs-idx",
&pdata->buck125_default_idx)) {
@@ -1039,7 +1016,6 @@ static int max8997_pmic_probe(struct platform_device *pdev)
max8997->buck1_gpiodvs = pdata->buck1_gpiodvs;
max8997->buck2_gpiodvs = pdata->buck2_gpiodvs;
max8997->buck5_gpiodvs = pdata->buck5_gpiodvs;
- memcpy(max8997->buck125_gpios, pdata->buck125_gpios, sizeof(int) * 3);
max8997->ignore_gpiodvs_side_effect = pdata->ignore_gpiodvs_side_effect;
nr_dvs = (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
@@ -1110,38 +1086,27 @@ static int max8997_pmic_probe(struct platform_device *pdev)
*/
if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
pdata->buck5_gpiodvs) {
+ const char *gpio_names[3] = {"MAX8997 SET1", "MAX8997 SET2", "MAX8997 SET3"};
- if (!gpio_is_valid(pdata->buck125_gpios[0]) ||
- !gpio_is_valid(pdata->buck125_gpios[1]) ||
- !gpio_is_valid(pdata->buck125_gpios[2])) {
- dev_err(&pdev->dev, "GPIO NOT VALID\n");
- return -EINVAL;
- }
+ for (i = 0; i < 3; i++) {
+ enum gpiod_flags flags;
- ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[0],
- "MAX8997 SET1");
- if (ret)
- return ret;
-
- ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[1],
- "MAX8997 SET2");
- if (ret)
- return ret;
-
- ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[2],
- "MAX8997 SET3");
- if (ret)
- return ret;
-
- gpio_direction_output(pdata->buck125_gpios[0],
- (max8997->buck125_gpioindex >> 2)
- & 0x1); /* SET1 */
- gpio_direction_output(pdata->buck125_gpios[1],
- (max8997->buck125_gpioindex >> 1)
- & 0x1); /* SET2 */
- gpio_direction_output(pdata->buck125_gpios[2],
- (max8997->buck125_gpioindex >> 0)
- & 0x1); /* SET3 */
+ if (max8997->buck125_gpioindex & BIT(2 - i))
+ flags = GPIOD_OUT_HIGH;
+ else
+ flags = GPIOD_OUT_LOW;
+
+ max8997->buck125_gpiods[i] = devm_gpiod_get_index(iodev->dev,
+ "max8997,pmic-buck125-dvs",
+ i,
+ flags);
+ if (IS_ERR(max8997->buck125_gpiods[i])) {
+ ret = PTR_ERR(max8997->buck125_gpiods[i]);
+ return dev_err_probe(iodev->dev, ret, "cant get GPIO %d (%d)\n",
+ i, ret);
+ }
+ gpiod_set_consumer_name(max8997->buck125_gpiods[i], gpio_names[i]);
+ }
}
/* DVS-GPIO disabled */
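
[Example] In the max8997 hunk above each of the three DVS lines takes its initial level from one bit of buck125_gpioindex (SET1 is bit 2, SET3 is bit 0). A small sketch of that mapping under hypothetical names:

#include <linux/bits.h>
#include <linux/gpio/consumer.h>

/* Illustrative: initial gpiod flags for DVS line i (0..2) given a
 * 3-bit selector index where bit 2 corresponds to line 0 (SET1). */
static enum gpiod_flags example_dvs_flags(unsigned int index, unsigned int i)
{
	return (index & BIT(2 - i)) ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
}
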
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index fadb4717384a..254a77887f66 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -10,12 +10,12 @@
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/bits.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
@@ -31,6 +31,9 @@ struct max8998_data {
unsigned int buck1_idx; /* index to last changed voltage */
/* value in a set */
unsigned int buck2_idx;
+ struct gpio_desc *buck1_gpio1;
+ struct gpio_desc *buck1_gpio2;
+ struct gpio_desc *buck2_gpio;
};
static const unsigned int charger_current_table[] = {
@@ -227,15 +230,15 @@ static int max8998_set_voltage_ldo_sel(struct regulator_dev *rdev,
return ret;
}
-static inline void buck1_gpio_set(int gpio1, int gpio2, int v)
+static inline void buck1_gpio_set(struct gpio_desc *gpio1, struct gpio_desc *gpio2, int v)
{
- gpio_set_value(gpio1, v & 0x1);
- gpio_set_value(gpio2, (v >> 1) & 0x1);
+ gpiod_set_value(gpio1, v & 0x1);
+ gpiod_set_value(gpio2, (v >> 1) & 0x1);
}
-static inline void buck2_gpio_set(int gpio, int v)
+static inline void buck2_gpio_set(struct gpio_desc *gpio, int v)
{
- gpio_set_value(gpio, v & 0x1);
+ gpiod_set_value(gpio, v & 0x1);
}
static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev,
@@ -260,16 +263,15 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev,
selector, max8998->buck1_vol[0], max8998->buck1_vol[1],
max8998->buck1_vol[2], max8998->buck1_vol[3]);
- if (gpio_is_valid(pdata->buck1_set1) &&
- gpio_is_valid(pdata->buck1_set2)) {
+ if (max8998->buck1_gpio1 && max8998->buck1_gpio2) {
/* check if requested voltage */
/* value is already defined */
for (j = 0; j < ARRAY_SIZE(max8998->buck1_vol); j++) {
if (max8998->buck1_vol[j] == selector) {
max8998->buck1_idx = j;
- buck1_gpio_set(pdata->buck1_set1,
- pdata->buck1_set2, j);
+ buck1_gpio_set(max8998->buck1_gpio1,
+ max8998->buck1_gpio2, j);
goto buck1_exit;
}
}
@@ -286,13 +288,13 @@ static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev,
&shift,
&mask);
ret = max8998_write_reg(i2c, reg, selector);
- buck1_gpio_set(pdata->buck1_set1,
- pdata->buck1_set2, max8998->buck1_idx);
+ buck1_gpio_set(max8998->buck1_gpio1,
+ max8998->buck1_gpio2, max8998->buck1_idx);
buck1_last_val++;
buck1_exit:
dev_dbg(max8998->dev, "%s: SET1:%d, SET2:%d\n",
- i2c->name, gpio_get_value(pdata->buck1_set1),
- gpio_get_value(pdata->buck1_set2));
+ i2c->name, gpiod_get_value(max8998->buck1_gpio1),
+ gpiod_get_value(max8998->buck1_gpio2));
break;
} else {
ret = max8998_write_reg(i2c, reg, selector);
@@ -303,14 +305,13 @@ buck1_exit:
dev_dbg(max8998->dev,
"BUCK2, selector:%d buck2_vol1:%d, buck2_vol2:%d\n",
selector, max8998->buck2_vol[0], max8998->buck2_vol[1]);
- if (gpio_is_valid(pdata->buck2_set3)) {
-
+ if (max8998->buck2_gpio) {
/* check if requested voltage */
/* value is already defined */
for (j = 0; j < ARRAY_SIZE(max8998->buck2_vol); j++) {
if (max8998->buck2_vol[j] == selector) {
max8998->buck2_idx = j;
- buck2_gpio_set(pdata->buck2_set3, j);
+ buck2_gpio_set(max8998->buck2_gpio, j);
goto buck2_exit;
}
}
@@ -322,10 +323,10 @@ buck1_exit:
&reg, &shift, &mask);
ret = max8998_write_reg(i2c, reg, selector);
max8998->buck2_vol[max8998->buck2_idx] = selector;
- buck2_gpio_set(pdata->buck2_set3, max8998->buck2_idx);
+ buck2_gpio_set(max8998->buck2_gpio, max8998->buck2_idx);
buck2_exit:
dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name,
- gpio_get_value(pdata->buck2_set3));
+ gpiod_get_value(max8998->buck2_gpio));
} else {
ret = max8998_write_reg(i2c, reg, selector);
}
@@ -539,36 +540,6 @@ static const struct regulator_desc regulators[] = {
charger_current_table, MAX8998_REG_CHGR1, 0x7),
};
-static int max8998_pmic_dt_parse_dvs_gpio(struct max8998_dev *iodev,
- struct max8998_platform_data *pdata,
- struct device_node *pmic_np)
-{
- int gpio;
-
- gpio = of_get_named_gpio(pmic_np, "max8998,pmic-buck1-dvs-gpios", 0);
- if (!gpio_is_valid(gpio)) {
- dev_err(iodev->dev, "invalid buck1 gpio[0]: %d\n", gpio);
- return -EINVAL;
- }
- pdata->buck1_set1 = gpio;
-
- gpio = of_get_named_gpio(pmic_np, "max8998,pmic-buck1-dvs-gpios", 1);
- if (!gpio_is_valid(gpio)) {
- dev_err(iodev->dev, "invalid buck1 gpio[1]: %d\n", gpio);
- return -EINVAL;
- }
- pdata->buck1_set2 = gpio;
-
- gpio = of_get_named_gpio(pmic_np, "max8998,pmic-buck2-dvs-gpio", 0);
- if (!gpio_is_valid(gpio)) {
- dev_err(iodev->dev, "invalid buck 2 gpio: %d\n", gpio);
- return -EINVAL;
- }
- pdata->buck2_set3 = gpio;
-
- return 0;
-}
-
static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev,
struct max8998_platform_data *pdata)
{
@@ -614,10 +585,6 @@ static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev,
of_node_put(reg_np);
of_node_put(regulators_np);
- ret = max8998_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
- if (ret)
- return -EINVAL;
-
pdata->buck_voltage_lock = of_property_read_bool(pmic_np, "max8998,pmic-buck-voltage-lock");
ret = of_property_read_u32(pmic_np,
@@ -665,6 +632,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
struct max8998_data *max8998;
struct i2c_client *i2c;
+ enum gpiod_flags flags;
int i, ret;
unsigned int v;
@@ -693,37 +661,38 @@ static int max8998_pmic_probe(struct platform_device *pdev)
max8998->buck1_idx = pdata->buck1_default_idx;
max8998->buck2_idx = pdata->buck2_default_idx;
- /* NOTE: */
- /* For unused GPIO NOT marked as -1 (thereof equal to 0) WARN_ON */
- /* will be displayed */
-
/* Check if MAX8998 voltage selection GPIOs are defined */
- if (gpio_is_valid(pdata->buck1_set1) &&
- gpio_is_valid(pdata->buck1_set2)) {
- /* Check if SET1 is not equal to 0 */
- if (!pdata->buck1_set1) {
- dev_err(&pdev->dev,
- "MAX8998 SET1 GPIO defined as 0 !\n");
- WARN_ON(!pdata->buck1_set1);
- return -EIO;
- }
- /* Check if SET2 is not equal to 0 */
- if (!pdata->buck1_set2) {
- dev_err(&pdev->dev,
- "MAX8998 SET2 GPIO defined as 0 !\n");
- WARN_ON(!pdata->buck1_set2);
- return -EIO;
- }
-
- gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1");
- gpio_direction_output(pdata->buck1_set1,
- max8998->buck1_idx & 0x1);
-
-
- gpio_request(pdata->buck1_set2, "MAX8998 BUCK1_SET2");
- gpio_direction_output(pdata->buck1_set2,
- (max8998->buck1_idx >> 1) & 0x1);
-
+ flags = (max8998->buck1_idx & BIT(0)) ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ max8998->buck1_gpio1 = devm_gpiod_get_index_optional(iodev->dev,
+ "max8998,pmic-buck1-dvs",
+ 0,
+ flags);
+ if (IS_ERR(max8998->buck1_gpio1))
+ return dev_err_probe(&pdev->dev, PTR_ERR(max8998->buck1_gpio1),
+ "could not get BUCK1 GPIO1\n");
+ gpiod_set_consumer_name(max8998->buck1_gpio1, "MAX8998 BUCK1_SET1");
+
+ flags = (max8998->buck1_idx & BIT(1)) ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ max8998->buck1_gpio2 = devm_gpiod_get_index_optional(iodev->dev,
+ "max8998,pmic-buck1-dvs",
+ 1,
+ flags);
+ if (IS_ERR(max8998->buck1_gpio2))
+ return dev_err_probe(&pdev->dev, PTR_ERR(max8998->buck1_gpio2),
+ "could not get BUCK1 GPIO2\n");
+ gpiod_set_consumer_name(max8998->buck1_gpio1, "MAX8998 BUCK1_SET2");
+
+ flags = (max8998->buck2_idx & BIT(0)) ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ max8998->buck2_gpio = devm_gpiod_get_index_optional(iodev->dev,
+ "max8998,pmic-buck2-dvs",
+ 0,
+ flags);
+ if (IS_ERR(max8998->buck2_gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(max8998->buck2_gpio),
+ "could not get BUCK2 GPIO\n");
+ gpiod_set_consumer_name(max8998->buck1_gpio1, "MAX8998 BUCK2_SET3");
+
+ if (max8998->buck1_gpio1 && max8998->buck1_gpio2) {
/* Set predefined values for BUCK1 registers */
for (v = 0; v < ARRAY_SIZE(pdata->buck1_voltage); ++v) {
int index = MAX8998_BUCK1 - MAX8998_LDO2;
@@ -742,18 +711,7 @@ static int max8998_pmic_probe(struct platform_device *pdev)
}
}
- if (gpio_is_valid(pdata->buck2_set3)) {
- /* Check if SET3 is not equal to 0 */
- if (!pdata->buck2_set3) {
- dev_err(&pdev->dev,
- "MAX8998 SET3 GPIO defined as 0 !\n");
- WARN_ON(!pdata->buck2_set3);
- return -EIO;
- }
- gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3");
- gpio_direction_output(pdata->buck2_set3,
- max8998->buck2_idx & 0x1);
-
+ if (max8998->buck2_gpio) {
/* Set predefined values for BUCK2 registers */
for (v = 0; v < ARRAY_SIZE(pdata->buck2_voltage); ++v) {
int index = MAX8998_BUCK2 - MAX8998_LDO2;
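
[Example] The max8998 conversion leans on the _optional variants: when the DT property is absent they return NULL rather than an error, so the later "if (max8998->buck1_gpio1 && ...)" tests replace gpio_is_valid(), and gpiod_set_value(NULL, v) is a safe no-op. A sketch with a hypothetical property name:

#include <linux/gpio/consumer.h>

/* Illustrative: NULL (no error) means the line simply is not wired. */
static int example_get_optional(struct device *dev, struct gpio_desc **g)
{
	*g = devm_gpiod_get_index_optional(dev, "example-dvs", 0,
					   GPIOD_OUT_LOW);
	if (IS_ERR(*g))
		return PTR_ERR(*g);
	return 0;
}
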
diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c
index b820bd6043e5..ab105ffd6a2e 100644
--- a/drivers/regulator/mp8859.c
+++ b/drivers/regulator/mp8859.c
@@ -35,6 +35,16 @@
#define MP8859_GO_BIT 0x01
+#define MP8859_IOUT_LIM_MASK 0x7f
+
+#define MP8859_ENABLE_MASK 0x80
+#define MP8859_DISCHG_EN_MASK 0x10
+#define MP8859_MODE_MASK 0x08
+
+#define MP8859_PG_MASK 0x80
+#define MP8859_OTP_MASK 0x40
+#define MP8859_OTW_MASK 0x20
+#define MP8859_CC_CV_MASK 0x10
static int mp8859_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel)
{
@@ -73,21 +83,221 @@ static int mp8859_get_voltage_sel(struct regulator_dev *rdev)
return val;
}
+static int mp8859_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int from, unsigned int to)
+{
+ int change;
+
+ /* The voltage ramps at 1mV/uS, selectors are 10mV */
+ if (from > to)
+ change = from - to;
+ else
+ change = to - from;
+
+ return change * 10;
+}
+
+static unsigned int mp8859_get_mode(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, MP8859_CTL1_REG, &val);
+ if (ret != 0) {
+ dev_err(&rdev->dev, "Failed to read mode: %d\n", ret);
+ return 0;
+ }
+
+ if (val & MP8859_MODE_MASK)
+ return REGULATOR_MODE_FAST;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int mp8859_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ unsigned int val;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = MP8859_MODE_MASK;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(rdev->regmap, MP8859_CTL1_REG,
+ MP8859_MODE_MASK, val);
+}
+
+static int mp8859_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ unsigned int cur_val, new_val;
+ int ret, i;
+
+ /* Steps of 50mA */
+ new_val = max_uA / 50000;
+ if (new_val > MP8859_IOUT_LIM_MASK)
+ return -EINVAL;
+ if (new_val == 0)
+ return -EINVAL;
+
+ /*
+ * If the regulator is limiting then ramp gradually as per
+ * datasheet, otherwise just set the value directly.
+ */
+ ret = regmap_read(rdev->regmap, MP8859_STATUS_REG, &cur_val);
+ if (ret != 0)
+ return ret;
+ if (!(cur_val & MP8859_CC_CV_MASK)) {
+ return regmap_update_bits(rdev->regmap, MP8859_IOUT_LIM_REG,
+ MP8859_IOUT_LIM_MASK, new_val);
+ }
+
+ ret = regmap_read(rdev->regmap, MP8859_IOUT_LIM_REG, &cur_val);
+ if (ret != 0)
+ return ret;
+
+ if (cur_val >= new_val) {
+ for (i = cur_val; i >= new_val; i--) {
+ ret = regmap_update_bits(rdev->regmap,
+ MP8859_IOUT_LIM_REG,
+ MP8859_IOUT_LIM_MASK,
+ i);
+ if (ret != 0)
+ return ret;
+ }
+ } else {
+ for (i = cur_val; i <= new_val; i++) {
+ ret = regmap_update_bits(rdev->regmap,
+ MP8859_IOUT_LIM_REG,
+ MP8859_IOUT_LIM_MASK,
+ i);
+ if (ret != 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mp8859_get_status(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ /* Output status is only meaningful when enabled */
+ ret = regmap_read(rdev->regmap, MP8859_CTL1_REG, &val);
+ if (ret != 0)
+ return ret;
+ if (!(val & MP8859_ENABLE_MASK))
+ return REGULATOR_STATUS_UNDEFINED;
+
+ ret = regmap_read(rdev->regmap, MP8859_STATUS_REG, &val);
+ if (ret != 0)
+ return ret;
+
+ if (val & MP8859_PG_MASK)
+ return REGULATOR_STATUS_ON;
+ else
+ return REGULATOR_STATUS_ERROR;
+}
+
+static int mp8859_get_error_flags(struct regulator_dev *rdev,
+ unsigned int *flags)
+{
+ unsigned int status, enabled;
+ int ret;
+
+ *flags = 0;
+
+ /* Output status is only meaningful when enabled */
+ ret = regmap_read(rdev->regmap, MP8859_CTL1_REG, &enabled);
+ if (ret != 0)
+ return ret;
+ enabled &= MP8859_ENABLE_MASK;
+
+ ret = regmap_read(rdev->regmap, MP8859_STATUS_REG, &status);
+ if (ret != 0)
+ return ret;
+
+ if (enabled && !(status & MP8859_PG_MASK))
+ *flags |= REGULATOR_ERROR_FAIL;
+ if (status & MP8859_OTP_MASK)
+ *flags |= REGULATOR_ERROR_OVER_TEMP;
+ if (status & MP8859_OTW_MASK)
+ *flags |= REGULATOR_ERROR_OVER_TEMP_WARN;
+ if (status & MP8859_CC_CV_MASK)
+ *flags |= REGULATOR_ERROR_OVER_CURRENT;
+
+ return 0;
+}
+
static const struct linear_range mp8859_dcdc_ranges[] = {
REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000),
};
+static bool mp8859_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MP8859_VOUT_L_REG:
+ case MP8859_VOUT_H_REG:
+ case MP8859_VOUT_GO_REG:
+ case MP8859_IOUT_LIM_REG:
+ case MP8859_CTL1_REG:
+ case MP8859_CTL2_REG:
+ case MP8859_STATUS_REG:
+ case MP8859_INTERRUPT_REG:
+ case MP8859_MASK_REG:
+ case MP8859_ID1_REG:
+ case MP8859_MFR_ID_REG:
+ case MP8859_DEV_ID_REG:
+ case MP8859_IC_REV_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool mp8859_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MP8859_VOUT_GO_REG:
+ case MP8859_STATUS_REG:
+ case MP8859_INTERRUPT_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct regmap_config mp8859_regmap = {
.reg_bits = 8,
.val_bits = 8,
.max_register = MP8859_MAX_REG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
+ .readable_reg = mp8859_readable,
+ .volatile_reg = mp8859_volatile,
};
static const struct regulator_ops mp8859_ops = {
.set_voltage_sel = mp8859_set_voltage_sel,
.get_voltage_sel = mp8859_get_voltage_sel,
.list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_time_sel = mp8859_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_mode = mp8859_set_mode,
+ .get_mode = mp8859_get_mode,
+ .set_active_discharge = regulator_set_active_discharge_regmap,
+ .set_current_limit = mp8859_set_current_limit,
+ .get_status = mp8859_get_status,
+ .get_error_flags = mp8859_get_error_flags,
};
static const struct regulator_desc mp8859_regulators[] = {
@@ -100,6 +310,12 @@ static const struct regulator_desc mp8859_regulators[] = {
.n_voltages = VOL_MAX_IDX + 1,
.linear_ranges = mp8859_dcdc_ranges,
.n_linear_ranges = 1,
+ .enable_reg = MP8859_CTL1_REG,
+ .enable_mask = MP8859_ENABLE_MASK,
+ .enable_val = MP8859_ENABLE_MASK,
+ .active_discharge_reg = MP8859_CTL1_REG,
+ .active_discharge_on = MP8859_DISCHG_EN_MASK,
+ .active_discharge_mask = MP8859_DISCHG_EN_MASK,
.ops = &mp8859_ops,
.owner = THIS_MODULE,
},
@@ -111,12 +327,46 @@ static int mp8859_i2c_probe(struct i2c_client *i2c)
struct regulator_config config = {.dev = &i2c->dev};
struct regmap *regmap = devm_regmap_init_i2c(i2c, &mp8859_regmap);
struct regulator_dev *rdev;
+ unsigned int val, rev;
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(&i2c->dev, "regmap init failed: %d\n", ret);
return ret;
}
+
+ ret = regmap_read(regmap, MP8859_MFR_ID_REG, &val);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to read manufacturer ID: %d\n", ret);
+ return ret;
+ }
+ if (val != 0x9) {
+ dev_err(&i2c->dev, "Manufacturer ID %x != 9\n", val);
+ return -EINVAL;
+ }
+
+ ret = regmap_read(regmap, MP8859_DEV_ID_REG, &val);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to read device ID: %d\n", ret);
+ return ret;
+ }
+ if (val != 0x58) {
+ dev_err(&i2c->dev, "Manufacturer ID %x != 0x58\n", val);
+ return -EINVAL;
+ }
+
+ ret = regmap_read(regmap, MP8859_IC_REV_REG, &rev);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to read device revision: %d\n", ret);
+ return ret;
+ }
+ ret = regmap_read(regmap, MP8859_ID1_REG, &val);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to read device ID1: %d\n", ret);
+ return ret;
+ }
+ dev_info(&i2c->dev, "MP8859-%04d revision %d\n", val, rev);
+
rdev = devm_regulator_register(&i2c->dev, &mp8859_regulators[0],
&config);
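
[Example] The new regmap callbacks split the register map into cacheable and volatile parts: with REGCACHE_MAPLE, reads of cacheable registers are served from the maple-tree cache while volatile ones (status, interrupt, the GO bit) always hit the hardware. A minimal sketch of the same shape, with made-up register numbers:

#include <linux/regmap.h>

static bool example_volatile(struct device *dev, unsigned int reg)
{
	return reg == 0x09;	/* e.g. a live status register */
}

static const struct regmap_config example_regmap = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x0f,
	.cache_type = REGCACHE_MAPLE,
	.volatile_reg = example_volatile,
};
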
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 60cfcd741c2a..7434b6b22d32 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -271,11 +271,10 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
of_find_property(np, "voltage-table", &length);
if ((length < sizeof(*duty_cycle_table)) ||
- (length % sizeof(*duty_cycle_table))) {
- dev_err(&pdev->dev, "voltage-table length(%d) is invalid\n",
- length);
- return -EINVAL;
- }
+ (length % sizeof(*duty_cycle_table)))
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "voltage-table length(%d) is invalid\n",
+ length);
duty_cycle_table = devm_kzalloc(&pdev->dev, length, GFP_KERNEL);
if (!duty_cycle_table)
@@ -284,10 +283,9 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
ret = of_property_read_u32_array(np, "voltage-table",
(u32 *)duty_cycle_table,
length / sizeof(u32));
- if (ret) {
- dev_err(&pdev->dev, "Failed to read voltage-table: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to read voltage-table\n");
drvdata->state = -ENOTRECOVERABLE;
drvdata->duty_cycle_table = duty_cycle_table;
@@ -359,10 +357,9 @@ static int pwm_regulator_probe(struct platform_device *pdev)
enum gpiod_flags gpio_flags;
int ret;
- if (!np) {
- dev_err(&pdev->dev, "Device Tree node missing\n");
- return -EINVAL;
- }
+ if (!np)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Device Tree node missing\n");
drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
@@ -400,8 +397,7 @@ static int pwm_regulator_probe(struct platform_device *pdev)
gpio_flags);
if (IS_ERR(drvdata->enb_gpio)) {
ret = PTR_ERR(drvdata->enb_gpio);
- dev_err(&pdev->dev, "Failed to get enable GPIO: %d\n", ret);
- return ret;
+ return dev_err_probe(&pdev->dev, ret, "Failed to get enable GPIO\n");
}
ret = pwm_adjust_config(drvdata->pwm);
@@ -409,19 +405,17 @@ static int pwm_regulator_probe(struct platform_device *pdev)
return ret;
ret = pwm_regulator_init_boot_on(pdev, drvdata, init_data);
- if (ret) {
- dev_err(&pdev->dev, "Failed to apply boot_on settings: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to apply boot_on settings\n");
regulator = devm_regulator_register(&pdev->dev,
&drvdata->desc, &config);
if (IS_ERR(regulator)) {
ret = PTR_ERR(regulator);
- dev_err(&pdev->dev, "Failed to register regulator %s: %d\n",
- drvdata->desc.name, ret);
- return ret;
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register regulator %s\n",
+ drvdata->desc.name);
}
return 0;
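
[Example] dev_err_probe() folds the log-and-return pattern into one expression and treats -EPROBE_DEFER specially (logged at debug level and recorded as the deferral reason instead of flooding the console). A minimal sketch with a made-up failure:

#include <linux/device.h>

static int example_probe_step(struct device *dev)
{
	int ret = -ENXIO;	/* pretend a resource lookup failed */

	if (ret)
		return dev_err_probe(dev, ret, "failed to get resource\n");
	return 0;
}
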
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index d1be9568025e..3b7e06b9f5ce 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -11,11 +11,10 @@
#include <linux/regulator/of_regulator.h>
#include <linux/soc/qcom/smd-rpm.h>
+static struct qcom_smd_rpm *smd_vreg_rpm;
+
struct qcom_rpm_reg {
struct device *dev;
-
- struct qcom_smd_rpm *rpm;
-
u32 type;
u32 id;
@@ -70,7 +69,7 @@ static int rpm_reg_write_active(struct qcom_rpm_reg *vreg)
if (!reqlen)
return 0;
- ret = qcom_rpm_smd_write(vreg->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ ret = qcom_rpm_smd_write(smd_vreg_rpm, QCOM_SMD_RPM_ACTIVE_STATE,
vreg->type, vreg->id,
req, sizeof(req[0]) * reqlen);
if (!ret) {
@@ -1384,14 +1383,13 @@ MODULE_DEVICE_TABLE(of, rpm_of_match);
* @dev: Pointer to the top level qcom_smd-regulator PMIC device
* @node: Pointer to the individual qcom_smd-regulator resource
* device node
- * @rpm: Pointer to the rpm bus node
* @pmic_rpm_data: Pointer to a null-terminated array of qcom_smd-regulator
* resources defined for the top level PMIC device
*
* Return: 0 on success, errno on failure
*/
static int rpm_regulator_init_vreg(struct qcom_rpm_reg *vreg, struct device *dev,
- struct device_node *node, struct qcom_smd_rpm *rpm,
+ struct device_node *node,
const struct rpm_regulator_data *pmic_rpm_data)
{
struct regulator_config config = {};
@@ -1409,7 +1407,6 @@ static int rpm_regulator_init_vreg(struct qcom_rpm_reg *vreg, struct device *dev
}
vreg->dev = dev;
- vreg->rpm = rpm;
vreg->type = rpm_data->type;
vreg->id = rpm_data->id;
@@ -1449,6 +1446,11 @@ static int rpm_reg_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if (smd_vreg_rpm && rpm != smd_vreg_rpm)
+ return dev_err_probe(dev, -EINVAL, "RPM mismatch\n");
+
+ smd_vreg_rpm = rpm;
+
vreg_data = of_device_get_match_data(dev);
if (!vreg_data)
return -ENODEV;
@@ -1460,8 +1462,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
return -ENOMEM;
}
- ret = rpm_regulator_init_vreg(vreg, dev, node, rpm, vreg_data);
-
+ ret = rpm_regulator_init_vreg(vreg, dev, node, vreg_data);
if (ret < 0) {
of_node_put(node);
return ret;
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index e374fa6e5f28..d89ae7f16d7a 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -1017,14 +1017,14 @@ static const struct regulator_desc rk805_reg[] = {
};
static const struct linear_range rk806_buck_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(500000, 0, 160, 6250), /* 500mV ~ 1500mV */
- REGULATOR_LINEAR_RANGE(1500000, 161, 237, 25000), /* 1500mV ~ 3400mV */
- REGULATOR_LINEAR_RANGE(3400000, 238, 255, 0),
+ REGULATOR_LINEAR_RANGE(500000, 0, 159, 6250), /* 500mV ~ 1500mV */
+ REGULATOR_LINEAR_RANGE(1500000, 160, 235, 25000), /* 1500mV ~ 3400mV */
+ REGULATOR_LINEAR_RANGE(3400000, 236, 255, 0),
};
static const struct linear_range rk806_ldo_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(500000, 0, 232, 12500), /* 500mV ~ 3400mV */
- REGULATOR_LINEAR_RANGE(3400000, 233, 255, 0), /* 500mV ~ 3400mV */
+ REGULATOR_LINEAR_RANGE(500000, 0, 231, 12500), /* 500mV ~ 3400mV */
+ REGULATOR_LINEAR_RANGE(3400000, 232, 255, 0),
};
static const struct regulator_desc rk806_reg[] = {
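
[Example] REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) maps selector s to min_uV + (s - min_sel) * step_uV, so in the old rk806 table selector 160 (end of the first range) and selector 161 (start of the second) both evaluated to 1500000 uV; the corrected boundaries remove that overlap. A hand evaluation, as a sketch:

/* Illustrative: evaluate one linear range entry by hand. */
static unsigned int example_range_to_uV(unsigned int min_uV,
					unsigned int min_sel,
					unsigned int step_uV,
					unsigned int sel)
{
	return min_uV + (sel - min_sel) * step_uV;
}

/* example_range_to_uV(500000, 0, 6250, 159)     -> 1493750 uV
 * example_range_to_uV(1500000, 160, 25000, 160) -> 1500000 uV */
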
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index 53d1b9d6f69c..86a626a4f610 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -208,6 +208,7 @@ static const struct of_device_id regulator_userspace_consumer_of_match[] = {
{ .compatible = "regulator-output", },
{},
};
+MODULE_DEVICE_TABLE(of, regulator_userspace_consumer_of_match);
static struct platform_driver regulator_userspace_consumer_driver = {
.probe = regulator_userspace_consumer_probe,
diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c
index d5caf36c56cd..225c859d6da5 100644
--- a/drivers/rtc/lib_test.c
+++ b/drivers/rtc/lib_test.c
@@ -54,7 +54,7 @@ static void rtc_time64_to_tm_test_date_range(struct kunit *test)
days = div_s64(secs, 86400);
- #define FAIL_MSG "%d/%02d/%02d (%2d) : %ld", \
+ #define FAIL_MSG "%d/%02d/%02d (%2d) : %lld", \
year, month, mday, yday, days
KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
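
[Example] The format fix matters because div_s64() returns s64; on 64-bit builds %ld happens to match long, but on 32-bit builds it is a real format/argument mismatch. s64 always pairs with %lld:

#include <linux/math64.h>
#include <linux/printk.h>

static void example_print_days(s64 secs)
{
	s64 days = div_s64(secs, 86400);

	pr_info("days = %lld\n", days);	/* %lld for s64 on all arches */
}
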
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 7327e81352e9..cead018c3f06 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -8,9 +8,6 @@
* Copyright IBM Corp. 1999, 2009
*/
-#define KMSG_COMPONENT "dasd"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -30,9 +27,6 @@
#include <asm/itcw.h>
#include <asm/diag.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd:"
-
#include "dasd_int.h"
/*
* SECTION: Constant definitions to be used within this file
@@ -313,39 +307,57 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
*/
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
- int rc;
- struct dasd_block *block;
- struct gendisk *disk;
+ struct dasd_block *block = device->block;
+ struct queue_limits lim;
+ int rc = 0;
- rc = 0;
- block = device->block;
/* make disk known with correct capacity */
- if (block) {
- if (block->base->discipline->do_analysis != NULL)
- rc = block->base->discipline->do_analysis(block);
- if (rc) {
- if (rc != -EAGAIN) {
- device->state = DASD_STATE_UNFMT;
- disk = device->block->gdp;
- kobject_uevent(&disk_to_dev(disk)->kobj,
- KOBJ_CHANGE);
- goto out;
- }
- return rc;
- }
- if (device->discipline->setup_blk_queue)
- device->discipline->setup_blk_queue(block);
- set_capacity(block->gdp,
- block->blocks << block->s2b_shift);
+ if (!block) {
device->state = DASD_STATE_READY;
- rc = dasd_scan_partitions(block);
- if (rc) {
- device->state = DASD_STATE_BASIC;
+ goto out;
+ }
+
+ if (block->base->discipline->do_analysis != NULL)
+ rc = block->base->discipline->do_analysis(block);
+ if (rc) {
+ if (rc == -EAGAIN)
return rc;
- }
- } else {
- device->state = DASD_STATE_READY;
+ device->state = DASD_STATE_UNFMT;
+ kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
+ KOBJ_CHANGE);
+ goto out;
+ }
+
+ lim = queue_limits_start_update(block->gdp->queue);
+ lim.max_dev_sectors = device->discipline->max_sectors(block);
+ lim.max_hw_sectors = lim.max_dev_sectors;
+ lim.logical_block_size = block->bp_block;
+
+ if (device->discipline->has_discard) {
+ unsigned int max_bytes;
+
+ lim.discard_granularity = block->bp_block;
+
+ /* Calculate max_discard_sectors and make it PAGE aligned */
+ max_bytes = USHRT_MAX * block->bp_block;
+ max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
+
+ lim.max_hw_discard_sectors = max_bytes / block->bp_block;
+ lim.max_write_zeroes_sectors = lim.max_hw_discard_sectors;
}
+ rc = queue_limits_commit_update(block->gdp->queue, &lim);
+ if (rc)
+ return rc;
+
+ set_capacity(block->gdp, block->blocks << block->s2b_shift);
+ device->state = DASD_STATE_READY;
+
+ rc = dasd_scan_partitions(block);
+ if (rc) {
+ device->state = DASD_STATE_BASIC;
+ return rc;
+ }
+
out:
if (device->discipline->basic_to_ready)
rc = device->discipline->basic_to_ready(device);
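
[Example] This hunk moves dasd to the atomic queue-limits API: instead of poking individual blk_queue_* setters, the driver snapshots the limits, edits the copy, and commits it so the block layer can validate the whole set in one step. A minimal sketch of the pattern:

#include <linux/blkdev.h>

/* Illustrative: update block size and max sectors atomically. */
static int example_update_limits(struct gendisk *disk,
				 unsigned int lbs, unsigned int max_sectors)
{
	struct queue_limits lim;

	lim = queue_limits_start_update(disk->queue);
	lim.logical_block_size = lbs;
	lim.max_hw_sectors = max_sectors;
	return queue_limits_commit_update(disk->queue, &lim);
}
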
@@ -412,7 +424,7 @@ dasd_state_ready_to_online(struct dasd_device * device)
KOBJ_CHANGE);
return 0;
}
- disk_uevent(device->block->bdev_handle->bdev->bd_disk,
+ disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
KOBJ_CHANGE);
}
return 0;
@@ -433,7 +445,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
device->state = DASD_STATE_READY;
if (device->block && !(device->features & DASD_FEATURE_USERAW))
- disk_uevent(device->block->bdev_handle->bdev->bd_disk,
+ disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
KOBJ_CHANGE);
return 0;
}
@@ -1301,7 +1313,6 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
int retries, rc;
- char errorstring[ERRORLENGTH];
/* Check the cqr */
rc = dasd_check_cqr(cqr);
@@ -1340,10 +1351,8 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
rc = 0;
break;
default:
- /* internal error 10 - unknown rc*/
- snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
- dev_err(&device->cdev->dev, "An error occurred in the "
- "DASD device driver, reason=%s\n", errorstring);
+ dev_err(&device->cdev->dev,
+ "Unexpected error during request termination %d\n", rc);
BUG();
break;
}
@@ -1362,7 +1371,6 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
int rc;
- char errorstring[ERRORLENGTH];
/* Check the cqr */
rc = dasd_check_cqr(cqr);
@@ -1382,10 +1390,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
return -EPERM;
}
if (cqr->retries < 0) {
- /* internal error 14 - start_IO run out of retries */
- sprintf(errorstring, "14 %p", cqr);
- dev_err(&device->cdev->dev, "An error occurred in the DASD "
- "device driver, reason=%s\n", errorstring);
+ dev_err(&device->cdev->dev,
+ "Start I/O ran out of retries\n");
cqr->status = DASD_CQR_ERROR;
return -EIO;
}
@@ -1463,11 +1469,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
"not accessible");
break;
default:
- /* internal error 11 - unknown rc */
- snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", errorstring);
+ "Unexpected error during request start %d", rc);
BUG();
break;
}
@@ -1904,8 +1907,6 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
static void __dasd_process_cqr(struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
- char errorstring[ERRORLENGTH];
-
switch (cqr->status) {
case DASD_CQR_SUCCESS:
cqr->status = DASD_CQR_DONE;
@@ -1917,11 +1918,8 @@ static void __dasd_process_cqr(struct dasd_device *device,
cqr->status = DASD_CQR_TERMINATED;
break;
default:
- /* internal error 12 - wrong cqr status*/
- snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", errorstring);
+ "Unexpected CQR status %02x", cqr->status);
BUG();
}
if (cqr->callback)
@@ -1986,16 +1984,14 @@ static void __dasd_device_check_expire(struct dasd_device *device)
if (device->discipline->term_IO(cqr) != 0) {
/* Hmpf, try again in 5 sec */
dev_err(&device->cdev->dev,
- "cqr %p timed out (%lus) but cannot be "
- "ended, retrying in 5 s\n",
- cqr, (cqr->expires/HZ));
+ "CQR timed out (%lus) but cannot be ended, retrying in 5s\n",
+ (cqr->expires / HZ));
cqr->expires += 5*HZ;
dasd_device_set_timer(device, 5*HZ);
} else {
dev_err(&device->cdev->dev,
- "cqr %p timed out (%lus), %i retries "
- "remaining\n", cqr, (cqr->expires/HZ),
- cqr->retries);
+ "CQR timed out (%lus), %i retries remaining\n",
+ (cqr->expires / HZ), cqr->retries);
}
__dasd_device_check_autoquiesce_timeout(device, cqr);
}
@@ -2116,8 +2112,7 @@ int dasd_flush_device_queue(struct dasd_device *device)
if (rc) {
/* unable to terminate request */
dev_err(&device->cdev->dev,
- "Flushing the DASD request queue "
- "failed for request %p\n", cqr);
+ "Flushing the DASD request queue failed\n");
/* stop flush processing */
goto finished;
}
@@ -2633,8 +2628,7 @@ static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
rc = device->discipline->term_IO(cqr);
if (rc) {
dev_err(&device->cdev->dev,
- "Cancelling request %p failed with rc=%d\n",
- cqr, rc);
+ "Cancelling request failed with rc=%d\n", rc);
} else {
cqr->stopclk = get_tod_clock();
}
@@ -3402,8 +3396,7 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
ret = ccw_device_set_online(cdev);
if (ret)
- pr_warn("%s: Setting the DASD online failed with rc=%d\n",
- dev_name(&cdev->dev), ret);
+ dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret);
}
/*
@@ -3490,8 +3483,11 @@ int dasd_generic_set_online(struct ccw_device *cdev,
{
struct dasd_discipline *discipline;
struct dasd_device *device;
+ struct device *dev;
int rc;
+ dev = &cdev->dev;
+
/* first online clears initial online feature flag */
dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
device = dasd_create_device(cdev);
@@ -3504,11 +3500,10 @@ int dasd_generic_set_online(struct ccw_device *cdev,
/* Try to load the required module. */
rc = request_module(DASD_DIAG_MOD);
if (rc) {
- pr_warn("%s Setting the DASD online failed "
- "because the required module %s "
- "could not be loaded (rc=%d)\n",
- dev_name(&cdev->dev), DASD_DIAG_MOD,
- rc);
+ dev_warn(dev, "Setting the DASD online failed "
+ "because the required module %s "
+ "could not be loaded (rc=%d)\n",
+ DASD_DIAG_MOD, rc);
dasd_delete_device(device);
return -ENODEV;
}
@@ -3516,8 +3511,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
/* Module init could have failed, so check again here after
* request_module(). */
if (!dasd_diag_discipline_pointer) {
- pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
- dev_name(&cdev->dev));
+ dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n");
dasd_delete_device(device);
return -ENODEV;
}
@@ -3527,37 +3521,33 @@ int dasd_generic_set_online(struct ccw_device *cdev,
dasd_delete_device(device);
return -EINVAL;
}
+ device->base_discipline = base_discipline;
if (!try_module_get(discipline->owner)) {
- module_put(base_discipline->owner);
dasd_delete_device(device);
return -EINVAL;
}
- device->base_discipline = base_discipline;
device->discipline = discipline;
/* check_device will allocate block device if necessary */
rc = discipline->check_device(device);
if (rc) {
- pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
- dev_name(&cdev->dev), discipline->name, rc);
- module_put(discipline->owner);
- module_put(base_discipline->owner);
+ dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n",
+ discipline->name, rc);
dasd_delete_device(device);
return rc;
}
dasd_set_target_state(device, DASD_STATE_ONLINE);
if (device->state <= DASD_STATE_KNOWN) {
- pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
- dev_name(&cdev->dev));
+ dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n");
rc = -ENODEV;
dasd_set_target_state(device, DASD_STATE_NEW);
if (device->block)
dasd_free_block(device->block);
dasd_delete_device(device);
- } else
- pr_debug("dasd_generic device %s found\n",
- dev_name(&cdev->dev));
+ } else {
+ dev_dbg(dev, "dasd_generic device found\n");
+ }
wait_event(dasd_init_waitq, _wait_for_device(device));
@@ -3568,10 +3558,13 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online);
int dasd_generic_set_offline(struct ccw_device *cdev)
{
+ int max_count, open_count, rc;
struct dasd_device *device;
struct dasd_block *block;
- int max_count, open_count, rc;
unsigned long flags;
+ struct device *dev;
+
+ dev = &cdev->dev;
rc = 0;
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
@@ -3588,15 +3581,14 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* in the other openers.
*/
if (device->block) {
- max_count = device->block->bdev_handle ? 0 : -1;
+ max_count = device->block->bdev_file ? 0 : -1;
open_count = atomic_read(&device->block->open_count);
if (open_count > max_count) {
if (open_count > 0)
- pr_warn("%s: The DASD cannot be set offline with open count %i\n",
- dev_name(&cdev->dev), open_count);
+ dev_warn(dev, "The DASD cannot be set offline with open count %i\n",
+ open_count);
else
- pr_warn("%s: The DASD cannot be set offline while it is in use\n",
- dev_name(&cdev->dev));
+ dev_warn(dev, "The DASD cannot be set offline while it is in use\n");
rc = -EBUSY;
goto out_err;
}
@@ -3634,8 +3626,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* so sync bdev first and then wait for our queues to become
* empty
*/
- if (device->block && device->block->bdev_handle)
- bdev_mark_dead(device->block->bdev_handle->bdev, false);
+ if (device->block && device->block->bdev_file)
+ bdev_mark_dead(file_bdev(device->block->bdev_file), false);
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,
_wait_for_empty_queues(device));
@@ -3956,8 +3948,8 @@ static int dasd_handle_autoquiesce(struct dasd_device *device,
if (dasd_eer_enabled(device))
dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);
- pr_info("%s: The DASD has been put in the quiesce state\n",
- dev_name(&device->cdev->dev));
+ dev_info(&device->cdev->dev,
+ "The DASD has been put in the quiesce state\n");
dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
@@ -3977,10 +3969,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
NULL);
if (IS_ERR(cqr)) {
- /* internal error 13 - Allocating the RDC request failed*/
- dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", "13");
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate RDC request");
return cqr;
}
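
[Example] The pr_*()-to-dev_*() conversions above drop the hand-rolled "%s: " prefixes: dev_warn() and friends already prepend the driver and device name, so messages stay greppable without dev_name() boilerplate. A one-line sketch:

#include <linux/device.h>

static void example_warn(struct device *dev, int rc)
{
	/* Logged with the "<driver> <device>:" prefix automatically. */
	dev_warn(dev, "setting the device online failed with rc=%d\n", rc);
}
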
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 89957bb7244d..459b7f8ac883 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -7,13 +7,9 @@
*
*/
-#define KMSG_COMPONENT "dasd-eckd"
-
#include <linux/timer.h>
#include <asm/idals.h>
-#define PRINTK_HEADER "dasd_erp(3990): "
-
#include "dasd_int.h"
#include "dasd_eckd.h"
@@ -398,7 +394,6 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
struct dasd_device *device = erp->startdev;
char msg_format = (sense[7] & 0xF0);
char msg_no = (sense[7] & 0x0F);
- char errorstring[ERRORLENGTH];
switch (msg_format) {
case 0x00: /* Format 0 - Program or System Checks */
@@ -1004,12 +999,9 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
}
break;
- default: /* unknown message format - should not happen
- internal error 03 - unknown message format */
- snprintf(errorstring, ERRORLENGTH, "03 %x02", msg_format);
+ default:
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", errorstring);
+ "Unknown message format %02x", msg_format);
break;
} /* end switch message format */
@@ -1056,11 +1048,9 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags);
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
} else {
- /* fatal error - set status to FAILED
- internal error 09 - Command Reject */
if (!test_bit(DASD_CQR_SUPPRESS_CR, &erp->flags))
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, reason=09\n");
+ "An I/O command request was rejected\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
}
@@ -1128,13 +1118,7 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
erp->function = dasd_3990_erp_equip_check;
if (sense[1] & SNS1_WRITE_INHIBITED) {
- dev_info(&device->cdev->dev,
- "Write inhibited path encountered\n");
-
- /* vary path offline
- internal error 04 - Path should be varied off-line.*/
- dev_err(&device->cdev->dev, "An error occurred in the DASD "
- "device driver, reason=%s\n", "04");
+ dev_err(&device->cdev->dev, "Write inhibited path encountered\n");
erp = dasd_3990_erp_action_1(erp);
@@ -1285,11 +1269,7 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
erp = dasd_3990_erp_action_4(erp, sense);
} else {
- /* internal error 06 - The track format is not valid*/
- dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", "06");
-
+ dev_err(&device->cdev->dev, "Track format is not valid\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
}
@@ -1663,9 +1643,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
sizeof(struct LO_eckd_data), device);
if (IS_ERR(erp)) {
- /* internal error 01 - Unable to allocate ERP */
- dev_err(&device->cdev->dev, "An error occurred in the DASD "
- "device driver, reason=%s\n", "01");
+ DBF_DEV_EVENT(DBF_ERR, device, "%s",
+ "Unable to allocate ERP request (1B 32)");
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
}
@@ -1807,10 +1786,8 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
cpa = previous_erp->irb.scsw.cmd.cpa;
if (cpa == 0) {
- /* internal error 02 -
- Unable to determine address of the CCW to be restarted */
- dev_err(&device->cdev->dev, "An error occurred in the DASD "
- "device driver, reason=%s\n", "02");
+ dev_err(&device->cdev->dev,
+ "Unable to determine address of to be restarted CCW\n");
previous_erp->status = DASD_CQR_FAILED;
@@ -2009,15 +1986,9 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
{
if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
-
- /* set to suspended duplex state then restart
- internal error 05 - Set device to suspended duplex state
- should be done */
struct dasd_device *device = erp->startdev;
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", "05");
-
+ "Compound configuration error occurred\n");
}
erp->function = dasd_3990_erp_compound_config;
@@ -2153,10 +2124,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
erp = dasd_3990_erp_int_req(erp);
break;
- case 0x0F: /* length mismatch during update write command
- internal error 08 - update write command error*/
- dev_err(&device->cdev->dev, "An error occurred in the "
- "DASD device driver, reason=%s\n", "08");
+ case 0x0F:
+ dev_err(&device->cdev->dev,
+ "Update write command error occurred\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
break;
@@ -2165,12 +2135,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
erp = dasd_3990_erp_action_10_32(erp, sense);
break;
- case 0x15: /* next track outside defined extend
- internal error 07 - The next track is not
- within the defined storage extent */
+ case 0x15:
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", "07");
+ "Track outside defined extent error occurred\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
break;
@@ -2663,7 +2630,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
* necessary
*/
dev_err(&device->cdev->dev,
- "ERP %p has run out of retries and failed\n", erp);
+ "ERP %px has run out of retries and failed\n", erp);
erp->status = DASD_CQR_FAILED;
}
@@ -2704,8 +2671,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
while (erp_done != erp) {
if (erp_done == NULL) /* end of chain reached */
- panic(PRINTK_HEADER "Programming error in ERP! The "
- "original request was lost\n");
+ panic("Programming error in ERP! The original request was lost\n");
/* remove the request from the device queue */
list_del(&erp_done->blocklist);
@@ -2786,11 +2752,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
"ERP chain at BEGINNING of ERP-ACTION\n");
for (temp_erp = cqr;
temp_erp != NULL; temp_erp = temp_erp->refers) {
-
dev_err(&device->cdev->dev,
- "ERP %p (%02x) refers to %p\n",
- temp_erp, temp_erp->status,
- temp_erp->refers);
+ "ERP %px (%02x) refers to %px\n",
+ temp_erp, temp_erp->status, temp_erp->refers);
}
}
@@ -2837,11 +2801,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
"ERP chain at END of ERP-ACTION\n");
for (temp_erp = erp;
temp_erp != NULL; temp_erp = temp_erp->refers) {
-
dev_err(&device->cdev->dev,
- "ERP %p (%02x) refers to %p\n",
- temp_erp, temp_erp->status,
- temp_erp->refers);
+ "ERP %px (%02x) refers to %px\n",
+ temp_erp, temp_erp->status, temp_erp->refers);
}
}
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index c9740ae88d1a..e84cd5436556 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -6,20 +6,12 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
-#define KMSG_COMPONENT "dasd-eckd"
-
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(eckd):"
-
-
/*
* General concept of alias management:
* - PAV and DASD alias management is specific to the eckd discipline.
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index c4e36650c426..0316c20823ee 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -13,8 +13,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -24,8 +22,6 @@
#include <linux/uaccess.h>
#include <asm/ipl.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_devmap:"
#define DASD_MAX_PARAMS 256
#include "dasd_int.h"
@@ -1114,7 +1110,7 @@ dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0;
else
use_diag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USEDIAG) != 0;
- return sprintf(buf, use_diag ? "1\n" : "0\n");
+ return sysfs_emit(buf, use_diag ? "1\n" : "0\n");
}
static ssize_t
@@ -1163,7 +1159,7 @@ dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
else
use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
- return sprintf(buf, use_raw ? "1\n" : "0\n");
+ return sysfs_emit(buf, use_raw ? "1\n" : "0\n");
}
static ssize_t
@@ -1259,7 +1255,7 @@ dasd_access_show(struct device *dev, struct device_attribute *attr,
if (count < 0)
return count;
- return sprintf(buf, "%d\n", count);
+ return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR(host_access_count, 0444, dasd_access_show, NULL);
@@ -1338,19 +1334,19 @@ static ssize_t dasd_alias_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid)) {
if (uid.type == UA_BASE_PAV_ALIAS ||
uid.type == UA_HYPER_PAV_ALIAS) {
dasd_put_device(device);
- return sprintf(buf, "1\n");
+ return sysfs_emit(buf, "1\n");
}
}
dasd_put_device(device);
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
}
static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
@@ -1412,15 +1408,9 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
break;
}
- if (strlen(uid.vduit) > 0)
- snprintf(uid_string, sizeof(uid_string),
- "%s.%s.%04x.%s.%s",
- uid.vendor, uid.serial, uid.ssid, ua_string,
- uid.vduit);
- else
- snprintf(uid_string, sizeof(uid_string),
- "%s.%s.%04x.%s",
- uid.vendor, uid.serial, uid.ssid, ua_string);
+ snprintf(uid_string, sizeof(uid_string), "%s.%s.%04x.%s%s%s",
+ uid.vendor, uid.serial, uid.ssid, ua_string,
+ uid.vduit[0] ? "." : "", uid.vduit);
}
dasd_put_device(device);
@@ -1862,7 +1852,7 @@ static ssize_t dasd_pm_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
opm = dasd_path_get_opm(device);
nppm = dasd_path_get_nppm(device);
@@ -1872,8 +1862,8 @@ static ssize_t dasd_pm_show(struct device *dev,
ifccpm = dasd_path_get_ifccpm(device);
dasd_put_device(device);
- return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
- cablepm, cuirpm, hpfpm, ifccpm);
+ return sysfs_emit(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
+ cablepm, cuirpm, hpfpm, ifccpm);
}
static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
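
[Example] sysfs_emit() is the recommended replacement for sprintf() in sysfs show() callbacks: it knows the buffer is exactly one page and bails out (returning 0) instead of overflowing on a bad offset. A sketch:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);
}
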
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 2e4e555b37c3..ea4b1d01bb76 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,8 +8,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/kernel_stat.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
@@ -31,8 +29,6 @@
#include "dasd_int.h"
#include "dasd_diag.h"
-#define PRINTK_HEADER "dasd(diag):"
-
MODULE_LICENSE("GPL");
/* The maximum number of blocks per request (max_blocks) is dependent on the
@@ -621,25 +617,9 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
"dump sense not available for DIAG data");
}
-/*
- * Initialize block layer request queue.
- */
-static void dasd_diag_setup_blk_queue(struct dasd_block *block)
+static unsigned int dasd_diag_max_sectors(struct dasd_block *block)
{
- unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->gdp->queue;
- int max;
-
- max = DIAG_MAX_BLOCKS << block->s2b_shift;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- q->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(q, logical_block_size);
- blk_queue_max_hw_sectors(q, max);
- blk_queue_max_segments(q, USHRT_MAX);
- /* With page sized segments each segment can be translated into one idaw/tidaw */
- blk_queue_max_segment_size(q, PAGE_SIZE);
- blk_queue_segment_boundary(q, PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+ return DIAG_MAX_BLOCKS << block->s2b_shift;
}
static int dasd_diag_pe_handler(struct dasd_device *device,
@@ -652,10 +632,10 @@ static struct dasd_discipline dasd_diag_discipline = {
.owner = THIS_MODULE,
.name = "DIAG",
.ebcname = "DIAG",
+ .max_sectors = dasd_diag_max_sectors,
.check_device = dasd_diag_check_device,
.pe_handler = dasd_diag_pe_handler,
.fill_geometry = dasd_diag_fill_geometry,
- .setup_blk_queue = dasd_diag_setup_blk_queue,
.start_IO = dasd_start_diag,
.term_IO = dasd_diag_term_IO,
.handle_terminated_request = dasd_diag_handle_terminated_request,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bd89b032968a..373c1a86c33e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -10,8 +10,6 @@
* Author.........: Nigel Hislop <hislop_nigel@emc.com>
*/
-#define KMSG_COMPONENT "dasd-eckd"
-
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -37,11 +35,6 @@
#include "dasd_int.h"
#include "dasd_eckd.h"
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(eckd):"
-
/*
* raw track access always map to 64k in memory
* so it maps to 16 blocks of 4k per track
@@ -1072,22 +1065,14 @@ static void dasd_eckd_read_fc_security(struct dasd_device *device)
}
}
-static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
- char *print_uid)
+static void dasd_eckd_get_uid_string(struct dasd_conf *conf, char *print_uid)
{
struct dasd_uid uid;
create_uid(conf, &uid);
- if (strlen(uid.vduit) > 0)
- snprintf(print_uid, DASD_UID_STRLEN,
- "%s.%s.%04x.%02x.%s",
- uid.vendor, uid.serial, uid.ssid,
- uid.real_unit_addr, uid.vduit);
- else
- snprintf(print_uid, DASD_UID_STRLEN,
- "%s.%s.%04x.%02x",
- uid.vendor, uid.serial, uid.ssid,
- uid.real_unit_addr);
+ snprintf(print_uid, DASD_UID_STRLEN, "%s.%s.%04x.%02x%s%s",
+ uid.vendor, uid.serial, uid.ssid, uid.real_unit_addr,
+ uid.vduit[0] ? "." : "", uid.vduit);
}
static int dasd_eckd_check_cabling(struct dasd_device *device,
@@ -5529,15 +5514,15 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
* and return number of printed chars.
*/
static void
-dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
+dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from,
+ struct ccw1 *to, char *page)
{
int len, count;
char *datap;
len = 0;
while (from <= to) {
- len += sprintf(page + len, PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
+ len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
from, ((int *) from)[0], ((int *) from)[1]);
/* get pointer to data (consider IDALs) */
@@ -5560,7 +5545,7 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
from++;
}
if (len > 0)
- printk(KERN_ERR "%s", page);
+ dev_err(&device->cdev->dev, "%s", page);
}
static void
@@ -5591,9 +5576,12 @@ dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
- char *page;
struct ccw1 *first, *last, *fail, *from, *to;
+ struct device *dev;
int len, sl, sct;
+ char *page;
+
+ dev = &device->cdev->dev;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
@@ -5602,24 +5590,18 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
return;
}
/* dump the sense data */
- len = sprintf(page, PRINTK_HEADER
- " I/O status report for device %s:\n",
- dev_name(&device->cdev->dev));
- len += sprintf(page + len, PRINTK_HEADER
- " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
- "CS:%02X RC:%d\n",
+ len = sprintf(page, "I/O status report:\n");
+ len += sprintf(page + len,
+ "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X CS:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
req ? req->intrc : 0);
- len += sprintf(page + len, PRINTK_HEADER
- " device %s: Failing CCW: %p\n",
- dev_name(&device->cdev->dev),
+ len += sprintf(page + len, "Failing CCW: %px\n",
phys_to_virt(irb->scsw.cmd.cpa));
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, PRINTK_HEADER
- " Sense(hex) %2d-%2d:",
+ len += sprintf(page + len, "Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
@@ -5631,23 +5613,20 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
if (irb->ecw[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
- sprintf(page + len, PRINTK_HEADER
- " 24 Byte: %x MSG %x, "
- "%s MSGb to SYSOP\n",
+ sprintf(page + len,
+ "24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
irb->ecw[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
- sprintf(page + len, PRINTK_HEADER
- " 32 Byte: Format: %x "
- "Exception class %x\n",
+ sprintf(page + len,
+ "32 Byte: Format: %x Exception class %x\n",
irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
}
} else {
- sprintf(page + len, PRINTK_HEADER
- " SORRY - NO VALID SENSE AVAILABLE\n");
+ sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
}
- printk(KERN_ERR "%s", page);
+ dev_err(dev, "%s", page);
if (req) {
/* req == NULL for unsolicited interrupts */
@@ -5656,8 +5635,8 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
first = req->cpaddr;
for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
to = min(first + 6, last);
- printk(KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req);
- dasd_eckd_dump_ccw_range(first, to, page);
+ dev_err(dev, "Related CP in req: %px\n", req);
+ dasd_eckd_dump_ccw_range(device, first, to, page);
/* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
@@ -5665,19 +5644,19 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
- printk(KERN_ERR PRINTK_HEADER "......\n");
+ dev_err(dev, "......\n");
}
to = min(fail + 1, last);
- dasd_eckd_dump_ccw_range(from, to, page + len);
+ dasd_eckd_dump_ccw_range(device, from, to, page + len);
/* print last CCWs (maximum 2) */
len = 0;
from = max(from, ++to);
if (from < last - 1) {
from = last - 1; /* there is a gap - print header */
- printk(KERN_ERR PRINTK_HEADER "......\n");
+ dev_err(dev, "......\n");
}
- dasd_eckd_dump_ccw_range(from, last, page + len);
+ dasd_eckd_dump_ccw_range(device, from, last, page + len);
}
free_page((unsigned long) page);
}
@@ -5701,11 +5680,9 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
return;
}
/* dump the sense data */
- len = sprintf(page, PRINTK_HEADER
- " I/O status report for device %s:\n",
- dev_name(&device->cdev->dev));
- len += sprintf(page + len, PRINTK_HEADER
- " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+ len = sprintf(page, "I/O status report:\n");
+ len += sprintf(page + len,
+ "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
"CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
@@ -5713,9 +5690,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
irb->scsw.tm.fcxs,
(irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
req ? req->intrc : 0);
- len += sprintf(page + len, PRINTK_HEADER
- " device %s: Failing TCW: %p\n",
- dev_name(&device->cdev->dev),
+ len += sprintf(page + len, "Failing TCW: %px\n",
phys_to_virt(irb->scsw.tm.tcw));
tsb = NULL;
@@ -5724,47 +5699,37 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
if (tsb) {
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->length %d\n", tsb->length);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->flags %x\n", tsb->flags);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->dcw_offset %d\n", tsb->dcw_offset);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->count %d\n", tsb->count);
+ len += sprintf(page + len, "tsb->length %d\n", tsb->length);
+ len += sprintf(page + len, "tsb->flags %x\n", tsb->flags);
+ len += sprintf(page + len, "tsb->dcw_offset %d\n", tsb->dcw_offset);
+ len += sprintf(page + len, "tsb->count %d\n", tsb->count);
residual = tsb->count - 28;
- len += sprintf(page + len, PRINTK_HEADER
- " residual %d\n", residual);
+ len += sprintf(page + len, "residual %d\n", residual);
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.dev_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.dev_time %d\n",
tsb->tsa.iostat.dev_time);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.def_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.def_time %d\n",
tsb->tsa.iostat.def_time);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.queue_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.queue_time %d\n",
tsb->tsa.iostat.queue_time);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.dev_busy_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.dev_busy_time %d\n",
tsb->tsa.iostat.dev_busy_time);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.dev_act_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.dev_act_time %d\n",
tsb->tsa.iostat.dev_act_time);
sense = tsb->tsa.iostat.sense;
break;
case 2: /* ts_ddpc */
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
+ len += sprintf(page + len, "tsb->tsa.ddpc.rc %d\n",
+ tsb->tsa.ddpc.rc);
for (sl = 0; sl < 2; sl++) {
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.ddpc.rcq %2d-%2d: ",
+ len += sprintf(page + len,
+ "tsb->tsa.ddpc.rcq %2d-%2d: ",
(8 * sl), ((8 * sl) + 7));
rcq = tsb->tsa.ddpc.rcq;
for (sct = 0; sct < 8; sct++) {
- len += sprintf(page + len, " %02x",
+ len += sprintf(page + len, "%02x",
rcq[8 * sl + sct]);
}
len += sprintf(page + len, "\n");
@@ -5772,15 +5737,15 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
sense = tsb->tsa.ddpc.sense;
break;
case 3: /* tsa_intrg */
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.intrg.: not supported yet\n");
+ len += sprintf(page + len,
+ "tsb->tsa.intrg.: not supported yet\n");
break;
}
if (sense) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, PRINTK_HEADER
- " Sense(hex) %2d-%2d:",
+ len += sprintf(page + len,
+ "Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
len += sprintf(page + len, " %02x",
@@ -5791,27 +5756,23 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
if (sense[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
- sprintf(page + len, PRINTK_HEADER
- " 24 Byte: %x MSG %x, "
- "%s MSGb to SYSOP\n",
+ sprintf(page + len,
+ "24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
sense[7] >> 4, sense[7] & 0x0f,
sense[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
- sprintf(page + len, PRINTK_HEADER
- " 32 Byte: Format: %x "
- "Exception class %x\n",
+ sprintf(page + len,
+ "32 Byte: Format: %x Exception class %x\n",
sense[6] & 0x0f, sense[22] >> 4);
}
} else {
- sprintf(page + len, PRINTK_HEADER
- " SORRY - NO VALID SENSE AVAILABLE\n");
+ sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
}
} else {
- sprintf(page + len, PRINTK_HEADER
- " SORRY - NO TSB DATA AVAILABLE\n");
+ sprintf(page + len, "SORRY - NO TSB DATA AVAILABLE\n");
}
- printk(KERN_ERR "%s", page);
+ dev_err(&device->cdev->dev, "%s", page);
free_page((unsigned long) page);
}
@@ -6865,17 +6826,9 @@ static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
dasd_schedule_requeue(device);
}
-/*
- * Initialize block layer request queue.
- */
-static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
+static unsigned int dasd_eckd_max_sectors(struct dasd_block *block)
{
- unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->gdp->queue;
- struct dasd_device *device = block->base;
- int max;
-
- if (device->features & DASD_FEATURE_USERAW) {
+ if (block->base->features & DASD_FEATURE_USERAW) {
/*
* the max_blocks value for raw_track access is 256
* it is higher than the native ECKD value because we
@@ -6883,19 +6836,10 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
* so the max_hw_sectors are
* 2048 x 512B = 1024kB = 16 tracks
*/
- max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
- } else {
- max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
+ return DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
}
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- q->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(q, logical_block_size);
- blk_queue_max_hw_sectors(q, max);
- blk_queue_max_segments(q, USHRT_MAX);
- /* With page sized segments each segment can be translated into one idaw/tidaw */
- blk_queue_max_segment_size(q, PAGE_SIZE);
- blk_queue_segment_boundary(q, PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+
+ return DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
}
static struct ccw_driver dasd_eckd_driver = {
@@ -6927,7 +6871,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.basic_to_ready = dasd_eckd_basic_to_ready,
.online_to_ready = dasd_eckd_online_to_ready,
.basic_to_known = dasd_eckd_basic_to_known,
- .setup_blk_queue = dasd_eckd_setup_blk_queue,
+ .max_sectors = dasd_eckd_max_sectors,
.fill_geometry = dasd_eckd_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
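The dump-sense rework in dasd_eckd.c trades printk(KERN_ERR PRINTK_HEADER ...) for dev_err(), so the message prefix is derived from the device itself rather than from a per-file macro. A sketch of the difference (the exact prefix text is illustrative):

	/* old style: hand-rolled prefix, no device association */
	printk(KERN_ERR "dasd(eckd): I/O error\n");

	/* new style: driver and device name are prepended automatically,
	 * roughly "dasd-eckd 0.0.1234: I/O error" */
	dev_err(&device->cdev->dev, "I/O error\n");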
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index c956de711cf7..5064a616e041 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -7,8 +7,6 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
-#define KMSG_COMPONENT "dasd-eckd"
-
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
@@ -28,11 +26,6 @@
#include "dasd_int.h"
#include "dasd_eckd.h"
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(eer):"
-
/*
* SECTION: the internal buffer
*/
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index c07e6e713518..4c0d3a704513 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -9,8 +9,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/ctype.h>
#include <linux/init.h>
@@ -18,9 +16,6 @@
#include <asm/ebcdic.h>
#include <linux/uaccess.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_erp:"
-
#include "dasd_int.h"
struct dasd_ccw_req *
@@ -170,12 +165,12 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
device = cqr->startdev;
if (cqr->intrc == -ETIMEDOUT) {
dev_err(&device->cdev->dev,
- "A timeout error occurred for cqr %p\n", cqr);
+ "A timeout error occurred for cqr %px\n", cqr);
return;
}
if (cqr->intrc == -ENOLINK) {
dev_err(&device->cdev->dev,
- "A transport error occurred for cqr %p\n", cqr);
+ "A transport error occurred for cqr %px\n", cqr);
return;
}
/* dump sense data */
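The %p -> %px switches in these dev_err() calls are deliberate: plain %p hashes pointer values by default, which makes a cqr address in an error message useless for correlating with other debug output. %px prints the raw address and is meant for exactly this kind of trusted debug dump. Sketch:

	#include <linux/printk.h>

	static void show_req_ptr(void *req)
	{
		pr_err("req: %p\n", req);  /* hashed to avoid address leaks */
		pr_err("req: %px\n", req); /* raw address, debug dumps only */
	}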
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index c06fa2b27120..bcbb2f8e91fe 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -25,11 +25,6 @@
#include "dasd_int.h"
#include "dasd_fba.h"
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(fba):"
-
#define FBA_DEFAULT_RETRIES 32
#define DASD_FBA_CCW_WRITE 0x41
@@ -660,30 +655,27 @@ static void
dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
struct irb *irb)
{
- char *page;
struct ccw1 *act, *end, *last;
int len, sl, sct, count;
+ struct device *dev;
+ char *page;
+
+ dev = &device->cdev->dev;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
- "No memory to dump sense data");
+ "No memory to dump sense data");
return;
}
- len = sprintf(page, PRINTK_HEADER
- " I/O status report for device %s:\n",
- dev_name(&device->cdev->dev));
- len += sprintf(page + len, PRINTK_HEADER
- " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
- irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
- len += sprintf(page + len, PRINTK_HEADER
- " device %s: Failing CCW: %p\n",
- dev_name(&device->cdev->dev),
+ len = sprintf(page, "I/O status report:\n");
+ len += sprintf(page + len, "in req: %px CS: 0x%02X DS: 0x%02X\n",
+ req, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
+ len += sprintf(page + len, "Failing CCW: %px\n",
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, PRINTK_HEADER
- " Sense(hex) %2d-%2d:",
+ len += sprintf(page + len, "Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
@@ -693,20 +685,18 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len += sprintf(page + len, "\n");
}
} else {
- len += sprintf(page + len, PRINTK_HEADER
- " SORRY - NO VALID SENSE AVAILABLE\n");
+ len += sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
}
- printk(KERN_ERR "%s", page);
+ dev_err(dev, "%s", page);
/* dump the Channel Program */
/* print first CCWs (maximum 8) */
act = req->cpaddr;
- for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
+ for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
end = min(act + 8, last);
- len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
+ len = sprintf(page, "Related CP in req: %px\n", req);
while (act <= end) {
- len += sprintf(page + len, PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
+ len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
@@ -716,19 +706,17 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len += sprintf(page + len, "\n");
act++;
}
- printk(KERN_ERR "%s", page);
-
+ dev_err(dev, "%s", page);
/* print failing CCW area */
len = 0;
if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
- len += sprintf(page + len, PRINTK_HEADER "......\n");
+ len += sprintf(page + len, "......\n");
}
end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
while (act <= end) {
- len += sprintf(page + len, PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
+ len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
@@ -742,11 +730,10 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
/* print last CCWs */
if (act < last - 2) {
act = last - 2;
- len += sprintf(page + len, PRINTK_HEADER "......\n");
+ len += sprintf(page + len, "......\n");
}
while (act <= last) {
- len += sprintf(page + len, PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
+ len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
@@ -757,39 +744,13 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
act++;
}
if (len > 0)
- printk(KERN_ERR "%s", page);
+ dev_err(dev, "%s", page);
free_page((unsigned long) page);
}
-/*
- * Initialize block layer request queue.
- */
-static void dasd_fba_setup_blk_queue(struct dasd_block *block)
+static unsigned int dasd_fba_max_sectors(struct dasd_block *block)
{
- unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->gdp->queue;
- unsigned int max_bytes, max_discard_sectors;
- int max;
-
- max = DASD_FBA_MAX_BLOCKS << block->s2b_shift;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- q->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(q, logical_block_size);
- blk_queue_max_hw_sectors(q, max);
- blk_queue_max_segments(q, USHRT_MAX);
- /* With page sized segments each segment can be translated into one idaw/tidaw */
- blk_queue_max_segment_size(q, PAGE_SIZE);
- blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-
- q->limits.discard_granularity = logical_block_size;
-
- /* Calculate max_discard_sectors and make it PAGE aligned */
- max_bytes = USHRT_MAX * logical_block_size;
- max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
- max_discard_sectors = max_bytes / logical_block_size;
-
- blk_queue_max_discard_sectors(q, max_discard_sectors);
- blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+ return DASD_FBA_MAX_BLOCKS << block->s2b_shift;
}
static int dasd_fba_pe_handler(struct dasd_device *device,
@@ -802,10 +763,11 @@ static struct dasd_discipline dasd_fba_discipline = {
.owner = THIS_MODULE,
.name = "FBA ",
.ebcname = "FBA ",
+ .has_discard = true,
.check_device = dasd_fba_check_characteristics,
.do_analysis = dasd_fba_do_analysis,
.pe_handler = dasd_fba_pe_handler,
- .setup_blk_queue = dasd_fba_setup_blk_queue,
+ .max_sectors = dasd_fba_max_sectors,
.fill_geometry = dasd_fba_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 55e3abe94cde..4533dd055ca8 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -11,8 +11,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/fs.h>
@@ -20,9 +18,6 @@
#include <linux/uaccess.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_gendisk:"
-
#include "dasd_int.h"
static unsigned int queue_depth = 32;
@@ -39,6 +34,16 @@ MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD d
*/
int dasd_gendisk_alloc(struct dasd_block *block)
{
+ struct queue_limits lim = {
+ /*
+ * With page sized segments, each segment can be translated into
+ * one idaw/tidaw.
+ */
+ .max_segment_size = PAGE_SIZE,
+ .seg_boundary_mask = PAGE_SIZE - 1,
+ .dma_alignment = PAGE_SIZE - 1,
+ .max_segments = USHRT_MAX,
+ };
struct gendisk *gdp;
struct dasd_device *base;
int len, rc;
@@ -58,11 +63,12 @@ int dasd_gendisk_alloc(struct dasd_block *block)
if (rc)
return rc;
- gdp = blk_mq_alloc_disk(&block->tag_set, block);
+ gdp = blk_mq_alloc_disk(&block->tag_set, &lim, block);
if (IS_ERR(gdp)) {
blk_mq_free_tag_set(&block->tag_set);
return PTR_ERR(gdp);
}
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, gdp->queue);
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;
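This hunk carries the core idea of the series: queue limits that used to be poked into a live queue one blk_queue_*() call at a time are now handed to the block layer as a struct queue_limits at disk allocation, where they can be validated in one go. A minimal sketch for a hypothetical blk-mq driver, assuming an already initialized tag set:

	struct queue_limits lim = {
		/* fields left unset keep the block layer defaults */
		.logical_block_size = 4096,
		.max_hw_sectors     = 2048,
		.max_segments       = USHRT_MAX,
		.max_segment_size   = PAGE_SIZE,
		.seg_boundary_mask  = PAGE_SIZE - 1,
	};
	struct gendisk *gdp;

	gdp = blk_mq_alloc_disk(&tag_set, &lim, driver_data);
	if (IS_ERR(gdp))
		return PTR_ERR(gdp);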
@@ -127,15 +133,15 @@ void dasd_gendisk_free(struct dasd_block *block)
*/
int dasd_scan_partitions(struct dasd_block *block)
{
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
int rc;
- bdev_handle = bdev_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ,
+ bdev_file = bdev_file_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ,
NULL, NULL);
- if (IS_ERR(bdev_handle)) {
+ if (IS_ERR(bdev_file)) {
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, blkdev_get returned %ld",
- PTR_ERR(bdev_handle));
+ PTR_ERR(bdev_file));
return -ENODEV;
}
@@ -147,15 +153,15 @@ int dasd_scan_partitions(struct dasd_block *block)
"scan partitions error, rc %d", rc);
/*
- * Since the matching bdev_release() call to the
- * bdev_open_by_path() in this function is not called before
+ * Since the matching fput() call to the
+ * bdev_file_open_by_dev() in this function is not called before
* dasd_destroy_partitions the offline open_count limit needs to be
- * increased from 0 to 1. This is done by setting device->bdev_handle
+ * increased from 0 to 1. This is done by setting device->bdev_file
* (see dasd_generic_set_offline). As long as the partition detection
* is running no offline should be allowed. That is why the assignment
- * to block->bdev_handle is done AFTER the BLKRRPART ioctl.
+ * to block->bdev_file is done AFTER the BLKRRPART ioctl.
*/
- block->bdev_handle = bdev_handle;
+ block->bdev_file = bdev_file;
return 0;
}
@@ -165,21 +171,21 @@ int dasd_scan_partitions(struct dasd_block *block)
*/
void dasd_destroy_partitions(struct dasd_block *block)
{
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
/*
- * Get the bdev_handle pointer from the device structure and clear
- * device->bdev_handle to lower the offline open_count limit again.
+ * Get the bdev_file pointer from the device structure and clear
+ * device->bdev_file to lower the offline open_count limit again.
*/
- bdev_handle = block->bdev_handle;
- block->bdev_handle = NULL;
+ bdev_file = block->bdev_file;
+ block->bdev_file = NULL;
- mutex_lock(&bdev_handle->bdev->bd_disk->open_mutex);
- bdev_disk_changed(bdev_handle->bdev->bd_disk, true);
- mutex_unlock(&bdev_handle->bdev->bd_disk->open_mutex);
+ mutex_lock(&file_bdev(bdev_file)->bd_disk->open_mutex);
+ bdev_disk_changed(file_bdev(bdev_file)->bd_disk, true);
+ mutex_unlock(&file_bdev(bdev_file)->bd_disk->open_mutex);
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
- bdev_release(bdev_handle);
+ fput(bdev_file);
}
int dasd_gendisk_init(void)
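The bdev_handle -> struct file conversion above follows the block layer's move to representing an open block device as an ordinary file. A sketch of the open/use/close lifecycle mirroring dasd_scan_partitions() and dasd_destroy_partitions(), with a hypothetical helper:

	static int peek_disk(dev_t devt)
	{
		struct file *bdev_file;

		bdev_file = bdev_file_open_by_dev(devt, BLK_OPEN_READ,
						  NULL, NULL);
		if (IS_ERR(bdev_file))
			return PTR_ERR(bdev_file);

		/* file_bdev() recovers the block_device behind the file */
		pr_info("disk: %s\n",
			file_bdev(bdev_file)->bd_disk->disk_name);

		fput(bdev_file); /* drops the reference, closes the device */
		return 0;
	}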
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 1b1b8a41c4d4..e5f40536b425 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -113,9 +113,6 @@ do { \
__dev_id.ssid, __dev_id.devno, d_data); \
} while (0)
-/* limit size for an errorstring */
-#define ERRORLENGTH 30
-
/* definition of dbf debug levels */
#define DBF_EMERG 0 /* system is unusable */
#define DBF_ALERT 1 /* action must be taken immediately */
@@ -126,32 +123,6 @@ do { \
#define DBF_INFO 6 /* informational */
#define DBF_DEBUG 6 /* debug-level messages */
-/* messages to be written via klogd and dbf */
-#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
-do { \
- printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
- dev_name(&d_device->cdev->dev), d_args); \
- DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
-} while(0)
-
-#define MESSAGE(d_loglevel,d_string,d_args...)\
-do { \
- printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
- DBF_EVENT(DBF_ALERT, d_string, d_args); \
-} while(0)
-
-/* messages to be written via klogd only */
-#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
-do { \
- printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
- dev_name(&d_device->cdev->dev), d_args); \
-} while(0)
-
-#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
-do { \
- printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
-} while(0)
-
/* Macro to calculate number of blocks per page */
#define BLOCKS_PER_PAGE(blksize) (PAGE_SIZE / blksize)
@@ -322,6 +293,7 @@ struct dasd_discipline {
struct module *owner;
char ebcname[8]; /* a name used for tagging and printks */
char name[8]; /* a name used for tagging and printks */
+ bool has_discard;
struct list_head list; /* used for list of disciplines */
@@ -360,10 +332,7 @@ struct dasd_discipline {
int (*online_to_ready) (struct dasd_device *);
int (*basic_to_known)(struct dasd_device *);
- /*
- * Initialize block layer request queue.
- */
- void (*setup_blk_queue)(struct dasd_block *);
+ unsigned int (*max_sectors)(struct dasd_block *);
/* (struct dasd_device *);
* Device operation functions. build_cp creates a ccw chain for
* a block device request, start_io starts the request and
@@ -650,7 +619,7 @@ struct dasd_block {
struct gendisk *gdp;
spinlock_t request_queue_lock;
struct blk_mq_tag_set tag_set;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
atomic_t open_count;
unsigned long blocks; /* size of volume in blocks */
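With ->setup_blk_queue() gone, a discipline only reports its per-request sector ceiling via ->max_sectors(), and the common dasd code folds that into the queue_limits it builds for the disk. The caller side is not part of this hunk; a hedged sketch of how it plausibly looks:

	/* hypothetical core-side use of the new ->max_sectors() op */
	struct queue_limits lim = {
		.max_segment_size  = PAGE_SIZE,
		.seg_boundary_mask = PAGE_SIZE - 1,
		.max_segments      = USHRT_MAX,
	};

	lim.logical_block_size = block->bp_block;
	lim.max_dev_sectors = block->base->discipline->max_sectors(block);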
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 61b9675e2a67..7e0ed7032f76 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -10,8 +10,6 @@
* i/o controls for the dasd driver.
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/major.h>
@@ -24,12 +22,8 @@
#include <linux/uaccess.h>
#include <linux/dasd_mod.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_ioctl:"
-
#include "dasd_int.h"
-
static int
dasd_ioctl_api_version(void __user *argp)
{
@@ -537,7 +531,7 @@ static int __dasd_ioctl_information(struct dasd_block *block,
* This must be hidden from user-space.
*/
dasd_info->open_count = atomic_read(&block->open_count);
- if (!block->bdev_handle)
+ if (!block->bdev_file)
dasd_info->open_count++;
/*
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 62a859ea67f8..0faaa437d9be 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -11,8 +11,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -23,9 +21,6 @@
#include <asm/debug.h>
#include <linux/uaccess.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_proc:"
-
#include "dasd_int.h"
static struct proc_dir_entry *dasd_proc_root_entry = NULL;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4b7ecd4fd431..9c8f529b827c 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -546,6 +546,9 @@ static const struct attribute_group *dcssblk_dev_attr_groups[] = {
static ssize_t
dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
+ struct queue_limits lim = {
+ .logical_block_size = 4096,
+ };
int rc, i, j, num_of_segments;
struct dcssblk_dev_info *dev_info;
struct segment_info *seg_info, *temp;
@@ -629,9 +632,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->dev.release = dcssblk_release_segment;
dev_info->dev.groups = dcssblk_dev_attr_groups;
INIT_LIST_HEAD(&dev_info->lh);
- dev_info->gd = blk_alloc_disk(NUMA_NO_NODE);
- if (dev_info->gd == NULL) {
- rc = -ENOMEM;
+ dev_info->gd = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(dev_info->gd)) {
+ rc = PTR_ERR(dev_info->gd);
goto seg_list_del;
}
dev_info->gd->major = dcssblk_major;
@@ -639,7 +642,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->fops = &dcssblk_devops;
dev_info->gd->private_data = dev_info;
dev_info->gd->flags |= GENHD_FL_NO_PART;
- blk_queue_logical_block_size(dev_info->gd->queue, 4096);
blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue);
seg_byte_size = (dev_info->end - dev_info->start + 1);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index ade95e91b3c8..9f6fdd0daa74 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -435,10 +435,17 @@ static const struct blk_mq_ops scm_mq_ops = {
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
- unsigned int devindex, nr_max_blk;
+ struct queue_limits lim = {
+ .logical_block_size = 1 << 12,
+ };
+ unsigned int devindex;
struct request_queue *rq;
int len, ret;
+ lim.max_segments = min(scmdev->nr_max_block,
+ (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
+ lim.max_hw_sectors = lim.max_segments << 3; /* 8 * 512 = blk_size */
+
devindex = atomic_inc_return(&nr_devices) - 1;
/* scma..scmz + scmaa..scmzz */
if (devindex > 701) {
@@ -462,18 +469,12 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
if (ret)
goto out;
- bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, scmdev);
+ bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, &lim, scmdev);
if (IS_ERR(bdev->gendisk)) {
ret = PTR_ERR(bdev->gendisk);
goto out_tag;
}
rq = bdev->rq = bdev->gendisk->queue;
- nr_max_blk = min(scmdev->nr_max_block,
- (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
-
- blk_queue_logical_block_size(rq, 1 << 12);
- blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
- blk_queue_max_segments(rq, nr_max_blk);
blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 82efdd20ad01..1d17a83569ce 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -195,7 +195,7 @@ static void free_chan_prog(struct ccw1 *cpa)
struct ccw1 *ptr = cpa;
while (ptr->cda) {
- kfree((void *)(addr_t) ptr->cda);
+ kfree(phys_to_virt(ptr->cda));
ptr++;
}
kfree(cpa);
@@ -237,7 +237,7 @@ static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
free_chan_prog(cpa);
return ERR_PTR(-ENOMEM);
}
- cpa[i].cda = (u32)(addr_t) kbuf;
+ cpa[i].cda = (u32)virt_to_phys(kbuf);
if (copy_from_user(kbuf, ubuf, reclen)) {
free_chan_prog(cpa);
return ERR_PTR(-EFAULT);
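The vmur change is an address-space correctness fix: the channel subsystem interprets ccw1.cda as a physical address while the kernel works with virtual ones, so stores into cda need virt_to_phys() and reads back need phys_to_virt(). On identity-mapped kernels the old raw casts happened to work. Sketch, with a hypothetical buffer setup:

	static int fill_ccw(struct ccw1 *ccw)
	{
		void *kbuf = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);

		if (!kbuf)
			return -ENOMEM;
		/* the channel subsystem reads cda as a physical address */
		ccw->cda = (u32)virt_to_phys(kbuf);
		return 0;
	}

	/* and on teardown, convert back before freeing: */
	kfree(phys_to_virt(ccw->cda));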
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index bc3be0330f1d..0969fa01df58 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -29,7 +29,6 @@
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include <asm/os_info.h>
-#include <asm/switch_to.h>
#include <asm/maccess.h>
#include "sclp.h"
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index aa3292e57e38..6eb8bcd948dc 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -31,7 +31,7 @@
* to devices that use multiple subchannels.
*/
-static struct bus_type ccwgroup_bus_type;
+static const struct bus_type ccwgroup_bus_type;
static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
@@ -465,7 +465,7 @@ static void ccwgroup_shutdown(struct device *dev)
gdrv->shutdown(gdev);
}
-static struct bus_type ccwgroup_bus_type = {
+static const struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.dev_groups = ccwgroup_dev_groups,
.remove = ccwgroup_remove,
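The struct bus_type constifications through this series are possible because the driver core now takes const struct bus_type * throughout; moving the object into rodata lets the compiler reject stray writes. Pattern, for a hypothetical bus:

	static const struct bus_type example_bus_type = {
		.name = "example",
	};

	static int __init example_init(void)
	{
		/* bus_register() accepts a const bus_type */
		return bus_register(&example_bus_type);
	}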
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 64ed55c3aed6..3d88899dff7c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1091,8 +1091,8 @@ int __init chsc_init(void)
{
int ret;
- sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sei_page = (void *)get_zeroed_page(GFP_KERNEL);
+ chsc_page = (void *)get_zeroed_page(GFP_KERNEL);
if (!sei_page || !chsc_page) {
ret = -ENOMEM;
goto out_err;
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 902237d0baef..e6c800653f98 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -293,7 +293,7 @@ static int chsc_ioctl_start(void __user *user_area)
if (!css_general_characteristics.dynio)
/* It makes no sense to try. */
return -EOPNOTSUPP;
- chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+ chsc_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!chsc_area)
return -ENOMEM;
request = kzalloc(sizeof(*request), GFP_KERNEL);
@@ -341,7 +341,7 @@ static int chsc_ioctl_on_close_set(void __user *user_area)
ret = -ENOMEM;
goto out_unlock;
}
- on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+ on_close_chsc_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!on_close_chsc_area) {
ret = -ENOMEM;
goto out_free_request;
@@ -393,7 +393,7 @@ static int chsc_ioctl_start_sync(void __user *user_area)
struct chsc_sync_area *chsc_area;
int ret, ccode;
- chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ chsc_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!chsc_area)
return -ENOMEM;
if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
@@ -439,7 +439,7 @@ static int chsc_ioctl_info_channel_path(void __user *user_cd)
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scpcd_area;
- scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ scpcd_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!scpcd_area)
return -ENOMEM;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
@@ -501,7 +501,7 @@ static int chsc_ioctl_info_cu(void __user *user_cd)
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scucd_area;
- scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ scucd_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!scucd_area)
return -ENOMEM;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
@@ -564,7 +564,7 @@ static int chsc_ioctl_info_sch_cu(void __user *user_cud)
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *sscud_area;
- sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sscud_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!sscud_area)
return -ENOMEM;
cud = kzalloc(sizeof(*cud), GFP_KERNEL);
@@ -626,7 +626,7 @@ static int chsc_ioctl_conf_info(void __user *user_ci)
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *sci_area;
- sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sci_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!sci_area)
return -ENOMEM;
ci = kzalloc(sizeof(*ci), GFP_KERNEL);
@@ -697,7 +697,7 @@ static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
u32 res;
} __attribute__ ((packed)) *cssids_parm;
- sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccl_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!sccl_area)
return -ENOMEM;
ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
@@ -757,7 +757,7 @@ static int chsc_ioctl_chpd(void __user *user_chpd)
int ret;
chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
- scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ scpd_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!scpd_area || !chpd) {
ret = -ENOMEM;
goto out_free;
@@ -797,7 +797,7 @@ static int chsc_ioctl_dcal(void __user *user_dcal)
u8 data[PAGE_SIZE - 36];
} __attribute__ ((packed)) *sdcal_area;
- sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sdcal_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!sdcal_area)
return -ENOMEM;
dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 5584aa46c94e..f80dc18e2a76 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -169,7 +169,8 @@ static inline void cmf_activate(void *area, unsigned int onoff)
" lgr 2,%[mbo]\n"
" schm\n"
:
- : [r1] "d" ((unsigned long)onoff), [mbo] "d" (area)
+ : [r1] "d" ((unsigned long)onoff),
+ [mbo] "d" (virt_to_phys(area))
: "1", "2");
}
@@ -501,8 +502,7 @@ static int alloc_cmb(struct ccw_device *cdev)
WARN_ON(!list_empty(&cmb_area.list));
spin_unlock(&cmb_area.lock);
- mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
- get_order(size));
+ mem = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
spin_lock(&cmb_area.lock);
if (cmb_area.mem) {
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 28a88ed2c3aa..094431a62ad5 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -39,7 +39,7 @@ int max_ssid;
#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
-static struct bus_type css_bus_type;
+static const struct bus_type css_bus_type;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
@@ -1409,7 +1409,7 @@ static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
return ret;
}
-static struct bus_type css_bus_type = {
+static const struct bus_type css_bus_type = {
.name = "css",
.match = css_bus_match,
.probe = css_probe,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 0cfb179e1bcb..f95d12345d98 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -49,7 +49,7 @@ static const unsigned long recovery_delay[] = { 3, 30, 300 };
static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
-static struct bus_type ccw_bus_type;
+static const struct bus_type ccw_bus_type;
/******************* bus type handling ***********************/
@@ -1776,7 +1776,7 @@ static void ccw_device_shutdown(struct device *dev)
__disable_cmf(cdev);
}
-static struct bus_type ccw_bus_type = {
+static const struct bus_type ccw_bus_type = {
.name = "ccw",
.match = ccw_bus_match,
.uevent = ccw_uevent,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index c533d1dadc6b..a5dba3829769 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -202,7 +202,8 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
- if (cdev->private->state == DEV_STATE_VERIFY) {
+ if (cdev->private->state == DEV_STATE_VERIFY ||
+ cdev->private->flags.doverify) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_CMD_IRB;
@@ -214,8 +215,7 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
}
if (cdev->private->state != DEV_STATE_ONLINE ||
((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
- !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
- cdev->private->flags.doverify)
+ !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)))
return -EBUSY;
ret = cio_set_options (sch, flags);
if (ret)
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index 6b21ba68c1fe..c7894d61306d 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -42,7 +42,7 @@ static int scmdev_uevent(const struct device *dev, struct kobj_uevent_env *env)
return add_uevent_var(env, "MODALIAS=scm:scmdev");
}
-static struct bus_type scm_bus_type = {
+static const struct bus_type scm_bus_type = {
.name = "scm",
.probe = scmdev_probe,
.remove = scmdev_remove,
@@ -228,7 +228,7 @@ int scm_update_information(void)
size_t num;
int ret;
- scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ scm_info = (void *)__get_free_page(GFP_KERNEL);
if (!scm_info)
return -ENOMEM;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f46dd6abacd7..cce0bafd4c92 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -38,6 +38,7 @@
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/module.h>
+#include <asm/uv.h>
#include "ap_bus.h"
#include "ap_debug.h"
@@ -83,14 +84,11 @@ EXPORT_SYMBOL(ap_perms);
DEFINE_MUTEX(ap_perms_mutex);
EXPORT_SYMBOL(ap_perms_mutex);
-/* # of bus scans since init */
-static atomic64_t ap_scan_bus_count;
-
/* # of bindings complete since init */
static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
-/* completion for initial APQN bindings complete */
-static DECLARE_COMPLETION(ap_init_apqn_bindings_complete);
+/* completion for APQN bindings complete */
+static DECLARE_COMPLETION(ap_apqn_bindings_complete);
static struct ap_config_info *ap_qci_info;
static struct ap_config_info *ap_qci_info_old;
@@ -101,12 +99,16 @@ static struct ap_config_info *ap_qci_info_old;
debug_info_t *ap_dbf_info;
/*
- * Workqueue timer for bus rescan.
+ * AP bus rescan related things.
*/
-static struct timer_list ap_config_timer;
-static int ap_config_time = AP_CONFIG_TIME;
-static void ap_scan_bus(struct work_struct *);
-static DECLARE_WORK(ap_scan_work, ap_scan_bus);
+static bool ap_scan_bus(void);
+static bool ap_scan_bus_result; /* result of last ap_scan_bus() */
+static DEFINE_MUTEX(ap_scan_bus_mutex); /* serializes ap_scan_bus() invocations */
+static atomic64_t ap_scan_bus_count; /* # of ap_scan_bus() invocations */
+static int ap_scan_bus_time = AP_CONFIG_TIME;
+static struct timer_list ap_scan_bus_timer;
+static void ap_scan_bus_wq_callback(struct work_struct *);
+static DECLARE_WORK(ap_scan_bus_work, ap_scan_bus_wq_callback);
/*
* Tasklet & timer for AP request polling and interrupts
@@ -135,7 +137,7 @@ static int ap_max_domain_id = 15;
/* Maximum adapter id, if not given via qci */
static int ap_max_adapter_id = 63;
-static struct bus_type ap_bus_type;
+static const struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
static void ap_interrupt_handler(struct airq_struct *airq,
@@ -753,7 +755,7 @@ static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
}
/*
- * After initial ap bus scan do check if all existing APQNs are
+ * After an AP bus scan, check whether all existing APQNs are
* bound to device drivers.
*/
static void ap_check_bindings_complete(void)
@@ -763,9 +765,9 @@ static void ap_check_bindings_complete(void)
if (atomic64_read(&ap_scan_bus_count) >= 1) {
ap_calc_bound_apqns(&apqns, &bound);
if (bound == apqns) {
- if (!completion_done(&ap_init_apqn_bindings_complete)) {
- complete_all(&ap_init_apqn_bindings_complete);
- AP_DBF_INFO("%s complete\n", __func__);
+ if (!completion_done(&ap_apqn_bindings_complete)) {
+ complete_all(&ap_apqn_bindings_complete);
+ pr_debug("%s all apqn bindings complete\n", __func__);
}
ap_send_bindings_complete_uevent();
}
@@ -782,27 +784,29 @@ static void ap_check_bindings_complete(void)
* -ETIME is returned. On failures negative return values are
* returned to the caller.
*/
-int ap_wait_init_apqn_bindings_complete(unsigned long timeout)
+int ap_wait_apqn_bindings_complete(unsigned long timeout)
{
+ int rc = 0;
long l;
- if (completion_done(&ap_init_apqn_bindings_complete))
+ if (completion_done(&ap_apqn_bindings_complete))
return 0;
if (timeout)
l = wait_for_completion_interruptible_timeout(
- &ap_init_apqn_bindings_complete, timeout);
+ &ap_apqn_bindings_complete, timeout);
else
l = wait_for_completion_interruptible(
- &ap_init_apqn_bindings_complete);
+ &ap_apqn_bindings_complete);
if (l < 0)
- return l == -ERESTARTSYS ? -EINTR : l;
+ rc = l == -ERESTARTSYS ? -EINTR : l;
else if (l == 0 && timeout)
- return -ETIME;
+ rc = -ETIME;
- return 0;
+ pr_debug("%s rc=%d\n", __func__, rc);
+ return rc;
}
-EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete);
+EXPORT_SYMBOL(ap_wait_apqn_bindings_complete);
static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
{
@@ -826,8 +830,8 @@ static int __ap_revise_reserved(struct device *dev, void *dummy)
drvres = to_ap_drv(dev->driver)->flags
& AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres) {
- AP_DBF_DBG("%s reprobing queue=%02x.%04x\n",
- __func__, card, queue);
+ pr_debug("%s reprobing queue=%02x.%04x\n",
+ __func__, card, queue);
rc = device_reprobe(dev);
if (rc)
AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
@@ -939,8 +943,6 @@ static int ap_device_probe(struct device *dev)
if (is_queue_dev(dev))
hash_del(&to_ap_queue(dev)->hnode);
spin_unlock_bh(&ap_queues_lock);
- } else {
- ap_check_bindings_complete();
}
out:
@@ -1012,16 +1014,47 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
}
EXPORT_SYMBOL(ap_driver_unregister);
-void ap_bus_force_rescan(void)
+/*
+ * Enforce a synchronous AP bus rescan.
+ * Returns true if the bus scan found a change in the AP configuration;
+ * the implied AP device additions or deletions are complete on return.
+ */
+bool ap_bus_force_rescan(void)
{
+ unsigned long scan_counter = atomic64_read(&ap_scan_bus_count);
+ bool rc = false;
+
+ pr_debug(">%s scan counter=%lu\n", __func__, scan_counter);
+
/* Only trigger AP bus scans after the initial scan is done */
- if (atomic64_read(&ap_scan_bus_count) <= 0)
- return;
+ if (scan_counter <= 0)
+ goto out;
+
+ /* Try to acquire the AP scan bus mutex */
+ if (mutex_trylock(&ap_scan_bus_mutex)) {
+ /* mutex acquired, run the AP bus scan */
+ ap_scan_bus_result = ap_scan_bus();
+ rc = ap_scan_bus_result;
+ mutex_unlock(&ap_scan_bus_mutex);
+ goto out;
+ }
+
+ /*
+ * Mutex acquisition failed: there is currently another task
+ * already running the AP bus scan. Simply wait for the lock,
+ * which means the other task has finished and stored the
+ * result in ap_scan_bus_result.
+ */
+ if (mutex_lock_interruptible(&ap_scan_bus_mutex)) {
+ /* interrupted while waiting, ignore and bail out */
+ goto out;
+ }
+ rc = ap_scan_bus_result;
+ mutex_unlock(&ap_scan_bus_mutex);
- /* processing a asynchronous bus rescan */
- del_timer(&ap_config_timer);
- queue_work(system_long_wq, &ap_scan_work);
- flush_work(&ap_scan_work);
+out:
+ pr_debug("%s rc=%d\n", __func__, rc);
+ return rc;
}
EXPORT_SYMBOL(ap_bus_force_rescan);
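The reworked ap_bus_force_rescan() above is a "run it, or wait for whoever is running it" idiom: mutex_trylock() either wins and performs the scan itself, or the caller blocks on the same mutex solely to inherit the result the winner stored. Stripped to its bones, with a hypothetical do_scan() worker:

	static DEFINE_MUTEX(scan_mutex);
	static bool scan_result;

	static bool run_or_wait_for_scan(void)
	{
		bool rc;

		if (mutex_trylock(&scan_mutex)) {
			scan_result = do_scan();	/* hypothetical */
			rc = scan_result;
			mutex_unlock(&scan_mutex);
			return rc;
		}
		/* someone else is scanning: wait, then reuse their result */
		if (mutex_lock_interruptible(&scan_mutex))
			return false;	/* interrupted, give up */
		rc = scan_result;
		mutex_unlock(&scan_mutex);
		return rc;
	}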
@@ -1030,7 +1063,7 @@ EXPORT_SYMBOL(ap_bus_force_rescan);
*/
void ap_bus_cfg_chg(void)
{
- AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
+ pr_debug("%s config change, forcing bus rescan\n", __func__);
ap_bus_force_rescan();
}
@@ -1250,7 +1283,7 @@ static BUS_ATTR_RO(ap_interrupts);
static ssize_t config_time_show(const struct bus_type *bus, char *buf)
{
- return sysfs_emit(buf, "%d\n", ap_config_time);
+ return sysfs_emit(buf, "%d\n", ap_scan_bus_time);
}
static ssize_t config_time_store(const struct bus_type *bus,
@@ -1260,8 +1293,8 @@ static ssize_t config_time_store(const struct bus_type *bus,
if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
return -EINVAL;
- ap_config_time = time;
- mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+ ap_scan_bus_time = time;
+ mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
return count;
}
@@ -1603,7 +1636,7 @@ static struct attribute *ap_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(ap_bus);
-static struct bus_type ap_bus_type = {
+static const struct bus_type ap_bus_type = {
.name = "ap",
.bus_groups = ap_bus_groups,
.match = &ap_bus_match,
@@ -1888,8 +1921,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
}
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev checkstop on\n",
- __func__, ac->id, dom);
+ pr_debug("%s(%d,%d) queue dev checkstop on\n",
+ __func__, ac->id, dom);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
goto put_dev_and_continue;
@@ -1899,8 +1932,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
- __func__, ac->id, dom);
+ pr_debug("%s(%d,%d) queue dev checkstop off\n",
+ __func__, ac->id, dom);
goto put_dev_and_continue;
}
/* config state change */
@@ -1912,8 +1945,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
}
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev config off\n",
- __func__, ac->id, dom);
+ pr_debug("%s(%d,%d) queue dev config off\n",
+ __func__, ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
@@ -1924,8 +1957,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
- __func__, ac->id, dom);
+ pr_debug("%s(%d,%d) queue dev config on\n",
+ __func__, ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
goto put_dev_and_continue;
}
@@ -1997,8 +2030,8 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
- __func__, ap);
+ pr_debug("%s(%d) no type info (no APQN found), ignored\n",
+ __func__, ap);
}
return;
}
@@ -2010,8 +2043,8 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
- __func__, ap);
+ pr_debug("%s(%d) no valid type (0) info, ignored\n",
+ __func__, ap);
}
return;
}
@@ -2135,23 +2168,80 @@ static bool ap_get_configuration(void)
sizeof(struct ap_config_info)) != 0;
}
+/*
+ * ap_config_has_new_aps - Compare the current qci info against the
+ * old one to check whether new adapters have appeared. Returns true
+ * if at least one new adapter shows up in the apm mask. Existing or
+ * receding adapters are not counted.
+ */
+static bool ap_config_has_new_aps(void)
+{
+
+ unsigned long m[BITS_TO_LONGS(AP_DEVICES)];
+
+ if (!ap_qci_info)
+ return false;
+
+ bitmap_andnot(m, (unsigned long *)ap_qci_info->apm,
+ (unsigned long *)ap_qci_info_old->apm, AP_DEVICES);
+ if (!bitmap_empty(m, AP_DEVICES))
+ return true;
+
+ return false;
+}
+
+/*
+ * ap_config_has_new_doms - Compare the current qci info against the
+ * old one to check whether new (usage) domains have appeared. Returns
+ * true if at least one new domain shows up in the aqm mask. Existing
+ * or receding domains are not counted.
+ */
+static bool ap_config_has_new_doms(void)
+{
+ unsigned long m[BITS_TO_LONGS(AP_DOMAINS)];
+
+ if (!ap_qci_info)
+ return false;
+
+ bitmap_andnot(m, (unsigned long *)ap_qci_info->aqm,
+ (unsigned long *)ap_qci_info_old->aqm, AP_DOMAINS);
+ if (!bitmap_empty(m, AP_DOMAINS))
+ return true;
+
+ return false;
+}
+
/**
* ap_scan_bus(): Scan the AP bus for new devices
- * Runs periodically, workqueue timer (ap_config_time)
- * @unused: Unused pointer.
+ * Must always run under ap_scan_bus_mutex protection,
+ * which the caller is responsible for locking and unlocking!
+ * Returns true if any config change has been detected
+ * during the scan, otherwise false.
*/
-static void ap_scan_bus(struct work_struct *unused)
+static bool ap_scan_bus(void)
{
- int ap, config_changed = 0;
+ bool config_changed;
+ int ap;
+
+ pr_debug(">%s\n", __func__);
- /* config change notify */
+ /* (re-)fetch configuration via QCI */
config_changed = ap_get_configuration();
- if (config_changed)
+ if (config_changed) {
+ if (ap_config_has_new_aps() || ap_config_has_new_doms()) {
+ /*
+ * New adapters and/or domains require new AP devices
+ * to be built, and these need to get bound to a
+ * device driver. Thus reset the APQN bindings
+ * complete completion.
+ */
+ reinit_completion(&ap_apqn_bindings_complete);
+ }
+ /* post a config change notify */
notify_config_changed();
+ }
ap_select_domain();
- AP_DBF_DBG("%s running\n", __func__);
-
/* loop over all possible adapters */
for (ap = 0; ap <= ap_max_adapter_id; ap++)
ap_scan_adapter(ap);
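ap_config_has_new_aps()/ap_config_has_new_doms() above reduce "did anything new appear?" to two bitmap operations: cur AND NOT old leaves exactly the newly set bits, and bitmap_empty() tests the result; disappearing bits are deliberately ignored. Generic sketch:

	#include <linux/bitmap.h>

	static bool has_new_bits(const unsigned long *cur,
				 const unsigned long *old, unsigned int nbits)
	{
		/* assumes nbits <= 256 for this illustration */
		unsigned long diff[BITS_TO_LONGS(256)];

		/* diff = cur & ~old: bits set now but not before */
		bitmap_andnot(diff, cur, old, nbits);
		return !bitmap_empty(diff, nbits);
	}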
@@ -2174,23 +2264,56 @@ static void ap_scan_bus(struct work_struct *unused)
}
if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
- AP_DBF_DBG("%s init scan complete\n", __func__);
+ pr_debug("%s init scan complete\n", __func__);
ap_send_init_scan_done_uevent();
- ap_check_bindings_complete();
}
- mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+ ap_check_bindings_complete();
+
+ mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
+
+ pr_debug("<%s config_changed=%d\n", __func__, config_changed);
+
+ return config_changed;
}
-static void ap_config_timeout(struct timer_list *unused)
+/*
+ * Callback for the ap_scan_bus_timer
+ * Runs periodically, every ap_scan_bus_time seconds
+ */
+static void ap_scan_bus_timer_callback(struct timer_list *unused)
{
- queue_work(system_long_wq, &ap_scan_work);
+ /*
+ * Schedule work on the system long wq; when the work is
+ * finally executed, it runs the AP bus scan.
+ */
+ queue_work(system_long_wq, &ap_scan_bus_work);
+}
+
+/*
+ * Callback for the ap_scan_bus_work
+ */
+static void ap_scan_bus_wq_callback(struct work_struct *unused)
+{
+ /*
+ * Try to invoke ap_scan_bus(). If the mutex acquisition
+ * fails, another task is already running the AP bus scan and
+ * there is no need to wait for it or re-trigger the scan.
+ * Note that at the end of the bus scan function the AP scan
+ * bus timer is re-armed; it triggers ap_scan_bus_timer_callback(),
+ * which enqueues a work item on the system_long_wq, which in
+ * turn invokes this function again.
+ */
+ if (mutex_trylock(&ap_scan_bus_mutex)) {
+ ap_scan_bus_result = ap_scan_bus();
+ mutex_unlock(&ap_scan_bus_mutex);
+ }
}
static int __init ap_debug_init(void)
{
ap_dbf_info = debug_register("ap", 2, 1,
- DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ AP_DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(ap_dbf_info, &debug_sprintf_view);
debug_set_level(ap_dbf_info, DBF_ERR);
@@ -2274,7 +2397,7 @@ static int __init ap_module_init(void)
ap_root_device->bus = &ap_bus_type;
/* Setup the AP bus rescan timer. */
- timer_setup(&ap_config_timer, ap_config_timeout, 0);
+ timer_setup(&ap_scan_bus_timer, ap_scan_bus_timer_callback, 0);
/*
* Setup the high resolution poll timer.
@@ -2292,7 +2415,7 @@ static int __init ap_module_init(void)
goto out_work;
}
- queue_work(system_long_wq, &ap_scan_work);
+ queue_work(system_long_wq, &ap_scan_bus_work);
return 0;
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 98814839ef30..59c7ed49aa02 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -266,7 +266,7 @@ int ap_sb_available(void);
bool ap_is_se_guest(void);
void ap_wait(enum ap_sm_wait wait);
void ap_request_timeout(struct timer_list *t);
-void ap_bus_force_rescan(void);
+bool ap_bus_force_rescan(void);
int ap_test_config_usage_domain(unsigned int domain);
int ap_test_config_ctrl_domain(unsigned int domain);
@@ -352,8 +352,12 @@ int ap_parse_mask_str(const char *str,
* the return value is 0. If the timeout (in jiffies) hits instead
* -ETIME is returned. On failures negative return values are
* returned to the caller.
+ * The AP bus scan may find new devices. In that case the condition
+ * that all APQNs are bound to their device drivers is reset, and this
+ * call blocks again until either all APQNs are bound to a device
+ * driver or the timeout hits.
*/
-int ap_wait_init_apqn_bindings_complete(unsigned long timeout);
+int ap_wait_apqn_bindings_complete(unsigned long timeout);
void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg);
void ap_send_online_uevent(struct ap_device *ap_dev, int online);
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
index c083ce88a9a6..2f66271b8564 100644
--- a/drivers/s390/crypto/ap_debug.h
+++ b/drivers/s390/crypto/ap_debug.h
@@ -16,7 +16,7 @@
#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
-#define DBF_MAX_SPRINTF_ARGS 6
+#define AP_DBF_MAX_SPRINTF_ARGS 6
#define AP_DBF(...) \
debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
@@ -26,8 +26,6 @@
debug_sprintf_event(ap_dbf_info, DBF_WARN, ##__VA_ARGS__)
#define AP_DBF_INFO(...) \
debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__)
-#define AP_DBF_DBG(...) \
- debug_sprintf_event(ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
extern debug_info_t *ap_dbf_info;
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 682595443145..6e4e8d324a6d 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -136,6 +136,8 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
+ print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ aq->reply->msg, aq->reply->len, false);
aq->queue_count = max_t(int, 0, aq->queue_count - 1);
if (!status.queue_empty && !aq->queue_count)
aq->queue_count++;
@@ -169,6 +171,9 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
aq->queue_count = 0;
list_splice_init(&aq->pendingq, &aq->requestq);
aq->requestq_count += aq->pendingq_count;
+ pr_debug("%s queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
+ __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
+ aq->pendingq_count, aq->requestq_count);
aq->pendingq_count = 0;
break;
default:
@@ -243,6 +248,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
/* Start the next request on the queue. */
ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
+ print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg->msg, ap_msg->len, false);
status = __ap_send(qid, ap_msg->psmid,
ap_msg->msg, ap_msg->len,
ap_msg->flags & AP_MSG_FLAG_SPECIAL);
@@ -446,9 +453,9 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
case AP_BS_Q_USABLE:
/* association is through */
aq->sm_state = AP_SM_STATE_IDLE;
- AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
- __func__, AP_QID_CARD(aq->qid),
- AP_QID_QUEUE(aq->qid), aq->assoc_idx);
+ pr_debug("%s queue 0x%02x.%04x associated with %u\n",
+ __func__, AP_QID_CARD(aq->qid),
+ AP_QID_QUEUE(aq->qid), aq->assoc_idx);
return AP_SM_WAIT_NONE;
case AP_BS_Q_USABLE_NO_SECURE_KEY:
/* association still pending */
@@ -690,9 +697,9 @@ static ssize_t ap_functions_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
- AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
@@ -846,9 +853,9 @@ static ssize_t se_bind_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
- AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
@@ -974,9 +981,9 @@ static ssize_t se_associate_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
- AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
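
Editor's note: the ap_sm_recv()/ap_sm_write() hunks above dump raw AP requests and replies with print_hex_dump_debug(), which compiles to nothing unless DEBUG or CONFIG_DYNAMIC_DEBUG is active. A standalone sketch of the same call pattern (dump_ap_msg() and its arguments are illustrative):

static void dump_ap_msg(const void *buf, size_t len)
{
	/* 16 bytes per line, 1-byte groups, addresses as prefix, no ASCII */
	print_hex_dump_debug("apmsg: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     buf, len, false);
}

With dynamic debug, such callsites are enabled at runtime through the debugfs dynamic_debug/control interface, rather than via the s390 dbf level that the removed AP_DBF_DBG macro used.
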
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 6cfb6b2340c9..dccf664a3d95 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -42,24 +42,23 @@ MODULE_DESCRIPTION("s390 protected key interface");
* debug feature data and functions
*/
-static debug_info_t *debug_info;
+static debug_info_t *pkey_dbf_info;
-#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__)
-#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__)
-#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__)
-#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__)
+#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__)
+#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__)
+#define PKEY_DBF_ERR(...) debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__)
static void __init pkey_debug_init(void)
{
/* 5 arguments per dbf entry (including the format string ptr) */
- debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
- debug_register_view(debug_info, &debug_sprintf_view);
- debug_set_level(debug_info, 3);
+ pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
+ debug_register_view(pkey_dbf_info, &debug_sprintf_view);
+ debug_set_level(pkey_dbf_info, 3);
}
static void __exit pkey_debug_exit(void)
{
- debug_unregister(debug_info);
+ debug_unregister(pkey_dbf_info);
}
/* inside view of a protected key token (only type 0x00 version 0x01) */
@@ -163,14 +162,14 @@ static int pkey_clr2protkey(u32 keytype, const u8 *clrkey,
fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keytype %u\n",
- __func__, keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, keytype);
return -EINVAL;
}
if (*protkeylen < keysize + AES_WK_VP_SIZE) {
- DEBUG_ERR("%s prot key buffer size too small: %u < %d\n",
- __func__, *protkeylen, keysize + AES_WK_VP_SIZE);
+ PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n",
+ __func__, *protkeylen, keysize + AES_WK_VP_SIZE);
return -EINVAL;
}
@@ -182,7 +181,7 @@ static int pkey_clr2protkey(u32 keytype, const u8 *clrkey,
}
/* check for the pckmo subfunction we need now */
if (!cpacf_test_func(&pckmo_functions, fc)) {
- DEBUG_ERR("%s pckmo functions not available\n", __func__);
+ PKEY_DBF_ERR("%s pckmo functions not available\n", __func__);
return -ENODEV;
}
@@ -244,7 +243,7 @@ static int pkey_skey2pkey(const u8 *key, u8 *protkey,
}
if (rc)
- DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ pr_debug("%s failed rc=%d\n", __func__, rc);
return rc;
}
@@ -283,7 +282,7 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
out:
kfree(apqns);
if (rc)
- DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ pr_debug("%s failed rc=%d\n", __func__, rc);
return rc;
}
@@ -294,33 +293,36 @@ static int pkey_ep11key2pkey(const u8 *key, size_t keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
u32 nr_apqns, *apqns = NULL;
+ int i, j, rc = -ENODEV;
u16 card, dom;
- int i, rc;
zcrypt_wait_api_operational();
- /* build a list of apqns suitable for this key */
- rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7,
- ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
- ep11_kb_wkvp(key, keylen));
- if (rc)
- goto out;
+ /* try two times in case of failure */
+ for (i = 0; i < 2 && rc; i++) {
- /* go through the list of apqns and try to derive an pkey */
- for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
- card = apqns[i] >> 16;
- dom = apqns[i] & 0xFFFF;
- rc = ep11_kblob2protkey(card, dom, key, keylen,
- protkey, protkeylen, protkeytype);
- if (rc == 0)
- break;
+ /* build a list of apqns suitable for this key */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7,
+ ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
+ ep11_kb_wkvp(key, keylen));
+ if (rc)
+ continue; /* retry findcard on failure */
+
+ /* go through the list of apqns and try to derive a pkey */
+ for (rc = -ENODEV, j = 0; j < nr_apqns && rc; j++) {
+ card = apqns[j] >> 16;
+ dom = apqns[j] & 0xFFFF;
+ rc = ep11_kblob2protkey(card, dom, key, keylen,
+ protkey, protkeylen, protkeytype);
+ }
+
+ kfree(apqns);
}
-out:
- kfree(apqns);
if (rc)
- DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ pr_debug("%s failed rc=%d\n", __func__, rc);
+
return rc;
}
@@ -336,7 +338,7 @@ static int pkey_verifykey(const struct pkey_seckey *seckey,
int rc;
/* check the secure key for valid AES secure key */
- rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *)seckey, 0);
+ rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, (u8 *)seckey, 0);
if (rc)
goto out;
if (pattributes)
@@ -351,7 +353,7 @@ static int pkey_verifykey(const struct pkey_seckey *seckey,
if (rc > 0) {
/* key mkvp matches to old master key mkvp */
- DEBUG_DBG("%s secure key has old mkvp\n", __func__);
+ pr_debug("%s secure key has old mkvp\n", __func__);
if (pattributes)
*pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
rc = 0;
@@ -363,7 +365,7 @@ static int pkey_verifykey(const struct pkey_seckey *seckey,
*pdomain = domain;
out:
- DEBUG_DBG("%s rc=%d\n", __func__, rc);
+ pr_debug("%s rc=%d\n", __func__, rc);
return rc;
}
@@ -379,8 +381,8 @@ static int pkey_genprotkey(u32 keytype, u8 *protkey,
keysize = pkey_keytype_aes_to_size(keytype);
if (!keysize) {
- DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
- keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", __func__,
+ keytype);
return -EINVAL;
}
@@ -428,13 +430,13 @@ static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen,
fc = CPACF_KMC_PAES_256;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keytype %u\n", __func__,
- protkeytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__,
+ protkeytype);
return -EINVAL;
}
if (protkeylen != pkeylen) {
- DEBUG_ERR("%s invalid protected key size %u for keytype %u\n",
- __func__, protkeylen, protkeytype);
+ PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n",
+ __func__, protkeylen, protkeytype);
return -EINVAL;
}
@@ -446,7 +448,7 @@ static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen,
k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
sizeof(null_msg));
if (k != sizeof(null_msg)) {
- DEBUG_ERR("%s protected key is not valid\n", __func__);
+ PKEY_DBF_ERR("%s protected key is not valid\n", __func__);
return -EKEYREJECTED;
}
@@ -464,13 +466,13 @@ static int nonccatokaes2pkey(const struct clearkeytoken *t,
keysize = pkey_keytype_aes_to_size(t->keytype);
if (!keysize) {
- DEBUG_ERR("%s unknown/unsupported keytype %u\n",
- __func__, t->keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, t->keytype);
return -EINVAL;
}
if (t->len != keysize) {
- DEBUG_ERR("%s non clear key aes token: invalid key len %u\n",
- __func__, t->len);
+ PKEY_DBF_ERR("%s non clear key aes token: invalid key len %u\n",
+ __func__, t->len);
return -EINVAL;
}
@@ -505,7 +507,7 @@ try_via_ep11:
goto out;
failure:
- DEBUG_ERR("%s unable to build protected key from clear", __func__);
+ PKEY_DBF_ERR("%s unable to build protected key from clear", __func__);
out:
kfree(tmpbuf);
@@ -536,14 +538,14 @@ static int nonccatokecc2pkey(const struct clearkeytoken *t,
keylen = 64;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keytype %u\n",
- __func__, t->keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, t->keytype);
return -EINVAL;
}
if (t->len != keylen) {
- DEBUG_ERR("%s non clear key ecc token: invalid key len %u\n",
- __func__, t->len);
+ PKEY_DBF_ERR("%s non clear key ecc token: invalid key len %u\n",
+ __func__, t->len);
return -EINVAL;
}
@@ -551,8 +553,8 @@ static int nonccatokecc2pkey(const struct clearkeytoken *t,
rc = pkey_clr2protkey(t->keytype, t->clearkey,
protkey, protkeylen, protkeytype);
if (rc) {
- DEBUG_ERR("%s unable to build protected key from clear",
- __func__);
+ PKEY_DBF_ERR("%s unable to build protected key from clear",
+ __func__);
}
return rc;
@@ -604,15 +606,15 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
protkeylen, protkeytype);
break;
default:
- DEBUG_ERR("%s unknown/unsupported non cca clear key type %u\n",
- __func__, t->keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported non cca clear key type %u\n",
+ __func__, t->keytype);
return -EINVAL;
}
break;
}
case TOKVER_EP11_AES: {
/* check ep11 key for exportable as protected key */
- rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+ rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
if (rc)
goto out;
rc = pkey_ep11key2pkey(key, keylen,
@@ -621,15 +623,16 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
}
case TOKVER_EP11_AES_WITH_HEADER:
/* check ep11 key with header for exportable as protected key */
- rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1);
+ rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1);
if (rc)
goto out;
rc = pkey_ep11key2pkey(key, keylen,
protkey, protkeylen, protkeytype);
break;
default:
- DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
- __func__, hdr->version);
+ PKEY_DBF_ERR("%s unknown/unsupported non-CCA token version %d\n",
+ __func__, hdr->version);
}
out:
@@ -654,8 +657,8 @@ static int pkey_ccainttok2pkey(const u8 *key, u32 keylen,
return -EINVAL;
break;
default:
- DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n",
- __func__, hdr->version);
+ PKEY_DBF_ERR("%s unknown/unsupported CCA internal token version %d\n",
+ __func__, hdr->version);
return -EINVAL;
}
@@ -672,7 +675,7 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen,
int rc;
if (keylen < sizeof(struct keytoken_header)) {
- DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen);
+ PKEY_DBF_ERR("%s invalid keylen %d\n", __func__, keylen);
return -EINVAL;
}
@@ -686,12 +689,12 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen,
protkey, protkeylen, protkeytype);
break;
default:
- DEBUG_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
return -EINVAL;
}
- DEBUG_DBG("%s rc=%d\n", __func__, rc);
+ pr_debug("%s rc=%d\n", __func__, rc);
return rc;
}
EXPORT_SYMBOL(pkey_keyblob2pkey);
@@ -839,7 +842,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
hdr->version == TOKVER_CCA_AES) {
struct secaeskeytoken *t = (struct secaeskeytoken *)key;
- rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
+ rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0);
if (rc)
goto out;
if (ktype)
@@ -869,7 +872,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
hdr->version == TOKVER_CCA_VLSC) {
struct cipherkeytoken *t = (struct cipherkeytoken *)key;
- rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
+ rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1);
if (rc)
goto out;
if (ktype)
@@ -907,7 +910,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
struct ep11keyblob *kb = (struct ep11keyblob *)key;
int api;
- rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+ rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
if (rc)
goto out;
if (ktype)
@@ -933,8 +936,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
int api;
- rc = ep11_check_aes_key_with_hdr(debug_info, 3,
- key, keylen, 1);
+ rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1);
if (rc)
goto out;
if (ktype)
@@ -981,25 +984,27 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (hdr->version == TOKVER_CCA_AES) {
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
- if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+ if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
return -EINVAL;
} else if (hdr->version == TOKVER_CCA_VLSC) {
if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
return -EINVAL;
- if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+ if (cca_check_secaescipherkey(pkey_dbf_info,
+ 3, key, 0, 1))
return -EINVAL;
} else {
- DEBUG_ERR("%s unknown CCA internal token version %d\n",
- __func__, hdr->version);
+ PKEY_DBF_ERR("%s unknown CCA internal token version %d\n",
+ __func__, hdr->version);
return -EINVAL;
}
} else if (hdr->type == TOKTYPE_NON_CCA) {
if (hdr->version == TOKVER_EP11_AES) {
- if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+ if (ep11_check_aes_key(pkey_dbf_info,
+ 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
- if (ep11_check_aes_key_with_hdr(debug_info, 3,
- key, keylen, 1))
+ if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
return -EINVAL;
} else {
return pkey_nonccatok2pkey(key, keylen,
@@ -1007,8 +1012,8 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
protkeytype);
}
} else {
- DEBUG_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
return -EINVAL;
}
@@ -1234,50 +1239,53 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
/* EP11 AES key blob with header */
- if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1))
+ if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
/* EP11 ECC key blob with header */
- if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1))
+ if (ep11_check_ecc_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES &&
is_ep11_keyblob(key)) {
/* EP11 AES key blob with header in session field */
- if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+ if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
if (hdr->version == TOKVER_CCA_AES) {
/* CCA AES data key */
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
- if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+ if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
return -EINVAL;
} else if (hdr->version == TOKVER_CCA_VLSC) {
/* CCA AES cipher key */
if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
return -EINVAL;
- if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+ if (cca_check_secaescipherkey(pkey_dbf_info,
+ 3, key, 0, 1))
return -EINVAL;
} else {
- DEBUG_ERR("%s unknown CCA internal token version %d\n",
- __func__, hdr->version);
+ PKEY_DBF_ERR("%s unknown CCA internal token version %d\n",
+ __func__, hdr->version);
return -EINVAL;
}
} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
/* CCA ECC (private) key */
if (keylen < sizeof(struct eccprivkeytoken))
return -EINVAL;
- if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1))
+ if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA) {
return pkey_nonccatok2pkey(key, keylen,
protkey, protkeylen, protkeytype);
} else {
- DEBUG_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
return -EINVAL;
}
@@ -1350,7 +1358,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = cca_genseckey(kgs.cardnr, kgs.domain,
kgs.keytype, kgs.seckey.seckey);
- DEBUG_DBG("%s cca_genseckey()=%d\n", __func__, rc);
+ pr_debug("%s cca_genseckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ugs, &kgs, sizeof(kgs)))
@@ -1365,7 +1373,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
kcs.clrkey.clrkey, kcs.seckey.seckey);
- DEBUG_DBG("%s cca_clr2seckey()=%d\n", __func__, rc);
+ pr_debug("%s cca_clr2seckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ucs, &kcs, sizeof(kcs)))
@@ -1383,7 +1391,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
ksp.seckey.seckey, ksp.protkey.protkey,
&ksp.protkey.len, &ksp.protkey.type);
- DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
+ pr_debug("%s cca_sec2protkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
@@ -1400,7 +1408,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey,
kcp.protkey.protkey,
&kcp.protkey.len, &kcp.protkey.type);
- DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_clr2protkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ucp, &kcp, sizeof(kcp)))
@@ -1416,7 +1424,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = cca_findcard(kfc.seckey.seckey,
&kfc.cardnr, &kfc.domain, 1);
- DEBUG_DBG("%s cca_findcard()=%d\n", __func__, rc);
+ pr_debug("%s cca_findcard()=%d\n", __func__, rc);
if (rc < 0)
break;
if (copy_to_user(ufc, &kfc, sizeof(kfc)))
@@ -1432,7 +1440,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
ksp.protkey.len = sizeof(ksp.protkey.protkey);
rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey,
&ksp.protkey.len, &ksp.protkey.type);
- DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_skey2pkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
@@ -1447,7 +1455,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain,
&kvk.keysize, &kvk.attributes);
- DEBUG_DBG("%s pkey_verifykey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_verifykey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(uvk, &kvk, sizeof(kvk)))
@@ -1463,7 +1471,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
kgp.protkey.len = sizeof(kgp.protkey.protkey);
rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey,
&kgp.protkey.len, &kgp.protkey.type);
- DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_genprotkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ugp, &kgp, sizeof(kgp)))
@@ -1478,7 +1486,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = pkey_verifyprotkey(kvp.protkey.protkey,
kvp.protkey.len, kvp.protkey.type);
- DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_verifyprotkey()=%d\n", __func__, rc);
break;
}
case PKEY_KBLOB2PROTK: {
@@ -1494,7 +1502,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
ktp.protkey.len = sizeof(ktp.protkey.protkey);
rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey,
&ktp.protkey.len, &ktp.protkey.type);
- DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
if (rc)
@@ -1523,7 +1531,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_genseckey2(apqns, kgs.apqn_entries,
kgs.type, kgs.size, kgs.keygenflags,
kkey, &klen);
- DEBUG_DBG("%s pkey_genseckey2()=%d\n", __func__, rc);
+ pr_debug("%s pkey_genseckey2()=%d\n", __func__, rc);
kfree(apqns);
if (rc) {
kfree(kkey);
@@ -1565,7 +1573,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_clr2seckey2(apqns, kcs.apqn_entries,
kcs.type, kcs.size, kcs.keygenflags,
kcs.clrkey.clrkey, kkey, &klen);
- DEBUG_DBG("%s pkey_clr2seckey2()=%d\n", __func__, rc);
+ pr_debug("%s pkey_clr2seckey2()=%d\n", __func__, rc);
kfree(apqns);
if (rc) {
kfree(kkey);
@@ -1601,7 +1609,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_verifykey2(kkey, kvk.keylen,
&kvk.cardnr, &kvk.domain,
&kvk.type, &kvk.size, &kvk.flags);
- DEBUG_DBG("%s pkey_verifykey2()=%d\n", __func__, rc);
+ pr_debug("%s pkey_verifykey2()=%d\n", __func__, rc);
kfree(kkey);
if (rc)
break;
@@ -1630,7 +1638,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
kkey, ktp.keylen,
ktp.protkey.protkey, &ktp.protkey.len,
&ktp.protkey.type);
- DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
+ pr_debug("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
kfree(apqns);
memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
@@ -1664,7 +1672,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
rc = pkey_apqns4key(kkey, kak.keylen, kak.flags,
apqns, &nr_apqns);
- DEBUG_DBG("%s pkey_apqns4key()=%d\n", __func__, rc);
+ pr_debug("%s pkey_apqns4key()=%d\n", __func__, rc);
kfree(kkey);
if (rc && rc != -ENOSPC) {
kfree(apqns);
@@ -1707,7 +1715,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp,
kat.flags, apqns, &nr_apqns);
- DEBUG_DBG("%s pkey_apqns4keytype()=%d\n", __func__, rc);
+ pr_debug("%s pkey_apqns4keytype()=%d\n", __func__, rc);
if (rc && rc != -ENOSPC) {
kfree(apqns);
break;
@@ -1757,7 +1765,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries,
kkey, ktp.keylen,
protkey, &protkeylen, &ktp.pkeytype);
- DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
+ pr_debug("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
kfree(apqns);
memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index a5ab03e42ff1..4aeb3e1213c7 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -60,7 +60,7 @@ static void vfio_ap_matrix_dev_release(struct device *dev)
kfree(matrix_dev);
}
-static struct bus_type matrix_bus = {
+static const struct bus_type matrix_bus = {
.name = "matrix",
};
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 983b3b16196c..fc169bc61593 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -659,6 +659,21 @@ static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
AP_DOMAINS);
}
+static bool _queue_passable(struct vfio_ap_queue *q)
+{
+ if (!q)
+ return false;
+
+ switch (q->reset_status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ return true;
+ default:
+ return false;
+ }
+}
+
/*
* vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
* to ensure no queue devices are passed through to
@@ -687,7 +702,6 @@ static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
unsigned long apid, apqi, apqn;
DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
- struct vfio_ap_queue *q;
bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
@@ -716,8 +730,7 @@ static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
* hardware device.
*/
apqn = AP_MKQID(apid, apqi);
- q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
- if (!q || q->reset_status.response_code) {
+ if (!_queue_passable(vfio_ap_mdev_get_queue(matrix_mdev, apqn))) {
clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
/*
@@ -1691,6 +1704,7 @@ static int apq_status_check(int apqn, struct ap_queue_status *status)
switch (status->response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
@@ -1747,14 +1761,6 @@ static void apq_reset_check(struct work_struct *reset_work)
memcpy(&q->reset_status, &status, sizeof(status));
continue;
}
- /*
- * When an AP adapter is deconfigured, the
- * associated queues are reset, so let's set the
- * status response code to 0 so the queue may be
- * passed through (i.e., not filtered)
- */
- if (status.response_code == AP_RESPONSE_DECONFIGURED)
- q->reset_status.response_code = 0;
if (q->saved_isc != VFIO_AP_ISC_INVALID)
vfio_ap_free_aqic_resources(q);
break;
@@ -1781,12 +1787,7 @@ static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
queue_work(system_long_wq, &q->reset_work);
break;
case AP_RESPONSE_DECONFIGURED:
- /*
- * When an AP adapter is deconfigured, the associated
- * queues are reset, so let's set the status response code to 0
- * so the queue may be passed through (i.e., not filtered).
- */
- q->reset_status.response_code = 0;
+ case AP_RESPONSE_CHECKSTOPPED:
vfio_ap_free_aqic_resources(q);
break;
default:
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 74200f54dfff..02c503f16bc2 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -12,6 +12,9 @@
* Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
*/
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -57,10 +60,6 @@ DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
-static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
-
-atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
-EXPORT_SYMBOL(zcrypt_rescan_req);
static LIST_HEAD(zcrypt_ops_list);
@@ -69,20 +68,15 @@ debug_info_t *zcrypt_dbf_info;
/*
* Process a rescan of the transport layer.
- *
- * Returns 1, if the rescan has been processed, otherwise 0.
+ * Runs a synchronous AP bus rescan.
+ * Returns true if something has changed (for example the
+ * bus scan has found and built up new devices) and it is
+ * worth doing a retry. Otherwise false is returned, meaning
+ * there were no changes at the AP bus level.
*/
-static inline int zcrypt_process_rescan(void)
-{
- if (atomic_read(&zcrypt_rescan_req)) {
- atomic_set(&zcrypt_rescan_req, 0);
- atomic_inc(&zcrypt_rescan_count);
- ap_bus_force_rescan();
- ZCRYPT_DBF_INFO("%s rescan count=%07d\n", __func__,
- atomic_inc_return(&zcrypt_rescan_count));
- return 1;
- }
- return 0;
+static inline bool zcrypt_process_rescan(void)
+{
+ return ap_bus_force_rescan();
}
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
@@ -715,8 +709,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
- __func__);
+ pr_debug("%s no matching queue found => ENODEV\n", __func__);
rc = -ENODEV;
goto out;
}
@@ -820,8 +813,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
- __func__);
+ pr_debug("%s no matching queue found => ENODEV\n", __func__);
rc = -ENODEV;
goto out;
}
@@ -865,6 +857,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
goto out;
+ print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
tdom = *domain;
if (perms != &ap_perms && tdom < AP_DOMAINS) {
@@ -940,8 +934,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
- __func__, xcrb->user_defined, *domain);
+ pr_debug("%s no match for address %02x.%04x => ENODEV\n",
+ __func__, xcrb->user_defined, *domain);
rc = -ENODEV;
goto out;
}
@@ -952,6 +946,10 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
*domain = AP_QID_QUEUE(qid);
rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
+ if (!rc) {
+ print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
+ }
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
@@ -970,7 +968,26 @@ out:
long zcrypt_send_cprb(struct ica_xcRB *xcrb)
{
- return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb);
+ struct zcrypt_track tr;
+ int rc;
+
+ memset(&tr, 0, sizeof(tr));
+
+ do {
+ rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
+ do {
+ rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ pr_debug("%s rc=%d\n", __func__, rc);
+
+ return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
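
Editor's note: zcrypt_send_cprb() above now follows the retry discipline applied throughout this patch: bounded -EAGAIN retries, then one synchronous AP bus rescan and a second bounded round on -ENODEV. A condensed sketch of that shape, assuming the zcrypt_track and TRACK_AGAIN_MAX declarations from the patch are in scope (send_with_retry() and its callback parameter are illustrative):

static long send_with_retry(long (*send)(struct zcrypt_track *tr),
			    struct zcrypt_track *tr)
{
	long rc;

	do {
		rc = send(tr);
	} while (rc == -EAGAIN && ++tr->again_counter < TRACK_AGAIN_MAX);

	/* one rescan, then one more bounded retry round */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = send(tr);
		} while (rc == -EAGAIN &&
			 ++tr->again_counter < TRACK_AGAIN_MAX);

	if (rc == -EAGAIN && tr->again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	return rc;
}

The same shape recurs in zcrypt_send_ep11_cprb() and in all of the ioctl paths below.
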
@@ -1045,6 +1062,8 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
goto out_free;
+ print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
if (perms != &ap_perms && domain < AUTOSEL_DOM) {
if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
@@ -1113,15 +1132,15 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
if (!pref_zq) {
if (targets && target_num == 1) {
- ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
- __func__, (int)targets->ap_id,
- (int)targets->dom_id);
+ pr_debug("%s no match for address %02x.%04x => ENODEV\n",
+ __func__, (int)targets->ap_id,
+ (int)targets->dom_id);
} else if (targets) {
- ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n",
- __func__, (int)target_num);
+ pr_debug("%s no match for %d target addrs => ENODEV\n",
+ __func__, (int)target_num);
} else {
- ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n",
- __func__);
+ pr_debug("%s no match for address ff.ffff => ENODEV\n",
+ __func__);
}
rc = -ENODEV;
goto out_free;
@@ -1129,6 +1148,10 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
qid = pref_zq->queue->qid;
rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
+ if (!rc) {
+ print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
+ }
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
@@ -1149,7 +1172,26 @@ out:
long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
- return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
+ struct zcrypt_track tr;
+ int rc;
+
+ memset(&tr, 0, sizeof(tr));
+
+ do {
+ rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
+ do {
+ rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ pr_debug("%s rc=%d\n", __func__, rc);
+
+ return rc;
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
@@ -1199,8 +1241,7 @@ static long zcrypt_rng(char *buffer)
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
- __func__);
+ pr_debug("%s no matching queue found => ENODEV\n", __func__);
rc = -ENODEV;
goto out;
}
@@ -1431,20 +1472,17 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc) {
- ZCRYPT_DBF_DBG("ioctl ICARSAMODEXPO rc=%d\n", rc);
+ pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
return rc;
}
return put_user(mex.outputdatalength, &umex->outputdatalength);
@@ -1463,20 +1501,17 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc) {
- ZCRYPT_DBF_DBG("ioctl ICARSACRT rc=%d\n", rc);
+ pr_debug("ioctl ICARSACRT rc=%d\n", rc);
return rc;
}
return put_user(crt.outputdatalength, &ucrt->outputdatalength);
@@ -1495,21 +1530,18 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
- ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n",
- rc, xcrb.status);
+ pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
+ rc, xcrb.status);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
return rc;
@@ -1528,20 +1560,17 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
do {
rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
- ZCRYPT_DBF_DBG("ioctl ZSENDEP11CPRB rc=%d\n", rc);
+ pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
return rc;
@@ -1670,7 +1699,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
/* unknown ioctl number */
default:
- ZCRYPT_DBF_DBG("unknown ioctl 0x%08x\n", cmd);
+ pr_debug("unknown ioctl 0x%08x\n", cmd);
return -ENOIOCTLCMD;
}
}
@@ -1708,16 +1737,13 @@ static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
mex64.n_modulus = compat_ptr(mex32.n_modulus);
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
@@ -1761,16 +1787,13 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
@@ -1833,16 +1856,13 @@ static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
xcrb64.status = xcrb32.status;
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
@@ -1914,8 +1934,8 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
*/
if (zcrypt_rng_buffer_index == 0) {
rc = zcrypt_rng((char *)zcrypt_rng_buffer);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ /* on ENODEV failure: retry once again after an AP bus rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
rc = zcrypt_rng((char *)zcrypt_rng_buffer);
if (rc < 0)
return -EIO;
@@ -1977,7 +1997,7 @@ void zcrypt_rng_device_remove(void)
* an asynchronous job. This function waits until these initial jobs
* are done and so the zcrypt api should be ready to serve crypto
* requests - if there are resources available. The function uses an
- * internal timeout of 60s. The very first caller will either wait for
+ * internal timeout of 30s. The very first caller will either wait for
* ap bus bindings complete or the timeout happens. This state will be
* remembered for further callers which will only be blocked until a
* decision is made (timeout or bindings complete).
@@ -1996,8 +2016,8 @@ int zcrypt_wait_api_operational(void)
switch (zcrypt_wait_api_state) {
case 0:
/* initial state, invoke wait for the ap bus complete */
- rc = ap_wait_init_apqn_bindings_complete(
- msecs_to_jiffies(60 * 1000));
+ rc = ap_wait_apqn_bindings_complete(
+ msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
switch (rc) {
case 0:
/* ap bus bindings are complete */
@@ -2014,8 +2034,8 @@ int zcrypt_wait_api_operational(void)
break;
default:
/* other failure */
- ZCRYPT_DBF_DBG("%s ap_wait_init_apqn_bindings_complete()=%d\n",
- __func__, rc);
+ pr_debug("%s ap_wait_init_apqn_bindings_complete()=%d\n",
+ __func__, rc);
break;
}
break;
@@ -2038,7 +2058,7 @@ EXPORT_SYMBOL(zcrypt_wait_api_operational);
int __init zcrypt_debug_init(void)
{
zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
- DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
debug_set_level(zcrypt_dbf_info, DBF_ERR);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index de659954c8f7..4ed481df57ca 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -38,6 +38,15 @@
*/
#define ZCRYPT_RNG_BUFFER_SIZE 4096
+/**
+ * The zcrypt_wait_api_operational() function waits this
+ * amount of time in milliseconds for ap_wait_apqn_bindings_complete().
+ * Also, on a CPRB send failure with ENODEV, the send functions
+ * trigger an AP bus rescan and wait this long for
+ * ap_wait_apqn_bindings_complete() before resending.
+ */
+#define ZCRYPT_WAIT_BINDINGS_COMPLETE_MS 30000
+
/*
* Identifier for Crypto Request Performance Index
*/
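
Editor's note: ZCRYPT_WAIT_BINDINGS_COMPLETE_MS replaces the previously hard-coded 60-second wait; callers convert it to jiffies at the call site. A usage sketch matching the zcrypt_wait_api_operational() hunk above (the wrapper function is illustrative):

static int zcrypt_wait_bindings(void)
{
	/* millisecond budget converted to jiffies for the AP bus wait */
	return ap_wait_apqn_bindings_complete(
			msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
}
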
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 263fe182648b..0a3a678ffc7e 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -23,11 +23,6 @@
#include "zcrypt_msgtype6.h"
#include "zcrypt_ccamisc.h"
-#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
-#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
-#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
-#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
-
/* Size of parameter block used for all cca requests/replies */
#define PARMBSIZE 512
@@ -367,8 +362,8 @@ int cca_genseckey(u16 cardnr, u16 domain,
memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
break;
default:
- DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
rc = -EINVAL;
goto out;
}
@@ -386,15 +381,15 @@ int cca_genseckey(u16 cardnr, u16 domain,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR("%s secure key generate failure, card response %d/%d\n",
- __func__,
+ ZCRYPT_DBF_ERR("%s secure key generate failure, card response %d/%d\n",
+ __func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
rc = -EIO;
@@ -411,8 +406,8 @@ int cca_genseckey(u16 cardnr, u16 domain,
- sizeof(prepparm->lv3.keyblock.toklen)
- sizeof(prepparm->lv3.keyblock.tokattr);
if (seckeysize != SECKEYBLOBSIZE) {
- DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
- __func__, seckeysize, SECKEYBLOBSIZE);
+ ZCRYPT_DBF_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
rc = -EIO;
goto out;
}
@@ -505,8 +500,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
keysize = 32;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
rc = -EINVAL;
goto out;
}
@@ -524,17 +519,17 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR("%s clear key import failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s clear key import failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
@@ -549,8 +544,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
- sizeof(prepparm->lv3.keyblock.toklen)
- sizeof(prepparm->lv3.keyblock.tokattr);
if (seckeysize != SECKEYBLOBSIZE) {
- DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
- __func__, seckeysize, SECKEYBLOBSIZE);
+ ZCRYPT_DBF_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
rc = -EIO;
goto out;
}
@@ -651,17 +646,17 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
rc = -EAGAIN;
else
@@ -669,10 +664,10 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
goto out;
}
if (prepcblk->ccp_rscode != 0) {
- DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
@@ -683,8 +678,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
/* check the returned keyblock */
if (prepparm->lv3.ckb.version != 0x01 &&
prepparm->lv3.ckb.version != 0x02) {
- DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
- __func__, (int)prepparm->lv3.ckb.version);
+ ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int)prepparm->lv3.ckb.version);
rc = -EIO;
goto out;
}
@@ -707,8 +702,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keylen %d\n",
- __func__, prepparm->lv3.ckb.len);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->lv3.ckb.len);
rc = -EIO;
goto out;
}
@@ -840,9 +835,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
case 256:
break;
default:
- DEBUG_ERR(
- "%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
rc = -EINVAL;
goto out;
}
@@ -880,19 +874,17 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s cipher key generate failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s cipher key generate failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
@@ -905,8 +897,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
- DEBUG_ERR("%s reply with invalid or unknown key block\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
rc = -EIO;
goto out;
}
@@ -1048,19 +1040,17 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s CSNBKPI2 failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s CSNBKPI2 failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
@@ -1073,8 +1063,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
- DEBUG_ERR("%s reply with invalid or unknown key block\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
rc = -EIO;
goto out;
}
@@ -1132,33 +1122,29 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART",
exorbuf, keybitsize, token, &tokensize);
if (rc) {
- DEBUG_ERR(
- "%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
clrkey, keybitsize, token, &tokensize);
if (rc) {
- DEBUG_ERR(
- "%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
exorbuf, keybitsize, token, &tokensize);
if (rc) {
- DEBUG_ERR(
- "%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL,
NULL, keybitsize, token, &tokensize);
if (rc) {
- DEBUG_ERR(
- "%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
goto out;
}
@@ -1265,19 +1251,17 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
rc = -EAGAIN;
else
@@ -1285,11 +1269,10 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
goto out;
}
if (prepcblk->ccp_rscode != 0) {
- DEBUG_WARN(
- "%s unwrap secure key warning, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
@@ -1300,15 +1283,14 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
/* check the returned keyblock */
if (prepparm->vud.ckb.version != 0x01 &&
prepparm->vud.ckb.version != 0x02) {
- DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
- __func__, (int)prepparm->vud.ckb.version);
+ ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int)prepparm->vud.ckb.version);
rc = -EIO;
goto out;
}
if (prepparm->vud.ckb.algo != 0x02) {
- DEBUG_ERR(
- "%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
- __func__, (int)prepparm->vud.ckb.algo);
+ ZCRYPT_DBF_ERR("%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
+ __func__, (int)prepparm->vud.ckb.algo);
rc = -EIO;
goto out;
}
@@ -1331,8 +1313,8 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keylen %d\n",
- __func__, prepparm->vud.ckb.keylen);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->vud.ckb.keylen);
rc = -EIO;
goto out;
}
@@ -1432,19 +1414,17 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
rc = -EAGAIN;
else
@@ -1452,11 +1432,10 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
goto out;
}
if (prepcblk->ccp_rscode != 0) {
- DEBUG_WARN(
- "%s unwrap secure key warning, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
@@ -1466,23 +1445,22 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
/* check the returned keyblock */
if (prepparm->vud.ckb.version != 0x02) {
- DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
- __func__, (int)prepparm->vud.ckb.version);
+ ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
+ __func__, (int)prepparm->vud.ckb.version);
rc = -EIO;
goto out;
}
if (prepparm->vud.ckb.algo != 0x81) {
- DEBUG_ERR(
- "%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
- __func__, (int)prepparm->vud.ckb.algo);
+ ZCRYPT_DBF_ERR("%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
+ __func__, (int)prepparm->vud.ckb.algo);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
if (prepparm->vud.ckb.keylen > *protkeylen) {
- DEBUG_ERR("%s prot keylen mismatch %d > buffersize %u\n",
- __func__, prepparm->vud.ckb.keylen, *protkeylen);
+ ZCRYPT_DBF_ERR("%s prot keylen mismatch %d > buffersize %u\n",
+ __func__, prepparm->vud.ckb.keylen, *protkeylen);
rc = -EIO;
goto out;
}
@@ -1550,17 +1528,17 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 5cf88aabd64b..9a208dc4c200 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -17,7 +17,7 @@
#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
-#define DBF_MAX_SPRINTF_ARGS 6
+#define ZCRYPT_DBF_MAX_SPRINTF_ARGS 6
#define ZCRYPT_DBF(...) \
debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
@@ -27,8 +27,6 @@
debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__)
#define ZCRYPT_DBF_INFO(...) \
debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__)
-#define ZCRYPT_DBF_DBG(...) \
- debug_sprintf_event(zcrypt_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
extern debug_info_t *zcrypt_dbf_info;
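
With ZCRYPT_DBF_DBG gone, only the ERR/WARN/INFO levels remain in the zcrypt debug feature, and debug-level events move to dynamic debug. A minimal sketch of the intended split after this cleanup (values are illustrative):

	/* error/warning events still go through the s390 debug feature */
	ZCRYPT_DBF_ERR("%s request failed, rc=%d\n", __func__, rc);
	ZCRYPT_DBF_WARN("%s unusual reply on card %d\n", __func__, card);

	/* plain debug events now use dynamic debug instead */
	pr_debug("%s reply len mismatch\n", __func__);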
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 0a877f9792c2..eb7f5489ccf9 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -24,11 +24,6 @@
#include "zcrypt_ep11misc.h"
#include "zcrypt_ccamisc.h"
-#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
-#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
-#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
-#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
-
#define EP11_PINBLOB_V1_BYTES 56
/* default iv used here */
@@ -510,7 +505,7 @@ static int check_reply_pl(const u8 *pl, const char *func)
/* start tag */
if (*pl++ != 0x30) {
- DEBUG_ERR("%s reply start tag mismatch\n", func);
+ ZCRYPT_DBF_ERR("%s reply start tag mismatch\n", func);
return -EIO;
}
@@ -527,40 +522,41 @@ static int check_reply_pl(const u8 *pl, const char *func)
len = *((u16 *)pl);
pl += 2;
} else {
- DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
- func, *pl);
+ ZCRYPT_DBF_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
+ func, *pl);
return -EIO;
}
/* len should cover at least 3 fields with 32 bit value each */
if (len < 3 * 6) {
- DEBUG_ERR("%s reply length %d too small\n", func, len);
+ ZCRYPT_DBF_ERR("%s reply length %d too small\n", func, len);
return -EIO;
}
/* function tag, length and value */
if (pl[0] != 0x04 || pl[1] != 0x04) {
- DEBUG_ERR("%s function tag or length mismatch\n", func);
+ ZCRYPT_DBF_ERR("%s function tag or length mismatch\n", func);
return -EIO;
}
pl += 6;
/* dom tag, length and value */
if (pl[0] != 0x04 || pl[1] != 0x04) {
- DEBUG_ERR("%s dom tag or length mismatch\n", func);
+ ZCRYPT_DBF_ERR("%s dom tag or length mismatch\n", func);
return -EIO;
}
pl += 6;
/* return value tag, length and value */
if (pl[0] != 0x04 || pl[1] != 0x04) {
- DEBUG_ERR("%s return value tag or length mismatch\n", func);
+ ZCRYPT_DBF_ERR("%s return value tag or length mismatch\n",
+ func);
return -EIO;
}
pl += 2;
ret = *((u32 *)pl);
if (ret != 0) {
- DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret);
+ ZCRYPT_DBF_ERR("%s return value 0x%04x != 0\n", func, ret);
return -EIO;
}
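
check_reply_pl() walks a small TLV-encoded reply payload. A sketch of the layout the checks above expect, as I read them (illustrative only; the function parses the buffer byte-wise):

	/*
	 *   0x30 <len>             sequence tag; len must be >= 3 * 6
	 *   0x04 0x04 <4 bytes>    function id
	 *   0x04 0x04 <4 bytes>    domain
	 *   0x04 0x04 <4 bytes>    return value, must be 0
	 *
	 * <len> is either a single byte, or 0x81/0x82 followed by a one- or
	 * two-byte length, which is what the lenfmt branch handles.
	 */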
@@ -626,9 +622,8 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
@@ -636,13 +631,13 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > buflen) {
- DEBUG_ERR("%s mismatch between reply data len and buffer len\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s mismatch between reply data len and buffer len\n",
+ __func__);
rc = -ENOSPC;
goto out;
}
@@ -816,9 +811,8 @@ static int _ep11_genaeskey(u16 card, u16 domain,
case 256:
break;
default:
- DEBUG_ERR(
- "%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
rc = -EINVAL;
goto out;
}
@@ -878,9 +872,8 @@ static int _ep11_genaeskey(u16 card, u16 domain,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)card, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
goto out;
}
@@ -888,13 +881,13 @@ static int _ep11_genaeskey(u16 card, u16 domain,
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > *keybufsize) {
- DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
rc = -ENOSPC;
goto out;
}
@@ -1030,9 +1023,8 @@ static int ep11_cryptsingle(u16 card, u16 domain,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)card, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
goto out;
}
@@ -1040,7 +1032,7 @@ static int ep11_cryptsingle(u16 card, u16 domain,
if (rc)
goto out;
if (rep_pl->data_tag != 0x04) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
@@ -1053,14 +1045,14 @@ static int ep11_cryptsingle(u16 card, u16 domain,
n = *((u16 *)p);
p += 2;
} else {
- DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
- __func__, rep_pl->data_lenfmt);
+ ZCRYPT_DBF_ERR("%s unknown reply data length format 0x%02hhx\n",
+ __func__, rep_pl->data_lenfmt);
rc = -EIO;
goto out;
}
if (n > *outbufsize) {
- DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n",
- __func__, n, *outbufsize);
+ ZCRYPT_DBF_ERR("%s mismatch reply data len %d / output buffer %zu\n",
+ __func__, n, *outbufsize);
rc = -ENOSPC;
goto out;
}
@@ -1188,9 +1180,8 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)card, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
goto out;
}
@@ -1198,13 +1189,13 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > *keybufsize) {
- DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
rc = -ENOSPC;
goto out;
}
@@ -1343,9 +1334,8 @@ static int _ep11_wrapkey(u16 card, u16 domain,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)card, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
goto out;
}
@@ -1353,13 +1343,13 @@ static int _ep11_wrapkey(u16 card, u16 domain,
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > *datasize) {
- DEBUG_ERR("%s mismatch reply data len / data buffer len\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s mismatch reply data len / data buffer len\n",
+ __func__);
rc = -ENOSPC;
goto out;
}
@@ -1386,9 +1376,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) {
clrkeylen = keybitsize / 8;
} else {
- DEBUG_ERR(
- "%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
return -EINVAL;
}
@@ -1405,9 +1394,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
kek, &keklen);
if (rc) {
- DEBUG_ERR(
- "%s generate kek key failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s generate kek key failed, rc=%d\n",
+ __func__, rc);
goto out;
}
@@ -1415,9 +1403,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
clrkey, clrkeylen, encbuf, &encbuflen);
if (rc) {
- DEBUG_ERR(
- "%s encrypting key value with kek key failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s encrypting key value with kek key failed, rc=%d\n",
+ __func__, rc);
goto out;
}
@@ -1426,9 +1413,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
encbuf, encbuflen, 0, def_iv,
keybitsize, 0, keybuf, keybufsize, keytype);
if (rc) {
- DEBUG_ERR(
- "%s importing key value as new key failed,, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s importing key value as new key failed,, rc=%d\n",
+ __func__, rc);
goto out;
}
@@ -1476,17 +1462,16 @@ int ep11_kblob2protkey(u16 card, u16 dom,
rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen,
0, def_iv, wkbuf, &wkbuflen);
if (rc) {
- DEBUG_ERR(
- "%s rewrapping ep11 key to pkey failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s rewrapping ep11 key to pkey failed, rc=%d\n",
+ __func__, rc);
goto out;
}
wki = (struct wk_info *)wkbuf;
/* check struct version and pkey type */
if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) {
- DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
- __func__, (int)wki->version, (int)wki->pkeytype);
+ ZCRYPT_DBF_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
+ __func__, (int)wki->version, (int)wki->pkeytype);
rc = -EIO;
goto out;
}
@@ -1511,8 +1496,8 @@ int ep11_kblob2protkey(u16 card, u16 dom,
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
- DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n",
- __func__, (int)wki->pkeysize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported AES pkeysize %d\n",
+ __func__, (int)wki->pkeysize);
rc = -EIO;
goto out;
}
@@ -1525,16 +1510,16 @@ int ep11_kblob2protkey(u16 card, u16 dom,
break;
case 2: /* TDES */
default:
- DEBUG_ERR("%s unknown/unsupported key type %d\n",
- __func__, (int)wki->pkeytype);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported key type %d\n",
+ __func__, (int)wki->pkeytype);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
if (wki->pkeysize > *protkeylen) {
- DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
- __func__, wki->pkeysize, *protkeylen);
+ ZCRYPT_DBF_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
+ __func__, wki->pkeysize, *protkeylen);
rc = -EINVAL;
goto out;
}
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index a44fcfcec938..46e27b43a8af 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -119,10 +119,9 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP82_ERROR_MESSAGE_TYPE: /* 0x20 */
case REP82_ERROR_TRANSPORT_FAIL: /* 0x90 */
/*
- * Msg to wrong type or card/infrastructure failure.
- * Trigger rescan of the ap bus, trigger retry request.
+ * Msg to wrong type or card/infrastructure failure. Return
+ * EAGAIN; the upper layer may retry the request.
*/
- atomic_set(&zcrypt_rescan_req, 1);
/* For type 86 response show the apfs value (failure reason) */
if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL &&
ehdr->type == TYPE86_RSP_CODE) {
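
With the automatic AP bus rescan trigger removed, convert_error() simply hands -EAGAIN back and leaves any retry policy to the caller. A hypothetical caller-side loop, not part of this patch (MAX_TRIES is an assumed constant):

	for (i = 0; i < MAX_TRIES; i++) {
		rc = zcrypt_send_cprb(&xcrb);
		if (rc != -EAGAIN)
			break;
		/* transport/infrastructure failure: retry the request */
	}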
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 2e155de8abe5..3b39cb8f926d 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -427,7 +427,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq,
len = t80h->len;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("%s len mismatch => EMSGSIZE\n", __func__);
msg->rc = -EMSGSIZE;
goto out;
}
@@ -487,9 +487,9 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq,
out:
ap_msg->private = NULL;
if (rc)
- ZCRYPT_DBF_DBG("%s send me cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), rc);
+ pr_debug("%s send me cprb at dev=%02x.%04x rc=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
@@ -537,9 +537,9 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq,
out:
ap_msg->private = NULL;
if (rc)
- ZCRYPT_DBF_DBG("%s send crt cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), rc);
+ pr_debug("%s send crt cprb at dev=%02x.%04x rc=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 3c53abbdc342..215f257d2360 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -437,9 +437,9 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
ap_msg->flags |= AP_MSG_FLAG_ADMIN;
break;
default:
- ZCRYPT_DBF_DBG("%s unknown CPRB minor version '%c%c'\n",
- __func__, msg->cprbx.func_id[0],
- msg->cprbx.func_id[1]);
+ pr_debug("%s unknown CPRB minor version '%c%c'\n",
+ __func__, msg->cprbx.func_id[0],
+ msg->cprbx.func_id[1]);
}
/* copy data block */
@@ -629,9 +629,9 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
/* Copy CPRB to user */
if (xcrb->reply_control_blk_length < msg->fmt2.count1) {
- ZCRYPT_DBF_DBG("%s reply_control_blk_length %u < required %u => EMSGSIZE\n",
- __func__, xcrb->reply_control_blk_length,
- msg->fmt2.count1);
+ pr_debug("%s reply_control_blk_length %u < required %u => EMSGSIZE\n",
+ __func__, xcrb->reply_control_blk_length,
+ msg->fmt2.count1);
return -EMSGSIZE;
}
if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr,
@@ -642,9 +642,9 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
/* Copy data buffer to user */
if (msg->fmt2.count2) {
if (xcrb->reply_data_length < msg->fmt2.count2) {
- ZCRYPT_DBF_DBG("%s reply_data_length %u < required %u => EMSGSIZE\n",
- __func__, xcrb->reply_data_length,
- msg->fmt2.count2);
+ pr_debug("%s reply_data_length %u < required %u => EMSGSIZE\n",
+ __func__, xcrb->reply_data_length,
+ msg->fmt2.count2);
return -EMSGSIZE;
}
if (z_copy_to_user(userspace, xcrb->reply_data_addr,
@@ -673,9 +673,9 @@ static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
char *data = reply->msg;
if (xcrb->resp_len < msg->fmt2.count1) {
- ZCRYPT_DBF_DBG("%s resp_len %u < required %u => EMSGSIZE\n",
- __func__, (unsigned int)xcrb->resp_len,
- msg->fmt2.count1);
+ pr_debug("%s resp_len %u < required %u => EMSGSIZE\n",
+ __func__, (unsigned int)xcrb->resp_len,
+ msg->fmt2.count1);
return -EMSGSIZE;
}
@@ -875,7 +875,8 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
len = sizeof(struct type86x_reply) + t86r->length;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("%s len mismatch => EMSGSIZE\n",
+ __func__);
msg->rc = -EMSGSIZE;
goto out;
}
@@ -889,7 +890,8 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("%s len mismatch => EMSGSIZE\n",
+ __func__);
msg->rc = -EMSGSIZE;
goto out;
}
@@ -939,7 +941,8 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("%s len mismatch => EMSGSIZE\n",
+ __func__);
msg->rc = -EMSGSIZE;
goto out;
}
@@ -1151,9 +1154,9 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
out:
if (rc)
- ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), rc);
+ pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
@@ -1274,9 +1277,9 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
out:
if (rc)
- ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), rc);
+ pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index b92a32b4b114..04c64ce0a1ca 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -255,9 +255,10 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
if (!recover) {
hash_del(&addr->hnode);
kfree(addr);
- continue;
+ } else {
+ /* prepare for recovery */
+ addr->disp_flag = QETH_DISP_ADDR_ADD;
}
- addr->disp_flag = QETH_DISP_ADDR_ADD;
}
mutex_unlock(&card->ip_lock);
@@ -278,9 +279,11 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
rc = qeth_l3_register_addr_entry(card, addr);
- if (!rc) {
+ if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) {
+ /* keep it in the records */
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
} else {
+ /* bad address */
hash_del(&addr->hnode);
kfree(addr);
}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index addac7fbe37b..9ce27092729c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1270,7 +1270,7 @@ source "drivers/scsi/arm/Kconfig"
config JAZZ_ESP
bool "MIPS JAZZ FAS216 SCSI support"
- depends on MACH_JAZZ && SCSI
+ depends on MACH_JAZZ && SCSI=y
select SCSI_SPI_ATTRS
help
This is the driver for the onboard SCSI host adapter of MIPS Magnum
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 19eee108db02..5c8d1ba3f8f3 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -319,17 +319,16 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *sel;
struct fcoe_fcf *fcf;
- unsigned long flags;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
kfree_skb(fip->flogi_req);
fip->flogi_req = NULL;
list_for_each_entry(fcf, &fip->fcfs, list)
fcf->flogi_sent = 0;
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
sel = fip->sel_fcf;
if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
@@ -700,7 +699,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
{
struct fc_frame *fp;
struct fc_frame_header *fh;
- unsigned long flags;
u16 old_xid;
u8 op;
u8 mac[ETH_ALEN];
@@ -734,11 +732,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
op = FIP_DT_FLOGI;
if (fip->mode == FIP_MODE_VN2VN)
break;
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
kfree_skb(fip->flogi_req);
fip->flogi_req = skb;
fip->flogi_req_send = 1;
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
schedule_work(&fip->timer_work);
return -EINPROGRESS;
case ELS_FDISC:
@@ -1707,11 +1705,10 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
- unsigned long flags;
int error;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
fcf = fcoe_ctlr_select(fip);
if (!fcf || fcf->flogi_sent) {
@@ -1722,7 +1719,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
fcoe_ctlr_solicit(fip, NULL);
error = fcoe_ctlr_flogi_send_locked(fip);
}
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
mutex_unlock(&fip->ctlr_mutex);
return error;
}
@@ -1739,9 +1736,8 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
- unsigned long flags;
- spin_lock_irqsave(&fip->ctlr_lock, flags);
+ spin_lock_bh(&fip->ctlr_lock);
fcf = fip->sel_fcf;
if (!fcf || !fip->flogi_req_send)
goto unlock;
@@ -1768,7 +1764,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
} else /* XXX */
LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
unlock:
- spin_unlock_irqrestore(&fip->ctlr_lock, flags);
+ spin_unlock_bh(&fip->ctlr_lock);
}
/**
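
The fip->ctlr_lock conversion drops the irqsave variant: as I read this change, the lock only protects FLOGI state touched from process and softirq context, never from hard interrupt context, so disabling bottom halves is sufficient and cheaper. The resulting pattern, used throughout the file:

	spin_lock_bh(&fip->ctlr_lock);	/* excludes softirq-context users */
	fip->flogi_req_send = 1;	/* example of protected state */
	spin_unlock_bh(&fip->ctlr_lock);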
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 2074937c05bc..ce73f08ee889 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -305,6 +305,7 @@ struct fnic {
unsigned int copy_wq_base;
struct work_struct link_work;
struct work_struct frame_work;
+ struct work_struct flush_work;
struct sk_buff_head frame_queue;
struct sk_buff_head tx_queue;
@@ -363,7 +364,7 @@ void fnic_handle_event(struct work_struct *work);
int fnic_rq_cmpl_handler(struct fnic *fnic, int);
int fnic_alloc_rq_frame(struct vnic_rq *rq);
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
-void fnic_flush_tx(struct fnic *);
+void fnic_flush_tx(struct work_struct *work);
void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb);
void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *);
void fnic_update_mac(struct fc_lport *, u8 *new);
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 5e312a55cc7d..a08293b2ad9f 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -1182,7 +1182,7 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
/**
* fnic_flush_tx() - send queued frames.
- * @fnic: fnic device
+ * @work: pointer to work element
*
* Send frames that were waiting to go out in FC or Ethernet mode.
* Whenever changing modes we purge queued frames, so these frames should
@@ -1190,8 +1190,9 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
*
* Called without fnic_lock held.
*/
-void fnic_flush_tx(struct fnic *fnic)
+void fnic_flush_tx(struct work_struct *work)
{
+ struct fnic *fnic = container_of(work, struct fnic, flush_work);
struct sk_buff *skb;
struct fc_frame *fp;
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 5ed1d897311a..29eead383eb9 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -830,6 +830,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
+ INIT_WORK(&fnic->flush_work, fnic_flush_tx);
skb_queue_head_init(&fnic->fip_frame_queue);
INIT_LIST_HEAD(&fnic->evlist);
INIT_LIST_HEAD(&fnic->vlans);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 8d7fc5284293..fc4cee91b175 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -680,7 +680,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_flush_tx(fnic);
+ queue_work(fnic_event_queue, &fnic->flush_work);
reset_cmpl_handler_end:
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
@@ -736,7 +736,7 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
- fnic_flush_tx(fnic);
+ queue_work(fnic_event_queue, &fnic->flush_work);
queue_work(fnic_event_queue, &fnic->frame_work);
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
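
fnic_flush_tx() becomes a work item, so the FW-reset and FLOGI-register completion handlers above queue it instead of draining the transmit queues inline. A self-contained sketch of the same pattern with hypothetical names:

	#include <linux/workqueue.h>

	struct foo {				/* hypothetical device */
		struct work_struct flush_work;
	};

	static void foo_flush(struct work_struct *work)
	{
		struct foo *f = container_of(work, struct foo, flush_work);

		/* drain f's queues here; runs in process context */
	}

	static void foo_init(struct foo *f)
	{
		INIT_WORK(&f->flush_work, foo_flush);
	}

	static void foo_kick(struct foo *f)
	{
		schedule_work(&f->flush_work);	/* safe from atomic context */
	}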
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index d26941b131fd..bf879d81846b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1918,7 +1918,7 @@ out:
*
* Returns the number of SGEs added to the SGL.
**/
-static int
+static uint32_t
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct sli4_sge *sgl, int datasegcnt,
struct lpfc_io_buf *lpfc_cmd)
@@ -1926,8 +1926,8 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct scatterlist *sgde = NULL; /* s/g data entry */
struct sli4_sge_diseed *diseed = NULL;
dma_addr_t physaddr;
- int i = 0, num_sge = 0, status;
- uint32_t reftag;
+ int i = 0, status;
+ uint32_t reftag, num_sge = 0;
uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint32_t rc;
@@ -2099,7 +2099,7 @@ out:
*
* Returns the number of SGEs added to the SGL.
**/
-static int
+static uint32_t
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct sli4_sge *sgl, int datacnt, int protcnt,
struct lpfc_io_buf *lpfc_cmd)
@@ -2123,8 +2123,8 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint32_t rc;
#endif
uint32_t checking = 1;
- uint32_t dma_offset = 0;
- int num_sge = 0, j = 2;
+ uint32_t dma_offset = 0, num_sge = 0;
+ int j = 2;
struct sli4_hybrid_sgl *sgl_xtra = NULL;
sgpe = scsi_prot_sglist(sc);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c
index c0c8ab586957..d32ad46318cb 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_transport.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c
@@ -1671,7 +1671,7 @@ mpi3mr_update_mr_sas_port(struct mpi3mr_ioc *mrioc, struct host_port *h_port,
void
mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
{
- struct host_port h_port[64];
+ struct host_port *h_port = NULL;
int i, j, found, host_port_count = 0, port_idx;
u16 sz, attached_handle, ioc_status;
struct mpi3_sas_io_unit_page0 *sas_io_unit_pg0 = NULL;
@@ -1685,6 +1685,10 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
sas_io_unit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_io_unit_pg0)
return;
+ h_port = kcalloc(64, sizeof(struct host_port), GFP_KERNEL);
+ if (!h_port)
+ goto out;
+
if (mpi3mr_cfg_get_sas_io_unit_pg0(mrioc, sas_io_unit_pg0, sz)) {
ioc_err(mrioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -1814,6 +1818,7 @@ mpi3mr_refresh_sas_ports(struct mpi3mr_ioc *mrioc)
}
}
out:
+ kfree(h_port);
kfree(sas_io_unit_pg0);
}
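
A 64-entry array of struct host_port is too large to live on the kernel stack, so it moves to the heap. The general shape of the fix:

	struct host_port *h_port;

	h_port = kcalloc(64, sizeof(*h_port), GFP_KERNEL);	/* zeroed */
	if (!h_port)
		goto out;	/* shared cleanup label */
	/* ... use h_port ... */
out:
	kfree(h_port);		/* kfree(NULL) is a no-op, so this is safe */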
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8761bc58d965..b8120ca93c79 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -7378,7 +7378,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
return -EFAULT;
}
- issue_diag_reset:
+ return 0;
+
+issue_diag_reset:
rc = _base_diag_reset(ioc);
return rc;
}
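
The added return 0 matters because a label is not a barrier: without it, the success path fell straight through into issue_diag_reset and reset the IOC even when the wait succeeded. The bug class, reduced to its essentials:

	if (bad_condition)
		goto issue_diag_reset;

	return 0;			/* the early return this patch adds */

issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;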
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 76d369343c7a..8cad9792a562 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -328,21 +328,39 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
return result + 4;
}
+enum scsi_vpd_parameters {
+ SCSI_VPD_HEADER_SIZE = 4,
+ SCSI_VPD_LIST_SIZE = 36,
+};
+
static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
{
- unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4);
+ unsigned char vpd[SCSI_VPD_LIST_SIZE] __aligned(4);
int result;
if (sdev->no_vpd_size)
return SCSI_DEFAULT_VPD_LEN;
/*
+ * Fetch the supported pages VPD and validate that the requested page
+ * number is present.
+ */
+ if (page != 0) {
+ result = scsi_vpd_inquiry(sdev, vpd, 0, sizeof(vpd));
+ if (result < SCSI_VPD_HEADER_SIZE)
+ return 0;
+
+ result -= SCSI_VPD_HEADER_SIZE;
+ if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
+ return 0;
+ }
+ /*
* Fetch the VPD page header to find out how big the page
* is. This is done to prevent problems on legacy devices
* which can not handle allocation lengths as large as
* potentially requested by the caller.
*/
- result = scsi_vpd_inquiry(sdev, vpd_header, page, sizeof(vpd_header));
+ result = scsi_vpd_inquiry(sdev, vpd, page, SCSI_VPD_HEADER_SIZE);
if (result < 0)
return 0;
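
The new check fetches VPD page 0x00 (Supported VPD Pages) and verifies the requested page code is actually advertised before asking for it. For reference, the page 0 layout per SPC that the memchr() scan relies on:

	/*
	 *   byte 0       peripheral qualifier / device type
	 *   byte 1       page code (0x00)
	 *   bytes 2..3   page length n
	 *   bytes 4..    list of n supported page codes
	 *
	 * hence the scan starts at &vpd[SCSI_VPD_HEADER_SIZE].
	 */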
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 4f455884fdc4..612489afe8d2 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -282,11 +282,12 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head)
{
struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
struct Scsi_Host *shost = scmd->device->host;
+ unsigned int busy = scsi_host_busy(shost);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
shost->host_failed++;
- scsi_eh_wakeup(shost, scsi_host_busy(shost));
+ scsi_eh_wakeup(shost, busy);
spin_unlock_irqrestore(shost->host_lock, flags);
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1fb80eae9a63..df5ac03d5d6c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -278,9 +278,11 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
rcu_read_lock();
__clear_bit(SCMD_STATE_INFLIGHT, &cmd->state);
if (unlikely(scsi_host_in_recovery(shost))) {
+ unsigned int busy = scsi_host_busy(shost);
+
spin_lock_irqsave(shost->host_lock, flags);
if (shost->host_failed || shost->host_eh_scheduled)
- scsi_eh_wakeup(shost, scsi_host_busy(shost));
+ scsi_eh_wakeup(shost, busy);
spin_unlock_irqrestore(shost->host_lock, flags);
}
rcu_read_unlock();
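
Both hunks hoist scsi_host_busy() out of the host lock: it iterates the whole tag set, which should not run with the spinlock held, while the value it produces is only consumed by the cheap scsi_eh_wakeup() call. The resulting pattern:

	unsigned int busy = scsi_host_busy(shost);	/* tag-set walk, no lock */

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_eh_wakeup(shost, busy);			/* cheap under the lock */
	spin_unlock_irqrestore(shost->host_lock, flags);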
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 44680f65ea14..9969f4e2f1c3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -332,7 +332,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->sg_reserved_size = INT_MAX;
- q = blk_mq_init_queue(&sdev->host->tag_set);
+ q = blk_mq_alloc_queue(&sdev->host->tag_set, NULL, NULL);
if (IS_ERR(q)) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0833b3e6aa6e..bdd0acf7fa3c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3407,6 +3407,24 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
return true;
}
+static void sd_read_block_zero(struct scsi_disk *sdkp)
+{
+ unsigned int buf_len = sdkp->device->sector_size;
+ char *buffer, cmd[10] = { };
+
+ buffer = kmalloc(buf_len, GFP_KERNEL);
+ if (!buffer)
+ return;
+
+ cmd[0] = READ_10;
+ put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+
+ scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
+ SD_TIMEOUT, sdkp->max_retries, NULL);
+ kfree(buffer);
+}
+
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -3446,7 +3464,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
if (sdkp->media_present) {
sd_read_capacity(sdkp, buffer);
-
+ /*
+ * Some USB/UAS devices return generic values for mode pages
+ * until the media has been accessed. Trigger a READ operation
+ * to force the device to populate mode pages.
+ */
+ if (sdp->read_before_ms)
+ sd_read_block_zero(sdkp);
/*
* set the default to rotational. All non-rotational devices
* support the block characteristics VPD page, which will
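
sd_read_block_zero() works around USB/UAS devices that report generic mode-page data until the media has been read at least once: a single-block READ(10) to LBA 0 forces the device to populate its mode pages before sd_revalidate_disk() fetches them. For reference, the CDB built above:

	/*
	 *   byte 0       0x28 (READ_10)
	 *   bytes 2..5   logical block address = 0 (big-endian)
	 *   bytes 7..8   transfer length = 1 block
	 *
	 * Transports opt in via sdev->read_before_ms, presumably set by the
	 * USB storage drivers elsewhere in this series.
	 */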
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index ceff1ec13f9e..385180c98be4 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -6533,8 +6533,11 @@ static void pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ if (!ctrl_info->disable_managed_interrupts)
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
ctrl_info->pci_dev, 0);
+ else
+ return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 0810b5b0c688..50c664b65f4d 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -68,4 +68,13 @@ config MTK_SVS
chip process corner, temperatures and other factors. Then DVFS
driver could apply SVS bank voltage to PMIC/Buck.
+config MTK_SOCINFO
+ tristate "MediaTek SoC Information"
+ default y
+ depends on NVMEM_MTK_EFUSE
+ help
+ The MediaTek SoC Information (mtk-socinfo) driver provides
+	  information about the SoC to userspace, including the
+	  manufacturer name, marketing name and SoC name.
+
endmenu
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index 9d3ce7878c5c..6830512848fd 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_MTK_REGULATOR_COUPLER) += mtk-regulator-coupler.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mutex.o
obj-$(CONFIG_MTK_SVS) += mtk-svs.o
+obj-$(CONFIG_MTK_SOCINFO) += mtk-socinfo.o
diff --git a/drivers/soc/mediatek/mtk-socinfo.c b/drivers/soc/mediatek/mtk-socinfo.c
new file mode 100644
index 000000000000..42572e8c1520
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-socinfo.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/device.h>
+#include <linux/device/bus.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/sys_soc.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#define MTK_SOCINFO_ENTRY(_soc_name, _segment_name, _marketing_name, _cell_data1, _cell_data2) {\
+ .soc_name = _soc_name, \
+ .segment_name = _segment_name, \
+ .marketing_name = _marketing_name, \
+ .cell_data = {_cell_data1, _cell_data2} \
+}
+#define CELL_NOT_USED (0xFFFFFFFF)
+#define MAX_CELLS (2)
+
+struct mtk_socinfo {
+ struct device *dev;
+ struct name_data *name_data;
+ struct socinfo_data *socinfo_data;
+ struct soc_device *soc_dev;
+};
+
+struct socinfo_data {
+ char *soc_name;
+ char *segment_name;
+ char *marketing_name;
+ u32 cell_data[MAX_CELLS];
+};
+
+static const char *cell_names[MAX_CELLS] = {"socinfo-data1", "socinfo-data2"};
+
+static struct socinfo_data socinfo_data_table[] = {
+ MTK_SOCINFO_ENTRY("MT8173", "MT8173V/AC", "MT8173", 0x6CA20004, 0x10000000),
+ MTK_SOCINFO_ENTRY("MT8183", "MT8183V/AZA", "Kompanio 500", 0x00010043, 0x00000840),
+ MTK_SOCINFO_ENTRY("MT8183", "MT8183V/AZA", "Kompanio 500", 0x00010043, 0x00000940),
+ MTK_SOCINFO_ENTRY("MT8186", "MT8186GV/AZA", "Kompanio 520", 0x81861001, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8186T", "MT8186TV/AZA", "Kompanio 528", 0x81862001, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/AZA", "Kompanio 830", 0x81880000, 0x00000010),
+ MTK_SOCINFO_ENTRY("MT8188", "MT8188GV/HZA", "Kompanio 830", 0x81880000, 0x00000011),
+ MTK_SOCINFO_ENTRY("MT8192", "MT8192V/AZA", "Kompanio 820", 0x00001100, 0x00040080),
+ MTK_SOCINFO_ENTRY("MT8192T", "MT8192V/ATZA", "Kompanio 828", 0x00000100, 0x000400C0),
+ MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EZA", "Kompanio 1200", 0x81950300, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8195", "MT8195GV/EHZA", "Kompanio 1200", 0x81950304, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EZA", "Kompanio 1380", 0x81950400, CELL_NOT_USED),
+ MTK_SOCINFO_ENTRY("MT8195", "MT8195TV/EHZA", "Kompanio 1380", 0x81950404, CELL_NOT_USED),
+};
+
+static int mtk_socinfo_create_socinfo_node(struct mtk_socinfo *mtk_socinfop)
+{
+ struct soc_device_attribute *attrs;
+ static char machine[30] = {0};
+ static const char *soc_manufacturer = "MediaTek";
+
+ attrs = devm_kzalloc(mtk_socinfop->dev, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ snprintf(machine, sizeof(machine), "%s (%s)", mtk_socinfop->socinfo_data->marketing_name,
+ mtk_socinfop->socinfo_data->soc_name);
+ attrs->family = soc_manufacturer;
+ attrs->machine = machine;
+
+ mtk_socinfop->soc_dev = soc_device_register(attrs);
+ if (IS_ERR(mtk_socinfop->soc_dev))
+ return PTR_ERR(mtk_socinfop->soc_dev);
+
+ dev_info(mtk_socinfop->dev, "%s %s SoC detected.\n", soc_manufacturer, attrs->machine);
+ return 0;
+}
+
+static u32 mtk_socinfo_read_cell(struct device *dev, const char *name)
+{
+ struct nvmem_device *nvmemp;
+ struct device_node *np, *nvmem_node = dev->parent->of_node;
+ u32 offset;
+ u32 cell_val = CELL_NOT_USED;
+
+ /* should never fail since the nvmem driver registers this child */
+ nvmemp = nvmem_device_find(nvmem_node, device_match_of_node);
+ if (IS_ERR(nvmemp))
+ goto out;
+
+ np = of_get_child_by_name(nvmem_node, name);
+ if (!np)
+ goto put_device;
+
+ if (of_property_read_u32_index(np, "reg", 0, &offset))
+ goto put_node;
+
+ nvmem_device_read(nvmemp, offset, sizeof(cell_val), &cell_val);
+
+put_node:
+ of_node_put(np);
+put_device:
+ nvmem_device_put(nvmemp);
+out:
+ return cell_val;
+}
+
+static int mtk_socinfo_get_socinfo_data(struct mtk_socinfo *mtk_socinfop)
+{
+ unsigned int i, j;
+ unsigned int num_cell_data = 0;
+ u32 cell_data[MAX_CELLS] = {0};
+ bool match_socinfo;
+ int match_socinfo_index = -1;
+
+ for (i = 0; i < MAX_CELLS; i++) {
+ cell_data[i] = mtk_socinfo_read_cell(mtk_socinfop->dev, cell_names[i]);
+ if (cell_data[i] != CELL_NOT_USED)
+ num_cell_data++;
+ else
+ break;
+ }
+
+ if (!num_cell_data)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(socinfo_data_table); i++) {
+ match_socinfo = true;
+ for (j = 0; j < num_cell_data; j++) {
+ if (cell_data[j] != socinfo_data_table[i].cell_data[j]) {
+ match_socinfo = false;
+ break;
+ }
+ }
+ if (match_socinfo) {
+ mtk_socinfop->socinfo_data = &(socinfo_data_table[i]);
+ match_socinfo_index = i;
+ break;
+ }
+ }
+
+ return match_socinfo_index >= 0 ? match_socinfo_index : -ENOENT;
+}
+
+static int mtk_socinfo_probe(struct platform_device *pdev)
+{
+ struct mtk_socinfo *mtk_socinfop;
+ int ret;
+
+ mtk_socinfop = devm_kzalloc(&pdev->dev, sizeof(*mtk_socinfop), GFP_KERNEL);
+ if (!mtk_socinfop)
+ return -ENOMEM;
+
+ mtk_socinfop->dev = &pdev->dev;
+
+ ret = mtk_socinfo_get_socinfo_data(mtk_socinfop);
+ if (ret < 0)
+ return dev_err_probe(mtk_socinfop->dev, ret, "Failed to get socinfo data\n");
+
+ ret = mtk_socinfo_create_socinfo_node(mtk_socinfop);
+ if (ret)
+ return dev_err_probe(mtk_socinfop->dev, ret, "Cannot create node\n");
+
+ platform_set_drvdata(pdev, mtk_socinfop);
+ return 0;
+}
+
+static void mtk_socinfo_remove(struct platform_device *pdev)
+{
+ struct mtk_socinfo *mtk_socinfop = platform_get_drvdata(pdev);
+
+ soc_device_unregister(mtk_socinfop->soc_dev);
+}
+
+static struct platform_driver mtk_socinfo = {
+ .probe = mtk_socinfo_probe,
+ .remove_new = mtk_socinfo_remove,
+ .driver = {
+ .name = "mtk-socinfo",
+ },
+};
+module_platform_driver(mtk_socinfo);
+
+MODULE_AUTHOR("William-TW LIN <william-tw.lin@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek socinfo driver");
+MODULE_LICENSE("GPL");
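
Once mtk-socinfo registers the soc_device, other code can key off it with the standard sys_soc matching API. A hypothetical consumer, not part of this patch (note that .machine supports glob patterns):

	#include <linux/sys_soc.h>

	static const struct soc_device_attribute kompanio_1200[] = {
		{ .family = "MediaTek", .machine = "Kompanio 1200*" },
		{ /* sentinel */ }
	};

	static bool is_kompanio_1200(void)
	{
		return soc_device_match(kompanio_1200) != NULL;
	}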
diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
index 9b0fdd95276e..19f4b576f822 100644
--- a/drivers/soc/microchip/Kconfig
+++ b/drivers/soc/microchip/Kconfig
@@ -1,5 +1,5 @@
config POLARFIRE_SOC_SYS_CTRL
- tristate "POLARFIRE_SOC_SYS_CTRL"
+ tristate "Microchip PolarFire SoC (MPFS) system controller support"
depends on POLARFIRE_SOC_MAILBOX
depends on MTD
help
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index c6ca4de42586..5af33b0e3470 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -268,4 +268,13 @@ config QCOM_INLINE_CRYPTO_ENGINE
tristate
select QCOM_SCM
+config QCOM_PBS
+ tristate "PBS trigger support for Qualcomm Technologies, Inc. PMICS"
+ depends on SPMI
+ help
+	  This driver supports configuring software programmable boot sequencer (PBS)
+	  trigger events through PBS RAM on Qualcomm Technologies, Inc. PMICs.
+	  It provides the APIs for client drivers that want to send a
+	  PBS trigger event to the PBS RAM.
+
endmenu
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 05b3d54e8dc9..ca0bece0dfff 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS_rpmh-rsc.o := -I$(src)
+CFLAGS_qcom_aoss.o := -I$(src)
obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o
obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
@@ -34,3 +35,4 @@ obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o
obj-$(CONFIG_QCOM_ICC_BWMON) += icc-bwmon.o
qcom_ice-objs += ice.o
obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE) += qcom_ice.o
+obj-$(CONFIG_QCOM_PBS) += qcom-pbs.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 1f8b315576a4..50749e870efa 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -399,7 +399,7 @@ static int apr_uevent(const struct device *dev, struct kobj_uevent_env *env)
return add_uevent_var(env, "MODALIAS=apr:%s", adev->name);
}
-struct bus_type aprbus = {
+const struct bus_type aprbus = {
.name = "aprbus",
.match = apr_device_match,
.probe = apr_device_probe,
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 4ca88eaebf06..cbef0dea1d5d 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -859,6 +859,8 @@ static int llcc_update_act_ctrl(u32 sid,
ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg,
slice_status, !(slice_status & status),
0, LLCC_STATUS_READ_DELAY);
+ if (ret)
+ return ret;
if (drv_data->version >= LLCC_VERSION_4_1_0_0)
ret = regmap_write(drv_data->bcast_regmap, act_clear_reg,
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index f4bfd24386f1..f913e9bd57ed 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -265,10 +265,17 @@ static int pmic_glink_probe(struct platform_device *pdev)
pg->client_mask = *match_data;
+ pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
+ if (IS_ERR(pg->pdr)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr),
+ "failed to initialize pdr\n");
+ return ret;
+ }
+
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI)) {
ret = pmic_glink_add_aux_device(pg, &pg->ucsi_aux, "ucsi");
if (ret)
- return ret;
+ goto out_release_pdr_handle;
}
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_ALTMODE)) {
ret = pmic_glink_add_aux_device(pg, &pg->altmode_aux, "altmode");
@@ -281,17 +288,11 @@ static int pmic_glink_probe(struct platform_device *pdev)
goto out_release_altmode_aux;
}
- pg->pdr = pdr_handle_alloc(pmic_glink_pdr_callback, pg);
- if (IS_ERR(pg->pdr)) {
- ret = dev_err_probe(&pdev->dev, PTR_ERR(pg->pdr), "failed to initialize pdr\n");
- goto out_release_aux_devices;
- }
-
service = pdr_add_lookup(pg->pdr, "tms/servreg", "msm/adsp/charger_pd");
if (IS_ERR(service)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(service),
"failed adding pdr lookup for charger_pd\n");
- goto out_release_pdr_handle;
+ goto out_release_aux_devices;
}
mutex_lock(&__pmic_glink_lock);
@@ -300,8 +301,6 @@ static int pmic_glink_probe(struct platform_device *pdev)
return 0;
-out_release_pdr_handle:
- pdr_handle_release(pg->pdr);
out_release_aux_devices:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_BATT))
pmic_glink_del_aux_device(pg, &pg->ps_aux);
@@ -311,6 +310,8 @@ out_release_altmode_aux:
out_release_ucsi_aux:
if (pg->client_mask & BIT(PMIC_GLINK_CLIENT_UCSI))
pmic_glink_del_aux_device(pg, &pg->ucsi_aux);
+out_release_pdr_handle:
+ pdr_handle_release(pg->pdr);
return ret;
}
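
The probe reorder makes the unwind path symmetric again: the pdr handle is now the first resource acquired and the last one released, so the error labels run in exact reverse order of acquisition. The generic shape being restored (names hypothetical):

	a = acquire_a();		/* first acquired */
	if (IS_ERR(a))
		return PTR_ERR(a);

	b = acquire_b();
	if (IS_ERR(b)) {
		ret = PTR_ERR(b);
		goto release_a;
	}

	return 0;

release_a:
	release_a_res(a);		/* last released */
	return ret;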
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index 5fcd0fdd2faa..b3808fc24c69 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -76,7 +76,7 @@ struct pmic_glink_altmode_port {
struct work_struct work;
- struct device *bridge;
+ struct auxiliary_device *bridge;
enum typec_orientation orientation;
u16 svid;
@@ -230,7 +230,7 @@ static void pmic_glink_altmode_worker(struct work_struct *work)
else
pmic_glink_altmode_enable_usb(altmode, alt_port);
- drm_aux_hpd_bridge_notify(alt_port->bridge,
+ drm_aux_hpd_bridge_notify(&alt_port->bridge->dev,
alt_port->hpd_state ?
connector_status_connected :
connector_status_disconnected);
@@ -454,7 +454,7 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
alt_port->index = port;
INIT_WORK(&alt_port->work, pmic_glink_altmode_worker);
- alt_port->bridge = drm_dp_hpd_bridge_register(dev, to_of_node(fwnode));
+ alt_port->bridge = devm_drm_dp_hpd_bridge_alloc(dev, to_of_node(fwnode));
if (IS_ERR(alt_port->bridge)) {
fwnode_handle_put(fwnode);
return PTR_ERR(alt_port->bridge);
@@ -510,6 +510,16 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
}
}
+ for (port = 0; port < ARRAY_SIZE(altmode->ports); port++) {
+ alt_port = &altmode->ports[port];
+ if (!alt_port->bridge)
+ continue;
+
+ ret = devm_drm_dp_hpd_bridge_add(dev, alt_port->bridge);
+ if (ret)
+ return ret;
+ }
+
altmode->client = devm_pmic_glink_register_client(dev,
altmode->owner_id,
pmic_glink_altmode_callback,
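
Splitting registration into devm_drm_dp_hpd_bridge_alloc() plus a later devm_drm_dp_hpd_bridge_add() lets the driver finish initialising every port before any bridge becomes visible, and notifications now target the auxiliary device directly. The two-step pattern, simplified to one port:

	adev = devm_drm_dp_hpd_bridge_alloc(dev, np);	/* allocated, not yet visible */
	if (IS_ERR(adev))
		return PTR_ERR(adev);

	/* ... finish per-port setup the bridge may rely on ... */

	ret = devm_drm_dp_hpd_bridge_add(dev, adev);	/* publish it */
	if (ret)
		return ret;

	drm_aux_hpd_bridge_notify(&adev->dev, connector_status_connected);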
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index bdcf44b85b2f..2e8f24d5da80 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -89,7 +89,6 @@
* @base: Base address of this instance of QUP wrapper core
* @clks: Handle to the primary & optional secondary AHB clocks
* @num_clks: Count of clocks
- * @to_core: Core ICC path
*/
struct geni_wrapper {
struct device *dev;
diff --git a/drivers/soc/qcom/qcom-pbs.c b/drivers/soc/qcom/qcom-pbs.c
new file mode 100644
index 000000000000..6af49b5060e5
--- /dev/null
+++ b/drivers/soc/qcom/qcom-pbs.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include <linux/soc/qcom/qcom-pbs.h>
+
+#define PBS_CLIENT_TRIG_CTL 0x42
+#define PBS_CLIENT_SW_TRIG_BIT BIT(7)
+#define PBS_CLIENT_SCRATCH1 0x50
+#define PBS_CLIENT_SCRATCH2 0x51
+#define PBS_CLIENT_SCRATCH2_ERROR 0xFF
+
+#define RETRIES 2000
+#define DELAY 1100
+
+struct pbs_dev {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ struct device_link *link;
+
+ u32 base;
+};
+
+static int qcom_pbs_wait_for_ack(struct pbs_dev *pbs, u8 bit_pos)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read_poll_timeout(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2,
+ val, val & BIT(bit_pos), DELAY, DELAY * RETRIES);
+
+ if (ret < 0) {
+ dev_err(pbs->dev, "Timeout for PBS ACK/NACK for bit %u\n", bit_pos);
+ return -ETIMEDOUT;
+ }
+
+ if (val == PBS_CLIENT_SCRATCH2_ERROR) {
+ ret = regmap_write(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, 0);
+ dev_err(pbs->dev, "NACK from PBS for bit %u\n", bit_pos);
+ return -EINVAL;
+ }
+
+ dev_dbg(pbs->dev, "PBS sequence for bit %u executed!\n", bit_pos);
+ return 0;
+}
+
+/**
+ * qcom_pbs_trigger_event() - Trigger the PBS RAM sequence
+ * @pbs: Pointer to PBS device
+ * @bitmap: bitmap of the PBS sequence bit positions to trigger
+ *
+ * This function is used by client drivers to trigger execution of
+ * a PBS RAM sequence.
+ *
+ * The PBS trigger sequence involves:
+ * 1. Setting the PBS sequence bit in PBS_CLIENT_SCRATCH1.
+ * 2. Initiating the SW PBS trigger.
+ * 3. Checking the equivalent bit in PBS_CLIENT_SCRATCH2 for the
+ *    completion of the sequence.
+ * 4. If PBS_CLIENT_SCRATCH2 == 0xFF, the PBS sequence failed to execute.
+ *
+ * Return: 0 on success, < 0 on failure
+ */
+int qcom_pbs_trigger_event(struct pbs_dev *pbs, u8 bitmap)
+{
+ unsigned int val;
+ u16 bit_pos;
+ int ret;
+
+ if (WARN_ON(!bitmap))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(pbs))
+ return -EINVAL;
+
+ mutex_lock(&pbs->lock);
+ ret = regmap_read(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, &val);
+ if (ret < 0)
+ goto out;
+
+ if (val == PBS_CLIENT_SCRATCH2_ERROR) {
+ /* PBS error - clear SCRATCH2 register */
+ ret = regmap_write(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, 0);
+ if (ret < 0)
+ goto out;
+ }
+
+ for (bit_pos = 0; bit_pos < 8; bit_pos++) {
+ if (!(bitmap & BIT(bit_pos)))
+ continue;
+
+ /* Clear the PBS sequence bit position */
+ ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2,
+ BIT(bit_pos), 0);
+ if (ret < 0)
+ goto out_clear_scratch1;
+
+ /* Set the PBS sequence bit position */
+ ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH1,
+ BIT(bit_pos), BIT(bit_pos));
+ if (ret < 0)
+ goto out_clear_scratch1;
+
+ /* Initiate the SW trigger */
+ ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_TRIG_CTL,
+ PBS_CLIENT_SW_TRIG_BIT, PBS_CLIENT_SW_TRIG_BIT);
+ if (ret < 0)
+ goto out_clear_scratch1;
+
+ ret = qcom_pbs_wait_for_ack(pbs, bit_pos);
+ if (ret < 0)
+ goto out_clear_scratch1;
+
+ /* Clear the PBS sequence bit position */
+ regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH1, BIT(bit_pos), 0);
+ regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH2, BIT(bit_pos), 0);
+ }
+
+out_clear_scratch1:
+	/* Clear all of the requested bits */
+ ret = regmap_update_bits(pbs->regmap, pbs->base + PBS_CLIENT_SCRATCH1, bitmap, 0);
+
+out:
+ mutex_unlock(&pbs->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_pbs_trigger_event);
+
+/**
+ * get_pbs_client_device() - Get the PBS device used by client
+ * @dev: Client device
+ *
+ * This function is used to get the PBS device that is being
+ * used by the client.
+ *
+ * Return: pbs_dev on success, ERR_PTR on failure
+ */
+struct pbs_dev *get_pbs_client_device(struct device *dev)
+{
+ struct device_node *pbs_dev_node;
+ struct platform_device *pdev;
+ struct pbs_dev *pbs;
+
+ pbs_dev_node = of_parse_phandle(dev->of_node, "qcom,pbs", 0);
+ if (!pbs_dev_node) {
+ dev_err(dev, "Missing qcom,pbs property\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_find_device_by_node(pbs_dev_node);
+ if (!pdev) {
+ dev_err(dev, "Unable to find PBS dev_node\n");
+ pbs = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ pbs = platform_get_drvdata(pdev);
+ if (!pbs) {
+ dev_err(dev, "Cannot get pbs instance from %s\n", dev_name(&pdev->dev));
+ platform_device_put(pdev);
+ pbs = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ pbs->link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
+ if (!pbs->link) {
+ dev_err(&pdev->dev, "Failed to create device link to consumer %s\n", dev_name(dev));
+ platform_device_put(pdev);
+ pbs = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+out:
+ of_node_put(pbs_dev_node);
+ return pbs;
+}
+EXPORT_SYMBOL_GPL(get_pbs_client_device);
+
+static int qcom_pbs_probe(struct platform_device *pdev)
+{
+ struct pbs_dev *pbs;
+ u32 val;
+ int ret;
+
+ pbs = devm_kzalloc(&pdev->dev, sizeof(*pbs), GFP_KERNEL);
+ if (!pbs)
+ return -ENOMEM;
+
+ pbs->dev = &pdev->dev;
+ pbs->regmap = dev_get_regmap(pbs->dev->parent, NULL);
+ if (!pbs->regmap) {
+ dev_err(pbs->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ ret = device_property_read_u32(pbs->dev, "reg", &val);
+ if (ret < 0) {
+ dev_err(pbs->dev, "Couldn't find reg, ret = %d\n", ret);
+ return ret;
+ }
+ pbs->base = val;
+ mutex_init(&pbs->lock);
+
+ platform_set_drvdata(pdev, pbs);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_pbs_match_table[] = {
+ { .compatible = "qcom,pbs" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_pbs_match_table);
+
+static struct platform_driver qcom_pbs_driver = {
+ .driver = {
+ .name = "qcom-pbs",
+ .of_match_table = qcom_pbs_match_table,
+ },
+ .probe = qcom_pbs_probe,
+};
+module_platform_driver(qcom_pbs_driver)
+
+MODULE_DESCRIPTION("QCOM PBS DRIVER");
+MODULE_LICENSE("GPL");
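
A sketch of how a client driver is expected to consume the two exported entry points above; the PBS device is located through a qcom,pbs phandle in the client's DT node:

	#include <linux/soc/qcom/qcom-pbs.h>

	struct pbs_dev *pbs;
	int ret;

	pbs = get_pbs_client_device(dev);	/* resolves the qcom,pbs phandle */
	if (IS_ERR(pbs))
		return PTR_ERR(pbs);		/* may be -EPROBE_DEFER */

	ret = qcom_pbs_trigger_event(pbs, BIT(0));	/* run sequence bit 0 */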
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index aff0cfb71482..ca2f6b7629ce 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -3,6 +3,7 @@
* Copyright (c) 2019, Linaro Ltd
*/
#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
@@ -13,6 +14,9 @@
#include <linux/slab.h>
#include <linux/soc/qcom/qcom_aoss.h>
+#define CREATE_TRACE_POINTS
+#include "trace-aoss.h"
+
#define QMP_DESC_MAGIC 0x0
#define QMP_DESC_VERSION 0x4
#define QMP_DESC_FEATURES 0x8
@@ -44,6 +48,8 @@
#define QMP_NUM_COOLING_RESOURCES 2
+#define QMP_DEBUGFS_FILES 4
+
static bool qmp_cdev_max_state = 1;
struct qmp_cooling_device {
@@ -65,6 +71,8 @@ struct qmp_cooling_device {
* @tx_lock: provides synchronization between multiple callers of qmp_send()
* @qdss_clk: QDSS clock hw struct
* @cooling_devs: thermal cooling devices
+ * @debugfs_root: directory for the developer/tester interface
+ * @debugfs_files: array of individual debugfs entries under debugfs_root
*/
struct qmp {
void __iomem *msgram;
@@ -82,6 +90,8 @@ struct qmp {
struct clk_hw qdss_clk;
struct qmp_cooling_device *cooling_devs;
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_files[QMP_DEBUGFS_FILES];
};
static void qmp_kick(struct qmp *qmp)
@@ -214,7 +224,7 @@ static bool qmp_message_empty(struct qmp *qmp)
*
* Return: 0 on success, negative errno on failure
*/
-int qmp_send(struct qmp *qmp, const char *fmt, ...)
+int __printf(2, 3) qmp_send(struct qmp *qmp, const char *fmt, ...)
{
char buf[QMP_MSG_LEN];
long time_left;
@@ -235,6 +245,8 @@ int qmp_send(struct qmp *qmp, const char *fmt, ...)
mutex_lock(&qmp->tx_lock);
+ trace_aoss_send(buf);
+
/* The message RAM only implements 32-bit accesses */
__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
buf, sizeof(buf) / sizeof(u32));
@@ -256,6 +268,8 @@ int qmp_send(struct qmp *qmp, const char *fmt, ...)
ret = 0;
}
+ trace_aoss_send_done(buf, ret);
+
mutex_unlock(&qmp->tx_lock);
return ret;
@@ -475,6 +489,91 @@ void qmp_put(struct qmp *qmp)
}
EXPORT_SYMBOL_GPL(qmp_put);
+struct qmp_debugfs_entry {
+ const char *name;
+ const char *fmt;
+ bool is_bool;
+ const char *true_val;
+ const char *false_val;
+};
+
+static const struct qmp_debugfs_entry qmp_debugfs_entries[QMP_DEBUGFS_FILES] = {
+ { "ddr_frequency_mhz", "{class: ddr, res: fixed, val: %u}", false },
+ { "prevent_aoss_sleep", "{class: aoss_slp, res: sleep: %s}", true, "enable", "disable" },
+ { "prevent_cx_collapse", "{class: cx_mol, res: cx, val: %s}", true, "mol", "off" },
+ { "prevent_ddr_collapse", "{class: ddr_mol, res: ddr, val: %s}", true, "mol", "off" },
+};
+
+static ssize_t qmp_debugfs_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *pos)
+{
+ const struct qmp_debugfs_entry *entry = NULL;
+ struct qmp *qmp = file->private_data;
+ char buf[QMP_MSG_LEN];
+ unsigned int uint_val;
+ const char *str_val;
+ bool bool_val;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) {
+ if (qmp->debugfs_files[i] == file->f_path.dentry) {
+ entry = &qmp_debugfs_entries[i];
+ break;
+ }
+ }
+ if (WARN_ON(!entry))
+ return -EFAULT;
+
+ if (entry->is_bool) {
+ ret = kstrtobool_from_user(user_buf, count, &bool_val);
+ if (ret)
+ return ret;
+
+ str_val = bool_val ? entry->true_val : entry->false_val;
+
+ ret = snprintf(buf, sizeof(buf), entry->fmt, str_val);
+ if (ret >= sizeof(buf))
+ return -EINVAL;
+ } else {
+ ret = kstrtou32_from_user(user_buf, count, 0, &uint_val);
+ if (ret)
+ return ret;
+
+ ret = snprintf(buf, sizeof(buf), entry->fmt, uint_val);
+ if (ret >= sizeof(buf))
+ return -EINVAL;
+ }
+
+ ret = qmp_send(qmp, buf);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations qmp_debugfs_fops = {
+ .open = simple_open,
+ .write = qmp_debugfs_write,
+};
+
+static void qmp_debugfs_create(struct qmp *qmp)
+{
+ const struct qmp_debugfs_entry *entry;
+ int i;
+
+ qmp->debugfs_root = debugfs_create_dir("qcom_aoss", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(qmp->debugfs_files); i++) {
+ entry = &qmp_debugfs_entries[i];
+
+ qmp->debugfs_files[i] = debugfs_create_file(entry->name, 0200,
+ qmp->debugfs_root,
+ qmp,
+ &qmp_debugfs_fops);
+ }
+}
+
static int qmp_probe(struct platform_device *pdev)
{
struct qmp *qmp;
@@ -523,6 +622,8 @@ static int qmp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qmp);
+ qmp_debugfs_create(qmp);
+
return 0;
err_close_qmp:
@@ -537,6 +638,8 @@ static void qmp_remove(struct platform_device *pdev)
{
struct qmp *qmp = platform_get_drvdata(pdev);
+ debugfs_remove_recursive(qmp->debugfs_root);
+
qmp_qdss_clk_remove(qmp);
qmp_cooling_devices_remove(qmp);
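Putting the pieces together: writing "1555" to the ddr_frequency_mhz debugfs file is formatted through the table above into "{class: ddr, res: fixed, val: 1555}" and handed to qmp_send(). A sketch of an in-kernel caller taking the same path (qmp_get()/qmp_put() are the existing lookup API from qcom_aoss.h; the foo_* wrapper is invented for illustration):

    #include <linux/soc/qcom/qcom_aoss.h>

    static int foo_request_ddr_freq(struct device *dev, u32 mhz)
    {
            struct qmp *qmp;
            int ret;

            qmp = qmp_get(dev);     /* resolves the "qcom,qmp" phandle */
            if (IS_ERR(qmp))
                    return PTR_ERR(qmp);

            /* Same message the ddr_frequency_mhz debugfs file builds */
            ret = qmp_send(qmp, "{class: ddr, res: fixed, val: %u}", mhz);

            qmp_put(qmp);
            return ret;
    }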
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 690afc9a12f4..7191fa0c087f 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -655,8 +655,6 @@ invalid_canary:
void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
{
struct smem_partition *part;
- unsigned long flags;
- int ret;
void *ptr = ERR_PTR(-EPROBE_DEFER);
if (!__smem)
@@ -665,12 +663,6 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
if (WARN_ON(item >= __smem->item_count))
return ERR_PTR(-EINVAL);
- ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
- HWSPINLOCK_TIMEOUT,
- &flags);
- if (ret)
- return ERR_PTR(ret);
-
if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) {
part = &__smem->partitions[host];
ptr = qcom_smem_get_private(__smem, part, item, size);
@@ -681,10 +673,7 @@ void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
ptr = qcom_smem_get_global(__smem, item, size);
}
- hwspin_unlock_irqrestore(__smem->hwlock, &flags);
-
return ptr;
-
}
EXPORT_SYMBOL_GPL(qcom_smem_get);
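With the hwspinlock dropped from this path, qcom_smem_get() is a plain pointer lookup into already-initialized partitions. A minimal consumer sketch (SMEM_ID_FOO is a made-up item number; QCOM_SMEM_HOST_ANY is the real global-partition selector from smem.h):

    #include <linux/soc/qcom/smem.h>

    #define SMEM_ID_FOO     137     /* hypothetical item */

    static int foo_read_shared(void)
    {
            size_t size;
            void *ptr;

            ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_ID_FOO, &size);
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);    /* -EPROBE_DEFER until smem probes */

            pr_info("item %u is %zu bytes\n", SMEM_ID_FOO, size);
            return 0;
    }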
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 914b2246148f..a21241cbeec7 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -58,8 +58,8 @@
* @valid_entries: number of allocated entries
* @flags:
* @entries: individual communication entries
- * @name: name of the entry
- * @value: content of the entry
+ * @entries.name: name of the entry
+ * @entries.value: content of the entry
*/
struct smp2p_smem_item {
u32 magic;
@@ -275,6 +275,8 @@ static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
*
* Handle notifications from the remote side to handle newly allocated entries
* or any changes to the state bits of existing entries.
+ *
+ * Return: %IRQ_HANDLED
*/
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 6349a0debeb5..e8ff9819ac47 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -124,7 +124,7 @@ static const char *const pmic_models[] = {
[50] = "PM8350B",
[51] = "PMR735A",
[52] = "PMR735B",
- [55] = "PM2250",
+ [55] = "PM4125",
[58] = "PM8450",
[65] = "PM8010",
[69] = "PM8550VS",
@@ -424,8 +424,11 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(IPQ9510) },
{ qcom_board_id(QRB4210) },
{ qcom_board_id(QRB2210) },
+ { qcom_board_id(SM8475) },
+ { qcom_board_id(SM8475P) },
{ qcom_board_id(SA8775P) },
{ qcom_board_id(QRU1000) },
+ { qcom_board_id(SM8475_2) },
{ qcom_board_id(QDU1000) },
{ qcom_board_id(SM8650) },
{ qcom_board_id(SM4450) },
@@ -437,6 +440,8 @@ static const struct soc_id soc_id[] = {
{ qcom_board_id(IPQ5322) },
{ qcom_board_id(IPQ5312) },
{ qcom_board_id(IPQ5302) },
+ { qcom_board_id(QCS8550) },
+ { qcom_board_id(QCM8550) },
{ qcom_board_id(IPQ5300) },
};
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index 2f0b1bfe7658..06e2c4c2a4a8 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -6,20 +6,40 @@
* SAW power controller driver
*/
-#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/linear_range.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+
+#include <linux/regulator/driver.h>
+
#include <soc/qcom/spm.h>
+#define FIELD_SET(current, mask, val) \
+ (((current) & ~(mask)) | FIELD_PREP((mask), (val)))
+
#define SPM_CTL_INDEX 0x7f
#define SPM_CTL_INDEX_SHIFT 4
#define SPM_CTL_EN BIT(0)
+/* These registers might be specific to SPM 1.1 */
+#define SPM_VCTL_VLVL GENMASK(7, 0)
+#define SPM_PMIC_DATA_0_VLVL GENMASK(7, 0)
+#define SPM_PMIC_DATA_1_MIN_VSEL GENMASK(5, 0)
+#define SPM_PMIC_DATA_1_MAX_VSEL GENMASK(21, 16)
+
+#define SPM_1_1_AVS_CTL_AVS_ENABLED BIT(27)
+#define SPM_AVS_CTL_MAX_VLVL GENMASK(22, 17)
+#define SPM_AVS_CTL_MIN_VLVL GENMASK(15, 10)
+
enum spm_reg {
SPM_REG_CFG,
SPM_REG_SPM_CTL,
@@ -29,13 +49,44 @@ enum spm_reg {
SPM_REG_PMIC_DATA_1,
SPM_REG_VCTL,
SPM_REG_SEQ_ENTRY,
- SPM_REG_SPM_STS,
+ SPM_REG_STS0,
+ SPM_REG_STS1,
SPM_REG_PMIC_STS,
SPM_REG_AVS_CTL,
SPM_REG_AVS_LIMIT,
+ SPM_REG_RST,
SPM_REG_NR,
};
+#define MAX_PMIC_DATA 2
+#define MAX_SEQ_DATA 64
+
+struct spm_reg_data {
+ const u16 *reg_offset;
+ u32 spm_cfg;
+ u32 spm_dly;
+ u32 pmic_dly;
+ u32 pmic_data[MAX_PMIC_DATA];
+ u32 avs_ctl;
+ u32 avs_limit;
+ u8 seq[MAX_SEQ_DATA];
+ u8 start_index[PM_SLEEP_MODE_NR];
+
+ smp_call_func_t set_vdd;
+ /* for now we support only a single range */
+ struct linear_range *range;
+ unsigned int ramp_delay;
+ unsigned int init_uV;
+};
+
+struct spm_driver_data {
+ void __iomem *reg_base;
+ const struct spm_reg_data *reg_data;
+ struct device *dev;
+ unsigned int volt_sel;
+ int reg_cpu;
+};
+
static const u16 spm_reg_offset_v4_1[SPM_REG_NR] = {
[SPM_REG_AVS_CTL] = 0x904,
[SPM_REG_AVS_LIMIT] = 0x908,
@@ -169,6 +220,10 @@ static const struct spm_reg_data spm_reg_8226_cpu = {
static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = {
[SPM_REG_CFG] = 0x08,
+ [SPM_REG_STS0] = 0x0c,
+ [SPM_REG_STS1] = 0x10,
+ [SPM_REG_VCTL] = 0x14,
+ [SPM_REG_AVS_CTL] = 0x18,
[SPM_REG_SPM_CTL] = 0x20,
[SPM_REG_PMIC_DLY] = 0x24,
[SPM_REG_PMIC_DATA_0] = 0x28,
@@ -176,7 +231,12 @@ static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = {
[SPM_REG_SEQ_ENTRY] = 0x80,
};
+static void smp_set_vdd_v1_1(void *data);
+
/* SPM register data for 8064 */
+static struct linear_range spm_v1_1_regulator_range =
+ REGULATOR_LINEAR_RANGE(700000, 0, 56, 12500);
+
static const struct spm_reg_data spm_reg_8064_cpu = {
.reg_offset = spm_reg_offset_v1_1,
.spm_cfg = 0x1F,
@@ -187,6 +247,10 @@ static const struct spm_reg_data spm_reg_8064_cpu = {
0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
.start_index[PM_SLEEP_MODE_STBY] = 0,
.start_index[PM_SLEEP_MODE_SPC] = 2,
+ .set_vdd = smp_set_vdd_v1_1,
+ .range = &spm_v1_1_regulator_range,
+ .init_uV = 1300000,
+ .ramp_delay = 1250,
};
static inline void spm_register_write(struct spm_driver_data *drv,
@@ -238,6 +302,178 @@ void spm_set_low_power_mode(struct spm_driver_data *drv,
spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
}
+static int spm_set_voltage_sel(struct regulator_dev *rdev, unsigned int selector)
+{
+ struct spm_driver_data *drv = rdev_get_drvdata(rdev);
+
+ drv->volt_sel = selector;
+
+ /* Always do the SAW register writes on the corresponding CPU */
+ return smp_call_function_single(drv->reg_cpu, drv->reg_data->set_vdd, drv, true);
+}
+
+static int spm_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct spm_driver_data *drv = rdev_get_drvdata(rdev);
+
+ return drv->volt_sel;
+}
+
+static const struct regulator_ops spm_reg_ops = {
+ .set_voltage_sel = spm_set_voltage_sel,
+ .get_voltage_sel = spm_get_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static void smp_set_vdd_v1_1(void *data)
+{
+ struct spm_driver_data *drv = data;
+ unsigned int vctl, data0, data1, avs_ctl, sts;
+ unsigned int vlevel, volt_sel;
+ bool avs_enabled;
+
+ volt_sel = drv->volt_sel;
+ vlevel = volt_sel | 0x80; /* band */
+
+ avs_ctl = spm_register_read(drv, SPM_REG_AVS_CTL);
+ vctl = spm_register_read(drv, SPM_REG_VCTL);
+ data0 = spm_register_read(drv, SPM_REG_PMIC_DATA_0);
+ data1 = spm_register_read(drv, SPM_REG_PMIC_DATA_1);
+
+ avs_enabled = avs_ctl & SPM_1_1_AVS_CTL_AVS_ENABLED;
+
+ /* If AVS is enabled, switch it off during the voltage change */
+ if (avs_enabled) {
+ avs_ctl &= ~SPM_1_1_AVS_CTL_AVS_ENABLED;
+ spm_register_write(drv, SPM_REG_AVS_CTL, avs_ctl);
+ }
+
+ /* Kick the state machine back to idle */
+ spm_register_write(drv, SPM_REG_RST, 1);
+
+ vctl = FIELD_SET(vctl, SPM_VCTL_VLVL, vlevel);
+ data0 = FIELD_SET(data0, SPM_PMIC_DATA_0_VLVL, vlevel);
+ data1 = FIELD_SET(data1, SPM_PMIC_DATA_1_MIN_VSEL, volt_sel);
+ data1 = FIELD_SET(data1, SPM_PMIC_DATA_1_MAX_VSEL, volt_sel);
+
+ spm_register_write(drv, SPM_REG_VCTL, vctl);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_0, data0);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_1, data1);
+
+ if (read_poll_timeout_atomic(spm_register_read,
+ sts, sts == vlevel,
+ 1, 200, false,
+ drv, SPM_REG_STS1)) {
+ dev_err_ratelimited(drv->dev, "timeout setting the voltage (%x %x)!\n", sts, vlevel);
+ goto enable_avs;
+ }
+
+ if (avs_enabled) {
+ unsigned int max_avs = volt_sel;
+ unsigned int min_avs = max(max_avs, 4U) - 4;
+
+ avs_ctl = FIELD_SET(avs_ctl, SPM_AVS_CTL_MIN_VLVL, min_avs);
+ avs_ctl = FIELD_SET(avs_ctl, SPM_AVS_CTL_MAX_VLVL, max_avs);
+ spm_register_write(drv, SPM_REG_AVS_CTL, avs_ctl);
+ }
+
+enable_avs:
+ if (avs_enabled) {
+ avs_ctl |= SPM_1_1_AVS_CTL_AVS_ENABLED;
+ spm_register_write(drv, SPM_REG_AVS_CTL, avs_ctl);
+ }
+}
+
+static int spm_get_cpu(struct device *dev)
+{
+ int cpu;
+ bool found;
+
+ for_each_possible_cpu(cpu) {
+ struct device_node *cpu_node, *saw_node;
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ continue;
+
+ saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ found = (saw_node == dev->of_node);
+ of_node_put(saw_node);
+ of_node_put(cpu_node);
+
+ if (found)
+ return cpu;
+ }
+
+ /* L2 SPM is not bound to any CPU, voltage setting is not supported */
+
+ return -EOPNOTSUPP;
+}
+
+static int spm_register_regulator(struct device *dev, struct spm_driver_data *drv)
+{
+ struct regulator_config config = {
+ .dev = dev,
+ .driver_data = drv,
+ };
+ struct regulator_desc *rdesc;
+ struct regulator_dev *rdev;
+ int ret;
+ bool found;
+
+ if (!drv->reg_data->set_vdd)
+ return 0;
+
+ rdesc = devm_kzalloc(dev, sizeof(*rdesc), GFP_KERNEL);
+ if (!rdesc)
+ return -ENOMEM;
+
+ rdesc->name = "spm";
+ rdesc->of_match = of_match_ptr("regulator");
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->owner = THIS_MODULE;
+ rdesc->ops = &spm_reg_ops;
+
+ rdesc->linear_ranges = drv->reg_data->range;
+ rdesc->n_linear_ranges = 1;
+ rdesc->n_voltages = rdesc->linear_ranges[rdesc->n_linear_ranges - 1].max_sel + 1;
+ rdesc->ramp_delay = drv->reg_data->ramp_delay;
+
+ ret = spm_get_cpu(dev);
+ if (ret < 0)
+ return ret;
+
+ drv->reg_cpu = ret;
+ dev_dbg(dev, "SAW2 bound to CPU %d\n", drv->reg_cpu);
+
+ /*
+ * Program initial voltage, otherwise registration will also try
+ * setting the voltage, which might result in undervolting the CPU.
+ */
+ ret = linear_range_get_selector_high(drv->reg_data->range,
+ drv->reg_data->init_uV,
+ &drv->volt_sel,
+ &found);
+ if (ret) {
+ dev_err(dev, "Initial uV value out of bounds\n");
+ return ret;
+ }
+
+ /* Always do the SAW register writes on the corresponding CPU */
+ smp_call_function_single(drv->reg_cpu, drv->reg_data->set_vdd, drv, true);
+
+ rdev = devm_regulator_register(dev, rdesc, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(dev, "failed to register regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ return 0;
+}
+
static const struct of_device_id spm_match_table[] = {
{ .compatible = "qcom,sdm660-gold-saw2-v4.1-l2",
.data = &spm_reg_660_gold_l2 },
@@ -288,6 +524,7 @@ static int spm_dev_probe(struct platform_device *pdev)
return -ENODEV;
drv->reg_data = match_id->data;
+ drv->dev = &pdev->dev;
platform_set_drvdata(pdev, drv);
/* Write the SPM sequences first.. */
@@ -315,6 +552,9 @@ static int spm_dev_probe(struct platform_device *pdev)
if (drv->reg_data->reg_offset[SPM_REG_SPM_CTL])
spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+ if (IS_ENABLED(CONFIG_REGULATOR))
+ return spm_register_regulator(&pdev->dev, drv);
+
return 0;
}
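As an editorial aside, the selector arithmetic behind the 8064 regulator data works out as follows; this is a worked example derived from REGULATOR_LINEAR_RANGE(700000, 0, 56, 12500) and init_uV = 1300000 above, not patch text:

    /*
     * uV(sel) = 700000 + sel * 12500, for sel in [0, 56]:
     *   uV(48) = 700000 + 48 * 12500 = 1300000  (init_uV above)
     *   uV(56) = 700000 + 56 * 12500 = 1400000  (maximum)
     * linear_range_get_selector_high() returns the smallest selector whose
     * voltage is >= the request, so init_uV = 1300000 resolves to sel = 48,
     * which smp_set_vdd_v1_1() then programs via VCTL/PMIC_DATA_*.
     */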
diff --git a/drivers/soc/qcom/trace-aoss.h b/drivers/soc/qcom/trace-aoss.h
new file mode 100644
index 000000000000..554029b33b44
--- /dev/null
+++ b/drivers/soc/qcom/trace-aoss.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM qcom_aoss
+
+#if !defined(_TRACE_QCOM_AOSS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_QCOM_AOSS_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(aoss_send,
+ TP_PROTO(const char *msg),
+ TP_ARGS(msg),
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(aoss_send_done,
+ TP_PROTO(const char *msg, int ret),
+ TP_ARGS(msg, ret),
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ __field(int, ret)
+ ),
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ __entry->ret = ret;
+ ),
+ TP_printk("%s: %d", __get_str(msg), __entry->ret)
+);
+
+#endif /* _TRACE_QCOM_AOSS_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace-aoss
+
+#include <trace/define_trace.h>
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 0986672f6375..5deca747fb77 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -34,6 +34,10 @@ config ARCH_RCAR_GEN3
select SYS_SUPPORTS_SH_CMT
select SYS_SUPPORTS_SH_TMU
+config ARCH_RCAR_GEN4
+ bool
+ select ARCH_RCAR_GEN3
+
config ARCH_RMOBILE
bool
select PM
@@ -240,7 +244,7 @@ config ARCH_R8A77961
config ARCH_R8A779F0
bool "ARM64 Platform support for R-Car S4-8"
- select ARCH_RCAR_GEN3
+ select ARCH_RCAR_GEN4
select SYSC_R8A779F0
help
This enables support for the Renesas R-Car S4-8 SoC.
@@ -261,18 +265,25 @@ config ARCH_R8A77970
config ARCH_R8A779A0
bool "ARM64 Platform support for R-Car V3U"
- select ARCH_RCAR_GEN3
+ select ARCH_RCAR_GEN4
select SYSC_R8A779A0
help
This enables support for the Renesas R-Car V3U SoC.
config ARCH_R8A779G0
bool "ARM64 Platform support for R-Car V4H"
- select ARCH_RCAR_GEN3
+ select ARCH_RCAR_GEN4
select SYSC_R8A779G0
help
This enables support for the Renesas R-Car V4H SoC.
+config ARCH_R8A779H0
+ bool "ARM64 Platform support for R-Car V4M"
+ select ARCH_RCAR_GEN4
+ select SYSC_R8A779H0
+ help
+ This enables support for the Renesas R-Car V4M SoC.
+
config ARCH_R8A774C0
bool "ARM64 Platform support for RZ/G2E"
select ARCH_RCAR_GEN3
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 98fd97da6cd4..7ba02f3a4a4f 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -117,6 +117,7 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
{ .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_v3u },
{ .compatible = "renesas,r8a779f0-rst", .data = &rcar_rst_gen4 },
{ .compatible = "renesas,r8a779g0-rst", .data = &rcar_rst_gen4 },
+ { .compatible = "renesas,r8a779h0-rst", .data = &rcar_rst_gen4 },
{ /* sentinel */ }
};
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 27eae1a354ab..8f9b8d3736dc 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -270,6 +270,11 @@ static const struct renesas_soc soc_rcar_v4h __initconst __maybe_unused = {
.id = 0x5c,
};
+static const struct renesas_soc soc_rcar_v4m __initconst __maybe_unused = {
+ .family = &fam_rcar_gen4,
+ .id = 0x5d,
+};
+
static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.family = &fam_shmobile,
.id = 0x37,
@@ -380,6 +385,9 @@ static const struct of_device_id renesas_socs[] __initconst __maybe_unused = {
#ifdef CONFIG_ARCH_R8A779G0
{ .compatible = "renesas,r8a779g0", .data = &soc_rcar_v4h },
#endif
+#ifdef CONFIG_ARCH_R8A779H0
+ { .compatible = "renesas,r8a779h0", .data = &soc_rcar_v4m },
+#endif
#ifdef CONFIG_ARCH_R9A07G043
#ifdef CONFIG_RISCV
{ .compatible = "renesas,r9a07g043", .data = &soc_rz_five },
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 27ec99af77e3..1a5dfdc978dc 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -42,6 +42,7 @@ config EXYNOS_PMU
depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST)
select EXYNOS_PMU_ARM_DRIVERS if ARM && ARCH_EXYNOS
select MFD_CORE
+ select REGMAP_MMIO
# There is no need to enable these drivers for ARMv8
config EXYNOS_PMU_ARM_DRIVERS
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index 250537d7cfd6..fd8b6ac06656 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -5,6 +5,7 @@
//
// Exynos - CPU PMU(Power Management Unit) support
+#include <linux/arm-smccc.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/core.h>
@@ -12,19 +13,134 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
#include <linux/soc/samsung/exynos-pmu.h>
#include "exynos-pmu.h"
+#define PMUALIVE_MASK GENMASK(13, 0)
+#define TENSOR_SET_BITS (BIT(15) | BIT(14))
+#define TENSOR_CLR_BITS BIT(15)
+#define TENSOR_SMC_PMU_SEC_REG 0x82000504
+#define TENSOR_PMUREG_READ 0
+#define TENSOR_PMUREG_WRITE 1
+#define TENSOR_PMUREG_RMW 2
+
struct exynos_pmu_context {
struct device *dev;
const struct exynos_pmu_data *pmu_data;
+ struct regmap *pmureg;
};
void __iomem *pmu_base_addr;
static struct exynos_pmu_context *pmu_context;
+/* forward declaration */
+static struct platform_driver exynos_pmu_driver;
+
+/*
+ * Tensor SoCs are configured so that PMU_ALIVE registers can only be written
+ * from EL3, but are still read accessible. As Linux needs to write some of
+ * these registers, the following functions are provided and exposed via
+ * regmap.
+ *
+ * Note: This SMC interface is known to be implemented on gs101 and derivative
+ * SoCs.
+ */
+
+/* Write to a protected PMU register. */
+static int tensor_sec_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct arm_smccc_res res;
+ unsigned long pmu_base = (unsigned long)context;
+
+ arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
+ TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res);
+
+ /* returns 0 on success, or -EINVAL if the access isn't allowed */
+ if (res.a0)
+ pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
+
+ return (int)res.a0;
+}
+
+/* Read/Modify/Write a protected PMU register. */
+static int tensor_sec_reg_rmw(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ struct arm_smccc_res res;
+ unsigned long pmu_base = (unsigned long)context;
+
+ arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg,
+ TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res);
+
+ /* returns 0 on success, or -EINVAL if the access isn't allowed */
+ if (res.a0)
+ pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0);
+
+ return (int)res.a0;
+}
+
+/*
+ * Read a protected PMU register. All PMU registers can be read by Linux.
+ * Note: The SMC read register is not used, as only registers that can be
+ * written are readable via SMC.
+ */
+static int tensor_sec_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ *val = pmu_raw_readl(reg);
+ return 0;
+}
+
+/*
+ * For SoCs that have set/clear bit hardware this function can be used when
+ * the PMU register will be accessed by multiple masters.
+ *
+ * For example, to set bits 13:8 in PMU reg offset 0x3e80:
+ * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00);
+ *
+ * To set bit 8 and clear bits 13:9 in PMU reg offset 0x3e80:
+ * tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x3f00);
+ */
+static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val,
+ u32 mask)
+{
+ int ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < 32; i++) {
+ if (!(mask & BIT(i)))
+ continue;
+
+ offset &= ~TENSOR_SET_BITS;
+
+ if (val & BIT(i))
+ offset |= TENSOR_SET_BITS;
+ else
+ offset |= TENSOR_CLR_BITS;
+
+ ret = tensor_sec_reg_write(ctx, offset, i);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static int tensor_sec_update_bits(void *ctx, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ /*
+ * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF)
+ * as the target registers can be accessed by multiple masters.
+ */
+ if (reg > PMUALIVE_MASK)
+ return tensor_sec_reg_rmw(ctx, reg, mask, val);
+
+ return tensor_set_bits_atomic(ctx, reg, val, mask);
+}
void pmu_raw_writel(u32 val, u32 offset)
{
@@ -75,11 +191,41 @@ void exynos_sys_powerdown_conf(enum sys_powerdown mode)
#define exynos_pmu_data_arm_ptr(data) NULL
#endif
+static const struct regmap_config regmap_smccfg = {
+ .name = "pmu_regs",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .use_single_read = true,
+ .use_single_write = true,
+ .reg_read = tensor_sec_reg_read,
+ .reg_write = tensor_sec_reg_write,
+ .reg_update_bits = tensor_sec_update_bits,
+};
+
+static const struct regmap_config regmap_mmiocfg = {
+ .name = "pmu_regs",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static const struct exynos_pmu_data gs101_pmu_data = {
+ .pmu_secure = true
+};
+
/*
* PMU platform driver and devicetree bindings.
*/
static const struct of_device_id exynos_pmu_of_device_ids[] = {
{
+ .compatible = "google,gs101-pmu",
+ .data = &gs101_pmu_data,
+ }, {
.compatible = "samsung,exynos3250-pmu",
.data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data),
}, {
@@ -113,19 +259,75 @@ static const struct mfd_cell exynos_pmu_devs[] = {
{ .name = "exynos-clkout", },
};
+/**
+ * exynos_get_pmu_regmap() - Obtain pmureg regmap
+ *
+ * Find the pmureg regmap previously configured in probe() and return regmap
+ * pointer.
+ *
+ * Return: A pointer to regmap if found or ERR_PTR error value.
+ */
struct regmap *exynos_get_pmu_regmap(void)
{
struct device_node *np = of_find_matching_node(NULL,
exynos_pmu_of_device_ids);
if (np)
- return syscon_node_to_regmap(np);
+ return exynos_get_pmu_regmap_by_phandle(np, NULL);
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
+/**
+ * exynos_get_pmu_regmap_by_phandle() - Obtain pmureg regmap via phandle
+ * @np: Device node holding PMU phandle property
+ * @propname: Name of property holding phandle value
+ *
+ * Find the pmureg regmap previously configured in probe() and return regmap
+ * pointer.
+ *
+ * Return: A pointer to regmap if found or ERR_PTR error value.
+ */
+struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
+ const char *propname)
+{
+ struct exynos_pmu_context *ctx;
+ struct device_node *pmu_np;
+ struct device *dev;
+
+ if (propname)
+ pmu_np = of_parse_phandle(np, propname, 0);
+ else
+ pmu_np = np;
+
+ if (!pmu_np)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Determine if exynos-pmu device has probed and therefore regmap
+ * has been created and can be returned to the caller. Otherwise we
+ * return -EPROBE_DEFER.
+ */
+ dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver,
+ (void *)pmu_np);
+
+ if (propname)
+ of_node_put(pmu_np);
+
+ if (!dev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ ctx = dev_get_drvdata(dev);
+
+ return ctx->pmureg;
+}
+EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle);
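A sketch of how a dependent driver would use this lookup; the phandle property name and register offset are invented for illustration:

    static int foo_probe(struct platform_device *pdev)
    {
            struct regmap *pmureg;

            /* "foo,pmu-syscon" is a hypothetical phandle property */
            pmureg = exynos_get_pmu_regmap_by_phandle(pdev->dev.of_node,
                                                      "foo,pmu-syscon");
            if (IS_ERR(pmureg))
                    return PTR_ERR(pmureg); /* -EPROBE_DEFER until PMU binds */

            /* On gs101 this is routed through tensor_sec_update_bits() */
            return regmap_update_bits(pmureg, 0x3e80, BIT(0), BIT(0));
    }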
+
static int exynos_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct regmap_config pmu_regmcfg;
+ struct regmap *regmap;
+ struct resource *res;
int ret;
pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
@@ -137,9 +339,38 @@ static int exynos_pmu_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!pmu_context)
return -ENOMEM;
- pmu_context->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
pmu_context->pmu_data = of_device_get_match_data(dev);
+ /* For SoCs that secure PMU register writes use custom regmap */
+ if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) {
+ pmu_regmcfg = regmap_smccfg;
+ pmu_regmcfg.max_register = resource_size(res) -
+ pmu_regmcfg.reg_stride;
+ /* Need physical address for SMC call */
+ regmap = devm_regmap_init(dev, NULL,
+ (void *)(uintptr_t)res->start,
+ &pmu_regmcfg);
+ } else {
+ /* All other SoCs use a MMIO regmap */
+ pmu_regmcfg = regmap_mmiocfg;
+ pmu_regmcfg.max_register = resource_size(res) -
+ pmu_regmcfg.reg_stride;
+ regmap = devm_regmap_init_mmio(dev, pmu_base_addr,
+ &pmu_regmcfg);
+ }
+
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "regmap init failed\n");
+
+ pmu_context->pmureg = regmap;
+ pmu_context->dev = dev;
+
if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init)
pmu_context->pmu_data->pmu_init();
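To make the set/clear encoding concrete, here is a worked trace of one call (editorial example, values derived from the TENSOR_* defines above):

    /*
     * tensor_set_bits_atomic(ctx, 0x3e80, 0x100, 0x300) visits mask bits 8
     * and 9:
     *   bit 8, set in val   -> SMC write of value 8 to 0x3e80 | 0xc000 = 0xfe80
     *   bit 9, clear in val -> SMC write of value 9 to 0x3e80 | 0x8000 = 0xbe80
     * i.e. one write per masked bit, with the bit number as the value and the
     * set/clear command folded into bits 15:14 (TENSOR_SET_BITS/CLR_BITS) of
     * the register offset.
     */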
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
index 1c652ffd79b4..0a49a2c9a08e 100644
--- a/drivers/soc/samsung/exynos-pmu.h
+++ b/drivers/soc/samsung/exynos-pmu.h
@@ -21,6 +21,7 @@ struct exynos_pmu_conf {
struct exynos_pmu_data {
const struct exynos_pmu_conf *pmu_config;
const struct exynos_pmu_conf *pmu_config_extra;
+ bool pmu_secure;
void (*pmu_init)(void);
void (*powerdown_conf)(enum sys_powerdown);
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index f16beeabaa92..33512558af9f 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -133,6 +133,11 @@ config ARCH_TEGRA_234_SOC
help
Enable support for the NVIDIA Tegra234 SoC.
+config ARCH_TEGRA_241_SOC
+ bool "NVIDIA Tegra241 SoC"
+ help
+ Enable support for the NVIDIA Tegra241 SoC.
+
endif
endif
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index a2c28f493a75..b6bfd6729df3 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -3,11 +3,13 @@
* Copyright (c) 2013-2023, NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
@@ -113,6 +115,28 @@ static void tegra_fuse_restore(void *base)
fuse->clk = NULL;
}
+static void tegra_fuse_print_sku_info(struct tegra_sku_info *tegra_sku_info)
+{
+ pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n",
+ tegra_revision_name[tegra_sku_info->revision],
+ tegra_sku_info->sku_id, tegra_sku_info->cpu_process_id,
+ tegra_sku_info->soc_process_id);
+ pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
+ tegra_sku_info->cpu_speedo_id, tegra_sku_info->soc_speedo_id);
+}
+
+static int tegra_fuse_add_lookups(struct tegra_fuse *fuse)
+{
+ fuse->lookups = kmemdup_array(fuse->soc->lookups, sizeof(*fuse->lookups),
+ fuse->soc->num_lookups, GFP_KERNEL);
+ if (!fuse->lookups)
+ return -ENOMEM;
+
+ nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups);
+
+ return 0;
+}
+
static int tegra_fuse_probe(struct platform_device *pdev)
{
void __iomem *base = fuse->base;
@@ -130,15 +154,46 @@ static int tegra_fuse_probe(struct platform_device *pdev)
return PTR_ERR(fuse->base);
fuse->phys = res->start;
- fuse->clk = devm_clk_get(&pdev->dev, "fuse");
- if (IS_ERR(fuse->clk)) {
- if (PTR_ERR(fuse->clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
- PTR_ERR(fuse->clk));
+ /* Initialize the soc data and lookups if using ACPI boot. */
+ if (is_acpi_node(dev_fwnode(&pdev->dev)) && !fuse->soc) {
+ u8 chip;
- return PTR_ERR(fuse->clk);
+ tegra_acpi_init_apbmisc();
+
+ chip = tegra_get_chip_id();
+ switch (chip) {
+#if defined(CONFIG_ARCH_TEGRA_194_SOC)
+ case TEGRA194:
+ fuse->soc = &tegra194_fuse_soc;
+ break;
+#endif
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+ case TEGRA234:
+ fuse->soc = &tegra234_fuse_soc;
+ break;
+#endif
+#if defined(CONFIG_ARCH_TEGRA_241_SOC)
+ case TEGRA241:
+ fuse->soc = &tegra241_fuse_soc;
+ break;
+#endif
+ default:
+ return dev_err_probe(&pdev->dev, -EINVAL, "Unsupported SoC: %02x\n", chip);
+ }
+
+ fuse->soc->init(fuse);
+ tegra_fuse_print_sku_info(&tegra_sku_info);
+ tegra_soc_device_register();
+
+ err = tegra_fuse_add_lookups(fuse);
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "failed to add FUSE lookups\n");
}
+ fuse->clk = devm_clk_get_optional(&pdev->dev, "fuse");
+ if (IS_ERR(fuse->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(fuse->clk), "failed to get FUSE clock\n");
+
platform_set_drvdata(pdev, fuse);
fuse->dev = &pdev->dev;
@@ -179,12 +234,8 @@ static int tegra_fuse_probe(struct platform_device *pdev)
}
fuse->rst = devm_reset_control_get_optional(&pdev->dev, "fuse");
- if (IS_ERR(fuse->rst)) {
- err = PTR_ERR(fuse->rst);
- dev_err(&pdev->dev, "failed to get FUSE reset: %pe\n",
- fuse->rst);
- return err;
- }
+ if (IS_ERR(fuse->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(fuse->rst), "failed to get FUSE reset\n");
/*
* FUSE clock is enabled at a boot time, hence this resume/suspend
@@ -262,10 +313,17 @@ static const struct dev_pm_ops tegra_fuse_pm = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_fuse_suspend, tegra_fuse_resume)
};
+static const struct acpi_device_id tegra_fuse_acpi_match[] = {
+ { "NVDA200F" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, tegra_fuse_acpi_match);
+
static struct platform_driver tegra_fuse_driver = {
.driver = {
.name = "tegra-fuse",
.of_match_table = tegra_fuse_match,
+ .acpi_match_table = tegra_fuse_acpi_match,
.pm = &tegra_fuse_pm,
.suppress_bind_attrs = true,
},
@@ -287,7 +345,16 @@ u32 __init tegra_fuse_read_early(unsigned int offset)
int tegra_fuse_readl(unsigned long offset, u32 *value)
{
- if (!fuse->read || !fuse->clk)
+ if (!fuse->dev)
+ return -EPROBE_DEFER;
+
+ /*
+ * Wait for fuse->clk to be initialized if device-tree boot is used.
+ */
+ if (is_of_node(dev_fwnode(fuse->dev)) && !fuse->clk)
+ return -EPROBE_DEFER;
+
+ if (!fuse->read)
return -EPROBE_DEFER;
if (IS_ERR(fuse->clk))
@@ -343,7 +410,8 @@ const struct attribute_group tegra_soc_attr_group = {
};
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_241_SOC)
static ssize_t platform_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -370,7 +438,7 @@ const struct attribute_group tegra194_soc_attr_group = {
};
#endif
-struct device * __init tegra_soc_device_register(void)
+struct device *tegra_soc_device_register(void)
{
struct soc_device_attribute *attr;
struct soc_device *dev;
@@ -407,6 +475,7 @@ static int __init tegra_init_fuse(void)
const struct of_device_id *match;
struct device_node *np;
struct resource regs;
+ int err;
tegra_init_apbmisc();
@@ -497,22 +566,13 @@ static int __init tegra_init_fuse(void)
fuse->soc->init(fuse);
- pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n",
- tegra_revision_name[tegra_sku_info.revision],
- tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id,
- tegra_sku_info.soc_process_id);
- pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
- tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
+ tegra_fuse_print_sku_info(&tegra_sku_info);
- if (fuse->soc->lookups) {
- size_t size = sizeof(*fuse->lookups) * fuse->soc->num_lookups;
-
- fuse->lookups = kmemdup(fuse->soc->lookups, size, GFP_KERNEL);
- if (fuse->lookups)
- nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups);
- }
+ err = tegra_fuse_add_lookups(fuse);
+ if (err)
+ pr_err("failed to add FUSE lookups\n");
- return 0;
+ return err;
}
early_initcall(tegra_init_fuse);
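The consumer-visible effect of the tegra_fuse_readl() change is that callers defer until the driver binds (and, on DT boots, until the clock has been looked up). A minimal sketch; 0x10 is the SKU_INFO offset from the private fuse.h and appears here only for illustration:

    #include <soc/tegra/fuse.h>

    static int foo_read_sku(struct device *dev)
    {
            u32 sku;
            int err;

            err = tegra_fuse_readl(0x10 /* FUSE_SKU_INFO */, &sku);
            if (err)
                    return err;     /* -EPROBE_DEFER until the driver is ready */

            dev_info(dev, "SKU: %u\n", sku);
            return 0;
    }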
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index e94d46372a63..eb14e5ff5a0a 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -38,7 +38,8 @@
defined(CONFIG_ARCH_TEGRA_210_SOC) || \
defined(CONFIG_ARCH_TEGRA_186_SOC) || \
defined(CONFIG_ARCH_TEGRA_194_SOC) || \
- defined(CONFIG_ARCH_TEGRA_234_SOC)
+ defined(CONFIG_ARCH_TEGRA_234_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_241_SOC)
static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
{
if (WARN_ON(!fuse->base))
@@ -678,3 +679,23 @@ const struct tegra_fuse_soc tegra234_fuse_soc = {
.clk_suspend_on = false,
};
#endif
+
+#if defined(CONFIG_ARCH_TEGRA_241_SOC)
+static const struct tegra_fuse_info tegra241_fuse_info = {
+ .read = tegra30_fuse_read,
+ .size = 0x16008,
+ .spare = 0xcf0,
+};
+
+static const struct nvmem_keepout tegra241_fuse_keepouts[] = {
+ { .start = 0xc, .end = 0x1600c }
+};
+
+const struct tegra_fuse_soc tegra241_fuse_soc = {
+ .init = tegra30_fuse_init,
+ .info = &tegra241_fuse_info,
+ .keepouts = tegra241_fuse_keepouts,
+ .num_keepouts = ARRAY_SIZE(tegra241_fuse_keepouts),
+ .soc_attr_group = &tegra194_soc_attr_group,
+};
+#endif
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index 90f23be73894..9fee6ad6ad9e 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -69,6 +69,7 @@ struct tegra_fuse {
void tegra_init_revision(void);
void tegra_init_apbmisc(void);
+void tegra_acpi_init_apbmisc(void);
u32 __init tegra_fuse_read_spare(unsigned int spare);
u32 __init tegra_fuse_read_early(unsigned int offset);
@@ -123,7 +124,8 @@ extern const struct tegra_fuse_soc tegra186_fuse_soc;
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
- IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_241_SOC)
extern const struct attribute_group tegra194_soc_attr_group;
#endif
@@ -135,4 +137,8 @@ extern const struct tegra_fuse_soc tegra194_fuse_soc;
extern const struct tegra_fuse_soc tegra234_fuse_soc;
#endif
+#ifdef CONFIG_ARCH_TEGRA_241_SOC
+extern const struct tegra_fuse_soc tegra241_fuse_soc;
+#endif
+
#endif
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index da970f3dbf35..e2ca5d55fd31 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -3,9 +3,11 @@
* Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -62,6 +64,7 @@ bool tegra_is_silicon(void)
switch (tegra_get_chip_id()) {
case TEGRA194:
case TEGRA234:
+ case TEGRA241:
case TEGRA264:
if (tegra_get_platform() == 0)
return true;
@@ -160,9 +163,34 @@ void __init tegra_init_revision(void)
tegra_sku_info.platform = tegra_get_platform();
}
-void __init tegra_init_apbmisc(void)
+static void tegra_init_apbmisc_resources(struct resource *apbmisc,
+ struct resource *straps)
{
void __iomem *strapping_base;
+
+ apbmisc_base = ioremap(apbmisc->start, resource_size(apbmisc));
+ if (apbmisc_base)
+ chipid = readl_relaxed(apbmisc_base + 4);
+ else
+ pr_err("failed to map APBMISC registers\n");
+
+ strapping_base = ioremap(straps->start, resource_size(straps));
+ if (strapping_base) {
+ strapping = readl_relaxed(strapping_base);
+ iounmap(strapping_base);
+ } else {
+ pr_err("failed to map strapping options registers\n");
+ }
+}
+
+/**
+ * tegra_init_apbmisc - Initializes Tegra APBMISC and Strapping registers.
+ *
+ * This is called during early init as some of the old 32-bit ARM code needs
+ * information from the APBMISC registers very early during boot.
+ */
+void __init tegra_init_apbmisc(void)
+{
struct resource apbmisc, straps;
struct device_node *np;
@@ -219,23 +247,73 @@ void __init tegra_init_apbmisc(void)
}
}
- apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc));
- if (!apbmisc_base) {
- pr_err("failed to map APBMISC registers\n");
- } else {
- chipid = readl_relaxed(apbmisc_base + 4);
+ tegra_init_apbmisc_resources(&apbmisc, &straps);
+ long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
+
+put:
+ of_node_put(np);
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id apbmisc_acpi_match[] = {
+ { "NVDA2010" },
+ { /* sentinel */ }
+};
+
+void tegra_acpi_init_apbmisc(void)
+{
+ struct resource *resources[2] = { NULL };
+ struct resource_entry *rentry;
+ struct acpi_device *adev = NULL;
+ struct list_head resource_list;
+ int rcount = 0;
+ int ret;
+
+ adev = acpi_dev_get_first_match_dev(apbmisc_acpi_match[0].id, NULL, -1);
+ if (!adev)
+ return;
+
+ INIT_LIST_HEAD(&resource_list);
+
+ ret = acpi_dev_get_memory_resources(adev, &resource_list);
+ if (ret < 0) {
+ pr_err("failed to get APBMISC memory resources");
+ goto out_put_acpi_dev;
}
- strapping_base = ioremap(straps.start, resource_size(&straps));
- if (!strapping_base) {
- pr_err("failed to map strapping options registers\n");
- } else {
- strapping = readl_relaxed(strapping_base);
- iounmap(strapping_base);
+ /*
+ * Get required memory resources.
+ *
+ * resources[0]: apbmisc.
+ * resources[1]: straps.
+ */
+ resource_list_for_each_entry(rentry, &resource_list) {
+ if (rcount >= ARRAY_SIZE(resources))
+ break;
+
+ resources[rcount++] = rentry->res;
}
- long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
+ if (!resources[0]) {
+ pr_err("failed to get APBMISC registers\n");
+ goto out_free_resource_list;
+ }
-put:
- of_node_put(np);
+ if (!resources[1]) {
+ pr_err("failed to get strapping options registers\n");
+ goto out_free_resource_list;
+ }
+
+ tegra_init_apbmisc_resources(resources[0], resources[1]);
+
+out_free_resource_list:
+ acpi_dev_free_resource_list(&resource_list);
+
+out_put_acpi_dev:
+ acpi_dev_put(adev);
+}
+#else
+void tegra_acpi_init_apbmisc(void)
+{
}
+#endif
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index f432aa022ace..d6bfcea5ee65 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -3,7 +3,7 @@
* drivers/soc/tegra/pmc.c
*
* Copyright (c) 2010 Google, Inc
- * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Colin Cross <ccross@google.com>
@@ -384,6 +384,7 @@ struct tegra_pmc_soc {
bool has_blink_output;
bool has_usb_sleepwalk;
bool supports_core_domain;
+ bool has_single_mmio_aperture;
};
/**
@@ -1777,30 +1778,6 @@ static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id)
return TEGRA_IO_PAD_VOLTAGE_3V3;
}
-/**
- * tegra_io_rail_power_on() - enable power to I/O rail
- * @id: Tegra I/O pad ID for which to enable power
- *
- * See also: tegra_io_pad_power_enable()
- */
-int tegra_io_rail_power_on(unsigned int id)
-{
- return tegra_io_pad_power_enable(id);
-}
-EXPORT_SYMBOL(tegra_io_rail_power_on);
-
-/**
- * tegra_io_rail_power_off() - disable power to I/O rail
- * @id: Tegra I/O pad ID for which to disable power
- *
- * See also: tegra_io_pad_power_disable()
- */
-int tegra_io_rail_power_off(unsigned int id)
-{
- return tegra_io_pad_power_disable(id);
-}
-EXPORT_SYMBOL(tegra_io_rail_power_off);
-
#ifdef CONFIG_PM_SLEEP
enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void)
{
@@ -2909,31 +2886,33 @@ static int tegra_pmc_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wake");
- if (res) {
+ if (pmc->soc->has_single_mmio_aperture) {
+ pmc->wake = base;
+ pmc->aotag = base;
+ pmc->scratch = base;
+ } else {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "wake");
pmc->wake = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pmc->wake))
return PTR_ERR(pmc->wake);
- } else {
- pmc->wake = base;
- }
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aotag");
- if (res) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "aotag");
pmc->aotag = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pmc->aotag))
return PTR_ERR(pmc->aotag);
- } else {
- pmc->aotag = base;
- }
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scratch");
- if (res) {
- pmc->scratch = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pmc->scratch))
- return PTR_ERR(pmc->scratch);
- } else {
- pmc->scratch = base;
+ /* "scratch" is an optional aperture */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "scratch");
+ if (res) {
+ pmc->scratch = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->scratch))
+ return PTR_ERR(pmc->scratch);
+ } else {
+ pmc->scratch = NULL;
+ }
}
pmc->clk = devm_clk_get_optional(&pdev->dev, "pclk");
@@ -2945,12 +2924,15 @@ static int tegra_pmc_probe(struct platform_device *pdev)
* PMC should be last resort for restarting since it soft-resets
* CPU without resetting everything else.
*/
- err = devm_register_reboot_notifier(&pdev->dev,
- &tegra_pmc_reboot_notifier);
- if (err) {
- dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
- err);
- return err;
+ if (pmc->scratch) {
+ err = devm_register_reboot_notifier(&pdev->dev,
+ &tegra_pmc_reboot_notifier);
+ if (err) {
+ dev_err(&pdev->dev,
+ "unable to register reboot notifier, %d\n",
+ err);
+ return err;
+ }
}
err = devm_register_sys_off_handler(&pdev->dev,
@@ -3324,6 +3306,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
.num_pmc_clks = 0,
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const char * const tegra30_powergates[] = {
@@ -3385,6 +3368,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const char * const tegra114_powergates[] = {
@@ -3442,6 +3426,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const char * const tegra124_powergates[] = {
@@ -3586,6 +3571,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const char * const tegra210_powergates[] = {
@@ -3749,6 +3735,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
.has_usb_sleepwalk = true,
+ .has_single_mmio_aperture = true,
};
static const struct tegra_io_pad_soc tegra186_io_pads[] = {
@@ -3946,6 +3933,7 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
.num_pmc_clks = 0,
.has_blink_output = false,
.has_usb_sleepwalk = false,
+ .has_single_mmio_aperture = false,
};
static const struct tegra_io_pad_soc tegra194_io_pads[] = {
@@ -4131,6 +4119,7 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.num_pmc_clks = 0,
.has_blink_output = false,
.has_usb_sleepwalk = false,
+ .has_single_mmio_aperture = false,
};
static const struct tegra_io_pad_soc tegra234_io_pads[] = {
@@ -4220,6 +4209,7 @@ static const char * const tegra234_reset_sources[] = {
};
static const struct tegra_wake_event tegra234_wake_events[] = {
+ TEGRA_WAKE_GPIO("sd-wake", 8, 0, TEGRA234_MAIN_GPIO(G, 7)),
TEGRA_WAKE_IRQ("pmu", 24, 209),
TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)),
TEGRA_WAKE_GPIO("mgbe", 56, 0, TEGRA234_MAIN_GPIO(Y, 3)),
@@ -4259,6 +4249,7 @@ static const struct tegra_pmc_soc tegra234_pmc_soc = {
.pmc_clks_data = NULL,
.num_pmc_clks = 0,
.has_blink_output = false,
+ .has_single_mmio_aperture = false,
};
static const struct of_device_id tegra_pmc_match[] = {
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 0df732b03a2d..350b3dab3a05 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1932,24 +1932,18 @@ static void cqspi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int cqspi_suspend(struct device *dev)
+static int cqspi_runtime_suspend(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
- struct spi_controller *host = dev_get_drvdata(dev);
- int ret;
- ret = spi_controller_suspend(host);
cqspi_controller_enable(cqspi, 0);
-
clk_disable_unprepare(cqspi->clk);
-
- return ret;
+ return 0;
}
-static int cqspi_resume(struct device *dev)
+static int cqspi_runtime_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
- struct spi_controller *host = dev_get_drvdata(dev);
clk_prepare_enable(cqspi->clk);
cqspi_wait_idle(cqspi);
@@ -1957,12 +1951,27 @@ static int cqspi_resume(struct device *dev)
cqspi->current_cs = -1;
cqspi->sclk = 0;
+ return 0;
+}
+
+static int cqspi_suspend(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ return spi_controller_suspend(cqspi->host);
+}
- return spi_controller_resume(host);
+static int cqspi_resume(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ return spi_controller_resume(cqspi->host);
}
-static DEFINE_RUNTIME_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend,
- cqspi_resume, NULL);
+static const struct dev_pm_ops cqspi_dev_pm_ops = {
+ RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume)
+};
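For background on this fix (an editorial note, not patch text): DEFINE_RUNTIME_DEV_PM_OPS() wires the given pair into the runtime hooks and routes system sleep through pm_runtime_force_suspend()/resume(), so the old table ran spi_controller_suspend() on every runtime suspend. The split table keeps the runtime path down to gating the controller and its clock:

    /*
     * Equivalent of the removed table, for comparison:
     *
     *   static DEFINE_RUNTIME_DEV_PM_OPS(cqspi_dev_pm_ops,
     *                                    cqspi_suspend, cqspi_resume, NULL);
     *
     * i.e. runtime_suspend = cqspi_suspend (which quiesced the whole SPI
     * controller), with system sleep delegated to pm_runtime_force_suspend()
     * and pm_runtime_force_resume().
     */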
static const struct cqspi_driver_platdata cdns_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index d45d9e2e1611..27c995b657f2 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -148,8 +148,7 @@ static void cs42l43_set_cs(struct spi_device *spi, bool is_high)
{
struct cs42l43_spi *priv = spi_controller_get_devdata(spi->controller);
- if (spi_get_chipselect(spi, 0) == 0)
- regmap_write(priv->regmap, CS42L43_SPI_CONFIG2, !is_high);
+ regmap_write(priv->regmap, CS42L43_SPI_CONFIG2, !is_high);
}
static int cs42l43_prepare_message(struct spi_controller *ctlr, struct spi_message *msg)
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 546cdce525fc..833a1bb7a914 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -2,6 +2,7 @@
// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
// Copyright (C) 2008 Juergen Beisert
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -660,15 +661,15 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
<< MX51_ECSPI_CTRL_BL_OFFSET;
else {
if (spi_imx->usedma) {
- ctrl |= (spi_imx->bits_per_word *
- spi_imx_bytes_per_word(spi_imx->bits_per_word) - 1)
+ ctrl |= (spi_imx->bits_per_word - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
} else {
if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
- ctrl |= (MX51_ECSPI_CTRL_MAX_BURST - 1)
+ ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else
- ctrl |= (spi_imx->count * spi_imx->bits_per_word - 1)
+ ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
+ BITS_PER_BYTE) * spi_imx->bits_per_word - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
}
}
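A worked example of the corrected CPU-mode burst length (editorial; numbers chosen for illustration):

    /*
     * For a transfer of count = 12 bytes with bits_per_word = 24:
     *   bytes per word = DIV_ROUND_UP(24, 8) = 3
     *   words          = 12 / 3             = 4
     *   burst length   = 4 * 24 - 1         = 95 (i.e. 96 bits on the wire)
     * The old "count * bits_per_word - 1" treated the byte count as a word
     * count and would have programmed 287, overrunning the intended burst.
     */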
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index 07d20ca1164c..4337ca51d7aa 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -85,6 +85,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 1bf080339b5a..88cbe4f00cc3 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -39,6 +39,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>
#include <trace/events/spi.h>
+#include <linux/dma/mxs-dma.h>
#define DRIVER_NAME "mxs-spi"
@@ -252,7 +253,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
desc = dmaengine_prep_slave_sg(ssp->dmach,
&dma_xfer[sg_count].sg, 1,
(flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
if (!desc) {
dev_err(ssp->dev,
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index a0c9fea908f5..ddf1c684bcc7 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -53,8 +53,6 @@
/* per-register bitmasks: */
#define OMAP2_MCSPI_IRQSTATUS_EOW BIT(17)
-#define OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY BIT(0)
-#define OMAP2_MCSPI_IRQSTATUS_RX0_FULL BIT(2)
#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
@@ -293,7 +291,7 @@ static void omap2_mcspi_set_mode(struct spi_controller *ctlr)
}
static void omap2_mcspi_set_fifo(const struct spi_device *spi,
- struct spi_transfer *t, int enable, int dma_enabled)
+ struct spi_transfer *t, int enable)
{
struct spi_controller *ctlr = spi->controller;
struct omap2_mcspi_cs *cs = spi->controller_state;
@@ -314,28 +312,20 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
else
max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
- if (dma_enabled)
- wcnt = t->len / bytes_per_word;
- else
- wcnt = 0;
+
+ wcnt = t->len / bytes_per_word;
if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
goto disable_fifo;
xferlevel = wcnt << 16;
if (t->rx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFER;
- if (dma_enabled)
- xferlevel |= (bytes_per_word - 1) << 8;
- else
- xferlevel |= (max_fifo_depth - 1) << 8;
+ xferlevel |= (bytes_per_word - 1) << 8;
}
if (t->tx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFET;
- if (dma_enabled)
- xferlevel |= bytes_per_word - 1;
- else
- xferlevel |= (max_fifo_depth - 1);
+ xferlevel |= bytes_per_word - 1;
}
mcspi_write_reg(ctlr, OMAP2_MCSPI_XFERLEVEL, xferlevel);
@@ -892,113 +882,6 @@ out:
return count - c;
}
-static unsigned
-omap2_mcspi_txrx_piofifo(struct spi_device *spi, struct spi_transfer *xfer)
-{
- struct omap2_mcspi_cs *cs = spi->controller_state;
- struct omap2_mcspi *mcspi;
- unsigned int count, c;
- unsigned int iter, cwc;
- int last_request;
- void __iomem *base = cs->base;
- void __iomem *tx_reg;
- void __iomem *rx_reg;
- void __iomem *chstat_reg;
- void __iomem *irqstat_reg;
- int word_len, bytes_per_word;
- u8 *rx;
- const u8 *tx;
-
- mcspi = spi_controller_get_devdata(spi->controller);
- count = xfer->len;
- c = count;
- word_len = cs->word_len;
- bytes_per_word = mcspi_bytes_per_word(word_len);
-
- /*
- * We store the pre-calculated register addresses on stack to speed
- * up the transfer loop.
- */
- tx_reg = base + OMAP2_MCSPI_TX0;
- rx_reg = base + OMAP2_MCSPI_RX0;
- chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
- irqstat_reg = base + OMAP2_MCSPI_IRQSTATUS;
-
- if (c < (word_len >> 3))
- return 0;
-
- rx = xfer->rx_buf;
- tx = xfer->tx_buf;
-
- do {
- /* calculate number of words in current iteration */
- cwc = min((unsigned int)mcspi->fifo_depth / bytes_per_word,
- c / bytes_per_word);
- last_request = cwc != (mcspi->fifo_depth / bytes_per_word);
- if (tx) {
- if (mcspi_wait_for_reg_bit(irqstat_reg,
- OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY) < 0) {
- dev_err(&spi->dev, "TX Empty timed out\n");
- goto out;
- }
- writel_relaxed(OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY, irqstat_reg);
-
- for (iter = 0; iter < cwc; iter++, tx += bytes_per_word) {
- if (bytes_per_word == 1)
- writel_relaxed(*tx, tx_reg);
- else if (bytes_per_word == 2)
- writel_relaxed(*((u16 *)tx), tx_reg);
- else if (bytes_per_word == 4)
- writel_relaxed(*((u32 *)tx), tx_reg);
- }
- }
-
- if (rx) {
- if (!last_request &&
- mcspi_wait_for_reg_bit(irqstat_reg,
- OMAP2_MCSPI_IRQSTATUS_RX0_FULL) < 0) {
- dev_err(&spi->dev, "RX_FULL timed out\n");
- goto out;
- }
- writel_relaxed(OMAP2_MCSPI_IRQSTATUS_RX0_FULL, irqstat_reg);
-
- for (iter = 0; iter < cwc; iter++, rx += bytes_per_word) {
- if (last_request &&
- mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_RXS) < 0) {
- dev_err(&spi->dev, "RXS timed out\n");
- goto out;
- }
- if (bytes_per_word == 1)
- *rx = readl_relaxed(rx_reg);
- else if (bytes_per_word == 2)
- *((u16 *)rx) = readl_relaxed(rx_reg);
- else if (bytes_per_word == 4)
- *((u32 *)rx) = readl_relaxed(rx_reg);
- }
- }
-
- if (last_request) {
- if (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_EOT) < 0) {
- dev_err(&spi->dev, "EOT timed out\n");
- goto out;
- }
- if (mcspi_wait_for_reg_bit(chstat_reg,
- OMAP2_MCSPI_CHSTAT_TXFFE) < 0) {
- dev_err(&spi->dev, "TXFFE timed out\n");
- goto out;
- }
- omap2_mcspi_set_enable(spi, 0);
- }
- c -= cwc * bytes_per_word;
- } while (c >= bytes_per_word);
-
-out:
- omap2_mcspi_set_enable(spi, 1);
- return count - c;
-}
-
static u32 omap2_mcspi_calc_divisor(u32 speed_hz, u32 ref_clk_hz)
{
u32 div;
@@ -1323,9 +1206,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
ctlr->cur_msg_mapped &&
ctlr->can_dma(ctlr, spi, t))
- omap2_mcspi_set_fifo(spi, t, 1, 1);
- else if (t->len > OMAP2_MCSPI_MAX_FIFODEPTH)
- omap2_mcspi_set_fifo(spi, t, 1, 0);
+ omap2_mcspi_set_fifo(spi, t, 1);
omap2_mcspi_set_enable(spi, 1);
@@ -1338,8 +1219,6 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
ctlr->cur_msg_mapped &&
ctlr->can_dma(ctlr, spi, t))
count = omap2_mcspi_txrx_dma(spi, t);
- else if (mcspi->fifo_depth > 0)
- count = omap2_mcspi_txrx_piofifo(spi, t);
else
count = omap2_mcspi_txrx_pio(spi, t);
@@ -1352,7 +1231,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
omap2_mcspi_set_enable(spi, 0);
if (mcspi->fifo_depth > 0)
- omap2_mcspi_set_fifo(spi, t, 0, 0);
+ omap2_mcspi_set_fifo(spi, t, 0);
out:
/* Restore defaults if they were overriden */
@@ -1375,7 +1254,7 @@ out:
omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));
if (mcspi->fifo_depth > 0 && t)
- omap2_mcspi_set_fifo(spi, t, 0, 0);
+ omap2_mcspi_set_fifo(spi, t, 0);
return status;
}
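An editorial worked example of the resulting XFERLEVEL programming, assuming a 32-byte full-duplex DMA transfer with 16-bit words (bytes_per_word = 2):

    /*
     * wcnt          = 32 / 2 = 16           -> XFERLEVEL[31:16] = 0x0010
     * RX trigger    = bytes_per_word - 1 = 1 -> XFERLEVEL[15:8] = 0x01
     * TX trigger    = bytes_per_word - 1 = 1 -> XFERLEVEL[7:0]  = 0x01
     * giving XFERLEVEL = 0x00100101. The max_fifo_depth-based trigger
     * levels only mattered for the PIO-FIFO path removed by this patch.
     */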
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index b07bb5e5811e..942c3117ab3a 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -25,11 +25,13 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/wait.h>
+#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
@@ -166,10 +168,8 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
int scr;
u8 cdm = 0;
u32 speed;
- u8 bits_per_word;
/* Start with the generic configuration for this device. */
- bits_per_word = spi->bits_per_word;
speed = spi->max_speed_hz;
/*
@@ -177,9 +177,6 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
* the transfer to overwrite the generic configuration with zeros.
*/
if (t) {
- if (t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
if (t->speed_hz)
speed = min(t->speed_hz, spi->max_speed_hz);
}
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index b9934b9c2d70..9f30e0edadfe 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -384,7 +384,7 @@ static struct attribute *ssb_device_attrs[] = {
};
ATTRIBUTE_GROUPS(ssb_device);
-static struct bus_type ssb_bustype = {
+static const struct bus_type ssb_bustype = {
.name = "ssb",
.match = ssb_bus_match,
.probe = ssb_device_probe,
diff --git a/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt b/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt
index b1f9474f36d5..f34a95611645 100644
--- a/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt
+++ b/drivers/staging/fieldbus/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt
@@ -48,7 +48,7 @@ Example of usage:
-----------------
This example places the bridge on top of the i.MX WEIM parallel bus, see:
-Documentation/devicetree/bindings/bus/imx-weim.txt
+Documentation/devicetree/bindings/memory-controllers/fsl/fsl,imx-weim.yaml
&weim {
controller@0,0 {
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
index a3cb68cfa0f9..01883fbcd79b 100644
--- a/drivers/staging/greybus/pwm.c
+++ b/drivers/staging/greybus/pwm.c
@@ -16,8 +16,6 @@
struct gb_pwm_chip {
struct gb_connection *connection;
- u8 pwm_max; /* max pwm number */
-
struct pwm_chip chip;
};
@@ -26,32 +24,33 @@ static inline struct gb_pwm_chip *pwm_chip_to_gb_pwm_chip(struct pwm_chip *chip)
return container_of(chip, struct gb_pwm_chip, chip);
}
-static int gb_pwm_count_operation(struct gb_pwm_chip *pwmc)
+static int gb_pwm_get_npwm(struct gb_connection *connection)
{
struct gb_pwm_count_response response;
int ret;
- ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_PWM_COUNT,
+ ret = gb_operation_sync(connection, GB_PWM_TYPE_PWM_COUNT,
NULL, 0, &response, sizeof(response));
if (ret)
return ret;
- pwmc->pwm_max = response.count;
- return 0;
+
+ /*
+ * The request returns the highest PWM id the device accepts, so add
+ * one to get the number of PWMs.
+ */
+ return response.count + 1;
}
-static int gb_pwm_activate_operation(struct gb_pwm_chip *pwmc,
- u8 which)
+static int gb_pwm_activate_operation(struct pwm_chip *chip, u8 which)
{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_pwm_activate_request request;
struct gbphy_device *gbphy_dev;
int ret;
- if (which > pwmc->pwm_max)
- return -EINVAL;
-
request.which = which;
- gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
@@ -64,19 +63,16 @@ static int gb_pwm_activate_operation(struct gb_pwm_chip *pwmc,
return ret;
}
-static int gb_pwm_deactivate_operation(struct gb_pwm_chip *pwmc,
- u8 which)
+static int gb_pwm_deactivate_operation(struct pwm_chip *chip, u8 which)
{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_pwm_deactivate_request request;
struct gbphy_device *gbphy_dev;
int ret;
- if (which > pwmc->pwm_max)
- return -EINVAL;
-
request.which = which;
- gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
@@ -89,21 +85,19 @@ static int gb_pwm_deactivate_operation(struct gb_pwm_chip *pwmc,
return ret;
}
-static int gb_pwm_config_operation(struct gb_pwm_chip *pwmc,
+static int gb_pwm_config_operation(struct pwm_chip *chip,
u8 which, u32 duty, u32 period)
{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_pwm_config_request request;
struct gbphy_device *gbphy_dev;
int ret;
- if (which > pwmc->pwm_max)
- return -EINVAL;
-
request.which = which;
request.duty = cpu_to_le32(duty);
request.period = cpu_to_le32(period);
- gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
@@ -116,20 +110,18 @@ static int gb_pwm_config_operation(struct gb_pwm_chip *pwmc,
return ret;
}
-static int gb_pwm_set_polarity_operation(struct gb_pwm_chip *pwmc,
+static int gb_pwm_set_polarity_operation(struct pwm_chip *chip,
u8 which, u8 polarity)
{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_pwm_polarity_request request;
struct gbphy_device *gbphy_dev;
int ret;
- if (which > pwmc->pwm_max)
- return -EINVAL;
-
request.which = which;
request.polarity = polarity;
- gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
@@ -142,19 +134,16 @@ static int gb_pwm_set_polarity_operation(struct gb_pwm_chip *pwmc,
return ret;
}
-static int gb_pwm_enable_operation(struct gb_pwm_chip *pwmc,
- u8 which)
+static int gb_pwm_enable_operation(struct pwm_chip *chip, u8 which)
{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_pwm_enable_request request;
struct gbphy_device *gbphy_dev;
int ret;
- if (which > pwmc->pwm_max)
- return -EINVAL;
-
request.which = which;
- gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
return ret;
@@ -167,22 +156,19 @@ static int gb_pwm_enable_operation(struct gb_pwm_chip *pwmc,
return ret;
}
-static int gb_pwm_disable_operation(struct gb_pwm_chip *pwmc,
- u8 which)
+static int gb_pwm_disable_operation(struct pwm_chip *chip, u8 which)
{
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_pwm_disable_request request;
struct gbphy_device *gbphy_dev;
int ret;
- if (which > pwmc->pwm_max)
- return -EINVAL;
-
request.which = which;
ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_DISABLE,
&request, sizeof(request), NULL, 0);
- gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
+ gbphy_dev = to_gbphy_dev(pwmchip_parent(chip));
gbphy_runtime_put_autosuspend(gbphy_dev);
return ret;
@@ -190,19 +176,15 @@ static int gb_pwm_disable_operation(struct gb_pwm_chip *pwmc,
static int gb_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
-
- return gb_pwm_activate_operation(pwmc, pwm->hwpwm);
+ return gb_pwm_activate_operation(chip, pwm->hwpwm);
};
static void gb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
-
if (pwm_is_enabled(pwm))
- dev_warn(chip->dev, "freeing PWM device without disabling\n");
+ dev_warn(pwmchip_parent(chip), "freeing PWM device without disabling\n");
- gb_pwm_deactivate_operation(pwmc, pwm->hwpwm);
+ gb_pwm_deactivate_operation(chip, pwm->hwpwm);
}
static int gb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -212,22 +194,21 @@ static int gb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
bool enabled = pwm->state.enabled;
u64 period = state->period;
u64 duty_cycle = state->duty_cycle;
- struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
/* Set polarity */
if (state->polarity != pwm->state.polarity) {
if (enabled) {
- gb_pwm_disable_operation(pwmc, pwm->hwpwm);
+ gb_pwm_disable_operation(chip, pwm->hwpwm);
enabled = false;
}
- err = gb_pwm_set_polarity_operation(pwmc, pwm->hwpwm, state->polarity);
+ err = gb_pwm_set_polarity_operation(chip, pwm->hwpwm, state->polarity);
if (err)
return err;
}
if (!state->enabled) {
if (enabled)
- gb_pwm_disable_operation(pwmc, pwm->hwpwm);
+ gb_pwm_disable_operation(chip, pwm->hwpwm);
return 0;
}
@@ -243,13 +224,13 @@ static int gb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (duty_cycle > period)
duty_cycle = period;
- err = gb_pwm_config_operation(pwmc, pwm->hwpwm, duty_cycle, period);
+ err = gb_pwm_config_operation(chip, pwm->hwpwm, duty_cycle, period);
if (err)
return err;
/* enable/disable */
if (!enabled)
- return gb_pwm_enable_operation(pwmc, pwm->hwpwm);
+ return gb_pwm_enable_operation(chip, pwm->hwpwm);
return 0;
}
@@ -266,61 +247,59 @@ static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
struct gb_connection *connection;
struct gb_pwm_chip *pwmc;
struct pwm_chip *chip;
- int ret;
-
- pwmc = kzalloc(sizeof(*pwmc), GFP_KERNEL);
- if (!pwmc)
- return -ENOMEM;
+ int ret, npwm;
connection = gb_connection_create(gbphy_dev->bundle,
le16_to_cpu(gbphy_dev->cport_desc->id),
NULL);
- if (IS_ERR(connection)) {
- ret = PTR_ERR(connection);
- goto exit_pwmc_free;
- }
-
- pwmc->connection = connection;
- gb_connection_set_data(connection, pwmc);
- gb_gbphy_set_data(gbphy_dev, pwmc);
+ if (IS_ERR(connection))
+ return PTR_ERR(connection);
ret = gb_connection_enable(connection);
if (ret)
goto exit_connection_destroy;
/* Query number of pwms present */
- ret = gb_pwm_count_operation(pwmc);
- if (ret)
+ ret = gb_pwm_get_npwm(connection);
+ if (ret < 0)
+ goto exit_connection_disable;
+ npwm = ret;
+
+ chip = pwmchip_alloc(&gbphy_dev->dev, npwm, sizeof(*pwmc));
+ if (IS_ERR(chip)) {
+ ret = PTR_ERR(chip);
goto exit_connection_disable;
+ }
+ gb_gbphy_set_data(gbphy_dev, chip);
- chip = &pwmc->chip;
+ pwmc = pwm_chip_to_gb_pwm_chip(chip);
+ pwmc->connection = connection;
- chip->dev = &gbphy_dev->dev;
chip->ops = &gb_pwm_ops;
- chip->npwm = pwmc->pwm_max + 1;
ret = pwmchip_add(chip);
if (ret) {
dev_err(&gbphy_dev->dev,
"failed to register PWM: %d\n", ret);
- goto exit_connection_disable;
+ goto exit_pwmchip_put;
}
gbphy_runtime_put_autosuspend(gbphy_dev);
return 0;
+exit_pwmchip_put:
+ pwmchip_put(chip);
exit_connection_disable:
gb_connection_disable(connection);
exit_connection_destroy:
gb_connection_destroy(connection);
-exit_pwmc_free:
- kfree(pwmc);
return ret;
}
static void gb_pwm_remove(struct gbphy_device *gbphy_dev)
{
- struct gb_pwm_chip *pwmc = gb_gbphy_get_data(gbphy_dev);
+ struct pwm_chip *chip = gb_gbphy_get_data(gbphy_dev);
+ struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
struct gb_connection *connection = pwmc->connection;
int ret;
@@ -328,10 +307,10 @@ static void gb_pwm_remove(struct gbphy_device *gbphy_dev)
if (ret)
gbphy_runtime_get_noresume(gbphy_dev);
- pwmchip_remove(&pwmc->chip);
+ pwmchip_remove(chip);
+ pwmchip_put(chip);
gb_connection_disable(connection);
gb_connection_destroy(connection);
- kfree(pwmc);
}
static const struct gbphy_device_id gb_pwm_id_table[] = {
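
The probe conversion above follows the pwmchip_alloc() lifecycle: the chip and its driver data are allocated together, pwmchip_put() balances the allocation on error paths, and pwmchip_remove() plus pwmchip_put() tear it down. A minimal sketch of that pattern, with illustrative names and npwm value:

	#include <linux/err.h>
	#include <linux/pwm.h>

	struct example_pwm {
		int dummy;	/* driver private data lives after the chip */
	};

	static int example_apply(struct pwm_chip *chip, struct pwm_device *pwm,
				 const struct pwm_state *state)
	{
		return 0;	/* stub: a real driver programs the hardware here */
	}

	static const struct pwm_ops example_pwm_ops = {
		.apply = example_apply,
	};

	static int example_probe(struct device *parent)
	{
		struct pwm_chip *chip;
		struct example_pwm *priv;
		int ret;

		chip = pwmchip_alloc(parent, 4 /* npwm, illustrative */,
				     sizeof(*priv));
		if (IS_ERR(chip))
			return PTR_ERR(chip);

		priv = pwmchip_get_drvdata(chip);
		chip->ops = &example_pwm_ops;

		ret = pwmchip_add(chip);
		if (ret)
			pwmchip_put(chip);	/* balances pwmchip_alloc() */
		return ret;
	}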
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index e748a5d04e97..9149d41fe65b 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -608,7 +608,7 @@ static void ad5933_work(struct work_struct *work)
struct ad5933_state, work.work);
struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
__be16 buf[2];
- int val[2];
+ u16 val[2];
unsigned char status;
int ret;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index f44e6412f4e3..d0db2efe0045 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -3723,12 +3723,10 @@ apply_min_padding:
static int atomisp_set_crop(struct atomisp_device *isp,
const struct v4l2_mbus_framefmt *format,
+ struct v4l2_subdev_state *sd_state,
int which)
{
struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
- struct v4l2_subdev_state pad_state = {
- .pads = &input->pad_cfg,
- };
struct v4l2_subdev_selection sel = {
.which = which,
.target = V4L2_SEL_TGT_CROP,
@@ -3754,7 +3752,7 @@ static int atomisp_set_crop(struct atomisp_device *isp,
sel.r.left = ((input->native_rect.width - sel.r.width) / 2) & ~1;
sel.r.top = ((input->native_rect.height - sel.r.height) / 2) & ~1;
- ret = v4l2_subdev_call(input->camera, pad, set_selection, &pad_state, &sel);
+ ret = v4l2_subdev_call(input->camera, pad, set_selection, sd_state, &sel);
if (ret)
dev_err(isp->dev, "Error setting crop to %ux%u @%ux%u: %d\n",
sel.r.width, sel.r.height, sel.r.left, sel.r.top, ret);
@@ -3770,9 +3768,6 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
const struct atomisp_format_bridge *fmt, *snr_fmt;
struct atomisp_sub_device *asd = &isp->asd;
struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
- struct v4l2_subdev_state pad_state = {
- .pads = &input->pad_cfg,
- };
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -3809,11 +3804,16 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n",
format.format.width, format.format.height);
- ret = atomisp_set_crop(isp, &format.format, V4L2_SUBDEV_FORMAT_TRY);
- if (ret)
- return ret;
+ v4l2_subdev_lock_state(input->try_sd_state);
+
+ ret = atomisp_set_crop(isp, &format.format, input->try_sd_state,
+ V4L2_SUBDEV_FORMAT_TRY);
+ if (ret == 0)
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt,
+ input->try_sd_state, &format);
+
+ v4l2_subdev_unlock_state(input->try_sd_state);
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &format);
if (ret)
return ret;
@@ -4238,9 +4238,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
struct atomisp_device *isp = asd->isp;
struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
const struct atomisp_format_bridge *format;
- struct v4l2_subdev_state pad_state = {
- .pads = &input->pad_cfg,
- };
+ struct v4l2_subdev_state *act_sd_state;
struct v4l2_subdev_format vformat = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
@@ -4268,12 +4266,18 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
/* Disable dvs if resolution can't be supported by sensor */
if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
- ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_TRY);
- if (ret)
- return ret;
+ v4l2_subdev_lock_state(input->try_sd_state);
+
+ ret = atomisp_set_crop(isp, &vformat.format, input->try_sd_state,
+ V4L2_SUBDEV_FORMAT_TRY);
+ if (ret == 0) {
+ vformat.which = V4L2_SUBDEV_FORMAT_TRY;
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt,
+ input->try_sd_state, &vformat);
+ }
+
+ v4l2_subdev_unlock_state(input->try_sd_state);
- vformat.which = V4L2_SUBDEV_FORMAT_TRY;
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &vformat);
if (ret)
return ret;
@@ -4291,12 +4295,18 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
}
}
- ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_ACTIVE);
- if (ret)
- return ret;
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+
+ ret = atomisp_set_crop(isp, &vformat.format, act_sd_state,
+ V4L2_SUBDEV_FORMAT_ACTIVE);
+ if (ret == 0) {
+ vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(input->camera, pad, set_fmt, act_sd_state, &vformat);
+ }
+
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
- vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(input->camera, pad, set_fmt, NULL, &vformat);
if (ret)
return ret;
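
These hunks all adopt the same v4l2 pattern: take the subdev's active state lock, issue the pad op with that state, then unlock, keeping in mind the returned state may be NULL for subdevs that do not use the centrally managed state. A minimal sketch, with illustrative names:

	#include <media/v4l2-subdev.h>

	static int example_call_get_fmt(struct v4l2_subdev *sd,
					struct v4l2_subdev_format *fmt)
	{
		struct v4l2_subdev_state *state;
		int ret;

		state = v4l2_subdev_lock_and_get_active_state(sd);
		ret = v4l2_subdev_call(sd, pad, get_fmt, state, fmt);
		if (state)	/* NULL when the subdev has no active state */
			v4l2_subdev_unlock_state(state);

		return ret;
	}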
diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h
index f7b4bee9574b..d5b077e602ca 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h
@@ -132,8 +132,8 @@ struct atomisp_input_subdev {
/* Sensor rects for sensors which support crop */
struct v4l2_rect native_rect;
struct v4l2_rect active_rect;
- /* Sensor pad_cfg for which == V4L2_SUBDEV_FORMAT_TRY calls */
- struct v4l2_subdev_pad_config pad_cfg;
+ /* Sensor state for which == V4L2_SUBDEV_FORMAT_TRY calls */
+ struct v4l2_subdev_state *try_sd_state;
struct v4l2_subdev *motor;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index 01b7fa9b56a2..5b2d88c02d36 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -781,12 +781,20 @@ static int atomisp_enum_framesizes(struct file *file, void *priv,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.code = input->code,
};
+ struct v4l2_subdev_state *act_sd_state;
int ret;
+ if (!input->camera)
+ return -EINVAL;
+
if (input->crop_support)
return atomisp_enum_framesizes_crop(isp, fsize);
- ret = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse);
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ ret = v4l2_subdev_call(input->camera, pad, enum_frame_size,
+ act_sd_state, &fse);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
if (ret)
return ret;
@@ -803,18 +811,25 @@ static int atomisp_enum_frameintervals(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
struct v4l2_subdev_frame_interval_enum fie = {
- .code = atomisp_in_fmt_conv[0].code,
+ .code = atomisp_in_fmt_conv[0].code,
.index = fival->index,
.width = fival->width,
.height = fival->height,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
+ struct v4l2_subdev_state *act_sd_state;
int ret;
- ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
- pad, enum_frame_interval, NULL,
- &fie);
+ if (!input->camera)
+ return -EINVAL;
+
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ ret = v4l2_subdev_call(input->camera, pad, enum_frame_interval,
+ act_sd_state, &fie);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
if (ret)
return ret;
@@ -830,30 +845,25 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh,
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
+ struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
struct v4l2_subdev_mbus_code_enum code = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
const struct atomisp_format_bridge *format;
- struct v4l2_subdev *camera;
+ struct v4l2_subdev_state *act_sd_state;
unsigned int i, fi = 0;
- int rval;
+ int ret;
- camera = isp->inputs[asd->input_curr].camera;
- if(!camera) {
- dev_err(isp->dev, "%s(): camera is NULL, device is %s\n",
- __func__, vdev->name);
+ if (!input->camera)
return -EINVAL;
- }
- rval = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code);
- if (rval == -ENOIOCTLCMD) {
- dev_warn(isp->dev,
- "enum_mbus_code pad op not supported by %s. Please fix your sensor driver!\n",
- camera->name);
- }
-
- if (rval)
- return rval;
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+ ret = v4l2_subdev_call(input->camera, pad, enum_mbus_code,
+ act_sd_state, &code);
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
+ if (ret)
+ return ret;
for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
format = &atomisp_output_fmts[i];
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index c1c8501ec61f..547e1444ad97 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -862,6 +862,9 @@ static void atomisp_unregister_entities(struct atomisp_device *isp)
v4l2_device_unregister(&isp->v4l2_dev);
media_device_unregister(&isp->media_dev);
media_device_cleanup(&isp->media_dev);
+
+ for (i = 0; i < isp->input_cnt; i++)
+ __v4l2_subdev_state_free(isp->inputs[i].try_sd_state);
}
static int atomisp_register_entities(struct atomisp_device *isp)
@@ -933,32 +936,49 @@ v4l2_device_failed:
static void atomisp_init_sensor(struct atomisp_input_subdev *input)
{
+ static struct lock_class_key try_sd_state_key;
struct v4l2_subdev_mbus_code_enum mbus_code_enum = { };
struct v4l2_subdev_frame_size_enum fse = { };
- struct v4l2_subdev_state sd_state = {
- .pads = &input->pad_cfg,
- };
struct v4l2_subdev_selection sel = { };
+ struct v4l2_subdev_state *try_sd_state, *act_sd_state;
int i, err;
+ /*
+ * FIXME: Drivers are not supposed to use __v4l2_subdev_state_alloc()
+ * but atomisp needs this for try_fmt on its /dev/video# node since
+ * it emulates a normal v4l2 device there, passing through try_fmt /
+ * set_fmt to the sensor.
+ */
+ try_sd_state = __v4l2_subdev_state_alloc(input->camera,
+ "atomisp:try_sd_state->lock", &try_sd_state_key);
+ if (IS_ERR(try_sd_state))
+ return;
+
+ input->try_sd_state = try_sd_state;
+
+ act_sd_state = v4l2_subdev_lock_and_get_active_state(input->camera);
+
mbus_code_enum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- err = v4l2_subdev_call(input->camera, pad, enum_mbus_code, NULL, &mbus_code_enum);
+ err = v4l2_subdev_call(input->camera, pad, enum_mbus_code,
+ act_sd_state, &mbus_code_enum);
if (!err)
input->code = mbus_code_enum.code;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_NATIVE_SIZE;
- err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel);
+ err = v4l2_subdev_call(input->camera, pad, get_selection,
+ act_sd_state, &sel);
if (err)
- return;
+ goto unlock_act_sd_state;
input->native_rect = sel.r;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_CROP_DEFAULT;
- err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel);
+ err = v4l2_subdev_call(input->camera, pad, get_selection,
+ act_sd_state, &sel);
if (err)
- return;
+ goto unlock_act_sd_state;
input->active_rect = sel.r;
@@ -973,7 +993,8 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
fse.code = input->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- err = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse);
+ err = v4l2_subdev_call(input->camera, pad, enum_frame_size,
+ act_sd_state, &fse);
if (err)
break;
@@ -989,22 +1010,26 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
* for padding, set the crop rect to cover the entire sensor instead
* of only the default active area.
*
- * Do this for both try and active formats since the try_crop rect in
- * pad_cfg may influence (clamp) future try_fmt calls with which == try.
+ * Do this for both try and active formats since the crop rect in
+ * try_sd_state may influence (clamp the size of) later calls with which == try.
*/
sel.which = V4L2_SUBDEV_FORMAT_TRY;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = input->native_rect;
- err = v4l2_subdev_call(input->camera, pad, set_selection, &sd_state, &sel);
+ v4l2_subdev_lock_state(input->try_sd_state);
+ err = v4l2_subdev_call(input->camera, pad, set_selection,
+ input->try_sd_state, &sel);
+ v4l2_subdev_unlock_state(input->try_sd_state);
if (err)
- return;
+ goto unlock_act_sd_state;
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = input->native_rect;
- err = v4l2_subdev_call(input->camera, pad, set_selection, NULL, &sel);
+ err = v4l2_subdev_call(input->camera, pad, set_selection,
+ act_sd_state, &sel);
if (err)
- return;
+ goto unlock_act_sd_state;
dev_info(input->camera->dev, "Supports crop native %dx%d active %dx%d binning %d\n",
input->native_rect.width, input->native_rect.height,
@@ -1012,6 +1037,10 @@ static void atomisp_init_sensor(struct atomisp_input_subdev *input)
input->binning_support);
input->crop_support = true;
+
+unlock_act_sd_state:
+ if (act_sd_state)
+ v4l2_subdev_unlock_state(act_sd_state);
}
int atomisp_register_device_nodes(struct atomisp_device *isp)
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b0b262de6480..283804b49e91 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1515,7 +1515,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_TXPOWER)
RFbSetPower(priv, priv->wCurrentRate,
- conf->chandef.chan->hw_value);
+ conf->chanreq.oper.chan->hw_value);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
dev_dbg(&priv->pcid->dev,
@@ -1684,6 +1684,10 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops vnt_mac_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = vnt_tx_80211,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = vnt_start,
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 2abae90f3f52..7bbed462f062 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -794,7 +794,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
vnt_set_bss_mode(priv);
if (changed & (BSS_CHANGED_TXPOWER | BSS_CHANGED_BANDWIDTH))
- vnt_rf_setpower(priv, conf->chandef.chan);
+ vnt_rf_setpower(priv, conf->chanreq.oper.chan);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
dev_dbg(&priv->usb->dev,
@@ -956,6 +956,10 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
static const struct ieee80211_ops vnt_mac_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = vnt_tx_80211,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = vnt_start,
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index a5f58988130a..c1fbcdd16182 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -759,6 +759,29 @@ static ssize_t emulate_tas_store(struct config_item *item,
return count;
}
+static int target_try_configure_unmap(struct se_device *dev,
+ const char *config_opt)
+{
+ if (!dev->transport->configure_unmap) {
+ pr_err("Generic Block Discard not supported\n");
+ return -ENOSYS;
+ }
+
+ if (!target_dev_configured(dev)) {
+ pr_err("Generic Block Discard setup for %s requires device to be configured\n",
+ config_opt);
+ return -ENODEV;
+ }
+
+ if (!dev->transport->configure_unmap(dev)) {
+ pr_err("Generic Block Discard setup for %s failed\n",
+ config_opt);
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
static ssize_t emulate_tpu_store(struct config_item *item,
const char *page, size_t count)
{
@@ -776,11 +799,9 @@ static ssize_t emulate_tpu_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- if (!dev->transport->configure_unmap ||
- !dev->transport->configure_unmap(dev)) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
- }
+ ret = target_try_configure_unmap(dev, "emulate_tpu");
+ if (ret)
+ return ret;
}
da->emulate_tpu = flag;
@@ -806,11 +827,9 @@ static ssize_t emulate_tpws_store(struct config_item *item,
* Discard supported is detected iblock_create_virtdevice().
*/
if (flag && !da->max_unmap_block_desc_count) {
- if (!dev->transport->configure_unmap ||
- !dev->transport->configure_unmap(dev)) {
- pr_err("Generic Block Discard not supported\n");
- return -ENOSYS;
- }
+ ret = target_try_configure_unmap(dev, "emulate_tpws");
+ if (ret)
+ return ret;
}
da->emulate_tpws = flag;
@@ -1022,12 +1041,9 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
* Discard supported is detected iblock_configure_device().
*/
if (flag && !da->max_unmap_block_desc_count) {
- if (!dev->transport->configure_unmap ||
- !dev->transport->configure_unmap(dev)) {
- pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set because max_unmap_block_desc_count is zero\n",
- da->da_dev);
- return -ENOSYS;
- }
+ ret = target_try_configure_unmap(dev, "unmap_zeroes_data");
+ if (ret)
+ return ret;
}
da->unmap_zeroes_data = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 8eb9eb7ce5df..7f6ca8177845 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -91,7 +91,7 @@ static int iblock_configure_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
struct block_device *bd;
struct blk_integrity *bi;
blk_mode_t mode = BLK_OPEN_READ;
@@ -117,14 +117,14 @@ static int iblock_configure_device(struct se_device *dev)
else
dev->dev_flags |= DF_READ_ONLY;
- bdev_handle = bdev_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
+ bdev_file = bdev_file_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
NULL);
- if (IS_ERR(bdev_handle)) {
- ret = PTR_ERR(bdev_handle);
+ if (IS_ERR(bdev_file)) {
+ ret = PTR_ERR(bdev_file);
goto out_free_bioset;
}
- ib_dev->ibd_bdev_handle = bdev_handle;
- ib_dev->ibd_bd = bd = bdev_handle->bdev;
+ ib_dev->ibd_bdev_file = bdev_file;
+ ib_dev->ibd_bd = bd = file_bdev(bdev_file);
q = bdev_get_queue(bd);
@@ -180,7 +180,7 @@ static int iblock_configure_device(struct se_device *dev)
return 0;
out_blkdev_put:
- bdev_release(ib_dev->ibd_bdev_handle);
+ fput(ib_dev->ibd_bdev_file);
out_free_bioset:
bioset_exit(&ib_dev->ibd_bio_set);
out:
@@ -205,8 +205,8 @@ static void iblock_destroy_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- if (ib_dev->ibd_bdev_handle)
- bdev_release(ib_dev->ibd_bdev_handle);
+ if (ib_dev->ibd_bdev_file)
+ fput(ib_dev->ibd_bdev_file);
bioset_exit(&ib_dev->ibd_bio_set);
}
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 683f9a55945b..91f6f4280666 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -32,7 +32,7 @@ struct iblock_dev {
u32 ibd_flags;
struct bio_set ibd_bio_set;
struct block_device *ibd_bd;
- struct bdev_handle *ibd_bdev_handle;
+ struct file *ibd_bdev_file;
bool ibd_readonly;
struct iblock_dev_plug *ibd_plug;
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 41b7489d37ce..f98ebb18666b 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -352,7 +352,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct Scsi_Host *sh = sd->host;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
int ret;
if (scsi_device_get(sd)) {
@@ -366,18 +366,18 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
* Claim exclusive struct block_device access to struct scsi_device
* for TYPE_DISK and TYPE_ZBC using supplied udev_path
*/
- bdev_handle = bdev_open_by_path(dev->udev_path,
+ bdev_file = bdev_file_open_by_path(dev->udev_path,
BLK_OPEN_WRITE | BLK_OPEN_READ, pdv, NULL);
- if (IS_ERR(bdev_handle)) {
+ if (IS_ERR(bdev_file)) {
pr_err("pSCSI: bdev_open_by_path() failed\n");
scsi_device_put(sd);
- return PTR_ERR(bdev_handle);
+ return PTR_ERR(bdev_file);
}
- pdv->pdv_bdev_handle = bdev_handle;
+ pdv->pdv_bdev_file = bdev_file;
ret = pscsi_add_device_to_list(dev, sd);
if (ret) {
- bdev_release(bdev_handle);
+ fput(bdev_file);
scsi_device_put(sd);
return ret;
}
@@ -564,9 +564,9 @@ static void pscsi_destroy_device(struct se_device *dev)
* from pscsi_create_type_disk()
*/
if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
- pdv->pdv_bdev_handle) {
- bdev_release(pdv->pdv_bdev_handle);
- pdv->pdv_bdev_handle = NULL;
+ pdv->pdv_bdev_file) {
+ fput(pdv->pdv_bdev_file);
+ pdv->pdv_bdev_file = NULL;
}
/*
* For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
@@ -907,12 +907,15 @@ new_bio:
return 0;
fail:
- if (bio)
- bio_put(bio);
+ if (bio) {
+ bio_uninit(bio);
+ kfree(bio);
+ }
while (req->bio) {
bio = req->bio;
req->bio = bio->bi_next;
- bio_put(bio);
+ bio_uninit(bio);
+ kfree(bio);
}
req->biotail = NULL;
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -994,8 +997,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
- if (pdv->pdv_bdev_handle)
- return bdev_nr_sectors(pdv->pdv_bdev_handle->bdev);
+ if (pdv->pdv_bdev_file)
+ return bdev_nr_sectors(file_bdev(pdv->pdv_bdev_file));
return 0;
}
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index b0a3ef136592..9acaa21e4c78 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -37,7 +37,7 @@ struct pscsi_dev_virt {
int pdv_channel_id;
int pdv_target_id;
int pdv_lun_id;
- struct bdev_handle *pdv_bdev_handle;
+ struct file *pdv_bdev_file;
struct scsi_device *pdv_sd;
struct Scsi_Host *pdv_lld_host;
} ____cacheline_aligned;
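
Both target backends above move from struct bdev_handle to the file-based block device API: bdev_file_open_by_path() returns a struct file, file_bdev() recovers the underlying block_device, and fput() releases it. A minimal sketch of that open/use/close sequence, with an illustrative path argument:

	#include <linux/blkdev.h>
	#include <linux/err.h>
	#include <linux/file.h>

	static int example_open_bdev(const char *path)
	{
		struct file *bdev_file;
		struct block_device *bd;

		bdev_file = bdev_file_open_by_path(path,
						   BLK_OPEN_READ | BLK_OPEN_WRITE,
						   NULL, NULL);
		if (IS_ERR(bdev_file))
			return PTR_ERR(bdev_file);

		bd = file_bdev(bdev_file);
		pr_info("%pg: %llu sectors\n", bd, (u64)bdev_nr_sectors(bd));

		fput(bdev_file);	/* drops the reference, releases the bdev */
		return 0;
	}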
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index 4b1092127694..1892e49a8e6a 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -90,13 +90,14 @@ static int optee_register_device(const uuid_t *device_uuid, u32 func)
if (rc) {
pr_err("device registration failed, err: %d\n", rc);
put_device(&optee_device->dev);
+ return rc;
}
if (func == PTA_CMD_GET_DEVICES_SUPP)
device_create_file(&optee_device->dev,
&dev_attr_need_supplicant);
- return rc;
+ return 0;
}
static int __optee_enumerate_devices(u32 func)
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 792d6fae4354..e59c20d74b36 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -1226,7 +1226,7 @@ static int tee_client_device_uevent(const struct device *dev,
return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
}
-struct bus_type tee_bus_type = {
+const struct bus_type tee_bus_type = {
.name = "tee",
.match = tee_client_device_match,
.uevent = tee_client_device_uevent,
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
index 3b04c6ec4fca..40d664a66cdc 100644
--- a/drivers/thermal/intel/intel_hfi.c
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -607,7 +607,7 @@ void __init intel_hfi_init(void)
/* There is one HFI instance per die/package. */
max_hfi_instances = topology_max_packages() *
- topology_max_die_per_package();
+ topology_max_dies_per_package();
/*
* This allocation may fail. CPU hotplug callbacks must check
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index bc6eb0dd66a4..4ba649370aa1 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -587,7 +587,7 @@ static int powerclamp_idle_injection_register(void)
poll_pkg_cstate_enable = false;
if (cpumask_equal(cpu_present_mask, idle_injection_cpu_mask)) {
ii_dev = idle_inject_register_full(idle_injection_cpu_mask, idle_inject_update);
- if (topology_max_packages() == 1 && topology_max_die_per_package() == 1)
+ if (topology_max_packages() == 1 && topology_max_dies_per_package() == 1)
poll_pkg_cstate_enable = true;
} else {
ii_dev = idle_inject_register(idle_injection_cpu_mask);
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 11a7f8108bbb..f6c2e5964b8f 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -494,7 +494,7 @@ static int __init pkg_temp_thermal_init(void)
if (!x86_match_cpu(pkg_temp_thermal_ids))
return -ENODEV;
- max_id = topology_max_packages() * topology_max_die_per_package();
+ max_id = topology_max_packages() * topology_max_dies_per_package();
zones = kcalloc(max_id, sizeof(struct zone_device *),
GFP_KERNEL);
if (!zones)
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 900114ba4371..fad40c4bc710 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1249,6 +1249,9 @@ int tb_port_update_credits(struct tb_port *port)
ret = tb_port_do_update_credits(port);
if (ret)
return ret;
+
+ if (!port->dual_link_port)
+ return 0;
return tb_port_do_update_credits(port->dual_link_port);
}
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 87e4795275fe..6f798f6a2b84 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -203,7 +203,7 @@ struct tb_regs_switch_header {
#define ROUTER_CS_5_WOP BIT(1)
#define ROUTER_CS_5_WOU BIT(2)
#define ROUTER_CS_5_WOD BIT(3)
-#define ROUTER_CS_5_C3S BIT(23)
+#define ROUTER_CS_5_CNS BIT(23)
#define ROUTER_CS_5_PTO BIT(24)
#define ROUTER_CS_5_UTO BIT(25)
#define ROUTER_CS_5_HCO BIT(26)
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index f8f0d24ff6e4..1515eff8cc3e 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -290,7 +290,7 @@ int usb4_switch_setup(struct tb_switch *sw)
}
/* TBT3 supported by the CM */
- val |= ROUTER_CS_5_C3S;
+ val &= ~ROUTER_CS_5_CNS;
return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 6e05c5c7bca1..c2a4e88b328f 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -108,13 +108,15 @@ config HVC_DCC_SERIALIZE_SMP
config HVC_RISCV_SBI
bool "RISC-V SBI console support"
- depends on RISCV_SBI
+ depends on RISCV_SBI && NONPORTABLE
select HVC_DRIVER
help
This enables support for console output via RISC-V SBI calls, which
- is normally used only during boot to output printk.
+ is normally used only during boot to output printk. This driver
+ conflicts with real console drivers and should not be enabled on
+ systems that directly access the console.
- If you don't know what do to here, say Y.
+ If you don't know what to do here, say N.
config HVCS
tristate "IBM Hypervisor Virtual Console Server support"
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 2d1f350a4bea..c1d43f040c43 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -357,9 +357,9 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
long rate;
int ret;
- clk_disable_unprepare(d->clk);
rate = clk_round_rate(d->clk, newrate);
- if (rate > 0) {
+ if (rate > 0 && p->uartclk != rate) {
+ clk_disable_unprepare(d->clk);
/*
* Note that any clock-notifer worker will block in
* serial8250_update_uartclk() until we are done.
@@ -367,8 +367,8 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
ret = clk_set_rate(d->clk, newrate);
if (!ret)
p->uartclk = rate;
+ clk_prepare_enable(d->clk);
}
- clk_prepare_enable(d->clk);
dw8250_do_set_termios(p, termios, old);
}
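
The reordering above keeps the UART clock running unless the rounded rate actually differs from the current uartclk, so the common no-change path no longer gates and re-enables the clock. The guarded reprogram in isolation, as a hedged sketch with illustrative names:

	#include <linux/clk.h>

	static void example_update_uartclk(struct clk *clk, unsigned int *uartclk,
					   unsigned long newrate)
	{
		long rate = clk_round_rate(clk, newrate);

		if (rate <= 0 || *uartclk == rate)
			return;		/* nothing to do, keep the clock running */

		clk_disable_unprepare(clk);
		if (!clk_set_rate(clk, newrate))
			*uartclk = rate;
		clk_prepare_enable(clk);
	}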
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
index cd258922bd78..2dda737b1660 100644
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -302,7 +302,7 @@ static void pci1xxxx_process_read_data(struct uart_port *port,
* to read, the data is received one byte at a time.
*/
while (valid_burst_count--) {
- if (*buff_index >= (RX_BUF_SIZE - UART_BURST_SIZE))
+ if (*buff_index > (RX_BUF_SIZE - UART_BURST_SIZE))
break;
burst_buf = (u32 *)&rx_buff[*buff_index];
*burst_buf = readl(port->membase + UART_RX_BURST_FIFO);
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index fccec1698a54..cf2c890a560f 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1339,11 +1339,41 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap)
}
}
+static void pl011_rs485_tx_start(struct uart_amba_port *uap)
+{
+ struct uart_port *port = &uap->port;
+ u32 cr;
+
+ /* Enable transmitter */
+ cr = pl011_read(uap, REG_CR);
+ cr |= UART011_CR_TXE;
+
+ /* Disable receiver if half-duplex */
+ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+ cr &= ~UART011_CR_RXE;
+
+ if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
+ cr &= ~UART011_CR_RTS;
+ else
+ cr |= UART011_CR_RTS;
+
+ pl011_write(cr, uap, REG_CR);
+
+ if (port->rs485.delay_rts_before_send)
+ mdelay(port->rs485.delay_rts_before_send);
+
+ uap->rs485_tx_started = true;
+}
+
static void pl011_start_tx(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
+ if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+ !uap->rs485_tx_started)
+ pl011_rs485_tx_start(uap);
+
if (!pl011_dma_tx_start(uap))
pl011_start_tx_pio(uap);
}
@@ -1424,42 +1454,12 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
return true;
}
-static void pl011_rs485_tx_start(struct uart_amba_port *uap)
-{
- struct uart_port *port = &uap->port;
- u32 cr;
-
- /* Enable transmitter */
- cr = pl011_read(uap, REG_CR);
- cr |= UART011_CR_TXE;
-
- /* Disable receiver if half-duplex */
- if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
- cr &= ~UART011_CR_RXE;
-
- if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
- cr &= ~UART011_CR_RTS;
- else
- cr |= UART011_CR_RTS;
-
- pl011_write(cr, uap, REG_CR);
-
- if (port->rs485.delay_rts_before_send)
- mdelay(port->rs485.delay_rts_before_send);
-
- uap->rs485_tx_started = true;
-}
-
/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
struct circ_buf *xmit = &uap->port.state->xmit;
int count = uap->fifosize >> 1;
- if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
- !uap->rs485_tx_started)
- pl011_rs485_tx_start(uap);
-
if (uap->port.x_char) {
if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
return true;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 5ddf110aedbe..bbcbc91482af 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2345,9 +2345,12 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
lpuart32_write(&sport->port, bd, UARTBAUD);
lpuart32_serial_setbrg(sport, baud);
- lpuart32_write(&sport->port, modem, UARTMODIR);
- lpuart32_write(&sport->port, ctrl, UARTCTRL);
+ /* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */
+ lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
/* restore control register */
+ lpuart32_write(&sport->port, ctrl, UARTCTRL);
+ /* re-enable the CTS if needed */
+ lpuart32_write(&sport->port, modem, UARTMODIR);
if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
sport->is_cs7 = true;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 4aa72d5aeafb..e14813250616 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -462,8 +462,7 @@ static void imx_uart_stop_tx(struct uart_port *port)
}
}
-/* called with port.lock taken and irqs off */
-static void imx_uart_stop_rx(struct uart_port *port)
+static void imx_uart_stop_rx_with_loopback_ctrl(struct uart_port *port, bool loopback)
{
struct imx_port *sport = (struct imx_port *)port;
u32 ucr1, ucr2, ucr4, uts;
@@ -485,7 +484,7 @@ static void imx_uart_stop_rx(struct uart_port *port)
/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
if (port->rs485.flags & SER_RS485_ENABLED &&
port->rs485.flags & SER_RS485_RTS_ON_SEND &&
- sport->have_rtscts && !sport->have_rtsgpio) {
+ sport->have_rtscts && !sport->have_rtsgpio && loopback) {
uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
uts |= UTS_LOOP;
imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
@@ -498,6 +497,16 @@ static void imx_uart_stop_rx(struct uart_port *port)
}
/* called with port.lock taken and irqs off */
+static void imx_uart_stop_rx(struct uart_port *port)
+{
+ /*
+ * Stop RX and enable loopback in order to make sure the RS485
+ * bus is not blocked. See comment in imx_uart_probe().
+ */
+ imx_uart_stop_rx_with_loopback_ctrl(port, true);
+}
+
+/* called with port.lock taken and irqs off */
static void imx_uart_enable_ms(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
@@ -682,9 +691,14 @@ static void imx_uart_start_tx(struct uart_port *port)
imx_uart_rts_inactive(sport, &ucr2);
imx_uart_writel(sport, ucr2, UCR2);
+ /*
+ * Since we are about to transmit, we cannot stop RX
+ * with loopback enabled, because that would just loop
+ * our transmitted data back to RX.
+ */
if (!(port->rs485.flags & SER_RS485_RX_DURING_TX) &&
!port->rs485_rx_during_tx_gpio)
- imx_uart_stop_rx(port);
+ imx_uart_stop_rx_with_loopback_ctrl(port, false);
sport->tx_state = WAIT_AFTER_RTS;
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 3ec725555bcc..4749331fe618 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -605,13 +605,16 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
return;
}
- pending = uart_port_tx(&s->port, ch,
+ pending = uart_port_tx_flags(&s->port, ch, UART_TX_NOSTOP,
!(mxs_read(s, REG_STAT) & AUART_STAT_TXFF),
mxs_write(ch, s, REG_DATA));
if (pending)
mxs_set(AUART_INTR_TXIEN, s, REG_INTR);
else
mxs_clr(AUART_INTR_TXIEN, s, REG_INTR);
+
+ if (uart_tx_stopped(&s->port))
+ mxs_auart_stop_tx(&s->port);
}
static void mxs_auart_rx_char(struct mxs_auart_port *s)
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index e63a8fbe63bd..99e08737f293 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -851,19 +851,21 @@ static void qcom_geni_serial_stop_tx(struct uart_port *uport)
}
static void qcom_geni_serial_send_chunk_fifo(struct uart_port *uport,
- unsigned int remaining)
+ unsigned int chunk)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
struct circ_buf *xmit = &uport->state->xmit;
- unsigned int tx_bytes;
+ unsigned int tx_bytes, c, remaining = chunk;
u8 buf[BYTES_PER_FIFO_WORD];
while (remaining) {
memset(buf, 0, sizeof(buf));
tx_bytes = min(remaining, BYTES_PER_FIFO_WORD);
- memcpy(buf, &xmit->buf[xmit->tail], tx_bytes);
- uart_xmit_advance(uport, tx_bytes);
+ for (c = 0; c < tx_bytes; c++) {
+ buf[c] = xmit->buf[xmit->tail];
+ uart_xmit_advance(uport, 1);
+ }
iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
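
The byte-at-a-time loop above matters because a FIFO word may straddle the wrap point of the circular transmit buffer: uart_xmit_advance() moves the tail one byte at a time, whereas the old single memcpy() could read past the wrap. The idea in isolation, as a hedged sketch (names and the power-of-two size are assumptions, matching UART_XMIT_SIZE semantics):

	#include <linux/types.h>

	static void copy_wrap_safe(const u8 *buf, unsigned int *tail,
				   unsigned int size, u8 *dst, unsigned int len)
	{
		unsigned int i;

		for (i = 0; i < len; i++) {
			dst[i] = buf[*tail];
			/* size is assumed to be a power of two */
			*tail = (*tail + 1) & (size - 1);
		}
	}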
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 88975a4df306..72b6f4f326e2 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -46,8 +46,31 @@ out:
return 0;
}
+static int serial_port_runtime_suspend(struct device *dev)
+{
+ struct serial_port_device *port_dev = to_serial_base_port_device(dev);
+ struct uart_port *port = port_dev->port;
+ unsigned long flags;
+ bool busy;
+
+ if (port->flags & UPF_DEAD)
+ return 0;
+
+ uart_port_lock_irqsave(port, &flags);
+ busy = __serial_port_busy(port);
+ if (busy)
+ port->ops->start_tx(port);
+ uart_port_unlock_irqrestore(port, flags);
+
+ if (busy)
+ pm_runtime_mark_last_busy(dev);
+
+ return busy ? -EBUSY : 0;
+}
+
static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
- NULL, serial_port_runtime_resume, NULL);
+ serial_port_runtime_suspend,
+ serial_port_runtime_resume, NULL);
static int serial_port_probe(struct device *dev)
{
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 794b77512740..693e932d6feb 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -251,7 +251,9 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
writel_relaxed(cr3, port->membase + ofs->cr3);
writel_relaxed(cr1, port->membase + ofs->cr1);
- rs485conf->flags |= SER_RS485_RX_DURING_TX;
+ if (!port->rs485_rx_during_tx_gpio)
+ rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
} else {
stm32_usart_clr_bits(port, ofs->cr3,
USART_CR3_DEM | USART_CR3_DEP);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 156efda7c80d..38a765eadbe2 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -381,7 +381,7 @@ static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr)
u32 *ln = vc->vc_uni_lines[vc->state.y];
unsigned int x = vc->state.x, cols = vc->vc_cols;
- memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
+ memmove(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
memset32(&ln[cols - nr], ' ', nr);
}
}
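
memcpy() has undefined behavior when source and destination overlap, which is exactly the case here: deleting nr cells shifts the remainder of the line left over itself. memmove() is specified to handle overlap. A minimal illustration of the fixed operation, with illustrative names:

	#include <linux/string.h>
	#include <linux/types.h>

	static void example_delete_cells(u32 *ln, unsigned int x, unsigned int nr,
					 unsigned int cols)
	{
		/* source [x + nr, cols) overlaps destination [x, cols - nr) */
		memmove(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
		memset32(&ln[cols - nr], ' ', nr);	/* blank the vacated tail */
	}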
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 029d017fc1b6..eac7fff6992d 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1469,7 +1469,7 @@ static int ufshcd_devfreq_target(struct device *dev,
int ret = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
ktime_t start;
- bool scale_up, sched_clk_scaling_suspend_work = false;
+ bool scale_up = false, sched_clk_scaling_suspend_work = false;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
unsigned long irq_flags;
@@ -3057,7 +3057,7 @@ bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
*/
static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
{
- u32 mask = 1U << task_tag;
+ u32 mask;
unsigned long flags;
int err;
@@ -3075,6 +3075,8 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
return 0;
}
+ mask = 1U << task_tag;
+
/* clear outstanding transaction before retry */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utrl_clear(hba, mask);
@@ -6352,7 +6354,6 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
ufshcd_hold(hba);
if (!ufshcd_is_clkgating_allowed(hba))
ufshcd_setup_clocks(hba, true);
- ufshcd_release(hba);
pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
ufshcd_vops_resume(hba, pm_op);
} else {
@@ -10592,7 +10593,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
if (err < 0)
goto out_remove_scsi_host;
- hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
+ hba->tmf_queue = blk_mq_alloc_queue(&hba->tmf_tag_set, NULL, NULL);
if (IS_ERR(hba->tmf_queue)) {
err = PTR_ERR(hba->tmf_queue);
goto free_tmf_tag_set;
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 39eef470f8fa..8fde5204e88b 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1712,8 +1712,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
* 2. Poll queues do not need ESI.
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
- ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
- ufs_qcom_write_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
+ ufs_qcom_write_msi_msg);
if (ret) {
dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
return ret;
@@ -1742,7 +1742,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
devm_free_irq(hba->dev, desc->irq, hba);
}
msi_unlock_descs(hba->dev);
- platform_msi_domain_free_irqs(hba->dev);
+ platform_device_msi_free_irqs_all(hba->dev);
} else {
if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
host->hw_ver.step == 0)
@@ -1818,7 +1818,7 @@ static void ufs_qcom_remove(struct platform_device *pdev)
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
- platform_msi_domain_free_irqs(hba->dev);
+ platform_device_msi_free_irqs_all(hba->dev);
}
static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index aeca902ab6cc..fd1beb10bba7 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -828,7 +828,11 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
return;
}
- if (request->complete) {
+ /*
+ * The zlp request is appended by the driver, so there is no need to call
+ * usb_gadget_giveback_request() to notify the gadget composite driver.
+ */
+ if (request->complete && request->buf != priv_dev->zlp_buf) {
spin_unlock(&priv_dev->lock);
usb_gadget_giveback_request(&priv_ep->endpoint,
request);
@@ -2540,11 +2544,11 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep)
while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
+ list_del_init(&priv_req->list);
kfree(priv_req->request.buf);
cdns3_gadget_ep_free_request(&priv_ep->endpoint,
&priv_req->request);
- list_del_init(&priv_req->list);
--priv_ep->wa2_counter;
}
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 33548771a0d3..465e9267b49c 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -395,7 +395,6 @@ pm_put:
return ret;
}
-
/**
* cdns_wakeup_irq - interrupt handler for wakeup events
* @irq: irq number for cdns3/cdnsp core device
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 04b6d12f2b9a..ee917f1b091c 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -156,7 +156,8 @@ bool cdns_is_device(struct cdns *cdns)
*/
static void cdns_otg_disable_irq(struct cdns *cdns)
{
- writel(0, &cdns->otg_irq_regs->ien);
+ if (cdns->version)
+ writel(0, &cdns->otg_irq_regs->ien);
}
/**
@@ -422,15 +423,20 @@ int cdns_drd_init(struct cdns *cdns)
cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;
- if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
+ state = readl(&cdns->otg_cdnsp_regs->did);
+
+ if (OTG_CDNSP_CHECK_DID(state)) {
cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
&cdns->otg_cdnsp_regs->ien;
cdns->version = CDNSP_CONTROLLER_V2;
- } else {
+ } else if (OTG_CDNS3_CHECK_DID(state)) {
cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
&cdns->otg_v1_regs->ien;
writel(1, &cdns->otg_v1_regs->simulate);
cdns->version = CDNS3_CONTROLLER_V1;
+ } else {
+ dev_err(cdns->dev, "unsupported DID=0x%08x\n", state);
+ return -EINVAL;
}
dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
@@ -483,7 +489,6 @@ int cdns_drd_exit(struct cdns *cdns)
return 0;
}
-
/* Indicate the cdns3 core was power lost before */
bool cdns_power_is_lost(struct cdns *cdns)
{
diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
index cbdf94f73ed9..d72370c321d3 100644
--- a/drivers/usb/cdns3/drd.h
+++ b/drivers/usb/cdns3/drd.h
@@ -79,7 +79,11 @@ struct cdnsp_otg_regs {
__le32 susp_timing_ctrl;
};
-#define OTG_CDNSP_DID 0x0004034E
+/* CDNSP driver supports 0x000403xx Cadence USB controller family. */
+#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300)
+
+/* CDNS3 driver supports 0x000402xx Cadence USB controller family. */
+#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200)
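
Masking with GENMASK(31, 8) compares only the upper 24 bits, so a single check covers a whole controller family regardless of the revision byte. A small illustration (the function name is ours, not the driver's):

	#include <linux/bits.h>
	#include <linux/types.h>

	static bool did_in_family(u32 did, u32 family)
	{
		return (did & GENMASK(31, 8)) == family;
	}

	/* did_in_family(0x0004034E, 0x00040300) -> true: a 0x000403xx CDNSP part */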
/*
* Common registers interface for both CDNS3 and CDNSP version of DRD.
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index 6164fc4c96a4..ceca4d839dfd 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -18,6 +18,11 @@
#include "../host/xhci.h"
#include "../host/xhci-plat.h"
+/*
+ * The XECP_PORT_CAP_REG and XECP_AUX_CTRL_REG1 registers exist only
+ * in the Cadence USB3 dual-role controller, so they can't be used
+ * with the Cadence CDNSP dual-role controller.
+ */
#define XECP_PORT_CAP_REG 0x8000
#define XECP_AUX_CTRL_REG1 0x8120
@@ -57,6 +62,8 @@ static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
.resume_quirk = xhci_cdns3_resume_quirk,
};
+static const struct xhci_plat_priv xhci_plat_cdnsp_xhci;
+
static int __cdns_host_init(struct cdns *cdns)
{
struct platform_device *xhci;
@@ -81,8 +88,13 @@ static int __cdns_host_init(struct cdns *cdns)
goto err1;
}
- cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
- sizeof(struct xhci_plat_priv), GFP_KERNEL);
+ if (cdns->version < CDNSP_CONTROLLER_V2)
+ cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci,
+ sizeof(struct xhci_plat_priv), GFP_KERNEL);
+ else
+ cdns->xhci_plat_data = kmemdup(&xhci_plat_cdnsp_xhci,
+ sizeof(struct xhci_plat_priv), GFP_KERNEL);
+
if (!cdns->xhci_plat_data) {
ret = -ENOMEM;
goto err1;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 12b6dfeaf658..edf74458474a 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1664,9 +1664,10 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
usb_put_urb(urb);
}
-static void usb_giveback_urb_bh(struct tasklet_struct *t)
+static void usb_giveback_urb_bh(struct work_struct *work)
{
- struct giveback_urb_bh *bh = from_tasklet(bh, t, bh);
+ struct giveback_urb_bh *bh =
+ container_of(work, struct giveback_urb_bh, bh);
struct list_head local_list;
spin_lock_irq(&bh->lock);
@@ -1691,9 +1692,9 @@ static void usb_giveback_urb_bh(struct tasklet_struct *t)
spin_lock_irq(&bh->lock);
if (!list_empty(&bh->head)) {
if (bh->high_prio)
- tasklet_hi_schedule(&bh->bh);
+ queue_work(system_bh_highpri_wq, &bh->bh);
else
- tasklet_schedule(&bh->bh);
+ queue_work(system_bh_wq, &bh->bh);
}
bh->running = false;
spin_unlock_irq(&bh->lock);
@@ -1706,7 +1707,7 @@ static void usb_giveback_urb_bh(struct tasklet_struct *t)
* @status: completion status code for the URB.
*
* Context: atomic. The completion callback is invoked in caller's context.
- * For HCDs with HCD_BH flag set, the completion callback is invoked in tasklet
+ * For HCDs with HCD_BH flag set, the completion callback is invoked in BH
* context (except for URBs submitted to the root hub which always complete in
* caller's context).
*
@@ -1725,7 +1726,7 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
struct giveback_urb_bh *bh;
bool running;
- /* pass status to tasklet via unlinked */
+ /* pass status to BH via unlinked */
if (likely(!urb->unlinked))
urb->unlinked = status;
@@ -1747,9 +1748,9 @@ void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
if (running)
;
else if (bh->high_prio)
- tasklet_hi_schedule(&bh->bh);
+ queue_work(system_bh_highpri_wq, &bh->bh);
else
- tasklet_schedule(&bh->bh);
+ queue_work(system_bh_wq, &bh->bh);
}
EXPORT_SYMBOL_GPL(usb_hcd_giveback_urb);
@@ -2540,7 +2541,7 @@ static void init_giveback_urb_bh(struct giveback_urb_bh *bh)
spin_lock_init(&bh->lock);
INIT_LIST_HEAD(&bh->head);
- tasklet_setup(&bh->bh, usb_giveback_urb_bh);
+ INIT_WORK(&bh->bh, usb_giveback_urb_bh);
}
struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
@@ -2926,7 +2927,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
&& device_can_wakeup(&hcd->self.root_hub->dev))
dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
- /* initialize tasklets */
+ /* initialize BHs */
init_giveback_urb_bh(&hcd->high_prio_bh);
hcd->high_prio_bh.high_prio = true;
init_giveback_urb_bh(&hcd->low_prio_bh);
@@ -3036,7 +3037,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
mutex_unlock(&usb_bus_idr_lock);
/*
- * tasklet_kill() isn't needed here because:
+ * flush_work() isn't needed here because:
* - driver's disconnect() called from usb_disconnect() should
* make sure its URBs are completed during the disconnect()
* callback
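
The hcd.c hunks follow the generic tasklet-to-BH-workqueue conversion: work items queued on system_bh_wq / system_bh_highpri_wq still execute in softirq (bottom-half) context, so the existing locking is unchanged. A minimal sketch of the pattern with hypothetical demo_* names:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct demo_bh {
		spinlock_t lock;
		struct list_head head;
		struct work_struct bh;		/* was: struct tasklet_struct bh */
	};

	static void demo_bh_func(struct work_struct *work)
	{
		/* was: struct demo_bh *bh = from_tasklet(bh, t, bh); */
		struct demo_bh *bh = container_of(work, struct demo_bh, bh);

		/* drain bh->head under bh->lock, exactly as before */
	}

	static void demo_bh_init(struct demo_bh *bh)
	{
		spin_lock_init(&bh->lock);
		INIT_LIST_HEAD(&bh->head);
		INIT_WORK(&bh->bh, demo_bh_func);	/* was: tasklet_setup() */
	}

	static void demo_bh_kick(struct demo_bh *bh, bool high_prio)
	{
		/* was: tasklet_hi_schedule() / tasklet_schedule() */
		queue_work(high_prio ? system_bh_highpri_wq : system_bh_wq,
			   &bh->bh);
	}

The closest analogue of tasklet_kill() is flush_work(); the rewritten comment above explains why neither is required on the usb_remove_hcd() path.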
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index c628c1abc907..4d63496f98b6 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -573,7 +573,7 @@ static int match_location(struct usb_device *peer_hdev, void *p)
struct usb_hub *peer_hub = usb_hub_to_struct_hub(peer_hdev);
struct usb_device *hdev = to_usb_device(port_dev->dev.parent->parent);
- if (!peer_hub)
+ if (!peer_hub || port_dev->connect_type == USB_PORT_NOT_USED)
return 0;
hcd = bus_to_hcd(hdev->bus);
@@ -584,7 +584,8 @@ static int match_location(struct usb_device *peer_hdev, void *p)
for (port1 = 1; port1 <= peer_hdev->maxchild; port1++) {
peer = peer_hub->ports[port1 - 1];
- if (peer && peer->location == port_dev->location) {
+ if (peer && peer->connect_type != USB_PORT_NOT_USED &&
+ peer->location == port_dev->location) {
link_peers_report(port_dev, peer);
return 1; /* done */
}
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index e3eea965e57b..e120611a5174 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -376,7 +376,6 @@
/* Global HWPARAMS4 Register */
#define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n) (((n) & (0x0f << 13)) >> 13)
#define DWC3_MAX_HIBER_SCRATCHBUFS 15
-#define DWC3_EXT_BUFF_CONTROL BIT(21)
/* Global HWPARAMS6 Register */
#define DWC3_GHWPARAMS6_BCSUPPORT BIT(14)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 564976b3e2b9..28f49400f3e8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -673,12 +673,6 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
}
- if (dep->endpoint.fifo_mode) {
- if (!(dwc->hwparams.hwparams4 & DWC3_EXT_BUFF_CONTROL))
- return -EINVAL;
- params.param1 |= DWC3_DEPCFG_EBC_HWO_NOWB | DWC3_DEPCFG_USE_EBC;
- }
-
return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}
@@ -2656,6 +2650,11 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
int ret;
spin_lock_irqsave(&dwc->lock, flags);
+ if (!dwc->pullups_connected) {
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ return 0;
+ }
+
dwc->connected = false;
/*
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index fd7a4e94397e..55a56cf67d73 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -26,8 +26,6 @@ struct dwc3;
#define DWC3_DEPCFG_XFER_NOT_READY_EN BIT(10)
#define DWC3_DEPCFG_FIFO_ERROR_EN BIT(11)
#define DWC3_DEPCFG_STREAM_EVENT_EN BIT(13)
-#define DWC3_DEPCFG_EBC_HWO_NOWB BIT(14)
-#define DWC3_DEPCFG_USE_EBC BIT(15)
#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
#define DWC3_DEPCFG_STREAM_CAPABLE BIT(24)
#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index ca5d5f564998..28f4e6552e84 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1338,7 +1338,15 @@ parse_ntb:
"Parsed NTB with %d frames\n", dgram_counter);
to_process -= block_len;
- if (to_process != 0) {
+
+ /*
+ * The Windows NCM driver avoids USB ZLPs by appending a 1-byte
+ * zero pad when needed.
+ */
+ if (to_process == 1 &&
+ (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
+ to_process--;
+ } else if ((to_process > 0) && (block_len != 0)) {
ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
goto parse_ntb;
}
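
The new branch encodes a Windows interoperability quirk: rather than ending a max-sized NTB transfer with a zero-length packet, Windows appends a single 0x00 byte, which must be swallowed instead of parsed as another NTB. A hedged restatement as a standalone predicate (hypothetical helper name):

	/* true if the lone remaining byte is Windows' ZLP-avoidance pad */
	static bool ncm_is_windows_zlp_pad(const unsigned char *ntb_ptr,
					   unsigned int block_len,
					   int to_process)
	{
		return to_process == 1 && ntb_ptr[block_len] == 0x00;
	}

The added block_len != 0 condition also matters on its own: with a zero block length, the old unconditional goto parse_ntb would re-parse the same bytes indefinitely.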
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 10c5d7f726a1..f90eeecf27de 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2036,7 +2036,8 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
static inline int machine_without_vbus_sense(void)
{
- return machine_is_omap_osk() || machine_is_sx1();
+ return machine_is_omap_osk() || machine_is_omap_palmte() ||
+ machine_is_sx1();
}
static int omap_udc_start(struct usb_gadget *g,
diff --git a/drivers/usb/host/uhci-grlib.c b/drivers/usb/host/uhci-grlib.c
index ac3fc5970315..cfebb833668e 100644
--- a/drivers/usb/host/uhci-grlib.c
+++ b/drivers/usb/host/uhci-grlib.c
@@ -22,6 +22,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
static int uhci_grlib_init(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f0d8a607ff21..4f64b814d4aa 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -326,7 +326,13 @@ static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhc
/* how many trbs will be queued past the enqueue segment? */
trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);
- if (trbs_past_seg <= 0)
+ /*
+ * Consider expanding the ring already if num_trbs fills the current
+ * segment (i.e. trbs_past_seg == 0), not only when num_trbs goes into
+ * the next segment. This avoids confusing a full ring with the special
+ * empty ring case below.
+ */
+ if (trbs_past_seg < 0)
return 0;
/* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
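
For concreteness, xhci segments normally hold TRBS_PER_SEGMENT (256) entries with one slot reserved for the link TRB, so 255 are usable. A worked example under that assumption:

	/* 250 TRBs already used in the enqueue segment, 5 more to queue */
	trbs_past_seg = 250 + 5 - (256 - 1);	/* == 0: segment exactly full */

The old "trbs_past_seg <= 0" test treated this as "no expansion needed", but a segment filled to the brim is indistinguishable from the empty-ring state handled next, so the new "< 0" test expands the ring one TRB earlier instead.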
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 76862ba40f35..0e5e4cb74c87 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -2521,21 +2521,19 @@ static const struct hc_driver isp1760_hc_driver = {
int __init isp1760_init_kmem_once(void)
{
urb_listitem_cachep = kmem_cache_create("isp1760_urb_listitem",
- sizeof(struct urb_listitem), 0, SLAB_TEMPORARY |
- SLAB_MEM_SPREAD, NULL);
+ sizeof(struct urb_listitem), 0, SLAB_TEMPORARY, NULL);
if (!urb_listitem_cachep)
return -ENOMEM;
qtd_cachep = kmem_cache_create("isp1760_qtd",
- sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY |
- SLAB_MEM_SPREAD, NULL);
+ sizeof(struct isp1760_qtd), 0, SLAB_TEMPORARY, NULL);
if (!qtd_cachep)
goto destroy_urb_listitem;
qh_cachep = kmem_cache_create("isp1760_qh", sizeof(struct isp1760_qh),
- 0, SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
+ 0, SLAB_TEMPORARY, NULL);
if (!qh_cachep)
goto destroy_qtd;
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index ae41578bd014..70165dd86b5d 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -21,7 +21,9 @@ static const struct class role_class = {
struct usb_role_switch {
struct device dev;
struct mutex lock; /* device lock */
+ struct module *module; /* the module this device depends on */
enum usb_role role;
+ bool registered;
/* From descriptor */
struct device *usb2_port;
@@ -48,6 +50,9 @@ int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
if (IS_ERR_OR_NULL(sw))
return 0;
+ if (!sw->registered)
+ return -EOPNOTSUPP;
+
mutex_lock(&sw->lock);
ret = sw->set(sw, role);
@@ -73,7 +78,7 @@ enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
{
enum usb_role role;
- if (IS_ERR_OR_NULL(sw))
+ if (IS_ERR_OR_NULL(sw) || !sw->registered)
return USB_ROLE_NONE;
mutex_lock(&sw->lock);
@@ -135,7 +140,7 @@ struct usb_role_switch *usb_role_switch_get(struct device *dev)
usb_role_switch_match);
if (!IS_ERR_OR_NULL(sw))
- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+ WARN_ON(!try_module_get(sw->module));
return sw;
}
@@ -157,7 +162,7 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *fwnode)
sw = fwnode_connection_find_match(fwnode, "usb-role-switch",
NULL, usb_role_switch_match);
if (!IS_ERR_OR_NULL(sw))
- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+ WARN_ON(!try_module_get(sw->module));
return sw;
}
@@ -172,7 +177,7 @@ EXPORT_SYMBOL_GPL(fwnode_usb_role_switch_get);
void usb_role_switch_put(struct usb_role_switch *sw)
{
if (!IS_ERR_OR_NULL(sw)) {
- module_put(sw->dev.parent->driver->owner);
+ module_put(sw->module);
put_device(&sw->dev);
}
}
@@ -189,15 +194,18 @@ struct usb_role_switch *
usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
{
struct device *dev;
+ struct usb_role_switch *sw = NULL;
if (!fwnode)
return NULL;
dev = class_find_device_by_fwnode(&role_class, fwnode);
- if (dev)
- WARN_ON(!try_module_get(dev->parent->driver->owner));
+ if (dev) {
+ sw = to_role_switch(dev);
+ WARN_ON(!try_module_get(sw->module));
+ }
- return dev ? to_role_switch(dev) : NULL;
+ return sw;
}
EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode);
@@ -338,6 +346,7 @@ usb_role_switch_register(struct device *parent,
sw->set = desc->set;
sw->get = desc->get;
+ sw->module = parent->driver->owner;
sw->dev.parent = parent;
sw->dev.fwnode = desc->fwnode;
sw->dev.class = &role_class;
@@ -352,6 +361,8 @@ usb_role_switch_register(struct device *parent,
return ERR_PTR(ret);
}
+ sw->registered = true;
+
/* TODO: Symlinks for the host port and the device controller. */
return sw;
@@ -366,8 +377,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
*/
void usb_role_switch_unregister(struct usb_role_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
+ sw->registered = false;
device_unregister(&sw->dev);
+ }
}
EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
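
Two problems are addressed at once: the get/put paths used to dereference sw->dev.parent->driver->owner, which goes away when the parent driver unbinds even though consumers may still hold references, and nothing stopped a consumer from calling into sw->set() after unregistration. Restated as a rough lifecycle sketch (consolidating the hunks above):

	/* provider, at usb_role_switch_register() time */
	sw->module = parent->driver->owner;	/* cache the owner module */
	sw->registered = true;			/* only after device_register() succeeds */

	/* consumer */
	WARN_ON(!try_module_get(sw->module));	/* pins the provider module */
	ret = usb_role_switch_set_role(sw, role); /* -EOPNOTSUPP once unregistered */
	module_put(sw->module);			/* via usb_role_switch_put() */

	/* provider, at usb_role_switch_unregister() time */
	sw->registered = false;			/* cleared before device_unregister() */

Caching the module pointer at registration means it stays valid for the lifetime of the consumer's reference, independent of the parent device's driver binding.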
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 4e0eef1440b7..300aeef160e7 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1105,7 +1105,7 @@ static void isd200_dump_driveid(struct us_data *us, u16 *id)
static int isd200_get_inquiry_data( struct us_data *us )
{
struct isd200_info *info = (struct isd200_info *)us->extra;
- int retStatus = ISD200_GOOD;
+ int retStatus;
u16 *id = info->id;
usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n");
@@ -1137,6 +1137,13 @@ static int isd200_get_inquiry_data( struct us_data *us )
isd200_fix_driveid(id);
isd200_dump_driveid(us, id);
+ /* Prevent division by 0 in isd200_scsi_to_ata() */
+ if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) {
+ usb_stor_dbg(us, " Invalid ATA Identify data\n");
+ retStatus = ISD200_ERROR;
+ goto Done;
+ }
+
memset(&info->InquiryData, 0, sizeof(info->InquiryData));
/* Standard IDE interface only supports disks */
@@ -1202,6 +1209,7 @@ static int isd200_get_inquiry_data( struct us_data *us )
}
}
+ Done:
usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus);
return(retStatus);
@@ -1481,22 +1489,27 @@ static int isd200_init_info(struct us_data *us)
static int isd200_Initialization(struct us_data *us)
{
+ int rc = 0;
+
usb_stor_dbg(us, "ISD200 Initialization...\n");
/* Initialize ISD200 info struct */
- if (isd200_init_info(us) == ISD200_ERROR) {
+ if (isd200_init_info(us) < 0) {
usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n");
+ rc = -ENOMEM;
} else {
/* Get device specific data */
- if (isd200_get_inquiry_data(us) != ISD200_GOOD)
+ if (isd200_get_inquiry_data(us) != ISD200_GOOD) {
usb_stor_dbg(us, "ISD200 Initialization Failure\n");
- else
+ rc = -EINVAL;
+ } else {
usb_stor_dbg(us, "ISD200 Initialization complete\n");
+ }
}
- return 0;
+ return rc;
}
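
The new geometry check exists because isd200_scsi_to_ata() converts LBAs to CHS with divisions and modulo operations on id[ATA_ID_HEADS] and id[ATA_ID_SECTORS]; zeros in a device's IDENTIFY data would oops the kernel. A hedged sketch of the arithmetic being protected (isd200_scsi_to_ata()-style, not the exact driver code):

	u16 heads   = id[ATA_ID_HEADS];		/* 0 on a misbehaving device */
	u16 sectors = id[ATA_ID_SECTORS];

	/* divides by zero unless the IDENTIFY data was validated first */
	u32 cylinder = lba / (heads * sectors);
	u32 head     = (lba / sectors) % heads;
	u32 sector   = (lba % sectors) + 1;

The second hunk makes isd200_Initialization() propagate failures (-ENOMEM, -EINVAL) instead of always returning 0, so usb-storage rejects a device whose setup failed rather than binding it half-initialized.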
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index c54e9805da53..12cf9940e5b6 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -180,6 +180,13 @@ static int slave_configure(struct scsi_device *sdev)
sdev->use_192_bytes_for_3f = 1;
/*
+ * Some devices report generic values until the media has been
+ * accessed. Force a READ(10) prior to querying device
+ * characteristics.
+ */
+ sdev->read_before_ms = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 9707f53cfda9..71ace274761f 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -879,6 +879,13 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->guess_capacity = 1;
/*
+ * Some devices report generic values until the media has been
+ * accessed. Force a READ(10) prior to querying device
+ * characteristics.
+ */
+ sdev->read_before_ms = 1;
+
+ /*
* Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index f81bec0c7b86..f8ea3054be54 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -559,16 +559,21 @@ static ssize_t hpd_show(struct device *dev, struct device_attribute *attr, char
}
static DEVICE_ATTR_RO(hpd);
-static struct attribute *dp_altmode_attrs[] = {
+static struct attribute *displayport_attrs[] = {
&dev_attr_configuration.attr,
&dev_attr_pin_assignment.attr,
&dev_attr_hpd.attr,
NULL
};
-static const struct attribute_group dp_altmode_group = {
+static const struct attribute_group displayport_group = {
.name = "displayport",
- .attrs = dp_altmode_attrs,
+ .attrs = displayport_attrs,
+};
+
+static const struct attribute_group *displayport_groups[] = {
+ &displayport_group,
+ NULL,
};
int dp_altmode_probe(struct typec_altmode *alt)
@@ -576,7 +581,6 @@ int dp_altmode_probe(struct typec_altmode *alt)
const struct typec_altmode *port = typec_altmode_get_partner(alt);
struct fwnode_handle *fwnode;
struct dp_altmode *dp;
- int ret;
/* FIXME: Port can only be DFP_U. */
@@ -587,10 +591,6 @@ int dp_altmode_probe(struct typec_altmode *alt)
DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
return -ENODEV;
- ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
- if (ret)
- return ret;
-
dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
@@ -624,7 +624,6 @@ void dp_altmode_remove(struct typec_altmode *alt)
{
struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
- sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group);
cancel_work_sync(&dp->work);
if (dp->connector_fwnode) {
@@ -649,6 +648,7 @@ static struct typec_altmode_driver dp_altmode_driver = {
.driver = {
.name = "typec_displayport",
.owner = THIS_MODULE,
+ .dev_groups = displayport_groups,
},
};
module_typec_altmode_driver(dp_altmode_driver);
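
Switching from sysfs_create_group() in probe() to the driver's dev_groups pointer hands attribute lifetime to the driver core, which creates the group before the device is announced to userspace and removes it after remove(), closing the window where tools could see the device without its attributes. The general pattern, sketched with hypothetical demo_* names on a platform driver:

	#include <linux/device.h>
	#include <linux/platform_device.h>
	#include <linux/sysfs.h>

	static ssize_t demo_status_show(struct device *dev,
					struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "ok\n");
	}
	static DEVICE_ATTR_RO(demo_status);

	static struct attribute *demo_attrs[] = {
		&dev_attr_demo_status.attr,
		NULL
	};

	static const struct attribute_group demo_group = {
		.name	= "demo",
		.attrs	= demo_attrs,
	};

	static const struct attribute_group *demo_groups[] = {
		&demo_group,
		NULL,
	};

	static struct platform_driver demo_driver = {
		.driver = {
			.name		= "demo",
			.dev_groups	= demo_groups,	/* driver core adds/removes these */
		},
	};

With dev_groups set, probe() and remove() no longer touch sysfs at all, as the deleted lines above show.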
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index f7d7daa60c8d..096597231027 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -3743,9 +3743,6 @@ static void tcpm_detach(struct tcpm_port *port)
if (tcpm_port_is_disconnected(port))
port->hard_reset_count = 0;
- port->try_src_count = 0;
- port->try_snk_count = 0;
-
if (!port->attached)
return;
@@ -4876,7 +4873,11 @@ static void run_state_machine(struct tcpm_port *port)
break;
case PORT_RESET:
tcpm_reset_port(port);
- tcpm_set_cc(port, TYPEC_CC_OPEN);
+ if (port->self_powered)
+ tcpm_set_cc(port, TYPEC_CC_OPEN);
+ else
+ tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
+ TYPEC_CC_RD : tcpm_rp_cc(port));
tcpm_set_state(port, PORT_RESET_WAIT_OFF,
PD_T_ERROR_RECOVERY);
break;
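
Driving both CC lines open during a port reset is only safe when the port powers itself; a bus-powered sink that removes its Rd termination loses Vbus and can brown out before error recovery finishes. The new selection, restated as a hedged helper (hypothetical name, same fields as the hunk):

	static enum typec_cc_status tcpm_port_reset_cc(struct tcpm_port *port)
	{
		if (port->self_powered)
			return TYPEC_CC_OPEN;

		/* keep a termination present so the partner keeps Vbus up */
		return tcpm_default_state(port) == SNK_UNATTACHED ?
				TYPEC_CC_RD : tcpm_rp_cc(port);
	}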
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 53a7ede8556d..faccc942b381 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -301,6 +301,7 @@ static const struct of_device_id pmic_glink_ucsi_of_quirks[] = {
{ .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sm8350-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+ { .compatible = "qcom,sm8550-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{}
};
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f2ed7167c848..4b2fcb228a0a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -141,10 +141,8 @@ struct vhost_net {
unsigned tx_zcopy_err;
/* Flush in progress. Protected by tx vq lock. */
bool tx_flush;
- /* Private page frag */
- struct page_frag page_frag;
- /* Refcount bias of page frag */
- int refcnt_bias;
+ /* Private page frag cache */
+ struct page_frag_cache pf_cache;
};
static unsigned vhost_net_zcopy_mask __read_mostly;
@@ -655,41 +653,6 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
!vhost_vq_avail_empty(vq->dev, vq);
}
-static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
- struct page_frag *pfrag, gfp_t gfp)
-{
- if (pfrag->page) {
- if (pfrag->offset + sz <= pfrag->size)
- return true;
- __page_frag_cache_drain(pfrag->page, net->refcnt_bias);
- }
-
- pfrag->offset = 0;
- net->refcnt_bias = 0;
- if (SKB_FRAG_PAGE_ORDER) {
- /* Avoid direct reclaim but allow kswapd to wake */
- pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
- __GFP_COMP | __GFP_NOWARN |
- __GFP_NORETRY,
- SKB_FRAG_PAGE_ORDER);
- if (likely(pfrag->page)) {
- pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
- goto done;
- }
- }
- pfrag->page = alloc_page(gfp);
- if (likely(pfrag->page)) {
- pfrag->size = PAGE_SIZE;
- goto done;
- }
- return false;
-
-done:
- net->refcnt_bias = USHRT_MAX;
- page_ref_add(pfrag->page, USHRT_MAX - 1);
- return true;
-}
-
#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
@@ -699,7 +662,6 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
struct vhost_net *net = container_of(vq->dev, struct vhost_net,
dev);
struct socket *sock = vhost_vq_get_backend(vq);
- struct page_frag *alloc_frag = &net->page_frag;
struct virtio_net_hdr *gso;
struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
struct tun_xdp_hdr *hdr;
@@ -710,6 +672,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
int sock_hlen = nvq->sock_hlen;
void *buf;
int copied;
+ int ret;
if (unlikely(len < nvq->sock_hlen))
return -EFAULT;
@@ -719,18 +682,17 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
return -ENOSPC;
buflen += SKB_DATA_ALIGN(len + pad);
- alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
- if (unlikely(!vhost_net_page_frag_refill(net, buflen,
- alloc_frag, GFP_KERNEL)))
+ buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
+ SMP_CACHE_BYTES);
+ if (unlikely(!buf))
return -ENOMEM;
- buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
- copied = copy_page_from_iter(alloc_frag->page,
- alloc_frag->offset +
- offsetof(struct tun_xdp_hdr, gso),
- sock_hlen, from);
- if (copied != sock_hlen)
- return -EFAULT;
+ copied = copy_from_iter(buf + offsetof(struct tun_xdp_hdr, gso),
+ sock_hlen, from);
+ if (copied != sock_hlen) {
+ ret = -EFAULT;
+ goto err;
+ }
hdr = buf;
gso = &hdr->gso;
@@ -743,27 +705,30 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
vhost16_to_cpu(vq, gso->csum_start) +
vhost16_to_cpu(vq, gso->csum_offset) + 2);
- if (vhost16_to_cpu(vq, gso->hdr_len) > len)
- return -EINVAL;
+ if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
+ ret = -EINVAL;
+ goto err;
+ }
}
len -= sock_hlen;
- copied = copy_page_from_iter(alloc_frag->page,
- alloc_frag->offset + pad,
- len, from);
- if (copied != len)
- return -EFAULT;
+ copied = copy_from_iter(buf + pad, len, from);
+ if (copied != len) {
+ ret = -EFAULT;
+ goto err;
+ }
xdp_init_buff(xdp, buflen, NULL);
xdp_prepare_buff(xdp, buf, pad, len, true);
hdr->buflen = buflen;
- --net->refcnt_bias;
- alloc_frag->offset += buflen;
-
++nvq->batched_xdp;
return 0;
+
+err:
+ page_frag_free(buf);
+ return ret;
}
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
@@ -1353,8 +1318,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
vqs[VHOST_NET_VQ_RX]);
f->private_data = n;
- n->page_frag.page = NULL;
- n->refcnt_bias = 0;
+ n->pf_cache.va = NULL;
return 0;
}
@@ -1422,8 +1386,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
kfree(n->dev.vqs);
- if (n->page_frag.page)
- __page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
+ page_frag_cache_drain(&n->pf_cache);
kvfree(n);
return 0;
}
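
The removed vhost_net_page_frag_refill() duplicated what the page_frag_cache allocator already does, including the USHRT_MAX refcount-bias trick. Its replacement API, as used above (page_frag_cache_drain() is introduced by this same series):

	struct page_frag_cache cache = { .va = NULL };	/* filled lazily */

	void *buf = page_frag_alloc_align(&cache, buflen, GFP_KERNEL,
					  SMP_CACHE_BYTES);
	if (!buf)
		return -ENOMEM;

	/* error path: hand the fragment back to the cache's page */
	page_frag_free(buf);

	/* teardown: release whatever page the cache still holds */
	page_frag_cache_drain(&cache);

One behavioral consequence shows in the new err: label: page_frag_alloc_align() returns an owned fragment, so failed copies must free it explicitly, whereas the old code simply left the fragment offset unadvanced.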
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 1183e7a871f8..46823c2e2ba1 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2399,11 +2399,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
struct fbcon_ops *ops = info->fbcon_par;
struct fbcon_display *p = &fb_display[vc->vc_num];
int resize, ret, old_userfont, old_width, old_height, old_charcount;
- char *old_data = NULL;
+ u8 *old_data = vc->vc_font.data;
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
- if (p->userfont)
- old_data = vc->vc_font.data;
vc->vc_font.data = (void *)(p->fontdata = data);
old_userfont = p->userfont;
if ((p->userfont = userfont))
@@ -2437,13 +2435,13 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h, int charcount,
update_screen(vc);
}
- if (old_data && (--REFCOUNT(old_data) == 0))
+ if (old_userfont && (--REFCOUNT(old_data) == 0))
kfree(old_data - FONT_EXTRA_WORDS * sizeof(int));
return 0;
err_out:
p->fontdata = old_data;
- vc->vc_font.data = (void *)old_data;
+ vc->vc_font.data = old_data;
if (userfont) {
p->userfont = old_userfont;
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index c26ee6fd73c9..8fdccf033b2d 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -1010,8 +1010,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
goto getmem_done;
}
pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
- } else {
- goto err1;
}
/*
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 7d22051b15a2..d78fe7137799 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -512,7 +512,6 @@ config S3C2410_WATCHDOG
tristate "S3C6410/S5Pv210/Exynos Watchdog"
depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
select WATCHDOG_CORE
- select MFD_SYSCON if ARCH_EXYNOS
help
Watchdog timer block in the Samsung S3C64xx, S5Pv210 and Exynos
SoCs. This will reboot the system when the timer expires with
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 349d30462c8c..686cf544d0ae 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -24,9 +24,9 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/delay.h>
+#include <linux/soc/samsung/exynos-pmu.h>
#define S3C2410_WTCON 0x00
#define S3C2410_WTDAT 0x04
@@ -699,11 +699,11 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
return ret;
if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) {
- wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
- "samsung,syscon-phandle");
+ wdt->pmureg = exynos_get_pmu_regmap_by_phandle(dev->of_node,
+ "samsung,syscon-phandle");
if (IS_ERR(wdt->pmureg))
return dev_err_probe(dev, PTR_ERR(wdt->pmureg),
- "syscon regmap lookup failed.\n");
+ "PMU regmap lookup failed.\n");
}
wdt_irq = platform_get_irq(pdev, 0);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b8cfea7812d6..2faa4bf78c7a 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -923,8 +923,8 @@ static void shutdown_pirq(struct irq_data *data)
return;
do_mask(info, EVT_MASK_REASON_EXPLICIT);
- xen_evtchn_close(evtchn);
xen_irq_info_cleanup(info);
+ xen_evtchn_close(evtchn);
}
static void enable_pirq(struct irq_data *data)
@@ -956,6 +956,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
{
evtchn_port_t evtchn;
+ bool close_evtchn = false;
if (!info) {
xen_irq_free_desc(irq);
@@ -975,7 +976,7 @@ static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
struct xenbus_device *dev;
if (!info->is_static)
- xen_evtchn_close(evtchn);
+ close_evtchn = true;
switch (info->type) {
case IRQT_VIRQ:
@@ -995,6 +996,9 @@ static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
}
xen_irq_info_cleanup(info);
+
+ if (close_evtchn)
+ xen_evtchn_close(evtchn);
}
xen_free_irq(info);
@@ -2216,7 +2220,7 @@ static __init void xen_alloc_callback_vector(void)
return;
pr_info("Xen HVM callback vector for event delivery is enabled\n");
- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback);
+ sysvec_install(HYPERVISOR_CALLBACK_VECTOR, sysvec_xen_hvm_callback);
}
#else
void xen_setup_callback_vector(void) {}
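
Both events_base.c hunks move xen_evtchn_close() after the bookkeeping cleanup. The reasoning, as I read the change: once the port is closed, Xen may hand the same event channel to a concurrent allocation, so the old evtchn-to-irq mapping must be gone before the port becomes reusable. Sketch of the ordering rule:

	/* teardown order: invalidate lookups first, then release the port */
	do_mask(info, EVT_MASK_REASON_EXPLICIT);
	xen_irq_info_cleanup(info);	/* evtchn -> irq lookups now fail */
	xen_evtchn_close(evtchn);	/* port may be reallocated immediately */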
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 26ffb8755ffb..f93f73ecefee 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -317,7 +317,7 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
rc = -EFAULT;
goto out_free;
}
- if (copy_to_user(arg->gref_ids, gref_ids,
+ if (copy_to_user(arg->gref_ids_flex, gref_ids,
sizeof(gref_ids[0]) * op.count)) {
rc = -EFAULT;
goto out_free;
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 508655273145..c63f317e3df3 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -65,7 +65,7 @@ struct pcpu {
uint32_t flags;
};
-static struct bus_type xen_pcpu_subsys = {
+static const struct bus_type xen_pcpu_subsys = {
.name = "xen_cpu",
.dev_name = "xen_cpu",
};
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 35b6e306026a..67dfa4778864 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1223,18 +1223,13 @@ struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0]));
mmap_write_unlock(mm);
- size = sizeof(*ports) * kioreq->vcpus;
- ports = kzalloc(size, GFP_KERNEL);
- if (!ports) {
- ret = -ENOMEM;
+ ports = memdup_array_user(u64_to_user_ptr(ioeventfd->ports),
+ kioreq->vcpus, sizeof(*ports));
+ if (IS_ERR(ports)) {
+ ret = PTR_ERR(ports);
goto error_kfree;
}
- if (copy_from_user(ports, u64_to_user_ptr(ioeventfd->ports), size)) {
- ret = -EFAULT;
- goto error_kfree_ports;
- }
-
for (i = 0; i < kioreq->vcpus; i++) {
kioreq->ports[i].vcpu = i;
kioreq->ports[i].port = ports[i];
@@ -1256,7 +1251,7 @@ struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
error_unbind:
while (--i >= 0)
unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]);
-error_kfree_ports:
+
kfree(ports);
error_kfree:
kfree(kioreq);
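
memdup_array_user() bundles the overflow-checked size calculation, allocation, and copy_from_user() into one call that returns an ERR_PTR() on failure, which is why the separate -EFAULT path and its label disappear. The idiom:

	u64 *ports;

	ports = memdup_array_user(u64_to_user_ptr(uptr), nr, sizeof(*ports));
	if (IS_ERR(ports))
		return PTR_ERR(ports);

	/* ... use ports[0..nr-1] ... */

	kfree(ports);

Unlike the open-coded version, the element count is multiplied with overflow checking, so a huge user-supplied count fails with -EOVERFLOW instead of producing a truncated allocation.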
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 8cd583db20b1..b293d7652f15 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -237,7 +237,7 @@ static const struct attribute_group *balloon_groups[] = {
NULL
};
-static struct bus_type balloon_subsys = {
+static const struct bus_type balloon_subsys = {
.name = BALLOON_CLASS_NAME,
.dev_name = BALLOON_CLASS_NAME,
};
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 32835b4b9bc5..51b3124b0d56 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -116,14 +116,15 @@ EXPORT_SYMBOL_GPL(xenbus_strstate);
* @dev: xenbus device
* @path: path to watch
* @watch: watch to register
+ * @will_handle: callback that decides whether an event should be queued
* @callback: callback to register
*
* Register a @watch on the given path, using the given xenbus_watch structure
- * for storage, and the given @callback function as the callback. On success,
- * the given @path will be saved as @watch->node, and remains the
- * caller's to free. On error, @watch->node will
- * be NULL, the device will switch to %XenbusStateClosing, and the error will
- * be saved in the store.
+ * for storage, the given @will_handle function as the callback that decides
+ * whether each event needs to be queued, and the given @callback function as
+ * the callback.
+ * On success, the given @path will be saved as @watch->node, and remains the
+ * caller's to free. On error, @watch->node will be NULL, the device will
+ * switch to %XenbusStateClosing, and the error will be saved in the store.
*
* Returns: %0 on success or -errno on error
*/
@@ -158,11 +159,13 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path);
* xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
* @dev: xenbus device
* @watch: watch to register
+ * @will_handle: callback that decides whether an event should be queued
* @callback: callback to register
* @pathfmt: format of path to watch
*
* Register a watch on the given @path, using the given xenbus_watch
- * structure for storage, and the given @callback function as the
+ * structure for storage, the given @will_handle function as the callback that
+ * decides whether each event needs to be queued, and the given @callback
+ * function as the
* callback. On success, the watched path (@path/@path2) will be saved
* as @watch->node, and becomes the caller's to kfree().
* On error, watch->node will be NULL, so the caller has nothing to
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index 025edfccedcf..f49d19977e82 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -150,7 +150,7 @@ static int zorro_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-struct bus_type zorro_bus_type = {
+const struct bus_type zorro_bus_type = {
.name = "zorro",
.dev_name = "zorro",
.dev_groups = zorro_device_attribute_groups,
diff --git a/drivers/zorro/zorro.h b/drivers/zorro/zorro.h
index f84df9fb4c20..df44e35203fd 100644
--- a/drivers/zorro/zorro.h
+++ b/drivers/zorro/zorro.h
@@ -4,7 +4,7 @@
* Zorro bus
*/
-extern struct bus_type zorro_bus_type;
+extern const struct bus_type zorro_bus_type;
#ifdef CONFIG_ZORRO_NAMES